Systems Library / Marketing Automation / How to Build an AI Content Quality Scorer
Marketing Automation content marketing

How to Build an AI Content Quality Scorer

Automatically score content quality against your standards before publishing.

Jay Banlasan

Jay Banlasan

The AI Systems Guy

Publishing weak content is worse than publishing nothing. Every low-quality article dilutes your domain authority and trains your audience to stop reading. This AI content quality scorer is an automated grading system that runs every article through a multi-dimension quality check before it touches your CMS. It scores readability, depth, structure, SEO signals, and originality, then flags exactly what the writer needs to fix.

I run this as a gate before approval. The writer gets the score and the specific notes. They fix what the system flags. The editor reviews the second draft with the context that it already passed the automated check. Review time drops by half.

What You Need Before Starting

Step 1: Define Your Quality Rubric

The rubric is what makes this system yours rather than generic AI feedback. Encode your actual standards:

# quality_rubric.py

# Editorial quality rubric consumed by score_content().
# Dimension weights sum to 1.0, so sum(raw_score * weight) yields a
# 0-100 overall score comparable to minimum_passing_score.
QUALITY_RUBRIC = {
    "dimensions": {
        # How the article is organized and flows.
        "structure": {
            "weight": 0.20,
            "description": "Logical flow, clear hierarchy, proper use of H2/H3, intro-body-conclusion structure.",
            "criteria": [
                "Opening paragraph delivers on the headline promise within 50 words",
                "Each section has a clear single focus",
                "Transitions between sections are smooth",
                "Conclusion summarizes value and provides a next step"
            ]
        },
        # Thoroughness of coverage — the heaviest-weighted dimension.
        "depth": {
            "weight": 0.25,
            "description": "How thoroughly the topic is covered. No surface-level coverage. Specific over general.",
            "criteria": [
                "Claims are supported with evidence, data, or examples",
                "Complex ideas are explained with concrete analogies or examples",
                "The article covers the topic at least as thoroughly as the top-ranking competitor",
                "Practical takeaways are included, not just information"
            ]
        },
        # Sentence/paragraph-level accessibility.
        "readability": {
            "weight": 0.20,
            "description": "Grade 6-8 reading level. Short paragraphs. Active voice. No jargon without explanation.",
            "criteria": [
                "Average sentence length under 20 words",
                "Paragraphs 3-4 sentences maximum",
                "Active voice dominates",
                "Technical terms are explained on first use"
            ]
        },
        # Differentiation from competitor content and generic AI output.
        "originality": {
            "weight": 0.15,
            "description": "Unique perspective, original examples, specific insights beyond the obvious.",
            "criteria": [
                "Contains at least one insight not found in the top 3 competitor articles",
                "Examples are specific, not generic",
                "Author perspective is present",
                "Avoids cliche phrases and AI-pattern writing"
            ]
        },
        # Search-intent and keyword hygiene.
        "seo_signals": {
            "weight": 0.10,
            "description": "Natural keyword usage, metadata readiness, internal link opportunities.",
            "criteria": [
                "Primary keyword appears in title, first paragraph, and at least one H2",
                "Keywords are used naturally, not forced",
                "Article answers the likely search intent completely"
            ]
        },
        # Reader direction at the end of the piece.
        "cta_and_next_steps": {
            "weight": 0.10,
            "description": "Clear call to action or next step for the reader.",
            "criteria": [
                "Article ends with a clear next step",
                "CTA matches the reader's likely stage of awareness",
                "CTA is not overly promotional"
            ]
        }
    },
    # Overall weighted score required for "pass": true.
    "minimum_passing_score": 70,
    # Any one of these causes an immediate fail regardless of the score.
    "auto_reject_triggers": [
        "article contains placeholder text like [INSERT] or TBD",
        "article is under 400 words for a target over 800",
        "article has no subheadings",
        "article contains AI-pattern phrases like 'In conclusion' or 'It is important to note'"
    ]
}

Step 2: Build the Scorer

import os
import json
import anthropic
from dotenv import load_dotenv
from quality_rubric import QUALITY_RUBRIC

load_dotenv()  # pull ANTHROPIC_API_KEY from a local .env file
# Module-level client, reused by every scoring call in this file.
client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))

def score_content(
    content: str,
    target_keyword: str = "",
    target_word_count: int = 1000
) -> dict:
    """Score an article against QUALITY_RUBRIC via the Claude API.

    Args:
        content: Full article text (markdown or plain text). Only the
            first 5000 characters are sent to keep the prompt bounded.
        target_keyword: Primary SEO keyword, if any.
        target_word_count: Word count the article was briefed for.

    Returns:
        Parsed score dict matching the JSON structure in the prompt.

    Raises:
        json.JSONDecodeError: If no parseable JSON can be recovered
            from the model response.
    """
    rubric = QUALITY_RUBRIC
    dimensions_str = json.dumps(rubric["dimensions"], indent=2)
    reject_triggers = "\n".join(f"- {t}" for t in rubric["auto_reject_triggers"])

    actual_word_count = len(content.split())

    prompt = f"""Score this article against the quality rubric. Return JSON only.

TARGET KEYWORD: {target_keyword or "Not specified"}
TARGET WORD COUNT: {target_word_count}
ACTUAL WORD COUNT: {actual_word_count}

QUALITY RUBRIC:
{dimensions_str}

AUTO-REJECT TRIGGERS:
{reject_triggers}

ARTICLE TO SCORE:
---
{content[:5000]}
---

Return this exact JSON structure:
{{
  "auto_reject": false,
  "auto_reject_reason": "",
  "overall_score": 0,
  "weighted_score": 0,
  "grade": "A/B/C/D/F",
  "pass": false,
  "dimensions": {{
    "structure": {{"raw_score": 0, "weighted_score": 0, "notes": "", "specific_issues": []}},
    "depth": {{"raw_score": 0, "weighted_score": 0, "notes": "", "specific_issues": []}},
    "readability": {{"raw_score": 0, "weighted_score": 0, "notes": "", "specific_issues": []}},
    "originality": {{"raw_score": 0, "weighted_score": 0, "notes": "", "specific_issues": []}},
    "seo_signals": {{"raw_score": 0, "weighted_score": 0, "notes": "", "specific_issues": []}},
    "cta_and_next_steps": {{"raw_score": 0, "weighted_score": 0, "notes": "", "specific_issues": []}}
  }},
  "top_strengths": [],
  "required_fixes": [],
  "word_count_status": "ok/short/over",
  "editor_summary": ""
}}

Scores are 0-100 for each dimension. Weighted scores = raw_score * weight.
Overall score = sum of weighted scores.
Pass = overall_score >= {rubric['minimum_passing_score']}."""

    message = client.messages.create(
        model="claude-opus-4-5",
        max_tokens=2000,
        messages=[{"role": "user", "content": prompt}]
    )

    raw = message.content[0].text.strip()
    # Strip a markdown code fence if the model wrapped its JSON in one.
    if raw.startswith("```"):
        raw = raw.split("```")[1]
        if raw.startswith("json"):
            raw = raw[4:]
    # Robustness fix: slice between the outermost braces so stray prose
    # before or after the JSON object does not break parsing.
    start = raw.find("{")
    end = raw.rfind("}")
    if start != -1 and end > start:
        raw = raw[start:end + 1]

    return json.loads(raw)

Step 3: Print the Score Report

def print_score_report(score: dict):
    """Pretty-print a score dict from score_content() to stdout.

    Shows the overall result, a per-dimension bar chart, strengths,
    required fixes, and the editor summary. Auto-rejected articles
    short-circuit after the rejection reason.

    Args:
        score: Parsed score dict; missing keys fall back to safe
            defaults instead of raising KeyError (the original mixed
            direct indexing with .get and could crash on partial
            model output).
    """
    status = "PASS" if score.get("pass") else "FAIL"
    grade = score.get("grade", "?")
    overall = score.get("weighted_score", 0)

    print(f"\n{'='*55}")
    print(f"CONTENT QUALITY SCORE")
    print(f"{'='*55}")
    print(f"Overall Score: {overall:.1f}/100  |  Grade: {grade}  |  {status}")
    print(f"{'='*55}")

    if score.get("auto_reject"):
        print(f"\nAUTO-REJECTED: {score.get('auto_reject_reason', '')}")
        return

    print(f"\nDIMENSION BREAKDOWN:")
    for dim, data in score.get("dimensions", {}).items():
        raw_score = data.get("raw_score", 0)
        # Clamp so a malformed score outside 0-100 cannot produce a
        # negative repeat count or an over-long bar.
        bar_length = int(max(0, min(100, raw_score)) / 10)
        bar = "=" * bar_length + "-" * (10 - bar_length)
        print(f"  {dim.upper():25} {raw_score:3}/100  [{bar}]")
        if data.get("specific_issues"):
            # Show at most two issues per dimension to keep the report scannable.
            for issue in data["specific_issues"][:2]:
                print(f"    - {issue}")

    print(f"\nSTRENGTHS:")
    for strength in score.get("top_strengths", []):
        print(f"  + {strength}")

    print(f"\nREQUIRED FIXES:")
    for fix in score.get("required_fixes", []):
        print(f"  ! {fix}")

    print(f"\nEDITOR SUMMARY: {score.get('editor_summary', '')}")

if __name__ == "__main__":
    # Score a single draft and persist the full report next to it.
    # encoding="utf-8" matches batch_score(); without it, reading a
    # non-ASCII draft crashes on platforms with a non-UTF-8 default codec.
    with open("article-to-review.md", "r", encoding="utf-8") as f:
        content = f.read()

    result = score_content(
        content=content,
        target_keyword="ai content brief generator",
        target_word_count=1500
    )

    print_score_report(result)

    # Save the raw score JSON so the writer can see every flagged issue.
    with open("article-to-review-score.json", "w", encoding="utf-8") as f:
        json.dump(result, f, indent=2)

Step 4: Batch Score Multiple Articles

import glob

def batch_score(content_dir: str, keyword_map: dict | None = None) -> list:
    """Score every .md file in a directory and return a ranked summary.

    Args:
        content_dir: Directory containing .md article drafts.
        keyword_map: Optional mapping of filename -> target keyword.
            Defaults to None (fixes the original mutable-default dict,
            which is shared across calls).

    Returns:
        One summary dict per file, sorted worst-score-first so the
        weakest articles surface at the top of the review queue.
    """
    keyword_map = keyword_map or {}
    files = glob.glob(f"{content_dir}/*.md")
    results = []

    for filepath in files:
        filename = os.path.basename(filepath)
        keyword = keyword_map.get(filename, "")

        with open(filepath, "r", encoding="utf-8") as f:
            content = f.read()

        score = score_content(content, target_keyword=keyword)
        results.append({
            "file": filename,
            "score": score["weighted_score"],
            "grade": score["grade"],
            "pass": score["pass"],
            "top_issue": score["required_fixes"][0] if score["required_fixes"] else "None"
        })

        # Fixed: the progress line printed a literal "(unknown)"
        # placeholder instead of the file being scored.
        print(f"{filename}: {score['grade']} ({score['weighted_score']:.0f}/100)")

    return sorted(results, key=lambda x: x["score"])

What to Build Next

Related Reading

Want this system built for your business?

Get a free assessment. We will map every system your business needs and show you the ROI.

Get Your Free Assessment

Related Systems