TikTok Influencer API

TikTok Creator Vetting: API vs Manual

Manual TikTok creator vetting: 45 min/creator. API vetting: 2 min/creator with quantified engagement, bot rate, and comment quality.

8 min

Manual TikTok creator vetting takes roughly 45 minutes per creator: scroll their profile, eyeball engagement, check comment quality, look for brand safety red flags. API vetting takes under 2 minutes per creator with quantified metrics instead of gut feelings. For agencies vetting 50+ creators per campaign, the API approach turns a week of work into an afternoon.

Manual vetting: what it actually takes

The manual process for a single creator: open their TikTok profile (2 min), scroll through recent 20 posts noting views and likes (10 min), read comments on 5 posts checking for bots and spam (10 min), check follower count growth by comparing to cached data or third-party estimates (5 min), review bio and links for brand safety (3 min), cross-reference on other platforms (5 min), write up assessment notes (10 min). Total: about 45 minutes of focused work per creator, and the assessment is subjective.

API vetting: the pipeline

Python
import requests, os
from datetime import datetime, timedelta

API_KEY = os.environ["SCAVIO_API_KEY"]
BASE = "https://api.scavio.dev/api/v1/tiktok"
HEADERS = {"Authorization": f"Bearer {API_KEY}",
           "Content-Type": "application/json"}

def tiktok_api(endpoint, payload):
    """POST *payload* to a Scavio TikTok endpoint and return its "data" field.

    Args:
        endpoint: Path segment under the TikTok API base (e.g. "profile").
        payload: JSON-serializable request body.

    Returns:
        The parsed ``data`` object from the JSON response.

    Raises:
        requests.HTTPError: on non-2xx responses (previously this failed
            with an opaque KeyError when the error body had no "data" key).
        requests.Timeout: if the API does not respond within 30 seconds
            (previously the call could hang indefinitely — requests has
            no default timeout).
    """
    resp = requests.post(f"{BASE}/{endpoint}",
                         headers=HEADERS, json=payload,
                         timeout=30)
    resp.raise_for_status()
    return resp.json()["data"]

Step 1: Profile lookup

Python
def get_creator_profile(username):
    """Fetch a creator's TikTok profile and flatten it for vetting.

    Returns a dict of the fields the vetting pipeline cares about:
    identifiers, audience counts, and brand-safety-relevant bio info.
    """
    user = tiktok_api("profile", {"username": username})["user"]

    profile = {
        "username": username,
        "sec_uid": user["sec_uid"],
        "followers": user["follower_count"],
        "following": user["following_count"],
        "videos": user["aweme_count"],
        "total_likes": user["total_favorited"],
    }
    # Optional fields: fall back to sensible defaults when absent.
    profile["verified"] = user.get("verification_type", 0) > 0
    profile["bio"] = user.get("signature", "")
    profile["bio_link"] = bool(user.get("bio_url", ""))
    return profile
# 1 credit

Step 2: Engagement calculation from recent posts

Python
def analyze_engagement(sec_uid, count=20):
    """Calculate real engagement metrics from a creator's recent posts.

    Args:
        sec_uid: TikTok secure user id (``sec_uid`` from the profile call).
        count: How many recent posts to request.

    Returns:
        Dict with avg_views, avg_engagement_rate (percent), view_consistency
        (min/max view ratio, 0-1), posts_per_week, and post_count_analyzed.
        All metrics are zero when the creator has no retrievable posts.
    """
    data = tiktok_api("user/posts",
        {"sec_user_id": sec_uid, "count": count})
    posts = data["aweme_list"]

    # Guard: a private account or empty feed would otherwise crash the
    # averages below with ZeroDivisionError (and min()/max() on empties).
    if not posts:
        return {
            "avg_views": 0,
            "avg_engagement_rate": 0.0,
            "view_consistency": 0.0,
            "posts_per_week": 0,
            "post_count_analyzed": 0,
        }

    engagements = []
    post_dates = []
    for p in posts:
        stats = p["statistics"]
        total_engagement = (stats["digg_count"] + stats["comment_count"]
                           + stats["share_count"])
        # Floor views at 1 so the per-post rate never divides by zero.
        views = max(stats["play_count"], 1)
        engagements.append({
            "views": views,
            "likes": stats["digg_count"],
            "comments": stats["comment_count"],
            "shares": stats["share_count"],
            "engagement_rate": total_engagement / views,
        })
        post_dates.append(p["create_time"])

    avg_views = sum(e["views"] for e in engagements) / len(engagements)
    avg_engagement = (sum(e["engagement_rate"] for e in engagements)
                      / len(engagements))
    # Ratio of worst-performing to best-performing post: low values mean
    # the creator only gets views when a post goes viral.
    view_consistency = min(e["views"] for e in engagements) / max(
        max(e["views"] for e in engagements), 1)

    # Post frequency: days between oldest and newest post
    if len(post_dates) > 1:
        time_span = max(post_dates) - min(post_dates)
        days = max(time_span / 86400, 1)  # create_time is epoch seconds
        posts_per_week = (len(post_dates) / days) * 7
    else:
        posts_per_week = 0

    return {
        "avg_views": int(avg_views),
        "avg_engagement_rate": round(avg_engagement * 100, 2),
        "view_consistency": round(view_consistency, 2),
        "posts_per_week": round(posts_per_week, 1),
        "post_count_analyzed": len(posts),
    }
# 1 credit

Step 3: Follower quality check

Python
def check_follower_quality(sec_uid, sample_size=50):
    """Sample a creator's followers and estimate what share look like bots.

    Counts accounts accumulating 3+ red flags (no avatar, no posts,
    extreme following/follower ratio, empty bio) and buckets the
    resulting bot rate into good / warning / poor.
    """
    result = tiktok_api("user/followers",
        {"sec_user_id": sec_uid, "count": sample_size})
    sample = result.get("followers", [])

    def _looks_suspicious(acct):
        # Each heuristic contributes one red flag; 3+ marks the account.
        red_flags = 0
        if not acct.get("avatar_thumb", {}).get("url_list"):
            red_flags += 1  # no profile picture
        if acct.get("aweme_count", 0) == 0:
            red_flags += 1  # never posted a video
        follows = acct.get("following_count", 0)
        followed_by = max(acct.get("follower_count", 0), 1)
        if follows > followed_by * 10 and follows > 500:
            red_flags += 1  # follow-everyone bot pattern
        if not acct.get("signature", "").strip():
            red_flags += 1  # generic or empty bio
        return red_flags >= 3

    suspicious = sum(1 for acct in sample if _looks_suspicious(acct))
    bot_rate = suspicious / max(len(sample), 1)

    if bot_rate < 0.15:
        quality = "good"
    elif bot_rate < 0.30:
        quality = "warning"
    else:
        quality = "poor"

    return {
        "sample_size": len(sample),
        "suspicious_count": suspicious,
        "estimated_bot_rate": round(bot_rate * 100, 1),
        "quality": quality,
    }
# 1 credit

Step 4: Comment sentiment and quality

Python
def analyze_comments(sec_uid, video_count=3, comments_per=20):
    """Gauge comment quality on a creator's most recent videos.

    Pulls up to *comments_per* comments from each of the latest
    *video_count* videos and measures how many are short/generic
    (a common bot signal), bucketing the result good / warning / poor.
    """
    feed = tiktok_api("user/posts",
        {"sec_user_id": sec_uid, "count": video_count})

    collected = []
    for clip in feed["aweme_list"][:video_count]:
        reply_data = tiktok_api("video/comments",
            {"video_id": clip["aweme_id"], "count": comments_per})
        for entry in reply_data.get("comments", []):
            body = entry.get("text", "")
            collected.append({
                "text": body,
                "likes": entry.get("digg_count", 0),
                "length": len(body),
            })

    if not collected:
        return {"quality": "no data", "avg_length": 0}

    mean_length = sum(item["length"] for item in collected) / len(collected)
    # Short generic comments ("nice", "wow", single emoji) = bot signals
    short_count = sum(1 for item in collected if item["length"] < 10)
    short_share = short_count / len(collected)

    if short_share < 0.40:
        verdict = "good"
    elif short_share < 0.60:
        verdict = "warning"
    else:
        verdict = "poor"

    return {
        "total_comments_analyzed": len(collected),
        "avg_comment_length": round(mean_length, 1),
        "generic_comment_rate": round(short_share * 100, 1),
        "quality": verdict,
    }
# 1 credit for posts + 3 credits for comments = 4 credits

Step 5: Full vetting report

Python
def vet_creator(username):
    """Complete creator vetting pipeline.

    Pulls profile, engagement, follower-quality, and comment-quality data,
    scores the creator out of 100 (25 engagement + 20 consistency +
    15 frequency + 20 followers + 20 comments), and attaches
    human-readable flags for each weak area.

    Returns the report dict; also prints a short summary to stdout.
    """
    print(f"Vetting @{username}...")

    profile = get_creator_profile(username)
    engagement = analyze_engagement(profile["sec_uid"])
    followers = check_follower_quality(profile["sec_uid"])
    comments = analyze_comments(profile["sec_uid"])

    # Scoring
    score = 0
    flags = []

    if engagement["avg_engagement_rate"] > 3.0:
        score += 25
    elif engagement["avg_engagement_rate"] > 1.5:
        score += 15
    else:
        flags.append("low engagement rate")

    if engagement["view_consistency"] > 0.3:
        score += 20
    else:
        flags.append("inconsistent views (possible viral-only creator)")

    if engagement["posts_per_week"] >= 2:
        score += 15
    else:
        flags.append("infrequent posting")

    if followers["quality"] == "good":
        score += 20
    elif followers["quality"] == "warning":
        score += 10
        flags.append(f"bot rate: {followers['estimated_bot_rate']}%")
    else:
        flags.append(f"high bot rate: {followers['estimated_bot_rate']}%")

    if comments["quality"] == "good":
        score += 20
    elif comments["quality"] == "warning":
        score += 10
        flags.append("generic comments suggest low-quality engagement")
    elif comments["quality"] == "poor":
        # Fix: "poor" previously earned no points AND no flag, so the worst
        # comment sections looked identical to "no data" in the report.
        flags.append("mostly generic comments (very low-quality engagement)")
    # comments["quality"] == "no data": no points, no flag — nothing to judge.

    verdict = ("RECOMMENDED" if score >= 70 else
               "PROCEED WITH CAUTION" if score >= 45 else
               "NOT RECOMMENDED")

    report = {
        "username": username,
        "followers": profile["followers"],
        "engagement_rate": engagement["avg_engagement_rate"],
        "view_consistency": engagement["view_consistency"],
        "posts_per_week": engagement["posts_per_week"],
        "bot_rate": followers["estimated_bot_rate"],
        "comment_quality": comments["quality"],
        "score": score,
        "flags": flags,
        "verdict": verdict,
    }

    print(f"  Score: {score}/100 [{verdict}]")
    print(f"  Engagement: {engagement['avg_engagement_rate']}%")
    print(f"  Bot rate: {followers['estimated_bot_rate']}%")
    if flags:
        print(f"  Flags: {', '.join(flags)}")

    return report

# Full vet: 1 + 1 + 1 + 4 = 7 credits = $0.035 per creator
# Manual: 45 minutes of staff time
# Demo: run the full pipeline once and capture the structured report.
report = vet_creator("example_creator")

Cost comparison at campaign scale

  • Manual vetting (50 creators): 50 x 45 min = 37.5 hours of staff time. At $30/hour = $1,125.
  • API vetting (50 creators): 50 x 7 credits = 350 credits = $1.75 on Scavio. Plus 30 min to review reports.
  • Time saved: 36+ hours per campaign cycle.
  • Quality difference: API gives quantified metrics (3.2% engagement, 12% bot rate) vs gut feeling ("seems good").

The API does not replace human judgment -- you still decide which creators fit your brand. It replaces the hours of manual data gathering that precede that judgment. Seven API credits per creator gives you a quantified vetting report that would take 45 minutes to assemble by hand, and the numbers do not lie the way eyeballed estimates do.