tiktok-brand-safety-audit
TikTok Brand Safety Audit via API
Run brand safety audits on TikTok creators for $0.01-0.05 per creator via API before signing influencer contracts.
8 min read
A TikTok brand safety audit via API checks what content appears alongside your brand mentions, flags creators with controversial histories, and monitors comment sentiment on branded content. Running this before signing an influencer contract costs $0.01-0.05 per creator versus finding out after a $10K campaign that your brand ambassador posts problematic content.
What brand safety means on TikTok
- Content adjacency: what topics does the creator also post about
- Comment quality: are comments toxic, spammy, or bot-driven
- Audience authenticity: real followers vs purchased engagement
- Historical risk: has the creator been involved in controversies
- Brand mention context: is your brand mentioned positively or negatively
Automated brand safety check
Python
import os, requests, json
TOKEN = os.environ["SCAVIO_API_KEY"]
AUTH = {"Authorization": f"Bearer {TOKEN}"}
BASE = "https://api.scavio.dev/api/v1/tiktok"
RISK_KEYWORDS = [
"scam", "fake", "controversy", "cancelled", "exposed",
"problematic", "lawsuit", "banned", "offensive", "hate"
]
def brand_safety_audit(username: str) -> dict:
"""Run a brand safety audit on a TikTok creator."""
audit = {"username": username, "risks": [], "score": 100}
# Check 1: Recent video content analysis
videos_resp = requests.post(f"{BASE}/user/videos", headers=AUTH,
json={"username": username, "count": 20})
videos = videos_resp.json().get("videos", [])
risky_content = []
for v in videos:
desc = (v.get("desc", "") or "").lower()
for kw in RISK_KEYWORDS:
if kw in desc:
risky_content.append({
"video": v.get("id"),
"keyword": kw,
"desc": v.get("desc", "")[:80],
})
if risky_content:
audit["risks"].append({
"type": "content_risk",
"details": risky_content,
"severity": "high" if len(risky_content) > 3 else "medium",
})
audit["score"] -= len(risky_content) * 10
# Check 2: Engagement authenticity
if videos:
plays = [v.get("stats", {}).get("playCount", 0) for v in videos]
likes = [v.get("stats", {}).get("diggCount", 0) for v in videos]
avg_plays = sum(plays) / len(plays)
avg_likes = sum(likes) / len(likes)
# Suspicious: very high likes-to-plays ratio (possible bought engagement)
if avg_plays > 0 and avg_likes / avg_plays > 0.3:
audit["risks"].append({
"type": "engagement_anomaly",
"details": f"Like/play ratio {avg_likes/avg_plays:.2%} unusually high",
"severity": "medium",
})
audit["score"] -= 15
# Check 3: Web search for creator controversies
search_resp = requests.post(
"https://api.scavio.dev/api/v1/search",
headers={"x-api-key": TOKEN},
json={"query": f"@{username} tiktok controversy OR cancelled OR scam",
"num_results": 5},
)
web_results = search_resp.json().get("organic_results", [])
controversy_hits = [r for r in web_results
if any(kw in r.get("snippet", "").lower()
for kw in RISK_KEYWORDS)]
if controversy_hits:
audit["risks"].append({
"type": "web_reputation",
"details": [r["snippet"][:100] for r in controversy_hits],
"severity": "high",
})
audit["score"] -= len(controversy_hits) * 20
audit["score"] = max(audit["score"], 0)
audit["recommendation"] = (
"approve" if audit["score"] >= 70
else "review" if audit["score"] >= 40
else "reject"
)
return auditRunning the audit
Python
# Audit costs: 1 credit (videos) + 1 credit (web search) = $0.01
# For deeper audit, add profile lookup: +1 credit = $0.015 total
candidates = ["creator_a", "creator_b", "creator_c"]
results = []
for username in candidates:
audit = brand_safety_audit(username)
results.append(audit)
print(f"@{username}: Score {audit['score']}/100 -> {audit['recommendation']}")
for risk in audit["risks"]:
print(f" [{risk['severity']}] {risk['type']}")
# Output:
# @creator_a: Score 85/100 -> approve
# @creator_b: Score 35/100 -> reject
# [high] content_risk
# [high] web_reputation
# @creator_c: Score 70/100 -> approveMonitoring brand mentions
Python
def monitor_brand_mentions(brand_name: str) -> dict:
"""Check TikTok for brand mentions and sentiment."""
resp = requests.post(f"{BASE}/search", headers=AUTH,
json={"query": brand_name, "count": 20})
videos = resp.json().get("videos", [])
positive_signals = ["love", "best", "recommend", "favorite", "amazing"]
negative_signals = ["worst", "terrible", "scam", "avoid", "waste"]
mentions = {"positive": 0, "negative": 0, "neutral": 0, "total": len(videos)}
for v in videos:
desc = (v.get("desc", "") or "").lower()
if any(s in desc for s in positive_signals):
mentions["positive"] += 1
elif any(s in desc for s in negative_signals):
mentions["negative"] += 1
else:
mentions["neutral"] += 1
mentions["sentiment_score"] = (
(mentions["positive"] - mentions["negative"])
/ max(mentions["total"], 1) * 100
)
return mentions
brand = monitor_brand_mentions("Your Brand Name")
print(f"Sentiment: {brand['sentiment_score']:.0f} "
f"({brand['positive']} pos / {brand['negative']} neg)")Cost of proactive brand safety
- Pre-campaign creator audit (10 candidates): 20 credits = $0.10
- Weekly brand mention monitoring: 4 credits/week = $0.08/month
- Monthly full audit (50 creators): 100 credits = $0.50
- Compare: Brand24 TikTok monitoring at $119/month
- Compare: manual research at 30 min per creator
Key takeaway
Brand safety on TikTok is a $0.01-0.05 per creator API call, not a $119/month monitoring subscription. Automate the check before every influencer contract. The API catches the risks that a quick scroll through someone's profile page misses.