Reddit discussions provide unfiltered user sentiment about products, brands, and services. This tutorial builds a sentiment tracker that searches Reddit for brand mentions via Scavio's Reddit endpoint, extracts discussion themes, and tracks sentiment over time. Unlike Reddit's native API (which requires OAuth and has strict rate limits), Scavio's Reddit search returns structured results with a simple API call.
Prerequisites
- Python 3.8+ installed
- requests library installed
- A Scavio API key from scavio.dev
- Brand or product names to track
Walkthrough
Step 1: Search Reddit for brand mentions
Query Scavio's Reddit endpoint for mentions of your brand or product.
import requests, os
# Shared request headers: Scavio authenticates via an 'x-api-key' header.
# Reading os.environ directly (no .get) fails fast when the key is missing.
H = {'x-api-key': os.environ['SCAVIO_API_KEY']}
def search_reddit(query: str) -> list:
resp = requests.post('https://api.scavio.dev/api/v1/search', headers=H,
json={'platform': 'reddit', 'query': query}, timeout=10)
return resp.json().get('organic', [])Step 2: Extract sentiment signals
Analyze post titles and snippets for positive, negative, and neutral signals.
POSITIVE = ['love', 'great', 'best', 'amazing', 'recommend', 'switched to', 'works well']
NEGATIVE = ['hate', 'worst', 'terrible', 'broken', 'avoid', 'switched from', 'stopped using']
def classify_sentiment(text: str) -> str:
    """Label *text* as 'positive', 'negative', or 'neutral' by keyword hits.

    Each matched phrase counts once; ties (including zero matches on both
    lists) come back as 'neutral'. Matching is case-insensitive.
    """
    haystack = text.lower()
    score = sum(phrase in haystack for phrase in POSITIVE)
    score -= sum(phrase in haystack for phrase in NEGATIVE)
    if score > 0:
        return 'positive'
    if score < 0:
        return 'negative'
    return 'neutral'
def analyze_mentions(results: list) -> dict:
sentiments = {'positive': [], 'negative': [], 'neutral': []}
for r in results:
text = f"{r.get('title', '')} {r.get('snippet', '')}"
sentiment = classify_sentiment(text)
sentiments[sentiment].append({'title': r.get('title', ''), 'url': r.get('link', '')})
return sentimentsStep 3: Generate a sentiment summary
Calculate sentiment ratios and highlight notable discussions.
def sentiment_summary(sentiments: dict) -> dict:
total = sum(len(v) for v in sentiments.values())
return {
'total_mentions': total,
'positive_pct': round(len(sentiments['positive']) / max(total, 1) * 100, 1),
'negative_pct': round(len(sentiments['negative']) / max(total, 1) * 100, 1),
'neutral_pct': round(len(sentiments['neutral']) / max(total, 1) * 100, 1),
'top_positive': sentiments['positive'][:3],
'top_negative': sentiments['negative'][:3],
}Step 4: Run and save daily reports
Execute the tracker daily and store results for trend analysis.
import json, datetime
def daily_sentiment(brand: str):
results = search_reddit(brand)
sentiments = analyze_mentions(results)
summary = sentiment_summary(sentiments)
date = datetime.date.today().isoformat()
report = {'date': date, 'brand': brand, **summary}
with open(f'sentiment_{brand}_{date}.json', 'w') as f:
json.dump(report, f, indent=2)
print(f"{brand}: {report['positive_pct']}% positive, {report['negative_pct']}% negative ({report['total_mentions']} mentions)")
return report
daily_sentiment('YourBrand')Python Example
import requests, os
H = {'x-api-key': os.environ['SCAVIO_API_KEY']}
def reddit_sentiment(brand):
data = requests.post('https://api.scavio.dev/api/v1/search', headers=H,
json={'platform': 'reddit', 'query': brand}, timeout=10).json()
results = data.get('organic', [])
print(f'{brand}: {len(results)} Reddit mentions found')
return resultsJavaScript Example
async function redditSentiment(brand) {
const data = await fetch('https://api.scavio.dev/api/v1/search', {
method: 'POST', headers: {'x-api-key': process.env.SCAVIO_API_KEY, 'Content-Type': 'application/json'},
body: JSON.stringify({platform: 'reddit', query: brand})
}).then(r => r.json());
return data.organic || [];
}Expected Output
Daily Reddit sentiment reports with positive/negative/neutral breakdowns and notable discussion highlights.