Marketing claims from SERP API providers are unreliable. The only way to know which provider fits your use case is to benchmark them yourself with your actual queries. This tutorial builds a reproducible benchmark that tests latency, result count, SERP feature coverage, and uptime across multiple providers.
Prerequisites
- Python 3.8+
- API keys for providers you want to test (free tiers work); the snippet after this list verifies they are set
- A Scavio API key
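Before running anything, it helps to verify the keys are actually exported. A quick pre-flight check, assuming the environment variable names used in the walkthrough below (SCAVIO_API_KEY, SERPER_API_KEY, BRAVE_API_KEY):

import os

# Report which provider keys are set; providers without a key will be skipped later.
for var in ('SCAVIO_API_KEY', 'SERPER_API_KEY', 'BRAVE_API_KEY'):
    print(f"{var}: {'set' if os.environ.get(var) else 'MISSING'}")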
Walkthrough
Step 1: Set up the benchmark framework
Create a standardized test harness that measures each provider consistently.
import requests, time, os
from datetime import datetime

class SERPBenchmark:
    def __init__(self):
        self.results = []

    def test_provider(self, name: str, search_fn, queries: list) -> dict:
        metrics = {'name': name, 'queries': len(queries), 'successes': 0, 'failures': 0,
                   'latencies': [], 'result_counts': []}
        for query in queries:
            start = time.time()
            try:
                results = search_fn(query)
                metrics['latencies'].append(time.time() - start)
                metrics['result_counts'].append(len(results))
                metrics['successes'] += 1
            except Exception:
                metrics['failures'] += 1
        # Guard against division by zero when every query fails.
        metrics['avg_latency'] = sum(metrics['latencies']) / max(metrics['successes'], 1)
        metrics['avg_results'] = sum(metrics['result_counts']) / max(len(metrics['result_counts']), 1)
        metrics['success_rate'] = metrics['successes'] / len(queries)
        return metrics
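Before spending API credits, it is worth smoke-testing the harness against a stub. A minimal sketch; fake_search is a hypothetical stand-in, not a real provider:

# Hypothetical stub that mimics a provider returning ten results instantly.
def fake_search(query: str) -> list:
    return [{'title': f'result for {query}'}] * 10

smoke = SERPBenchmark()
print(smoke.test_provider('Stub', fake_search, ['hello', 'world']))
# Expect success_rate 1.0, avg_results 10, near-zero avg_latency.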
Step 2: Define provider search functions
Create a search function for each provider you want to test. Each function takes a query string and returns a list of result objects, so the harness can treat every provider identically.
# Scavio
def scavio_search(query: str) -> list:
    resp = requests.post('https://api.scavio.dev/api/v1/search',
        headers={'x-api-key': os.environ['SCAVIO_API_KEY'], 'Content-Type': 'application/json'},
        json={'platform': 'google', 'query': query}, timeout=15)
    resp.raise_for_status()  # count HTTP errors as failures, not empty result sets
    return resp.json().get('organic', [])

# Serper
def serper_search(query: str) -> list:
    resp = requests.post('https://google.serper.dev/search',
        headers={'X-API-KEY': os.environ.get('SERPER_API_KEY', ''), 'Content-Type': 'application/json'},
        json={'q': query}, timeout=15)
    resp.raise_for_status()
    return resp.json().get('organic', [])

# Brave
def brave_search(query: str) -> list:
    resp = requests.get('https://api.search.brave.com/res/v1/web/search',
        headers={'X-Subscription-Token': os.environ.get('BRAVE_API_KEY', '')},
        params={'q': query}, timeout=15)
    resp.raise_for_status()
    return resp.json().get('web', {}).get('results', [])
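These functions return only organic results, which is what the harness counts. To also compare the SERP feature coverage mentioned in the introduction, one option is to inspect the full response instead of just the organic list. A rough sketch, assuming top-level JSON keys loosely correspond to SERP features (key names differ across providers, so compare how many features each returns rather than matching names):

def feature_keys(raw_fn, query: str) -> set:
    # raw_fn returns the full parsed JSON response for a query.
    return set(raw_fn(query).keys())

# Example raw fetcher for Serper, reusing the endpoint above.
def serper_raw(query: str) -> dict:
    resp = requests.post('https://google.serper.dev/search',
        headers={'X-API-KEY': os.environ.get('SERPER_API_KEY', ''), 'Content-Type': 'application/json'},
        json={'q': query}, timeout=15)
    resp.raise_for_status()
    return resp.json()

# e.g. print(feature_keys(serper_raw, 'python tutorial'))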
Step 3: Run the benchmark
Execute tests across all providers with the same queries.
TEST_QUERIES = [
'best python web framework 2026',
'react vs vue performance',
'kubernetes deployment tutorial',
'machine learning interview questions',
'postgres vs mysql for startups',
'api rate limiting best practices',
'docker compose production setup',
'typescript generics tutorial',
'aws lambda cold start optimization',
'graphql vs rest api comparison',
]
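Canned queries like these are fine for a first pass, but as the introduction notes, your actual queries are the real test. A small sketch for sampling from a hypothetical plain-text log with one query per line:

import random

def load_queries(path: str, n: int = 10) -> list:
    # Sample up to n distinct queries from a one-query-per-line log file.
    with open(path) as f:
        queries = [line.strip() for line in f if line.strip()]
    return random.sample(queries, min(n, len(queries)))

# e.g. TEST_QUERIES = load_queries('search_log.txt')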
bench = SERPBenchmark()
providers = [
('Scavio', scavio_search),
('Serper', serper_search),
('Brave', brave_search),
]
results = []
for name, fn in providers:
    # Skip providers whose API key is not set; Scavio needs SCAVIO_API_KEY too.
    if not os.environ.get(f'{name.upper()}_API_KEY'):
        continue
    result = bench.test_provider(name, fn, TEST_QUERIES)
    results.append(result)
    print(f"{name}: {result['avg_latency']:.2f}s avg, {result['success_rate']:.0%} success, "
          f"{result['avg_results']:.0f} avg results")
Step 4: Generate comparison report
Format benchmark results into a comparison table.
def benchmark_report(results: list) -> str:
    if not results:
        return 'No providers were benchmarked; set at least one API key.'
    report = f"SERP API Benchmark - {datetime.now().isoformat()}\n\n"
    report += f"{'Provider':<12} {'Latency':<10} {'Success':<10} {'Results':<10} {'Cost/1K':<10}\n"
    report += '-' * 52 + '\n'
    costs = {'Scavio': '$5', 'Serper': '$0.10-1', 'Brave': '$5', 'Tavily': '$3-8', 'SerpAPI': '$15'}
    for r in sorted(results, key=lambda x: x['avg_latency']):
        # Pre-format each cell as a string so column widths stay aligned.
        lat = f"{r['avg_latency']:.2f}s"
        rate = f"{r['success_rate']:.0%}"
        cnt = f"{r['avg_results']:.0f}"
        report += f"{r['name']:<12} {lat:<10} {rate:<10} {cnt:<10} {costs.get(r['name'], '?'):<10}\n"
    report += f"\nQueries tested: {results[0]['queries']}\n"
    report += f"Winner (latency): {min(results, key=lambda x: x['avg_latency'])['name']}\n"
    report += f"Winner (reliability): {max(results, key=lambda x: x['success_rate'])['name']}\n"
    return report

print(benchmark_report(results))
Python Example
import requests, time, os

def benchmark_search(provider_fn, queries):
    results = []
    for q in queries:
        start = time.time()
        try:
            r = provider_fn(q)
            results.append({'query': q, 'latency': time.time() - start,
                            'count': len(r), 'success': True})
        except Exception:
            results.append({'query': q, 'success': False})
    return results
JavaScript Example
async function benchmarkSearch(providerFn, queries) {
const results = [];
for (const q of queries) {
const start = Date.now();
try {
const r = await providerFn(q);
results.push({query: q, latency: (Date.now()-start)/1000, count: r.length, success: true});
} catch { results.push({query: q, success: false}); }
}
return results;
}
Expected Output
A reproducible SERP API benchmark script that compares providers on latency, reliability, result quality, and cost per query.