Fix agent failed-to-fetch errors by systematically diagnosing the failure mode and applying the correct fix. The most common causes are timeout errors from slow connections, CORS errors from client-side calls that should be server-side, invalid or expired API keys, rate limit responses that lack retry logic, and malformed request bodies. Each failure mode has a specific diagnostic pattern and fix. This tutorial covers all five and provides drop-in code for each solution.
Prerequisites
- Python 3.8+ installed
- requests library installed
- A Scavio API key from scavio.dev
- An agent with failing search calls
Walkthrough
Step 1: Diagnose the failure mode
Run a diagnostic that tests each common failure mode to identify the root cause.
import json
import os
import time
from typing import Optional

import requests
API_KEY = os.environ['SCAVIO_API_KEY']
API_URL = 'https://api.scavio.dev/api/v1/search'
def diagnose(api_key: Optional[str] = None) -> dict:
    """Probe the search API once and classify the most likely failure mode.

    Args:
        api_key: Key to test; falls back to the module-level API_KEY.

    Returns:
        A dict with an 'issues' list of 'CODE: explanation' strings;
        contains a single 'NO_ISSUES: ...' entry when nothing went wrong.
    """
    key = api_key or API_KEY
    issues = []
    # Test 1: Basic connectivity -- one cheap request exercises auth,
    # rate limiting, server health, and JSON decoding in a single shot.
    try:
        resp = requests.post(
            API_URL,
            headers={'x-api-key': key},
            json={'platform': 'google', 'query': 'test'},
            timeout=5,
        )
        if resp.status_code == 401:
            issues.append('INVALID_API_KEY: Check your API key is correct and active')
        elif resp.status_code == 429:
            issues.append('RATE_LIMITED: You are sending too many requests')
        elif resp.status_code >= 500:
            issues.append(f'SERVER_ERROR: Status {resp.status_code}')
        elif resp.status_code == 200:
            data = resp.json()
            # Empty results are not a transport failure; flag them separately.
            if not data.get('organic_results'):
                issues.append('EMPTY_RESULTS: Query returned no results (not an error)')
    except requests.Timeout:
        issues.append('TIMEOUT: Request took >5s, increase timeout or check connection')
    except requests.ConnectionError:
        issues.append('CONNECTION_ERROR: Cannot reach API server')
    except json.JSONDecodeError:
        # requests' JSONDecodeError subclasses json.JSONDecodeError, so this
        # catches a 200 response whose body is not valid JSON.
        issues.append('MALFORMED_RESPONSE: Response is not valid JSON')
    if not issues:
        issues.append('NO_ISSUES: API is working correctly')
    return {'issues': issues}
result = diagnose()
for issue in result['issues']:
    print(f' {issue}')

Step 2: Fix timeout errors
Add proper timeout handling with exponential backoff retry.
def search_with_timeout(query: str, max_retries: int = 3) -> dict:
    """Fix timeout errors with a progressively longer timeout and retry.

    Args:
        query: Search terms to submit.
        max_retries: Total number of attempts before giving up.

    Returns:
        The parsed response dict on success; otherwise
        {'organic_results': [], 'error': ...} describing the failure.
    """
    for attempt in range(max_retries):
        timeout = 10 + (attempt * 5)  # 10s, 15s, 20s
        last_attempt = attempt == max_retries - 1
        try:
            resp = requests.post(
                API_URL,
                headers={'x-api-key': API_KEY},
                json={'platform': 'google', 'query': query},
                timeout=timeout,
            )
            resp.raise_for_status()
            return resp.json()
        except requests.Timeout:
            print(f'Timeout on attempt {attempt + 1} ({timeout}s), retrying...')
            if not last_attempt:  # no point sleeping when out of retries
                time.sleep(2 ** attempt)
        except requests.HTTPError as e:
            status = e.response.status_code if e.response is not None else None
            # 4xx errors other than 429 will never succeed on retry
            # (e.g. a 401 bad key) -- fail fast instead of looping.
            if status is not None and 400 <= status < 500 and status != 429:
                return {'organic_results': [], 'error': f'Client error {status}'}
            print(f'Error on attempt {attempt + 1}: {e}')
            if not last_attempt:
                time.sleep(2 ** attempt)
        except requests.RequestException as e:
            print(f'Error on attempt {attempt + 1}: {e}')
            if not last_attempt:
                time.sleep(2 ** attempt)
    return {'organic_results': [], 'error': 'All retries exhausted'}
result = search_with_timeout('test query')
print(f"Results: {len(result.get('organic_results', []))}")

Step 3: Fix rate limit errors
Implement rate limit detection and automatic backoff.
class RateLimitedClient:
    """Search client that spaces out requests and backs off on HTTP 429."""

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.min_delay = 0.2  # Minimum delay between requests
        self.last_request = 0

    def search(self, query: str, platform: str = 'google') -> dict:
        """Run one search, pausing as needed to respect the rate limit.

        Returns the parsed response dict, or an error dict when the rate
        limit persists through all retries. Non-429 HTTP errors propagate
        via raise_for_status.
        """
        # Throttle: ensure at least min_delay has elapsed since the last call.
        since_last = time.time() - self.last_request
        if since_last < self.min_delay:
            time.sleep(self.min_delay - since_last)
        attempt = 0
        while attempt < 3:
            self.last_request = time.time()
            resp = requests.post(
                API_URL,
                headers={'x-api-key': self.api_key},
                json={'platform': platform, 'query': query},
                timeout=15,
            )
            if resp.status_code != 429:
                resp.raise_for_status()
                return resp.json()
            # Rate limited: back off exponentially (2s, 4s, 8s) and retry.
            pause = 2 ** (attempt + 1)
            print(f'Rate limited, waiting {pause}s...')
            time.sleep(pause)
            attempt += 1
        return {'organic_results': [], 'error': 'Rate limit persists'}
client = RateLimitedClient(API_KEY)
result = client.search('test query')
print(f"Results: {len(result.get('organic_results', []))}")

Step 4: Fix malformed request errors
Validate request payload before sending to catch common formatting issues.
VALID_PLATFORMS = ['google', 'amazon', 'youtube', 'walmart', 'reddit']

def validated_search(query: str, platform: str = 'google') -> dict:
    """Search with input validation to prevent malformed requests.

    Args:
        query: Search terms; trimmed and capped at 500 characters.
        platform: One of VALID_PLATFORMS; unknown values fall back to 'google'.

    Returns:
        The parsed response dict on success, or
        {'organic_results': [], 'error': ...} describing what failed.
    """
    # Validate platform
    if platform not in VALID_PLATFORMS:
        print(f'Invalid platform "{platform}". Valid: {VALID_PLATFORMS}')
        platform = 'google'
    # Validate query
    if not query or not query.strip():
        return {'organic_results': [], 'error': 'Empty query'}
    query = query.strip()[:500]  # Trim and limit length
    # Validate API key
    if not API_KEY or len(API_KEY) < 10:
        return {'organic_results': [], 'error': 'Invalid API key format'}
    payload = {'platform': platform, 'query': query}
    try:
        resp = requests.post(
            API_URL,
            headers={'x-api-key': API_KEY, 'Content-Type': 'application/json'},
            json=payload,
            timeout=15,
        )
        resp.raise_for_status()
        return resp.json()
    except (requests.RequestException, ValueError) as e:
        # RequestException covers network/HTTP failures; ValueError covers a
        # response body that is not valid JSON. A bare `except Exception`
        # here would also hide programming errors such as NameError.
        return {'organic_results': [], 'error': str(e)}
result = validated_search('test query')
print(f"Results: {len(result.get('organic_results', []))}")

Step 5: Build a resilient search wrapper
Combine all fixes into a single resilient search function for your agent.
def resilient_search(query: str, platform: str = 'google') -> dict:
    """Production-grade search with all error handling built in.

    Combines input validation, timeout escalation, rate-limit backoff, and
    auth-failure detection from the previous steps.

    Args:
        query: Search terms; trimmed and capped at 500 characters.
        platform: One of VALID_PLATFORMS; unknown values fall back to 'google'.

    Returns:
        The parsed response dict on success, or
        {'organic_results': [], 'error': <code>} on failure.
    """
    # Input validation -- reject the cheap, certain failure first.
    if not query or not query.strip():
        return {'organic_results': [], 'error': 'empty_query'}
    query = query.strip()[:500]
    if platform not in VALID_PLATFORMS:
        platform = 'google'
    # Retry with exponential backoff and a progressively longer timeout.
    for attempt in range(3):
        timeout = 10 + (attempt * 5)
        last_attempt = attempt == 2
        try:
            resp = requests.post(
                API_URL,
                headers={'x-api-key': API_KEY, 'Content-Type': 'application/json'},
                json={'platform': platform, 'query': query},
                timeout=timeout,
            )
            if resp.status_code == 429:
                if not last_attempt:  # no point sleeping when out of retries
                    time.sleep(2 ** (attempt + 1))
                continue
            if resp.status_code == 401:
                # Auth failures never succeed on retry; fail fast.
                return {'organic_results': [], 'error': 'invalid_api_key'}
            resp.raise_for_status()
            return resp.json()
        except (requests.Timeout, requests.ConnectionError):
            if not last_attempt:
                time.sleep(2 ** attempt)
        except Exception as e:
            return {'organic_results': [], 'error': str(e)}
    return {'organic_results': [], 'error': 'all_retries_exhausted'}
# Test the resilient wrapper
result = resilient_search('test query')
print(f"Results: {len(result.get('organic_results', []))}")
print(f"Error: {result.get('error', 'none')}")

Python Example
import requests, os, time
H = {'x-api-key': os.environ['SCAVIO_API_KEY']}
def search_safe(query, retries=2):
    """Search with retry, returning [] instead of raising on any failure.

    Args:
        query: Search terms to submit.
        retries: Number of additional attempts after the first.

    Returns:
        The list of organic results, or [] when every attempt fails.
    """
    for i in range(retries + 1):
        try:
            r = requests.post(
                'https://api.scavio.dev/api/v1/search',
                headers=H,
                json={'platform': 'google', 'query': query},
                timeout=10 + i * 5,  # escalate: 10s, 15s, 20s
            )
            if r.status_code == 429:
                time.sleep(2 ** i)
                continue
            r.raise_for_status()
            return r.json().get('organic_results', [])
        except requests.RequestException:
            # A bare `except:` would also swallow KeyboardInterrupt and
            # SystemExit; only network/HTTP failures should trigger a retry.
            if i < retries:  # skip the pointless sleep on the final attempt
                time.sleep(2 ** i)
    return []
print(len(search_safe('test')))

JavaScript Example
const H = {'x-api-key': process.env.SCAVIO_API_KEY, 'Content-Type': 'application/json'};
/**
 * Search with retry; resolves to [] instead of rejecting on any failure.
 * @param {string} query - Search terms to submit.
 * @param {number} [retries=2] - Additional attempts after the first.
 * @returns {Promise<Array>} Organic results, or [] when every attempt fails.
 */
async function searchSafe(query, retries = 2) {
  for (let i = 0; i <= retries; i++) {
    try {
      const resp = await fetch('https://api.scavio.dev/api/v1/search', {
        method: 'POST',
        headers: H,
        body: JSON.stringify({platform: 'google', query}),
        // Abort slow requests instead of hanging forever: 10s, 15s, 20s.
        signal: AbortSignal.timeout(10000 + i * 5000)
      });
      if (resp.status === 429) {
        // Rate limited: back off exponentially before the next attempt.
        await new Promise(resolve => setTimeout(resolve, 1000 * 2 ** i));
        continue;
      }
      // Without this check, 401/500 responses silently became [] -- surface
      // non-OK statuses so the catch block handles and retries them.
      if (!resp.ok) throw new Error(`HTTP ${resp.status}`);
      return (await resp.json()).organic_results || [];
    } catch (e) {
      await new Promise(resolve => setTimeout(resolve, 1000 * 2 ** i));
    }
  }
  return [];
}
searchSafe('test').then(r => console.log(r.length));

Expected Output
A resilient search wrapper that handles timeouts, rate limits, invalid keys, and malformed requests with automatic retry and clear error reporting.