Access economic data from FRED, World Bank, BLS, BEA, Eurostat, and more through our unified REST API
REST API for programmatic access to 6M+ economic and financial datasets from FRED, IMF, World Bank, and 16+ providers
Make your first API call in seconds
import requests

API_KEY = "dsiq_xxxxx"

# All authenticated calls send the key as a Bearer token in the
# Authorization header.
auth_headers = {"Authorization": f"Bearer {API_KEY}"}

# Search datasets
response = requests.get(
    "https://datasetiq.com/api/public/search?q=gdp",
    headers=auth_headers,
)
print(response.json())

All API requests require an API key. Include it in the Authorization header:

Authorization: Bearer dsiq_xxxxxxxxxxxxx

Get Your API Key

Rate limit headers included in every response: X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset
/api/public/search — Search datasets by keyword, provider, or topic

q (string, required) - Search query
limit (number, optional) - Results per page (max 10, default 10)
offset (number, optional) - Pagination offset (default 0)

import requests
# Keyword search across the catalog; this endpoint works without auth.
search_response = requests.get(
    'https://datasetiq.com/api/public/search',
    params={'q': 'GDP', 'limit': 10},
)
results = search_response.json()
print(f"Found {len(results['results'])} datasets")

{
"results": [
{
"id": "fred-GDPC1",
"slug": "fred-gdpc1",
"title": "Real Gross Domestic Product",
"provider": "fred",
"frequency": "Quarterly",
"units": "Billions of Chained 2017 Dollars"
}
],
"count": 1,
"limit": 10,
"offset": 0
}

/api/public/series/{id} — Get detailed metadata for a specific dataset
import requests

series_id = 'fred-GDPC1'
detail_url = f'https://datasetiq.com/api/public/series/{series_id}'

# Metadata lookup needs no authentication.
metadata = requests.get(detail_url).json()
print(f"Title: {metadata['dataset']['title']}")
print(f"Observations: {metadata['dataset']['observationCount']}")

{
"dataset": {
"id": "fred-GDPC1",
"slug": "fred-gdpc1",
"title": "Real Gross Domestic Product",
"description": "Billions of Chained 2017 Dollars...",
"provider": "fred",
"frequency": "Quarterly",
"units": "Billions of Chained 2017 Dollars",
"startDate": "1947-01-01",
"endDate": "2024-07-01",
"observationCount": 310,
"hasObservations": true,
"insightScoreCurrent": 89.5
}
}

/api/public/series/{id}/data — No Auth Required — Fetch historical time series observations (limited to latest 100 observations without authentication)

limit (number, optional) - Max observations (max 100 without auth, default 100)
start (string, optional) - Start date (YYYY-MM-DD)
end (string, optional) - End date (YYYY-MM-DD)
cursor (string, optional) - Pagination cursor (ISO date)

import requests
import pandas as pd

series_id = 'fred-GDPC1'

# No authentication - limited to 100 observations
resp = requests.get(
    f'https://datasetiq.com/api/public/series/{series_id}/data',
    params={'start': '2020-01-01'},
)
payload = resp.json()

# Convert to pandas DataFrame
df = pd.DataFrame(payload['data'])
df['date'] = pd.to_datetime(df['date'])
print(f"Fetched {len(df)} observations")

/api/public/series/{id}/data — Auth Required (for full data) — Fetch up to 1,000 observations with authentication

limit (number, optional) - Max observations (max 1000 with auth, default 100)
start (string, optional) - Start date (YYYY-MM-DD)
end (string, optional) - End date (YYYY-MM-DD)
cursor (string, optional) - Pagination cursor (ISO date)

import requests
import pandas as pd

series_id = 'fred-GDPC1'
auth_headers = {'Authorization': f'Bearer {API_KEY}'}

# With auth - can fetch up to 1000 observations
resp = requests.get(
    f'https://datasetiq.com/api/public/series/{series_id}/data',
    headers=auth_headers,
    params={'start': '2020-01-01', 'limit': 500},
)
payload = resp.json()

# Convert to pandas DataFrame
df = pd.DataFrame(payload['data'])
df['date'] = pd.to_datetime(df['date'])
print(f"Fetched {len(df)} observations")

{
"seriesId": "fred-GDPC1",
"data": [
{ "date": "2020-01-01", "value": 19032.591 },
{ "date": "2020-04-01", "value": 17291.627 },
{ "date": "2020-07-01", "value": 18589.821 }
],
"nextCursor": "2020-10-01T00:00:00.000Z",
"hasMore": true
}

/api/public/series/{id}/csv — Auth Required — Export full dataset as CSV file (Auth required)
import requests

series_id = 'fred-GDPC1'
auth_headers = {'Authorization': f'Bearer {API_KEY}'}

# Download the full series as CSV (authentication required).
csv_response = requests.get(
    f'https://datasetiq.com/api/public/series/{series_id}/csv',
    headers=auth_headers,
)

# Save to file
with open(f'{series_id}.csv', 'wb') as outfile:
    outfile.write(csv_response.content)
print(f"Downloaded {series_id}.csv")

date,value
1947-01-01,2033.061
1947-04-01,2027.639
1947-07-01,2023.452
...

/api/public/series/{id}/insight — Auth Required — Generate AI-powered insights (basic or advanced) - Auth required, quota enforced
import requests

series_id = 'fred-GDPC1'

# Insight generation is an authenticated JSON POST.
auth_headers = {
    'Authorization': f'Bearer {API_KEY}',
    'Content-Type': 'application/json',
}
body = {'type': 'basic'}  # or 'advanced'

response = requests.post(
    f'https://datasetiq.com/api/public/series/{series_id}/insight',
    headers=auth_headers,
    json=body,
)
result = response.json()
if result['status'] == 'queued':
print(f"Insight queued. Job ID: {result['jobId']}")
elif result['status'] == 'cached':
print(f"Cached insight: {result['insight']['content']}")

// 202 Accepted (queued)
{
"status": "queued",
"jobId": "basic_fred-GDPC1_v3.2_abc123"
}
// 200 OK (cached)
{
"status": "cached",
"insight": {
"content": "This dataset tracks Real GDP...",
"generatedAt": "2024-01-15T10:30:00Z"
}
}

/api/public/jobs/{jobId} — Auth Required — Poll job status for async operations (insights) - Auth required
import requests
import time
# Poll the async job endpoint until the insight job reaches a terminal state.
# NOTE(review): there is no timeout or max-attempt guard — a job stuck in a
# non-terminal state would poll forever; consider bounding the retries.
job_id = 'basic_fred-GDPC1_v3.2_abc123'
headers = {'Authorization': f'Bearer {API_KEY}'}
# Poll until complete
while True:
response = requests.get(
f'https://datasetiq.com/api/public/jobs/{job_id}',
headers=headers
)
job = response.json()
# Terminal states: 'completed' carries the insight text in 'content',
# 'failed' carries an error object with a 'message'.
if job['status'] == 'completed':
print(f"Insight: {job['content']}")
break
elif job['status'] == 'failed':
print(f"Error: {job['error']['message']}")
break
time.sleep(2)  # Wait 2 seconds before polling again

{
"jobId": "basic_fred-GDPC1_v3.2_abc123",
"status": "completed",
"type": "basic",
"content": "This dataset tracks Real Gross Domestic Product...",
"generatedAt": "2024-01-15T10:30:15Z"
}