curl --request GET \
--url https://api.helicone.ai/v1/request/{requestId}

{
"data": {
"request_id": "<string>",
"request_created_at": "<string>",
"request_body": {},
"request_path": "<string>",
"request_user_id": {},
"request_properties": {},
"request_model": {},
"response_id": {},
"response_created_at": {},
"response_body": {},
"response_status": 123,
"response_model": {},
"model_override": {},
"helicone_user": {},
"provider": "<string>",
"delay_ms": {},
"time_to_first_token": {},
"total_tokens": {},
"prompt_tokens": {},
"completion_tokens": {},
"reasoning_tokens": {},
"prompt_cache_write_tokens": {},
"prompt_cache_read_tokens": {},
"prompt_audio_tokens": {},
"completion_audio_tokens": {},
"cost": {},
"costUSD": {},
"prompt_id": {},
"prompt_version": {},
"feedback_created_at": {},
"feedback_id": {},
"feedback_rating": {},
"signed_body_url": {},
"llmSchema": {},
"country_code": {},
"asset_ids": {},
"asset_urls": {},
"scores": {},
"properties": {},
"assets": [
"<string>"
],
"target_url": "<string>",
"model": "<string>",
"cache_reference_id": {},
"cache_enabled": true,
"updated_at": "<string>",
"request_referrer": {},
"storage_location": "<string>"
},
"error": {}
}

Retrieve a specific request by its ID
curl --request GET \
--url https://api.helicone.ai/v1/request/{requestId}

{
"data": {
"request_id": "<string>",
"request_created_at": "<string>",
"request_body": {},
"request_path": "<string>",
"request_user_id": {},
"request_properties": {},
"request_model": {},
"response_id": {},
"response_created_at": {},
"response_body": {},
"response_status": 123,
"response_model": {},
"model_override": {},
"helicone_user": {},
"provider": "<string>",
"delay_ms": {},
"time_to_first_token": {},
"total_tokens": {},
"prompt_tokens": {},
"completion_tokens": {},
"reasoning_tokens": {},
"prompt_cache_write_tokens": {},
"prompt_cache_read_tokens": {},
"prompt_audio_tokens": {},
"completion_audio_tokens": {},
"cost": {},
"costUSD": {},
"prompt_id": {},
"prompt_version": {},
"feedback_created_at": {},
"feedback_id": {},
"feedback_rating": {},
"signed_body_url": {},
"llmSchema": {},
"country_code": {},
"asset_ids": {},
"asset_urls": {},
"scores": {},
"properties": {},
"assets": [
"<string>"
],
"target_url": "<string>",
"model": "<string>",
"cache_reference_id": {},
"cache_enabled": true,
"updated_at": "<string>",
"request_referrer": {},
"storage_location": "<string>"
},
"error": {}
}

Retrieve detailed information about a specific request using its unique identifier. This endpoint returns the full request and response data, including metadata, tokens, costs, and custom properties.

Documentation Index
Fetch the complete documentation index at: https://mintlify.com/helicone/helicone/llms.txt
Use this file to discover all available pages before exploring further.
The request ID is returned in the Helicone-Id response header when making requests through Helicone, or in the request_id field when querying requests. Example: req_abc123def456

includeBody parameter:
- true - Returns full request/response bodies (may be large)
- false - Returns metadata only, with signed URLs for accessing bodies

Note: setting includeBody=true may significantly increase response size for requests with large payloads.

Show HeliconeRequest object
Example: "2024-01-15T14:30:00.000Z"

Only populated when includeBody=true.

Example: "/v1/chat/completions"

{
"Environment": "production",
"Conversation": "support_123",
"App": "mobile"
}
Example: "gpt-4"

Only populated when includeBody=true.

Examples: 200, 400, 500

Examples: "OPENAI", "ANTHROPIC", "AZURE", "GOOGLE", "TOGETHER_AI"

Returned as a signed URL for accessing the body (when includeBody=false).

{
"accuracy": 95,
"relevance": 87,
"helpfulness": 92
}
# Retrieve a single logged request by its Helicone request ID
# (metadata only; bodies are referenced via signed URLs).
# Replace <HELICONE_API_KEY> with your Helicone API key.
curl --request GET \
--url https://api.helicone.ai/v1/request/req_abc123def456 \
--header 'Authorization: Bearer <HELICONE_API_KEY>'
// Retrieve a single logged request by its Helicone request ID (metadata only).
const requestId = 'req_abc123def456';
const url = `https://api.helicone.ai/v1/request/${requestId}`;
const headers = { 'Authorization': `Bearer ${process.env.HELICONE_API_KEY}` };

const response = await fetch(url, { method: 'GET', headers });
const result = await response.json();
console.log(result.data);
import os
import requests

# Retrieve a single logged request by its Helicone request ID (metadata only).
request_id = "req_abc123def456"
api_url = "https://api.helicone.ai/v1/request/" + request_id
auth_headers = {"Authorization": f"Bearer {os.environ['HELICONE_API_KEY']}"}

response = requests.get(api_url, headers=auth_headers)
result = response.json()
print(result["data"])
# Retrieve a request with the full request/response bodies included inline.
# The URL is quoted so the shell does not interpret the '?' in the query string.
curl --request GET \
--url 'https://api.helicone.ai/v1/request/req_abc123def456?includeBody=true' \
--header 'Authorization: Bearer <HELICONE_API_KEY>'
// Retrieve a request with includeBody=true so the full request/response
// bodies come back inline (response may be large).
const requestId = 'req_abc123def456';
const endpoint = `https://api.helicone.ai/v1/request/${requestId}?includeBody=true`;

const response = await fetch(endpoint, {
  method: 'GET',
  headers: { 'Authorization': `Bearer ${process.env.HELICONE_API_KEY}` }
});

const result = await response.json();
console.log(result.data.request_body);
console.log(result.data.response_body);
import os
import requests

# Retrieve a request with includeBody=True so the full request/response
# bodies come back inline (response may be large).
request_id = "req_abc123def456"
api_url = f"https://api.helicone.ai/v1/request/{request_id}"
auth_headers = {"Authorization": f"Bearer {os.environ['HELICONE_API_KEY']}"}
query = {"includeBody": True}

result = requests.get(api_url, params=query, headers=auth_headers).json()
print(result["data"]["request_body"])
print(result["data"]["response_body"])
import OpenAI from 'openai';

// Route OpenAI traffic through the Helicone gateway so every call is logged.
const client = new OpenAI({
  baseURL: 'https://gateway.helicone.ai/v1',
  defaultHeaders: {
    'Helicone-Auth': `Bearer ${process.env.HELICONE_API_KEY}`
  }
});

// .withResponse() exposes the raw HTTP response alongside the parsed data,
// which is needed to read response headers.
const { data, response } = await client.chat.completions
  .create({
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Hello!' }]
  })
  .withResponse();

// Helicone tags every logged call with a unique ID in the helicone-id header.
const requestId = response.headers.get('helicone-id');
console.log('Request ID:', requestId);

// That ID can be used later to fetch the logged request from the REST API.
const detailsUrl = `https://api.helicone.ai/v1/request/${requestId}`;
const requestDetails = await fetch(detailsUrl, {
  headers: { 'Authorization': `Bearer ${process.env.HELICONE_API_KEY}` }
});
import os

import requests
from openai import OpenAI

# Route OpenAI traffic through the Helicone gateway so every call is logged.
client = OpenAI(
    base_url="https://gateway.helicone.ai/v1",
    default_headers={"Helicone-Auth": f"Bearer {os.environ['HELICONE_API_KEY']}"},
)

# with_raw_response exposes the HTTP response headers alongside the payload.
response = client.with_raw_response.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}],
)

# Helicone tags every logged call with a unique ID in the helicone-id header.
request_id = response.headers.get("helicone-id")
print(f"Request ID: {request_id}")

# That ID can be used later to fetch the logged request from the REST API.
request_details = requests.get(
    f"https://api.helicone.ai/v1/request/{request_id}",
    headers={"Authorization": f"Bearer {os.environ['HELICONE_API_KEY']}"},
)
/**
 * Fetch a logged request (bodies included) and, when it failed with an
 * HTTP status of 400 or above, print its details for debugging.
 */
const debugRequest = async (requestId: string) => {
  const url = `https://api.helicone.ai/v1/request/${requestId}?includeBody=true`;
  const response = await fetch(url, {
    headers: { 'Authorization': `Bearer ${process.env.HELICONE_API_KEY}` }
  });
  const payload = await response.json();
  const request = payload.data;

  // Successful calls (status < 400) need no debugging output.
  if (request.response_status < 400) {
    return;
  }

  console.log('Failed request details:');
  console.log('Status:', request.response_status);
  console.log('Model:', request.model);
  console.log('Error:', request.response_body?.error);
  console.log('Request body:', request.request_body);
};
/**
 * Fetch a logged request and print a cost/token/latency summary.
 */
const analyzeRequest = async (requestId: string) => {
  const response = await fetch(
    `https://api.helicone.ai/v1/request/${requestId}`,
    {
      headers: { 'Authorization': `Bearer ${process.env.HELICONE_API_KEY}` }
    }
  );
  const payload = await response.json();
  const request = payload.data;

  console.log('Cost Analysis:');
  // Label/value pairs logged in the same order as before.
  const summary: Array<[string, unknown]> = [
    ['Model:', request.model],
    ['Total cost:', `$${request.cost}`],
    ['Prompt tokens:', request.prompt_tokens],
    ['Completion tokens:', request.completion_tokens],
    ['Total tokens:', request.total_tokens],
    ['Latency:', `${request.delay_ms}ms`]
  ];
  for (const [label, value] of summary) {
    console.log(label, value);
  }
};