Discover model capabilities programmatically using the /v1/models API and features field.
Every model at TheRouter includes an architecture.features array that describes its capabilities. This allows you to programmatically select models based on your requirements.
| Feature | Description | Example Models |
|---|---|---|
| vision | Accepts image inputs (URLs or base64) | Claude Opus 4.6, GPT-4.1, Gemini 3 Flash, Grok 4, Llama 4 Maverick |
| audio_input | Accepts audio inputs | gpt-audio-1.5, gpt-realtime-1.5 |
| audio_output | Generates audio responses | gpt-audio-1.5, gpt-realtime-1.5, tts-1-hd |
| video_input | Accepts video inputs (detection only) | Gemini Pro, Gemini Flash |
| pdf | Accepts PDF document inputs | Claude Opus 4.6, Gemini 3 Flash |
| tools | Supports function/tool calling | Claude Sonnet 4.6, GPT-5.2, Gemini 2.5 Pro, DeepSeek V3.2, Qwen3 235B |
| reasoning | Has reasoning/extended thinking capabilities | o3, o4-mini, Claude Sonnet 3.7, Gemini 2.5 Pro, DeepSeek R1 |
| streaming | Supports streamed responses | All chat models |
| json_mode | Supports JSON response format | GPT models, Claude models, Gemini models, DeepSeek models |
| thinking | Supports extended thinking mode | Claude Sonnet 3.7, Gemini 2.5 Pro/Flash |
| long_context | Context window >= 200K tokens | Claude Opus 4.6 (1M), GPT-4.1 (1M), Gemini 3 Flash (1M), Llama 4 Scout (10M) |
| prompt_caching | Supports prompt caching for cost savings | Claude models, GPT models, Gemini models, xAI models |
curl https://api.therouter.ai/v1/models \
  -H "Authorization: Bearer ${THEROUTER_API_KEY}"

from openai import OpenAI
client = OpenAI(
    api_key="your_therouter_api_key",
    base_url="https://api.therouter.ai/v1"
)

# Fetch the full model catalog once
models = client.models.list()

# Keep only models whose feature list advertises image-input support
vision_models = [
    m for m in models.data
    if "vision" in m.architecture.get("features", [])
]

print("Vision-capable models:")
for m in vision_models:
    # Hoist the joined feature string so the print line stays readable
    feature_list = ", ".join(m.architecture.get("features", []))
    print(f" - {m.id}")
    print(f" Features: {feature_list}")
    print(f" Context: {m.context_length:,} tokens")
print()

import OpenAI from "openai";
// Point the OpenAI SDK at TheRouter's OpenAI-compatible endpoint
const client = new OpenAI({
apiKey: process.env.THEROUTER_API_KEY,
baseURL: "https://api.therouter.ai/v1",
});
/**
 * Return the ids of every model whose architecture.features array
 * contains ALL of the requested features.
 *
 * @param requiredFeatures - feature names the model must advertise
 * @returns matching model ids (empty array when nothing qualifies)
 */
async function findModelsByCapabilities(
  requiredFeatures: string[]
): Promise<string[]> {
  const listing = await client.models.list();
  const matchingIds: string[] = [];
  for (const model of listing.data) {
    // Models may omit the features array entirely; treat that as "none".
    const features = model.architecture?.features || [];
    if (requiredFeatures.every((feature) => features.includes(feature))) {
      matchingIds.push(model.id);
    }
  }
  return matchingIds;
}
// Find models with vision, tools, and reasoning
const matches = await findModelsByCapabilities([
  "vision",
  "tools",
  "reasoning",
]);

console.log("Models with vision + tools + reasoning:");
for (const id of matches) {
  console.log(` - ${id}`);
}
// Example output:
// Models with vision + tools + reasoning:
// - anthropic/claude-opus-4.6
// - anthropic/claude-sonnet-4.6
// - openai/gpt-5.4
// - openai/gpt-5.2
// - openai/o3

from openai import OpenAI
# Point the OpenAI SDK at TheRouter's OpenAI-compatible endpoint
client = OpenAI(
api_key="your_therouter_api_key",
base_url="https://api.therouter.ai/v1"
)
def find_cheapest_with_feature(feature: str) -> dict:
    """Return the cheapest model (by input-token price) supporting *feature*.

    Args:
        feature: a feature name from the architecture.features array
            (e.g. "vision", "tools", "reasoning").

    Returns:
        A dict with the model id, input/output prices, and feature list.

    Raises:
        ValueError: when no priced model advertises the feature.
    """
    models = client.models.list()

    # Keep models that advertise the feature AND expose pricing info;
    # a model without a pricing block cannot be ranked by cost.
    matching = [
        model for model in models.data
        if feature in model.architecture.get("features", [])
        and hasattr(model, "pricing")
    ]
    if not matching:
        # min() on an empty list raises a cryptic ValueError; fail with context.
        raise ValueError(f"no priced model supports feature {feature!r}")

    # Cheapest by input token price
    cheapest = min(matching, key=lambda m: m.pricing.input)
    return {
        "id": cheapest.id,
        "input_price": cheapest.pricing.input,
        "output_price": cheapest.pricing.output,
        "features": cheapest.architecture.get("features", []),
    }
# Find cheapest vision model
result = find_cheapest_with_feature("vision")

print("Cheapest vision model:", result["id"])
print(f"Input: ${result['input_price']}/MTok")
print(f"Output: ${result['output_price']}/MTok")

# Example output:
# Cheapest vision model: google/gemini-2.5-flash-lite
# Input: $0.10/MTok
# Output: $0.40/MTok

The /v1/models endpoint returns models with this structure:
{
"object": "list",
"data": [
{
"id": "anthropic/claude-opus-4.6",
"object": "model",
"created": 1735603200,
"owned_by": "anthropic",
"architecture": {
"modality": "text->text",
"features": [
"vision",
"reasoning",
"tools",
"long_context",
"prompt_caching",
"streaming",
"json_mode"
]
},
"context_length": 1000000,
"max_completion_tokens": 128000,
"pricing": {
"input": 5,
"output": 25,
"cache_read": 0.5
}
},
{
"id": "openai/gpt-audio-1.5",
"object": "model",
"created": 1735603200,
"owned_by": "openai",
"architecture": {
"modality": "text+audio->text+audio",
"features": [
"vision",
"audio_input",
"audio_output",
"tools",
"prompt_caching",
"streaming",
"json_mode"
]
},
"context_length": 128000,
"max_completion_tokens": 16384,
"pricing": {
"input": 4,
"output": 16,
"cache_read": 0.4
}
}
]
}

Best practices for selecting models by capability:
- Vision: check features.includes("vision")
- Reasoning: look for "reasoning" or "thinking"
- Tool calling: require the "tools" feature
- Audio: check "audio_input" and "audio_output"
- Long documents: require "long_context" and check context_length
- Cost optimization: compare pricing.input within the matching feature set

Use capabilities to build intelligent fallback chains:
def get_fallback_chain(required_features: list[str]) -> list[str]:
    """
    Build a fallback chain of models with required features,
    ordered by cost (cheapest first).

    Args:
        required_features: feature names every candidate must advertise.

    Returns:
        Up to three model ids, cheapest first; empty list when none match.
    """
    models = client.models.list()

    # Also require a pricing block (consistent with find_cheapest_with_feature):
    # a model without pricing cannot be ranked and would crash the sort below
    # with AttributeError.
    matching = [
        model for model in models.data
        if hasattr(model, "pricing")
        and all(feat in model.architecture.get("features", [])
                for feat in required_features)
    ]

    # Sort by input price (cheapest first for fallbacks)
    sorted_models = sorted(matching, key=lambda m: m.pricing.input)
    return [m.id for m in sorted_models[:3]]  # Top 3
# Example: Get vision + tools fallback chain
chain = get_fallback_chain(["vision", "tools"])
arrow_chain = " -> ".join(chain)
print("Fallback chain:", arrow_chain)

# Example output:
# Fallback chain: google/gemini-3.1-flash-lite-preview -> google/gemini-3-flash-preview -> anthropic/claude-haiku-4.5