Mirror of https://github.com/mii443/openai-api-rs.git, synced 2025-08-22 23:25:39 +00:00.
Merge pull request #173 from rq3glr16/main —
feat: add reasoning parameter support for OpenRouter API.
This commit is contained in:
64
examples/openrouter_reasoning.rs
Normal file
64
examples/openrouter_reasoning.rs
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
use openai_api_rs::v1::api::OpenAIClient;
|
||||||
|
use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest, Reasoning, ReasoningMode, ReasoningEffort};
|
||||||
|
use std::env;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
let api_key = env::var("OPENROUTER_API_KEY").unwrap().to_string();
|
||||||
|
let mut client = OpenAIClient::builder()
|
||||||
|
.with_endpoint("https://openrouter.ai/api/v1")
|
||||||
|
.with_api_key(api_key)
|
||||||
|
.build()?;
|
||||||
|
|
||||||
|
// Example 1: Using reasoning with effort
|
||||||
|
let mut req = ChatCompletionRequest::new(
|
||||||
|
"x-ai/grok-3-mini".to_string(), // Grok model that supports reasoning
|
||||||
|
vec![chat_completion::ChatCompletionMessage {
|
||||||
|
role: chat_completion::MessageRole::user,
|
||||||
|
content: chat_completion::Content::Text(String::from("Explain quantum computing in simple terms.")),
|
||||||
|
name: None,
|
||||||
|
tool_calls: None,
|
||||||
|
tool_call_id: None,
|
||||||
|
}],
|
||||||
|
);
|
||||||
|
|
||||||
|
// Set reasoning with high effort
|
||||||
|
req.reasoning = Some(Reasoning {
|
||||||
|
mode: Some(ReasoningMode::Effort {
|
||||||
|
effort: ReasoningEffort::High,
|
||||||
|
}),
|
||||||
|
exclude: Some(false), // Include reasoning in response
|
||||||
|
enabled: None,
|
||||||
|
});
|
||||||
|
|
||||||
|
let result = client.chat_completion(req).await?;
|
||||||
|
println!("Content: {:?}", result.choices[0].message.content);
|
||||||
|
|
||||||
|
// Example 2: Using reasoning with max_tokens
|
||||||
|
let mut req2 = ChatCompletionRequest::new(
|
||||||
|
"anthropic/claude-4-sonnet".to_string(), // Claude model that supports max_tokens
|
||||||
|
vec![chat_completion::ChatCompletionMessage {
|
||||||
|
role: chat_completion::MessageRole::user,
|
||||||
|
content: chat_completion::Content::Text(String::from("What's the most efficient sorting algorithm?")),
|
||||||
|
name: None,
|
||||||
|
tool_calls: None,
|
||||||
|
tool_call_id: None,
|
||||||
|
}],
|
||||||
|
);
|
||||||
|
|
||||||
|
// Set reasoning with max_tokens
|
||||||
|
req2.reasoning = Some(Reasoning {
|
||||||
|
mode: Some(ReasoningMode::MaxTokens {
|
||||||
|
max_tokens: 2000,
|
||||||
|
}),
|
||||||
|
exclude: None,
|
||||||
|
enabled: None,
|
||||||
|
});
|
||||||
|
|
||||||
|
let result2 = client.chat_completion(req2).await?;
|
||||||
|
println!("Content: {:?}", result2.choices[0].message.content);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
// OPENROUTER_API_KEY=xxxx cargo run --package openai-api-rs --example openrouter_reasoning
|
@ -15,6 +15,35 @@ pub enum ToolChoiceType {
|
|||||||
ToolChoice { tool: Tool },
|
ToolChoice { tool: Tool },
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Qualitative effort level for model reasoning.
///
/// Serialized in lowercase (e.g. `ReasoningEffort::High` -> `"high"`)
/// via `#[serde(rename_all = "lowercase")]`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum ReasoningEffort {
    Low,
    Medium,
    High,
}
|
||||||
|
|
||||||
|
/// How the reasoning budget is expressed: either a qualitative effort
/// level or an explicit token cap.
///
/// `#[serde(untagged)]` means there is no variant tag in the JSON; the
/// variant is inferred from which field is present (`effort` vs
/// `max_tokens`).
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(untagged)]
pub enum ReasoningMode {
    /// Effort-based budget; serializes as `{"effort": "low|medium|high"}`.
    Effort {
        effort: ReasoningEffort,
    },
    /// Explicit token budget; serializes as `{"max_tokens": N}`.
    MaxTokens {
        max_tokens: i64,
    },
}
|
||||||
|
|
||||||
|
/// The OpenRouter `reasoning` request parameter.
///
/// `mode` is `#[serde(flatten)]`ed, so its field (`effort` or
/// `max_tokens`) appears at the top level of the serialized object
/// alongside `exclude` and `enabled`.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Reasoning {
    // Effort- or token-based configuration, flattened into this object.
    #[serde(flatten)]
    pub mode: Option<ReasoningMode>,
    // When Some(false), reasoning is included in the response; Some(true)
    // presumably omits it — confirm against the OpenRouter API docs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exclude: Option<bool>,
    // Toggles reasoning on/off without configuring a mode (omitted from
    // the JSON when None).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
}
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||||
pub struct ChatCompletionRequest {
|
pub struct ChatCompletionRequest {
|
||||||
pub model: String,
|
pub model: String,
|
||||||
@ -50,6 +79,8 @@ pub struct ChatCompletionRequest {
|
|||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
#[serde(serialize_with = "serialize_tool_choice")]
|
#[serde(serialize_with = "serialize_tool_choice")]
|
||||||
pub tool_choice: Option<ToolChoiceType>,
|
pub tool_choice: Option<ToolChoiceType>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub reasoning: Option<Reasoning>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ChatCompletionRequest {
|
impl ChatCompletionRequest {
|
||||||
@ -72,6 +103,7 @@ impl ChatCompletionRequest {
|
|||||||
tools: None,
|
tools: None,
|
||||||
parallel_tool_calls: None,
|
parallel_tool_calls: None,
|
||||||
tool_choice: None,
|
tool_choice: None,
|
||||||
|
reasoning: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -92,7 +124,8 @@ impl_builder_methods!(
|
|||||||
seed: i64,
|
seed: i64,
|
||||||
tools: Vec<Tool>,
|
tools: Vec<Tool>,
|
||||||
parallel_tool_calls: bool,
|
parallel_tool_calls: bool,
|
||||||
tool_choice: ToolChoiceType
|
tool_choice: ToolChoiceType,
|
||||||
|
reasoning: Reasoning
|
||||||
);
|
);
|
||||||
|
|
||||||
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
|
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
|
||||||
@ -318,3 +351,80 @@ pub struct Tool {
|
|||||||
pub enum ToolType {
|
pub enum ToolType {
|
||||||
Function,
|
Function,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    /// An effort mode plus `exclude` flattens into a two-key object.
    #[test]
    fn test_reasoning_effort_serialization() {
        let reasoning = Reasoning {
            mode: Some(ReasoningMode::Effort { effort: ReasoningEffort::High }),
            exclude: Some(false),
            enabled: None,
        };

        assert_eq!(
            serde_json::to_value(&reasoning).unwrap(),
            json!({ "effort": "high", "exclude": false })
        );
    }

    /// A max-tokens mode plus `enabled` flattens into a two-key object.
    #[test]
    fn test_reasoning_max_tokens_serialization() {
        let reasoning = Reasoning {
            mode: Some(ReasoningMode::MaxTokens { max_tokens: 2000 }),
            exclude: None,
            enabled: Some(true),
        };

        assert_eq!(
            serde_json::to_value(&reasoning).unwrap(),
            json!({ "max_tokens": 2000, "enabled": true })
        );
    }

    /// The untagged mode deserializes from a flat `effort` field.
    #[test]
    fn test_reasoning_deserialization() {
        let parsed: Reasoning =
            serde_json::from_str(r#"{"effort": "medium", "exclude": true}"#).unwrap();

        match parsed.mode {
            Some(ReasoningMode::Effort { effort }) => {
                assert_eq!(effort, ReasoningEffort::Medium)
            }
            _ => panic!("Expected effort mode"),
        }
        assert_eq!(parsed.exclude, Some(true));
    }

    /// The request serializes reasoning under the `reasoning` key.
    #[test]
    fn test_chat_completion_request_with_reasoning() {
        let mut request = ChatCompletionRequest::new("gpt-4".to_string(), vec![]);

        request.reasoning = Some(Reasoning {
            mode: Some(ReasoningMode::Effort { effort: ReasoningEffort::Low }),
            exclude: None,
            enabled: None,
        });

        let value = serde_json::to_value(&request).unwrap();
        assert_eq!(value["reasoning"]["effort"], "low");
    }
}
|
||||||
|
Reference in New Issue
Block a user