From d00e386600b08f1facbe601a1f3fb8750e24c368 Mon Sep 17 00:00:00 2001 From: Jeremy Chone Date: Mon, 6 Jan 2025 13:01:08 -0800 Subject: [PATCH] . proof-read comments (from devai) --- src/adapter/adapter_kind.rs | 2 +- .../adapters/anthropic/adapter_impl.rs | 20 +++++++++---------- src/adapter/adapters/anthropic/streamer.rs | 4 ++-- src/adapter/adapters/cohere/adapter_impl.rs | 2 +- src/adapter/adapters/cohere/streamer.rs | 4 ++-- src/adapter/adapters/deepseek/adapter_impl.rs | 2 +- src/adapter/adapters/gemini/streamer.rs | 8 ++++---- src/adapter/adapters/ollama/adapter_impl.rs | 8 ++++---- src/adapter/adapters/ollama/mod.rs | 2 +- src/adapter/adapters/openai/adapter_impl.rs | 4 ++-- src/adapter/adapters/openai/streamer.rs | 4 ++-- src/adapter/adapters/support.rs | 2 +- src/adapter/dispatcher.rs | 4 ++-- src/adapter/inter_stream.rs | 2 +- src/adapter/mod.rs | 2 +- src/chat/chat_options.rs | 6 +++--- src/chat/chat_req_response_format.rs | 6 +++--- src/chat/chat_request.rs | 12 +++++------ .../{chat_respose.rs => chat_response.rs} | 0 src/chat/chat_stream.rs | 6 +++--- src/chat/message_content.rs | 18 ++++++++--------- src/chat/mod.rs | 5 +++-- src/chat/printer.rs | 2 +- src/chat/tool/tool_base.rs | 6 +++--- src/chat/tool/tool_call.rs | 2 +- src/chat/tool/tool_response.rs | 4 ++-- src/client/client_impl.rs | 2 +- src/client/client_types.rs | 2 +- src/client/config.rs | 12 +++++------ src/common/model_iden.rs | 2 +- src/common/model_name.rs | 2 +- src/resolver/endpoint.rs | 2 +- src/resolver/mod.rs | 2 +- src/resolver/service_target_resolver.rs | 2 +- src/webc/mod.rs | 4 ++-- src/webc/web_stream.rs | 4 ++-- 36 files changed, 86 insertions(+), 85 deletions(-) rename src/chat/{chat_respose.rs => chat_response.rs} (100%) diff --git a/src/adapter/adapter_kind.rs b/src/adapter/adapter_kind.rs index 92df37e..8253b4c 100644 --- a/src/adapter/adapter_kind.rs +++ b/src/adapter/adapter_kind.rs @@ -109,7 +109,7 @@ impl AdapterKind { } else if GROQ_MODELS.contains(&model) { return 
Ok(Self::Groq); } - // for now, fallback to Ollama + // For now, fallback to Ollama else { Ok(Self::Ollama) } diff --git a/src/adapter/adapters/anthropic/adapter_impl.rs b/src/adapter/adapters/anthropic/adapter_impl.rs index 8cf23f9..456599c 100644 --- a/src/adapter/adapters/anthropic/adapter_impl.rs +++ b/src/adapter/adapters/anthropic/adapter_impl.rs @@ -22,7 +22,7 @@ pub struct AnthropicAdapter; const MAX_TOKENS_8K: u32 = 8192; const MAX_TOKENS_4K: u32 = 4096; -const ANTRHOPIC_VERSION: &str = "2023-06-01"; +const ANTHROPIC_VERSION: &str = "2023-06-01"; const MODELS: &[&str] = &[ "claude-3-5-sonnet-20241022", "claude-3-5-haiku-20241022", @@ -74,7 +74,7 @@ impl Adapter for AnthropicAdapter { let headers = vec![ // headers ("x-api-key".to_string(), api_key), - ("anthropic-version".to_string(), ANTRHOPIC_VERSION.to_string()), + ("anthropic-version".to_string(), ANTHROPIC_VERSION.to_string()), ]; let model_name = model.model_name.clone(); @@ -135,17 +135,17 @@ impl Adapter for AnthropicAdapter { let usage = body.x_take("usage").map(Self::into_usage).unwrap_or_default(); // -- Capture the content - // NOTE: Anthropic support a list of content of multitypes but not the ChatResponse + // NOTE: Anthropic supports a list of content of multiple types but not the ChatResponse // So, the strategy is to: // - List all of the content and capture the text and tool_use - // - If there is one or more tool_use, this will take precedence and MessageContent support tool_call list + // - If there is one or more tool_use, this will take precedence and MessageContent will support tool_call list // - Otherwise, the text is concatenated // NOTE: We need to see if the multiple content type text happens and why. If not, we can probably simplify this by just capturing the first one. 
// Eventually, ChatResponse will have `content: Option>` for the multi parts (with images and such) let content_items: Vec = body.x_take("content")?; let mut text_content: Vec = Vec::new(); - // Note: here tool_calls is probably the exception, so, not creating the vector if not needed + // Note: here tool_calls is probably the exception, so not creating the vector if not needed let mut tool_calls: Option> = None; for mut item in content_items { @@ -228,12 +228,12 @@ impl AnthropicAdapter { // -- Process the messages for msg in chat_req.messages { match msg.role { - // for now, system and tool messages go to system + // for now, system and tool messages go to the system ChatRole::System => { if let MessageContent::Text(content) = msg.content { systems.push(content) } - // TODO: Needs to trace/warn that other type are not supported + // TODO: Needs to trace/warn that other types are not supported } ChatRole::User => { let content = match msg.content { @@ -261,7 +261,7 @@ impl AnthropicAdapter { } // Use `match` instead of `if let`. 
This will allow to future-proof this // implementation in case some new message content types would appear, - // this way library would not compile if not all methods are implemented + // this way the library would not compile if not all methods are implemented // continue would allow to gracefully skip pushing unserializable message // TODO: Probably need to warn if it is a ToolCalls type of content MessageContent::ToolCalls(_) => continue, @@ -311,7 +311,7 @@ impl AnthropicAdapter { }) .collect::>(); - // FIXME: MessageContent::ToolResponse should be MessageContent::ToolResponses (even if openAI does require multi Tool message) + // FIXME: MessageContent::ToolResponse should be MessageContent::ToolResponses (even if OpenAI does require multi Tool message) messages.push(json!({ "role": "user", "content": tool_responses @@ -337,7 +337,7 @@ impl AnthropicAdapter { .map(|tool| { // TODO: Need to handle the error correctly // TODO: Needs to have a custom serializer (tool should not have to match to a provider) - // NOTE: Right now, low probability, so, we just return null if cannto to value. + // NOTE: Right now, low probability, so we just return null if we cannot convert it to a value. let mut tool_value = json!({ "name": tool.name, "input_schema": tool.schema, diff --git a/src/adapter/adapters/anthropic/streamer.rs b/src/adapter/adapters/anthropic/streamer.rs index d909c59..6a9a2b6 100644 --- a/src/adapter/adapters/anthropic/streamer.rs +++ b/src/adapter/adapters/anthropic/streamer.rs @@ -79,7 +79,7 @@ impl futures::Stream for AnthropicStreamer { } // -- END MESSAGE "message_stop" => { - // Make sure we do not poll the EventSource anymore on the next poll. + // Ensure we do not poll the EventSource anymore on the next poll. // NOTE: This way, the last MessageStop event is still sent, // but then, on the next poll, it will be stopped.
self.done = true; @@ -142,7 +142,7 @@ impl AnthropicStreamer { }; // -- Capture/Add the eventual input_tokens - // NOTE: Permissive on this one, if error, treat as nonexistent (for now) + // NOTE: Permissive on this one; if an error occurs, treat it as nonexistent (for now) if let Ok(input_tokens) = data.x_get::(input_path) { let val = self .captured_data diff --git a/src/adapter/adapters/cohere/adapter_impl.rs b/src/adapter/adapters/cohere/adapter_impl.rs index 1285fe6..23cb5ac 100644 --- a/src/adapter/adapters/cohere/adapter_impl.rs +++ b/src/adapter/adapters/cohere/adapter_impl.rs @@ -223,7 +223,7 @@ impl CohereAdapter { }; match msg.role { - // For now, system and tool go to the system + // For now, system and tool messages go to the system ChatRole::System => systems.push(content), ChatRole::User => chat_history.push(json! ({"role": "USER", "content": content})), ChatRole::Assistant => chat_history.push(json! ({"role": "CHATBOT", "content": content})), diff --git a/src/adapter/adapters/cohere/streamer.rs b/src/adapter/adapters/cohere/streamer.rs index a534c4c..82aafcf 100644 --- a/src/adapter/adapters/cohere/streamer.rs +++ b/src/adapter/adapters/cohere/streamer.rs @@ -71,7 +71,7 @@ impl futures::Stream for CohereStreamer { "stream-start" => InterStreamEvent::Start, "text-generation" => { if let Some(content) = cohere_message.text { - // Add to the captured_content if chat options allow it + // Add to the captured content if chat options allow it if self.options.capture_content { match self.captured_data.content { Some(ref mut c) => c.push_str(&content), @@ -110,7 +110,7 @@ impl futures::Stream for CohereStreamer { InterStreamEvent::End(inter_stream_end) } - _ => continue, // Skip the "Other" event + _ => continue, // Skip the "other" event }; return Poll::Ready(Some(Ok(inter_event))); diff --git a/src/adapter/adapters/deepseek/adapter_impl.rs b/src/adapter/adapters/deepseek/adapter_impl.rs index cb1853d..0905db8 100644 --- 
a/src/adapter/adapters/deepseek/adapter_impl.rs +++ b/src/adapter/adapters/deepseek/adapter_impl.rs @@ -15,7 +15,7 @@ impl DeepSeekAdapter { pub const API_KEY_DEFAULT_ENV_NAME: &str = "DEEPSEEK_API_KEY"; } -// The Groq API adapter is modeled after the OpenAI adapter, as the Groq API is compatible with the OpenAI API. +// The DeepSeek API adapter is modeled after the OpenAI adapter, as the DeepSeek API is compatible with the OpenAI API. impl Adapter for DeepSeekAdapter { fn default_endpoint() -> Endpoint { const BASE_URL: &str = "https://api.deepseek.com/v1/"; diff --git a/src/adapter/adapters/gemini/streamer.rs b/src/adapter/adapters/gemini/streamer.rs index bced2c0..4ca913b 100644 --- a/src/adapter/adapters/gemini/streamer.rs +++ b/src/adapter/adapters/gemini/streamer.rs @@ -13,7 +13,7 @@ pub struct GeminiStreamer { options: StreamerOptions, // -- Set by the poll_next - /// Flag to not poll the EventSource after a MessageStop event + /// Flag to not poll the EventSource after a MessageStop event. done: bool, captured_data: StreamerCapturedData, } @@ -41,7 +41,7 @@ impl futures::Stream for GeminiStreamer { while let Poll::Ready(item) = Pin::new(&mut self.inner).poll_next(cx) { match item { Some(Ok(raw_message)) => { - // This is the message sent by the WebStream in PrettyJsonArray mode + // This is the message sent by the WebStream in PrettyJsonArray mode. // - `[` document start // - `{...}` block // - `]` document end @@ -93,10 +93,10 @@ impl futures::Stream for GeminiStreamer { } } - // NOTE: Apparently in the Gemini API, all events have cumulative usage + // NOTE: Apparently in the Gemini API, all events have cumulative usage, // meaning each message seems to include the tokens for all previous streams. // Thus, we do not need to add it; we only need to replace captured_data.usage with the latest one. 
- // See https://twitter.com/jeremychone/status/1813734565967802859 for potential additional information + // See https://twitter.com/jeremychone/status/1813734565967802859 for potential additional information. if self.options.capture_usage { self.captured_data.usage = Some(usage); } diff --git a/src/adapter/adapters/ollama/adapter_impl.rs b/src/adapter/adapters/ollama/adapter_impl.rs index 1572af5..401f1d4 100644 --- a/src/adapter/adapters/ollama/adapter_impl.rs +++ b/src/adapter/adapters/ollama/adapter_impl.rs @@ -26,13 +26,13 @@ impl Adapter for OllamaAdapter { AuthData::from_single("ollama") } - /// Note 1: For now, this adapter is the only one making a full request to the ollama server - /// Note 2: Will the OpenAI API to talk to Ollam server (https://platform.openai.com/docs/api-reference/models/list) + /// Note 1: For now, this adapter is the only one making a full request to the Ollama server + /// Note 2: Use the OpenAI API to communicate with the Ollama server (https://platform.openai.com/docs/api-reference/models/list) /// /// TODO: This will use the default endpoint. - /// Later, we might add another function with a endpoint, so the the user can give an custom endpoint. + /// Later, we might add another function with an endpoint, so the user can provide a custom endpoint. async fn all_model_names(adapter_kind: AdapterKind) -> Result> { - // FIXME: This is harcoded to the default endpoint, should take endpoint as Argument + // FIXME: This is hardcoded to the default endpoint; it should take the endpoint as an argument. let endpoint = Self::default_endpoint(); let base_url = endpoint.base_url(); let url = format!("{base_url}models"); diff --git a/src/adapter/adapters/ollama/mod.rs b/src/adapter/adapters/ollama/mod.rs index 62b9955..db6337d 100644 --- a/src/adapter/adapters/ollama/mod.rs +++ b/src/adapter/adapters/ollama/mod.rs @@ -1,5 +1,5 @@ //! OPENAI API DOC: https://platform.openai.com/docs/api-reference/chat -//! 
NOTE: Currently, genai uses the OpenAI compatibility layer, except for listing models. +//! NOTE: Currently, `genai` uses the OpenAI compatibility layer, except for listing models. //! OLLAMA API DOC: https://github.com/ollama/ollama/blob/main/docs/api.md // region: --- Modules diff --git a/src/adapter/adapters/openai/adapter_impl.rs b/src/adapter/adapters/openai/adapter_impl.rs index 0fd2f7d..66ee7f2 100644 --- a/src/adapter/adapters/openai/adapter_impl.rs +++ b/src/adapter/adapters/openai/adapter_impl.rs @@ -327,7 +327,7 @@ impl OpenAIAdapter { .map(|tool| { // TODO: Need to handle the error correctly // TODO: Needs to have a custom serializer (tool should not have to match to a provider) - // NOTE: Right now, low probability, so, we just return null if cannto to value. + // NOTE: Right now, low probability, so, we just return null if we cannot convert it to a value. json!({ "type": "function", "function": { @@ -387,7 +387,7 @@ fn parse_tool_call(raw_tool_call: Value) -> Result { let fn_name = iterim.function.name; - // For now support Object only, and parse the eventual string as a json value. + // For now, support Object only, and parse the eventual string as a JSON value. // Eventually, we might check pricing let fn_arguments = match iterim.function.arguments { Value::Object(obj) => Value::Object(obj), diff --git a/src/adapter/adapters/openai/streamer.rs b/src/adapter/adapters/openai/streamer.rs index 8315575..5079507 100644 --- a/src/adapter/adapters/openai/streamer.rs +++ b/src/adapter/adapters/openai/streamer.rs @@ -82,7 +82,7 @@ impl futures::Stream for OpenAIStreamer { // If finish_reason exists, it's the end of this choice.
// Since we support only a single choice, we can proceed, // as there might be other messages, and the last one contains data: `[DONE]` - // NOTE: xAI have no `finish_reason` when not finished, so, need to just account for both null/absent + // NOTE: xAI has no `finish_reason` when not finished, so we need to account for both null and absent if let Ok(_finish_reason) = first_choice.x_take::("finish_reason") { // NOTE: For Groq, the usage is captured when finish_reason indicates stopping, and in the `/x_groq/usage` if self.options.capture_usage { @@ -101,7 +101,7 @@ .unwrap_or_default(); self.captured_data.usage = Some(usage) } - _ => (), // do nothing, will be captured the OpenAi way + _ => (), // do nothing, will be captured the OpenAI way } } diff --git a/src/adapter/adapters/support.rs b/src/adapter/adapters/support.rs index 967a108..f423224 100644 --- a/src/adapter/adapters/support.rs +++ b/src/adapter/adapters/support.rs @@ -1,4 +1,4 @@ -//! This support model is for common constructs and utilities for all of the adapter implementations. +//! This support module is for common constructs and utilities for all the adapter implementations. //! It should be private to the `crate::adapter::adapters` module. use crate::chat::{ChatOptionsSet, MetaUsage}; diff --git a/src/adapter/dispatcher.rs b/src/adapter/dispatcher.rs index 6b4df03..350892f 100644 --- a/src/adapter/dispatcher.rs +++ b/src/adapter/dispatcher.rs @@ -17,7 +17,7 @@ use crate::resolver::{AuthData, Endpoint}; /// A construct that allows dispatching calls to the Adapters. /// -/// Note 1: This struct does not need to implement the Adapter trait, as some of its methods take the adapter_kind as a parameter. +/// Note 1: This struct does not need to implement the Adapter trait, as some of its methods take the `adapter_kind` as a parameter. /// /// Note 2: This struct might be renamed to avoid confusion with the traditional Rust dispatcher pattern.
pub struct AdapterDispatcher; @@ -118,7 +118,7 @@ impl AdapterDispatcher { AdapterKind::OpenAI => OpenAIAdapter::to_chat_stream(model_iden, reqwest_builder, options_set), AdapterKind::Anthropic => AnthropicAdapter::to_chat_stream(model_iden, reqwest_builder, options_set), AdapterKind::Cohere => CohereAdapter::to_chat_stream(model_iden, reqwest_builder, options_set), - AdapterKind::Ollama => OpenAIAdapter::to_chat_stream(model_iden, reqwest_builder, options_set), + AdapterKind::Ollama => OllamaAdapter::to_chat_stream(model_iden, reqwest_builder, options_set), AdapterKind::Gemini => GeminiAdapter::to_chat_stream(model_iden, reqwest_builder, options_set), AdapterKind::Groq => GroqAdapter::to_chat_stream(model_iden, reqwest_builder, options_set), AdapterKind::Xai => XaiAdapter::to_chat_stream(model_iden, reqwest_builder, options_set), diff --git a/src/adapter/inter_stream.rs b/src/adapter/inter_stream.rs index 0b56bfe..dbe7078 100644 --- a/src/adapter/inter_stream.rs +++ b/src/adapter/inter_stream.rs @@ -1,4 +1,4 @@ -//! Internal stream event types that serve as an intermediary between the provider event and the GenAI stream event. +//! Internal stream event types that serve as intermediaries between the provider event and the GenAI stream event. //! //! This allows for flexibility if we want to capture events across providers that do not need to //! be reflected in the public ChatStream event. diff --git a/src/adapter/mod.rs b/src/adapter/mod.rs index 88ae3b4..6ff8bcf 100644 --- a/src/adapter/mod.rs +++ b/src/adapter/mod.rs @@ -1,4 +1,4 @@ -//! The Adapter layer allows adapting client requests/responses to various AI Providers. +//! The Adapter layer allows adapting client requests/responses to various AI providers. //! Currently, it employs a static dispatch pattern with the `Adapter` trait and `AdapterDispatcher` implementation. //! Adapter implementations are organized by adapter type under the `adapters` submodule. //! 
diff --git a/src/chat/chat_options.rs b/src/chat/chat_options.rs index eaf7222..f3962a5 100644 --- a/src/chat/chat_options.rs +++ b/src/chat/chat_options.rs @@ -1,6 +1,6 @@ //! ChatOptions allows customization of a chat request. //! - It can be provided at the `client::exec_chat(..)` level as an argument, -//! - or set in the client config `client_config.with_chat_options(..)` to be used as default for all requests +//! - or set in the client config `client_config.with_chat_options(..)` to be used as the default for all requests //! //! Note 1: In the future, we will probably allow setting the client //! Note 2: Extracting it from the `ChatRequest` object allows for better reusability of each component. @@ -9,7 +9,7 @@ use crate::chat::chat_req_response_format::ChatResponseFormat; use serde::{Deserialize, Serialize}; use std::ops::Deref; -/// Chat Options that are taken into account for any `Client::exec...` calls. +/// Chat Options that are considered for any `Client::exec...` calls. /// /// A fallback `ChatOptions` can also be set at the `Client` during the client builder phase /// `` @@ -39,7 +39,7 @@ pub struct ChatOptions { /// NOTE: More response formats are coming soon. pub response_format: Option, - /// Specifies sequences used as end marker when generating text + /// Specifies sequences used as end markers when generating text pub stop_sequences: Vec, } diff --git a/src/chat/chat_req_response_format.rs b/src/chat/chat_req_response_format.rs index 2f31424..9228d52 100644 --- a/src/chat/chat_req_response_format.rs +++ b/src/chat/chat_req_response_format.rs @@ -5,7 +5,7 @@ use serde_json::Value; /// The chat response format for the ChatRequest for structured output. /// This will be taken into consideration only if the provider supports it. /// -/// > Note: Currently, the AI Providers will not report an error if not supported. It will just be ignored. +/// > Note: Currently, the AI Providers do not report an error if not supported; it will just be ignored. 
/// > This may change in the future. #[derive(Debug, Clone, From, Serialize, Deserialize)] pub enum ChatResponseFormat { @@ -21,10 +21,10 @@ pub enum ChatResponseFormat { /// The JSON specification for the structured output format. #[derive(Debug, Clone, From, Serialize, Deserialize)] pub struct JsonSpec { - /// The name of the spec. Mostly used by OpenAI. + /// The name of the specification. Mostly used by OpenAI. /// IMPORTANT: With OpenAI, this cannot contain any spaces or special characters besides `-` and `_`. pub name: String, - /// The description of the JSON spec. Mostly used by OpenAI adapters (future). + /// The description of the JSON specification. Mostly used by OpenAI adapters (future). /// NOTE: Currently ignored in the OpenAI adapter. pub description: Option, diff --git a/src/chat/chat_request.rs b/src/chat/chat_request.rs index 9a956c3..1c32043 100644 --- a/src/chat/chat_request.rs +++ b/src/chat/chat_request.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; /// The Chat request when performing a direct `Client::` #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct ChatRequest { - /// The initial system of the request. + /// The initial system content of the request. pub system: Option, /// The messages of the request. @@ -28,7 +28,7 @@ impl ChatRequest { } } - /// From the `.system` property content. + /// Create a ChatRequest from the `.system` property content. pub fn from_system(content: impl Into) -> Self { Self { system: Some(content.into()), @@ -46,7 +46,7 @@ impl ChatRequest { } } - /// Create a new request from messages + /// Create a new request from messages. pub fn from_messages(messages: Vec) -> Self { Self { system: None, @@ -97,7 +97,7 @@ impl ChatRequest { .chain(self.messages.iter().filter_map(|message| match message.role { ChatRole::System => match message.content { MessageContent::Text(ref content) => Some(content.as_str()), - // If system content is not text, then, we do not add it for now. 
+ // If system content is not text, then we do not add it for now. _ => None, }, _ => None, @@ -116,12 +116,12 @@ impl ChatRequest { for system in self.iter_systems() { let systems_content = systems.get_or_insert_with(|| "".to_string()); - // add eventual separator + // Add eventual separator if systems_content.ends_with('\n') { systems_content.push('\n'); } else if !systems_content.is_empty() { systems_content.push_str("\n\n"); - } // do not add any empty line if previous content is empty + } // Do not add any empty line if previous content is empty systems_content.push_str(system); } diff --git a/src/chat/chat_respose.rs b/src/chat/chat_response.rs similarity index 100% rename from src/chat/chat_respose.rs rename to src/chat/chat_response.rs diff --git a/src/chat/chat_stream.rs b/src/chat/chat_stream.rs index fc1699a..19d0b1b 100644 --- a/src/chat/chat_stream.rs +++ b/src/chat/chat_stream.rs @@ -58,14 +58,14 @@ impl Stream for ChatStream { /// The normalized chat stream event for any provider when calling `Client::exec`. #[derive(Debug, From, Serialize, Deserialize)] pub enum ChatStreamEvent { - /// Represents the start of the stream. First event. + /// Represents the start of the stream. The first event. Start, - /// Represents each chunk response. Currently only contains text content. + /// Represents each chunk response. Currently, it only contains text content. Chunk(StreamChunk), /// Represents the end of the stream. - /// Will have the `.captured_usage` and `.captured_content` if specified in the `ChatOptions`. + /// It will have the `.captured_usage` and `.captured_content` if specified in the `ChatOptions`. 
End(StreamEnd), } diff --git a/src/chat/message_content.rs b/src/chat/message_content.rs index 6fe5fb7..b09932c 100644 --- a/src/chat/message_content.rs +++ b/src/chat/message_content.rs @@ -15,7 +15,7 @@ pub enum MessageContent { #[from] ToolCalls(Vec), - /// Tool call Responses + /// Tool call responses #[from] ToolResponses(Vec), } @@ -43,7 +43,7 @@ impl MessageContent { /// Returns the MessageContent as &str, only if it is MessageContent::Text /// Otherwise, it returns None. /// - /// NOTE: When multi parts content, this will return None and won't concatenate the text parts. + /// NOTE: When multi-part content is present, this will return None and won't concatenate the text parts. pub fn text_as_str(&self) -> Option<&str> { match self { MessageContent::Text(content) => Some(content.as_str()), @@ -56,7 +56,7 @@ impl MessageContent { /// Consumes the MessageContent and returns it as &str, /// only if it is MessageContent::Text; otherwise, it returns None. /// - /// NOTE: When multi parts content, this will return None and won't concatenate the text parts. + /// NOTE: When multi-part content is present, this will return None and won't concatenate the text parts. pub fn text_into_string(self) -> Option { match self { MessageContent::Text(content) => Some(content), @@ -66,7 +66,7 @@ impl MessageContent { } } - /// Checks if the text content or the tools calls is empty. + /// Checks if the text content or the tool calls are empty. pub fn is_empty(&self) -> bool { match self { MessageContent::Text(content) => content.is_empty(), @@ -150,18 +150,18 @@ impl<'a> From<&'a str> for ContentPart { #[derive(Debug, Clone, Serialize, Deserialize)] pub enum ImageSource { - /// For model/services that support URL as input + /// For models/services that support URL as input /// NOTE: Few AI services support this. Url(String), /// The base64 string of the image /// - /// Note: Here we use an Arc to avoid cloning large amounts of data when cloning a ChatRequest. 
+ /// NOTE: Here we use an Arc to avoid cloning large amounts of data when cloning a ChatRequest. /// The overhead is minimal compared to cloning relatively large data. /// The downside is that it will be an Arc even when used only once, but for this particular data type, the net benefit is positive. Base64(Arc), } -// No `Local` location, this would require handling errors like "file not found" etc. -// Such file can be easily provided by user as Base64, also can implement convenient -// TryFrom to Base64 version. All LLMs accepts local Images only as Base64 +// No `Local` location; this would require handling errors like "file not found" etc. +// Such a file can be easily provided by the user as Base64, and we can implement a convenient +// TryFrom to Base64 version. All LLMs accept local images only as Base64. diff --git a/src/chat/mod.rs b/src/chat/mod.rs index ddb6ed5..779dd5e 100644 --- a/src/chat/mod.rs +++ b/src/chat/mod.rs @@ -1,13 +1,14 @@ //! The genai chat module contains all of the constructs necessary //! to make genai requests with the `genai::Client`. + // region: --- Modules mod chat_message; mod chat_options; mod chat_req_response_format; mod chat_request; -mod chat_respose; +mod chat_response; mod chat_stream; mod message_content; mod tool; @@ -17,7 +18,7 @@ pub use chat_message::*; pub use chat_options::*; pub use chat_req_response_format::*; pub use chat_request::*; -pub use chat_respose::*; +pub use chat_response::*; pub use chat_stream::*; pub use message_content::*; pub use tool::*; diff --git a/src/chat/printer.rs b/src/chat/printer.rs index 31f1660..32e51dd 100644 --- a/src/chat/printer.rs +++ b/src/chat/printer.rs @@ -112,7 +112,7 @@ async fn print_chat_stream_inner( // making the main crate error aware of the different error types would be unnecessary. 
// // Note 2: This Printer Error is not wrapped in the main crate error because the printer -// functions are not used by any other crate function (they are more of a debug utility) +// functions are not used by any other crate functions (they are more of a debug utility) use derive_more::From; diff --git a/src/chat/tool/tool_base.rs b/src/chat/tool/tool_base.rs index 5d08013..d928486 100644 --- a/src/chat/tool/tool_base.rs +++ b/src/chat/tool/tool_base.rs @@ -7,10 +7,10 @@ pub struct Tool { /// e.g., `get_weather` pub name: String, - /// The description of the tool which will be used by the LLM to understand the context/usage of this tool + /// The description of the tool that will be used by the LLM to understand the context/usage of this tool pub description: Option, - /// The json-schema for the parameters + /// The JSON schema for the parameters /// e.g., /// ```json /// json!({ @@ -27,7 +27,7 @@ pub struct Tool { /// "unit": { /// "type": "string", /// "enum": ["C", "F"], - /// "description": "The temperature unit of the country. C for Celsius, and F for Fahrenheit" + /// "description": "The temperature unit for the country. C for Celsius, and F for Fahrenheit" /// } /// }, /// "required": ["city", "country", "unit"], diff --git a/src/chat/tool/tool_call.rs b/src/chat/tool/tool_call.rs index 7e89432..82acc4f 100644 --- a/src/chat/tool/tool_call.rs +++ b/src/chat/tool/tool_call.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; -/// The tool call function name and arguments send back by the LLM. +/// The tool call function name and arguments sent back by the LLM. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct ToolCall { pub call_id: String, diff --git a/src/chat/tool/tool_response.rs b/src/chat/tool/tool_response.rs index e1ca697..ffeaec6 100644 --- a/src/chat/tool/tool_response.rs +++ b/src/chat/tool/tool_response.rs @@ -3,11 +3,11 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ToolResponse { pub call_id: String, - // for now, just string (would probably be serialized json) + // For now, just a string (would probably be serialized JSON) pub content: String, } -/// constructor +/// Constructor impl ToolResponse { pub fn new(tool_call_id: impl Into, content: impl Into) -> Self { Self { diff --git a/src/client/client_impl.rs b/src/client/client_impl.rs index 92563f8..4f1dc2f 100644 --- a/src/client/client_impl.rs +++ b/src/client/client_impl.rs @@ -29,7 +29,7 @@ impl Client { Ok(model_iden) } - #[deprecated(note = "use `client.resolve_service_target(model_name)")] + #[deprecated(note = "use `client.resolve_service_target(model_name)`")] pub fn resolve_model_iden(&self, model_name: &str) -> Result { let model = self.default_model(model_name)?; let target = self.config().resolve_service_target(model)?; diff --git a/src/client/client_types.rs b/src/client/client_types.rs index 5618a74..ef1a7c6 100644 --- a/src/client/client_types.rs +++ b/src/client/client_types.rs @@ -4,7 +4,7 @@ use crate::ClientBuilder; use std::sync::Arc; /// genai Client for executing AI requests to any providers. -/// Build with: +/// Built with: /// - `ClientBuilder::default()...build()` /// - or `Client::builder()`, which is equivalent to `ClientBuilder::default()...build()` #[derive(Debug, Clone)] diff --git a/src/client/config.rs b/src/client/config.rs index 1fbd217..a4f34fb 100644 --- a/src/client/config.rs +++ b/src/client/config.rs @@ -17,7 +17,7 @@ pub struct ClientConfig { impl ClientConfig { /// Set the AuthResolver for the ClientConfig. 
/// Note: This will be called before the `service_target_resolver`, and if registered - /// the `service_target_resolver` will get this new value. + /// the `service_target_resolver` will receive this new value. pub fn with_auth_resolver(mut self, auth_resolver: AuthResolver) -> Self { self.auth_resolver = Some(auth_resolver); self @@ -25,7 +25,7 @@ impl ClientConfig { /// Set the ModelMapper for the ClientConfig. /// Note: This will be called before the `service_target_resolver`, and if registered - /// the `service_target_resolver` will get this new value. + /// the `service_target_resolver` will receive this new value. pub fn with_model_mapper(mut self, model_mapper: ModelMapper) -> Self { self.model_mapper = Some(model_mapper); self @@ -33,8 +33,8 @@ impl ClientConfig { /// Set the ServiceTargetResolver for this client config. /// - /// A ServiceTargetResolver is the last step before execution allowing the users full - /// control of the resolved Endpoint, AuthData, and ModelIden + /// A ServiceTargetResolver is the last step before execution, allowing the users full + /// control of the resolved Endpoint, AuthData, and ModelIden. pub fn with_service_target_resolver(mut self, service_target_resolver: ServiceTargetResolver) -> Self { self.service_target_resolver = Some(service_target_resolver); self @@ -91,12 +91,12 @@ impl ClientConfig { resolver_error, }) }) - .transpose()? // return error if there is an error on auth resolver + .transpose()? // return an error if there is an error with the auth resolver .flatten() .unwrap_or_else(|| AdapterDispatcher::default_auth(model.adapter_kind)); // flatten the two options // -- Get the default endpoint - // For now, just get the default endpoint, the `resolve_target` will allow to override it + // For now, just get the default endpoint; the `resolve_target` will allow overriding it. 
let endpoint = AdapterDispatcher::default_endpoint(model.adapter_kind); // -- Resolve the service_target diff --git a/src/common/model_iden.rs b/src/common/model_iden.rs index 69cd20f..5a7c531 100644 --- a/src/common/model_iden.rs +++ b/src/common/model_iden.rs @@ -5,7 +5,7 @@ use crate::ModelName; /// Holds the adapter kind and model name in an efficient, clonable way. /// -/// This struct is used to represent the association between an adapter kind +/// This struct represents the association between an adapter kind /// and a model name, allowing for easy conversion and instantiation. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ModelIden { diff --git a/src/common/model_name.rs b/src/common/model_name.rs index ea6f5d4..f2641bc 100644 --- a/src/common/model_name.rs +++ b/src/common/model_name.rs @@ -22,7 +22,7 @@ impl From for String { } // NOTE: Below we avoid the `T: Into` blanket implementation because -// it would prevent us from having the `From for String` as `ModelName` +// it would prevent us from having the `From for String` implementation since `ModelName` // also implements `T: Into` from its deref to `&str` impl From for ModelName { diff --git a/src/resolver/endpoint.rs b/src/resolver/endpoint.rs index 7be5380..6e37610 100644 --- a/src/resolver/endpoint.rs +++ b/src/resolver/endpoint.rs @@ -2,7 +2,7 @@ use std::sync::Arc; /// A construct to store the endpoint of a service. /// It is designed to be efficiently clonable. -/// For now, it just supports `base_url` but later might have other URLs per "service name". +/// For now, it supports only `base_url`, but it may later have other URLs per "service name". #[derive(Debug, Clone)] pub struct Endpoint { inner: EndpointInner, diff --git a/src/resolver/mod.rs b/src/resolver/mod.rs index 8dd3eea..ea4bbd9 100644 --- a/src/resolver/mod.rs +++ b/src/resolver/mod.rs @@ -1,5 +1,5 @@ //! Resolvers are hooks that library users can set to customize aspects of the library's default behavior. -//! 
A good example for now is the AuthResolver, which provides the authentication data (e.g., api_key). +//! A good example is the AuthResolver, which provides the authentication data (e.g., api_key). //! //! Eventually, the library will have more resolvers. diff --git a/src/resolver/service_target_resolver.rs b/src/resolver/service_target_resolver.rs index ff7c53d..a0a6d80 100644 --- a/src/resolver/service_target_resolver.rs +++ b/src/resolver/service_target_resolver.rs @@ -1,5 +1,5 @@ //! A `ServiceTargetResolver` is responsible for returning the `ServiceTarget`. -//! It allows users to customize/override the service target properties. +//! It allows users to customize or override the service target properties. //! //! It can take the following forms: //! - Contains a fixed service target value, diff --git a/src/webc/mod.rs b/src/webc/mod.rs index 6affef9..5c5e0d1 100644 --- a/src/webc/mod.rs +++ b/src/webc/mod.rs @@ -4,14 +4,14 @@ mod error; mod web_client; -// for when not `text/event-stream` +// For when not using `text/event-stream` mod web_stream; pub(crate) use error::Result; pub(crate) use web_client::*; pub(crate) use web_stream::*; -// only public for external use +// Only public for external use pub use error::Error; // endregion: --- Modules diff --git a/src/webc/web_stream.rs b/src/webc/web_stream.rs index ab6eea2..8c9c85c 100644 --- a/src/webc/web_stream.rs +++ b/src/webc/web_stream.rs @@ -19,7 +19,7 @@ pub struct WebStream { reqwest_builder: Option, response_future: Option>> + Send>>>, bytes_stream: Option>> + Send>>>, - // If a poll was a partial message, then we kept the previous part + // If a poll was a partial message, then we keep the previous part partial_message: Option, // If a poll retrieved multiple messages, we keep them to be sent in the next poll remaining_messages: Option>, @@ -206,7 +206,7 @@ fn new_with_pretty_json_array( messages.push(array_end.to_string()); } - // -- Return the buf response + // -- Return the buffer response let
first_message = if !messages.is_empty() { Some(messages[0].to_string()) } else {