From 6f79fa17f659e87501c061e20283251c0a389a29 Mon Sep 17 00:00:00 2001
From: "Jack Boswell (boswelja)"
Date: Tue, 21 Jan 2025 19:04:55 +1100
Subject: [PATCH] Fix doc warnings

---
 rust/src/config.rs                   | 2 +-
 rust/src/embeddings/cloud/cohere.rs  | 2 +-
 rust/src/embeddings/embed.rs         | 4 ++--
 rust/src/models/clip/mod.rs          | 4 ++--
 rust/src/models/clip/text_model.rs   | 4 ++--
 rust/src/models/clip/vision_model.rs | 4 ++--
 6 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/rust/src/config.rs b/rust/src/config.rs
index feb1276b..60ae0f73 100644
--- a/rust/src/config.rs
+++ b/rust/src/config.rs
@@ -47,7 +47,7 @@ pub struct TextEmbedConfig {
     /// Defaults to [SplittingStrategy::Sentence]
     pub splitting_strategy: Option<SplittingStrategy>,
     /// Allows overriding the embedder used when the splitting strategy is
-    /// [SplittingStrategy::Semantic]. Defaults to [JinaEmbedder::default()].
+    /// [SplittingStrategy::Semantic]. Defaults to JINA.
     pub semantic_encoder: Option<Arc<Embedder>>,
     /// When embedding a PDF, controls whether **o**ptical **c**haracter **r**ecognition is used on
     /// the PDF to extract text. This process involves rendering the PDF as a series of images, and
diff --git a/rust/src/embeddings/cloud/cohere.rs b/rust/src/embeddings/cloud/cohere.rs
index def8f4a8..9055695e 100644
--- a/rust/src/embeddings/cloud/cohere.rs
+++ b/rust/src/embeddings/cloud/cohere.rs
@@ -36,7 +36,7 @@ impl CohereEmbedder {
     ///
     /// # Arguments
     ///
-    /// * `model` - A string slice that holds the model to be used for embedding. Find available models at https://docs.cohere.com/docs/cohere-embed
+    /// * `model` - A string slice that holds the model to be used for embedding. Find available models at <https://docs.cohere.com/docs/cohere-embed>
     /// * `api_key` - An optional string slice that holds the API key for authenticating requests to the Cohere API.
     ///
     /// # Returns
diff --git a/rust/src/embeddings/embed.rs b/rust/src/embeddings/embed.rs
index 48f36a14..efa468c1 100644
--- a/rust/src/embeddings/embed.rs
+++ b/rust/src/embeddings/embed.rs
@@ -198,8 +198,8 @@ impl TextEmbedder {
     ///   - "cohere"
     ///
     /// * `model_id` - A string holds the model ID for the model to be used for embedding.
-    ///   - For OpenAI, find available models at https://platform.openai.com/docs/guides/embeddings/embedding-models
-    ///   - For Cohere, find available models at https://docs.cohere.com/docs/cohere-embed
+    ///   - For OpenAI, find available models at <https://platform.openai.com/docs/guides/embeddings/embedding-models>
+    ///   - For Cohere, find available models at <https://docs.cohere.com/docs/cohere-embed>
     /// * `api_key` - An optional string holds the API key for authenticating requests to the Cohere API. If not provided, it is taken from the environment variable
     ///   - For OpenAI, create environment variable `OPENAI_API_KEY`
     ///   - For Cohere, create environment variable `CO_API_KEY`
diff --git a/rust/src/models/clip/mod.rs b/rust/src/models/clip/mod.rs
index b8cc2b56..f7f931e2 100644
--- a/rust/src/models/clip/mod.rs
+++ b/rust/src/models/clip/mod.rs
@@ -3,8 +3,8 @@
 //! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
 //! pairs of images with related texts.
 //!
-//! https://github.com/openai/CLIP
-//! https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip
+//! <https://github.com/openai/CLIP>
+//! <https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip>
 use self::{
     text_model::{Activation, ClipTextTransformer},
     vision_model::ClipVisionTransformer,
diff --git a/rust/src/models/clip/text_model.rs b/rust/src/models/clip/text_model.rs
index fc9b5f23..a7fb49d0 100644
--- a/rust/src/models/clip/text_model.rs
+++ b/rust/src/models/clip/text_model.rs
@@ -3,8 +3,8 @@
 //! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
 //! pairs of images with related texts.
 //!
-//! https://github.com/openai/CLIP
-//! https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip
+//! <https://github.com/openai/CLIP>
+//! <https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip>
 
 use candle_core::{DType, Device, IndexOp, Result, Tensor, D};
 use candle_nn as nn;
diff --git a/rust/src/models/clip/vision_model.rs b/rust/src/models/clip/vision_model.rs
index 6a9cb934..86684b3e 100644
--- a/rust/src/models/clip/vision_model.rs
+++ b/rust/src/models/clip/vision_model.rs
@@ -3,8 +3,8 @@
 //! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
 //! pairs of images with related texts.
 //!
-//! https://github.com/openai/CLIP
-//! https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip
+//! <https://github.com/openai/CLIP>
+//! <https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip>
 
 use candle_core::{IndexOp, Result, Shape, Tensor, D};
 use candle_nn as nn;
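
Note: the recurring fix in this patch follows rustdoc's convention for the `rustdoc::bare_urls` lint, which emits "warning: this URL is not a hyperlink" for a bare URL in a doc comment; wrapping the URL in angle brackets turns it into a clickable link and silences the warning. A minimal sketch of the pattern, using a hypothetical item rather than code from this repository:

//! Example module documentation.
//!
//! See <https://github.com/openai/CLIP> for the reference implementation.
//! Writing the same URL without the angle brackets would trigger
//! `warning: this URL is not a hyperlink` under `rustdoc::bare_urls`.

/// A hypothetical item; the same link style applies inside `///` comments.
pub struct Example;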