"vscode:/vscode.git/clone" did not exist on "6919d707b6c1deb94964219c8943154b65517756"
Commit c06b95ff authored by Ryan McCormick, committed by GitHub
Browse files

ci: Add rust checks to missing directories (#239)


Signed-off-by: Ryan McCormick <rmccormick@nvidia.com>
parent 5f1af25a
......@@ -14,7 +14,9 @@
// limitations under the License.
use tempfile::tempdir;
use triton_distributed_llm::model_card::model::{ModelDeploymentCard, PromptFormatterArtifact, TokenizerKind};
use triton_distributed_llm::model_card::model::{
ModelDeploymentCard, PromptFormatterArtifact, TokenizerKind,
};
const HF_PATH: &str = "tests/data/sample-models/TinyLlama_v1.1";
......@@ -46,7 +48,6 @@ async fn test_tokenizer_from_hf_like_local_repo() {
// Verify tokenizer file was found
match mdc.tokenizer {
TokenizerKind::HfTokenizerJson(_) => (),
_ => panic!("Expected HfTokenizerJson"),
}
}
......
......@@ -16,9 +16,9 @@
use anyhow::Ok;
use serde::{Deserialize, Serialize};
use triton_llm::model_card::model::{ModelDeploymentCard, PromptContextMixin};
use triton_llm::preprocessor::prompt::PromptFormatter;
use triton_llm::protocols::openai::chat_completions::{
use triton_distributed_llm::model_card::model::{ModelDeploymentCard, PromptContextMixin};
use triton_distributed_llm::preprocessor::prompt::PromptFormatter;
use triton_distributed_llm::protocols::openai::chat_completions::{
ChatCompletionMessage, ChatCompletionRequest, Tool, ToolChoiceType,
};
......@@ -33,10 +33,9 @@ use std::path::PathBuf;
/// set in the environment variable `HF_TOKEN`.
/// The model is downloaded and cached in `tests/data/sample-models` directory.
/// make sure the token has access to `meta-llama/Llama-3.1-70B-Instruct` model
/// Returns `true` if a Hugging Face access token is available via the
/// `HF_TOKEN` environment variable.
///
/// Tests that download gated models (e.g. `meta-llama/Llama-3.1-70B-Instruct`)
/// call this first and skip when no token is configured.
fn check_hf_token() -> bool {
    // `var` returns Err both when the variable is unset and when it is not
    // valid Unicode; either way there is no usable token.
    std::env::var("HF_TOKEN").is_ok()
}
async fn make_mdc_from_repo(
......@@ -71,7 +70,7 @@ async fn maybe_download_model(local_path: &str, model: &str, revision: &str) ->
for file in &files_to_download {
downloaded_path = repo_builder.get(file).await.unwrap();
}
return downloaded_path.parent().unwrap().display().to_string();
downloaded_path.parent().unwrap().display().to_string()
}
async fn make_mdcs() -> Vec<ModelDeploymentCard> {
......
......@@ -27,12 +27,8 @@
use std::collections::HashMap;
use std::sync::Arc;
use triton_llm::protocols::TokenIdType;
use triton_llm::tokenizers::*;
use triton_llm::tokenizers::{
traits::{Decoder, Encoder, Tokenizer},
Encoding, Error, Result,
};
use triton_distributed_llm::tokenizers::traits::{Decoder, Encoder};
use triton_distributed_llm::tokenizers::*;
const TEST_PROMPTS: [&str; 4] = [
"deep learning is",
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.