Commit 602352ce authored by Neelay Shah, committed by GitHub
Browse files

chore: rename dynamo (#44)


Co-authored-by: Biswa Panda <biswa.panda@gmail.com>
parent ecf53ce2
......@@ -15,9 +15,9 @@
use anyhow::Ok;
use dynemo_llm::model_card::model::{ModelDeploymentCard, PromptContextMixin};
use dynemo_llm::preprocessor::prompt::PromptFormatter;
use dynemo_llm::protocols::openai::chat_completions::NvCreateChatCompletionRequest;
use dynamo_llm::model_card::model::{ModelDeploymentCard, PromptContextMixin};
use dynamo_llm::preprocessor::prompt::PromptFormatter;
use dynamo_llm::protocols::openai::chat_completions::NvCreateChatCompletionRequest;
use serde::{Deserialize, Serialize};
use hf_hub::{api::tokio::ApiBuilder, Cache, Repo, RepoType};
......
---
source: dynemo-llm/tests/openai_completions.rs
source: dynamo.llm/tests/openai_completions.rs
expression: request
---
{
......
---
source: dynemo-llm/tests/openai_completions.rs
source: dynamo.llm/tests/openai_completions.rs
description: "should have prompt, model, and logit_bias fields with the logits_bias having two key/value pairs"
expression: sample.request
---
......
---
source: dynemo-llm/tests/openai_completions.rs
source: dynamo.llm/tests/openai_completions.rs
description: "should have prompt, model, and temperature fields"
expression: sample.request
---
......
---
source: dynemo-llm/tests/openai_completions.rs
source: dynamo.llm/tests/openai_completions.rs
description: "should have prompt, model, and top_p fields"
expression: sample.request
---
......
---
source: dynemo-llm/tests/openai_completions.rs
source: dynamo.llm/tests/openai_completions.rs
description: "should have prompt, model, and stop fields"
expression: sample.request
---
......
---
source: dynemo-llm/tests/openai_completions.rs
source: dynamo.llm/tests/openai_completions.rs
description: "should have prompt, model, and stream fields"
expression: sample.request
---
......
---
source: dynemo-llm/tests/openai_completions.rs
source: dynamo.llm/tests/openai_completions.rs
description: should have only prompt and model fields
expression: sample.request
---
......
---
source: dynemo-llm/tests/preprocessor.rs
source: dynamo.llm/tests/preprocessor.rs
expression: formatted_prompt
info:
messages:
......
---
source: dynemo-llm/tests/preprocessor.rs
source: dynamo.llm/tests/preprocessor.rs
expression: formatted_prompt
info:
messages:
......
---
source: dynemo-llm/tests/preprocessor.rs
source: dynamo.llm/tests/preprocessor.rs
expression: formatted_prompt
info:
messages:
......
---
source: dynemo-llm/tests/preprocessor.rs
source: dynamo.llm/tests/preprocessor.rs
expression: formatted_prompt
info:
messages:
......
---
source: dynemo-llm/tests/preprocessor.rs
source: dynamo.llm/tests/preprocessor.rs
expression: formatted_prompt
info:
messages:
......
---
source: dynemo-llm/tests/preprocessor.rs
source: dynamo.llm/tests/preprocessor.rs
expression: formatted_prompt
info:
messages:
......
---
source: dynemo-llm/tests/preprocessor.rs
source: dynamo.llm/tests/preprocessor.rs
expression: formatted_prompt
info:
messages:
......
---
source: dynemo-llm/tests/preprocessor.rs
source: dynamo.llm/tests/preprocessor.rs
expression: formatted_prompt
info:
messages:
......
......@@ -25,8 +25,8 @@
//! in a hashmap. We will then use these hashes to test that the tokenizer is working correctly. This
//! will detect if upstream dependency changes result in different/new behavior.
use dynemo_llm::tokenizers::traits::{Decoder, Encoder};
use dynemo_llm::tokenizers::*;
use dynamo_llm::tokenizers::traits::{Decoder, Encoder};
use dynamo_llm::tokenizers::*;
use std::collections::HashMap;
use std::sync::Arc;
......
......@@ -696,7 +696,7 @@ dependencies = [
]
[[package]]
name = "dynemo-runtime"
name = "dynamo-runtime"
version = "0.2.1"
dependencies = [
"anyhow",
......
......@@ -14,7 +14,7 @@
# limitations under the License.
[package]
name = "dynemo-runtime"
name = "dynamo-runtime"
description = "Distributed Inference Framework"
readme = "README.md"
version = "0.2.1" # TODO: Centralize Version Automation
......@@ -23,7 +23,7 @@ authors = ["NVIDIA"]
license = "Apache-2.0"
homepage = "https://github.com/dynemo-ai/dynemo"
repository = "https://github.com/dynemo-ai/dynemo.git"
keywords = ["llm", "genai", "inference", "nvidia", "distributed", "dynemo"]
keywords = ["llm", "genai", "inference", "nvidia", "distributed", "dynamo"]
[features]
default = []
......
......@@ -81,8 +81,8 @@ impl RuntimeConfig {
pub(crate) fn figment() -> Figment {
Figment::new()
.merge(Serialized::defaults(RuntimeConfig::default()))
.merge(Toml::file("/opt/dynemo/defaults/runtime.toml"))
.merge(Toml::file("/opt/dynemo/etc/runtime.toml"))
.merge(Toml::file("/opt/dynamo/defaults/runtime.toml"))
.merge(Toml::file("/opt/dynamo/etc/runtime.toml"))
.merge(Env::prefixed("DYN_RUNTIME_").filter_map(|k| {
let full_key = format!("DYN_RUNTIME_{}", k.as_str());
// filters out empty environment variables
......@@ -97,8 +97,8 @@ impl RuntimeConfig {
/// Configuration is prioritized in the following order, where the last has the lowest priority:
/// 1. Environment variables (top priority)
/// TO DO: Add documentation for configuration files. Paths should be configurable.
/// 2. /opt/dynemo/etc/runtime.toml
/// 3. /opt/dynemo/defaults/runtime.toml (lowest priority)
/// 2. /opt/dynamo/etc/runtime.toml
/// 3. /opt/dynamo/defaults/runtime.toml (lowest priority)
///
/// Environment variables are prefixed with `DYN_RUNTIME_`
pub fn from_settings() -> Result<RuntimeConfig> {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment