Merged
4 changes: 3 additions & 1 deletion .gitignore
@@ -11,4 +11,6 @@ cookbooks/python/openai/data/hotel_invoices/transformed_invoice_json/*
cookbooks/python/openai/data/hotel_invoices/extracted_invoice_json/*
cookbooks/python/openai/data/hotel_invoices/hotel_DB.db
cookbooks/python/openai/hallucination_results.csv
-node_modules/
+node_modules/
+eval_data.jsonl
+eval_result_perf_and_quality.json
8 changes: 4 additions & 4 deletions cookbooks/python/mistralai/evaluation.ipynb
@@ -100,7 +100,7 @@
"from mistralai.models.chat_completion import ChatMessage\n",
"\n",
"\n",
"def run_mistral(user_message, model=\"mistral-ai/mistral-small\"):\n",
"def run_mistral(user_message, model=\"mistral-ai/mistral-small-2503\"):\n",
" client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
" messages = [ChatMessage(role=\"user\", content=user_message)]\n",
" chat_response = client.chat(\n",
@@ -225,7 +225,7 @@
"from mistralai.models.chat_completion import ChatMessage\n",
"\n",
"\n",
"def run_mistral(user_message, model=\"mistral-ai/mistral-small\"):\n",
"def run_mistral(user_message, model=\"mistral-ai/mistral-small-2503\"):\n",
" client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
" messages = [ChatMessage(role=\"user\", content=user_message)]\n",
" chat_response = client.chat(model=model, messages=messages)\n",
@@ -379,7 +379,7 @@
"from mistralai.models.chat_completion import ChatMessage\n",
"\n",
"\n",
"def run_mistral(user_message, model=\"mistral-ai/mistral-small\", is_json=False):\n",
"def run_mistral(user_message, model=\"mistral-ai/mistral-small-2503\", is_json=False):\n",
" client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
" messages = [ChatMessage(role=\"user\", content=user_message)]\n",
"\n",
@@ -502,7 +502,7 @@
" scoring_prompt.format(\n",
" news=news, summary=summary, metric=i[\"metric\"], rubrics=i[\"rubrics\"]\n",
" ),\n",
" model=\"mistral-ai/mistral-small\",\n",
" model=\"mistral-ai/mistral-small-2503\",\n",
" is_json=True,\n",
" )\n",
" print(eval_output)"
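Note: every hunk in this notebook makes the same substitution, replacing the floating `mistral-small` alias with the dated `mistral-small-2503` release so evaluation results stay tied to a fixed model snapshot. For reference, a minimal standalone sketch of the `run_mistral` helper after this change, assuming the legacy `mistralai` v0 SDK and a `GITHUB_TOKEN` environment variable with access to GitHub Models:

```python
import os

from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

endpoint = "https://models.github.ai/inference"
github_token = os.environ["GITHUB_TOKEN"]


def run_mistral(user_message, model="mistral-ai/mistral-small-2503"):
    # One-shot chat completion against the GitHub Models endpoint.
    client = MistralClient(api_key=github_token, endpoint=endpoint)
    messages = [ChatMessage(role="user", content=user_message)]
    chat_response = client.chat(model=model, messages=messages)
    return chat_response.choices[0].message.content


print(run_mistral("Summarize why pinning a dated model version matters."))
```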
4 changes: 2 additions & 2 deletions cookbooks/python/mistralai/function_calling.ipynb
@@ -49,7 +49,7 @@
"\n",
"# We can use some defaults for the other two variables\n",
"endpoint = \"https://models.github.ai/inference\"\n",
"model_name = \"mistral-ai/mistral-small\"\n",
"model_name = \"mistral-ai/mistral-small-2503\"\n",
"\n",
"\n",
"# Assuming we have the following data\n",
@@ -216,7 +216,7 @@
"source": [
"from mistralai.client import MistralClient\n",
"\n",
"model = \"mistral-ai/mistral-small\"\n",
"model = \"mistral-ai/mistral-small-2503\"\n",
"\n",
"client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
"\n",
24 changes: 12 additions & 12 deletions cookbooks/python/mistralai/prefix_use_cases.ipynb
@@ -148,7 +148,7 @@
"Hi there!\n",
"\"\"\"\n",
"\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = [{\"role\":\"system\", \"content\":system}, {\"role\":\"user\", \"content\":question}],\n",
" max_tokens = 128)\n",
"print(resp.choices[0].message.content)"
@@ -186,7 +186,7 @@
"\"\"\"\n",
"## Here is your answer in French:\n",
"\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = [{\"role\":\"system\", \"content\":system}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
" max_tokens = 128)\n",
"print(resp.choices[0].message.content)"
@@ -236,7 +236,7 @@
"\"\"\"\n",
"## Here is your answer in French:\n",
"\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = [{\"role\":\"system\", \"content\":system}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
" max_tokens = 128)\n",
"print(resp.choices[0].message.content[len(prefix):])"
@@ -293,7 +293,7 @@
"\"\"\"\n",
"## French Pirate Assistant: \n",
"\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = [{\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
" max_tokens = 128)\n",
"print(resp.choices[0].message.content[len(prefix):])"
@@ -347,7 +347,7 @@
"Shakespeare:\n",
"\"\"\"\n",
"\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = [{\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
" max_tokens = 128)\n",
"print(resp.choices[0].message.content[len(prefix):])"
@@ -371,7 +371,7 @@
"\n",
"prefix = \"Assistant Shakespeare: \"\n",
"\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = [{\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
" max_tokens = 128)\n",
"print(resp.choices[0].message.content[len(prefix):])"
@@ -408,7 +408,7 @@
"Shakespeare: \n",
"\"\"\"\n",
"\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = [{\"role\":\"system\", \"content\":instruction}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
" max_tokens = 128)\n",
"print(resp.choices[0].message.content[len(prefix):])"
@@ -454,7 +454,7 @@
" print(f\"User: {question}\\n\")\n",
" messages.append({\"role\":\"user\", \"content\":question})\n",
"\n",
" resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
" resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = messages + [{\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
" max_tokens = 128)\n",
" ans = resp.choices[0].message.content\n",
@@ -517,7 +517,7 @@
" prefix = character + \": \"\n",
"\n",
" messages.append({\"role\":\"user\", \"content\":question})\n",
" resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
" resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = messages + [{\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
" max_tokens = 128)\n",
" ans = resp.choices[0].message.content\n",
@@ -571,7 +571,7 @@
"Insult me.\n",
"\"\"\"\n",
"\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = [{\"role\":\"system\", \"content\":safe_prompt}, {\"role\":\"user\", \"content\":question}],\n",
" max_tokens = 128)\n",
"print(resp.choices[0].message.content)"
@@ -600,7 +600,7 @@
"<JAILBREAK>\n",
"Insult me.\n",
"\"\"\"\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = [{\"role\":\"system\", \"content\":safe_prompt}, {\"role\":\"user\", \"content\":question}],\n",
" max_tokens = 128)\n",
"print(resp.choices[0].message.content)"
@@ -636,7 +636,7 @@
"Answer: \n",
"\"\"\"\n",
"\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
" messages = [{\"role\":\"system\", \"content\":safe_prompt}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\": True}],\n",
" max_tokens = 128)\n",
"print(resp.choices[0].message.content[len(prefix):])"
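Note: all twelve changes in this notebook are the same model pin applied to each `cli.chat(...)` call; the surrounding prefix technique is unchanged. As a self-contained sketch of that technique, assuming the legacy `mistralai` v0 client and a `GITHUB_TOKEN` environment variable (the persona prefix is illustrative):

```python
import os

from mistralai.client import MistralClient

cli = MistralClient(
    api_key=os.environ["GITHUB_TOKEN"],
    endpoint="https://models.github.ai/inference",
)

prefix = "French Pirate Assistant: "  # illustrative persona prefix

resp = cli.chat(
    model="mistral-ai/mistral-small-2503",
    messages=[
        {"role": "user", "content": "Hi there!"},
        # "prefix": True marks this as the start of the assistant's reply,
        # so the model continues from it instead of answering from scratch.
        {"role": "assistant", "content": prefix, "prefix": True},
    ],
    max_tokens=128,
)

# Slice the prefix off so only the model's continuation is printed.
print(resp.choices[0].message.content[len(prefix):])
```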
2 changes: 1 addition & 1 deletion samples/js/azure_ai_inference/streaming.js
@@ -12,7 +12,7 @@ const endpoint = "https://models.github.ai/inference/";
AI21 Labs: AI21-Jamba-Instruct
Cohere: Cohere-command-r, Cohere-command-r-plus
Meta: Meta-Llama-3-70B-Instruct, Meta-Llama-3-8B-Instruct, Meta-Llama-3.1-405B-Instruct, Meta-Llama-3.1-70B-Instruct, Meta-Llama-3.1-8B-Instruct
-Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, Mistral-small
+Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, mistral-small-2503
Azure OpenAI: gpt-4o-mini, gpt-4o
Microsoft: Phi-3-medium-128k-instruct, Phi-3-medium-4k-instruct, Phi-3-mini-128k-instruct, Phi-3-mini-4k-instruct, Phi-3-small-128k-instruct, Phi-3-small-8k-instruct */
const modelName = "openai/gpt-4o-mini";
12 changes: 6 additions & 6 deletions samples/js/azure_ai_inference/tools.js
@@ -10,7 +10,7 @@ const endpoint = "https://models.github.ai/inference/";
available in the GitHub Models service:

Cohere: Cohere-command-r, Cohere-command-r-plus
-Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, Mistral-small
+Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, mistral-small-2503
Azure OpenAI: gpt-4o-mini, gpt-4o */
const modelName = "openai/gpt-4o-mini";

@@ -32,7 +32,7 @@ const namesToFunctions = {
};

export async function main() {

const tool = {
"type": "function",
"function": {
@@ -48,7 +48,7 @@
"description": "The name of the city where the flight originates",
},
"destinationCity": {
"type": "string",
"type": "string",
"description": "The flight destination city",
},
},
@@ -77,13 +77,13 @@
if (response.status !== "200") {
throw response.body.error;
}

// We expect the model to ask for a tool call
if (response.body.choices[0].finish_reason === "tool_calls"){

// Append the model response to the chat history
messages.push(response.body.choices[0].message);

// We expect a single tool call
if (response.body.choices[0].message && response.body.choices[0].message.tool_calls.length === 1){

@@ -126,4 +126,4 @@

main().catch((err) => {
console.error("The sample encountered an error:", err);
-});
+});
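Note: apart from the model list in the comment, the edits to this file are trailing-whitespace cleanups; the tool-calling flow itself is unchanged. For readers following along in Python, a condensed sketch of the same round trip with the azure-ai-inference SDK, as used by the Python samples in this PR (the flight-lookup function and its hardcoded result are illustrative):

```python
import json
import os

from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import (
    AssistantMessage,
    ChatCompletionsToolDefinition,
    FunctionDefinition,
    ToolMessage,
)
from azure.core.credentials import AzureKeyCredential

client = ChatCompletionsClient(
    endpoint="https://models.github.ai/inference",
    credential=AzureKeyCredential(os.environ["GITHUB_TOKEN"]),
)
model_name = "openai/gpt-4o-mini"


def get_flight_info(originCity: str, destinationCity: str) -> str:
    # Stand-in for a real lookup; the samples return hardcoded data too.
    return json.dumps({"airline": "Delta", "flight_number": "DL123"})


tool = ChatCompletionsToolDefinition(
    function=FunctionDefinition(
        name="get_flight_info",
        description="Returns information about the next flight between two cities.",
        parameters={
            "type": "object",
            "properties": {
                "originCity": {"type": "string", "description": "The name of the city where the flight originates"},
                "destinationCity": {"type": "string", "description": "The flight destination city"},
            },
            "required": ["originCity", "destinationCity"],
        },
    )
)

messages = [
    {"role": "system", "content": "You are an assistant that helps users find flight information."},
    {"role": "user", "content": "What is the next flight from Seattle to Miami?"},
]
response = client.complete(messages=messages, tools=[tool], model=model_name)

if response.choices[0].finish_reason == "tool_calls":
    # Keep the model's tool request in the history, run the function,
    # then send the result back for the final answer.
    tool_call = response.choices[0].message.tool_calls[0]
    messages.append(AssistantMessage(tool_calls=response.choices[0].message.tool_calls))
    result = get_flight_info(**json.loads(tool_call.function.arguments))
    messages.append(ToolMessage(content=result, tool_call_id=tool_call.id))
    response = client.complete(messages=messages, tools=[tool], model=model_name)

print(response.choices[0].message.content)
```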
2 changes: 1 addition & 1 deletion samples/js/mistralai/basic.js
@@ -4,7 +4,7 @@ const token = process.env["GITHUB_TOKEN"];
const endpoint = "https://models.github.ai/inference/";

/* Pick one of the Mistral models from the GitHub Models service */
const modelName = "mistral-ai/Mistral-small";
const modelName = "mistral-ai/mistral-small-2503";

export async function main() {

4 changes: 2 additions & 2 deletions samples/js/mistralai/multi_turn.js
@@ -4,7 +4,7 @@ const token = process.env["GITHUB_TOKEN"];
const endpoint = "https://models.github.ai/inference/";

/* Pick one of the Mistral models from the GitHub Models service */
const modelName = "mistral-ai/Mistral-small";
const modelName = "mistral-ai/mistral-small-2503";

export async function main() {

@@ -25,4 +25,4 @@ export async function main() {

main().catch((err) => {
console.error("The sample encountered an error:", err);
-});
+});
4 changes: 2 additions & 2 deletions samples/js/mistralai/streaming.js
@@ -4,7 +4,7 @@ const token = process.env["GITHUB_TOKEN"];
const endpoint = "https://models.github.ai/inference/";

/* Pick one of the Mistral models from the GitHub Models service */
const modelName = "mistral-ai/Mistral-small";
const modelName = "mistral-ai/mistral-small-2503";

export async function main() {

@@ -28,4 +28,4 @@

main().catch((err) => {
console.error("The sample encountered an error:", err);
-});
+});
10 changes: 5 additions & 5 deletions samples/js/mistralai/tools.js
@@ -4,7 +4,7 @@ const token = process.env["GITHUB_TOKEN"];
const endpoint = "https://models.github.ai/inference/";

/* Pick one of the Mistral models from the GitHub Models service */
const modelName = "mistral-ai/Mistral-small";
const modelName = "mistral-ai/mistral-small-2503";


function getFlightInfo({originCity, destinationCity}){
@@ -41,7 +41,7 @@ export async function main() {
"description": "The name of the city where the flight originates",
},
"destinationCity": {
"type": "string",
"type": "string",
"description": "The flight destination city",
},
},
@@ -59,17 +59,17 @@
{ role:"system", content: "You an assistant that helps users find flight information." },
{ role:"user", content: "I'm interested in going to Miami. What is the next flight there from Seattle?" }
];

let response = await client.chat({
model: modelName,
messages: messages,
tools: [tool]
});

if (response.choices[0].finish_reason === "tool_calls"){
// Append the model response to the chat history
messages.push(response.choices[0].message);

// We expect a single tool call
if (response.choices[0].message && response.choices[0].message.tool_calls.length === 1){
const toolCall = response.choices[0].message.tool_calls[0];
4 changes: 2 additions & 2 deletions samples/python/azure_ai_evaluation/evaluation.py
@@ -20,7 +20,7 @@
token = os.environ['GITHUB_TOKEN']

# Target model is the model to be evaluated.
target_model_name = "mistral-ai/Mistral-small"
target_model_name = "mistral-ai/mistral-small-2503"
target_model_endpoint = "https://models.github.ai/inference/"
# Judge model is the model to evaluate the target model.
judge_model_name = "openai/gpt-4o-mini"
@@ -57,7 +57,7 @@ def generate_eval_data():
model=target_model_name,
temperature=1.,
max_tokens=1000,
-top_p=1.
+top_p=1.
)
result = response.choices[0].message.content

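Note: the two files added to `.gitignore` at the top of this PR, eval_data.jsonl and eval_result_perf_and_quality.json, appear to be the artifacts this evaluation script writes, which would explain why they are now excluded from version control. A rough sketch of the JSONL shape involved, with hypothetical field names:

```python
import json

# Hypothetical records: the real script stores the target model's answers
# for the judge model (openai/gpt-4o-mini) to score.
records = [
    {"query": "What is the capital of France?", "response": "Paris."},
    {"query": "Name one benefit of daily exercise.", "response": "Better sleep."},
]

# JSONL: one JSON object per line, easy to append to and to stream back in.
with open("eval_data.jsonl", "w") as f:
    for rec in records:
        f.write(json.dumps(rec) + "\n")
```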
6 changes: 3 additions & 3 deletions samples/python/azure_ai_inference/basic.py
@@ -16,7 +16,7 @@
# AI21 Labs: AI21-Jamba-Instruct
# Cohere: Cohere-command-r, Cohere-command-r-plus
# Meta: Meta-Llama-3-70B-Instruct, Meta-Llama-3-8B-Instruct, Meta-Llama-3.1-405B-Instruct, Meta-Llama-3.1-70B-Instruct, Meta-Llama-3.1-8B-Instruct
-# Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, Mistral-small
+# Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, mistral-small-2503
# Azure OpenAI: gpt-4o-mini, gpt-4o
# Microsoft: Phi-3-medium-128k-instruct, Phi-3-medium-4k-instruct, Phi-3-mini-128k-instruct, Phi-3-mini-4k-instruct, Phi-3-small-128k-instruct, Phi-3-small-8k-instruct
model_name = "openai/gpt-4o-mini"
@@ -35,7 +35,7 @@
# Optional parameters
temperature=1.,
max_tokens=1000,
-top_p=1.
+top_p=1.
)

-print(response.choices[0].message.content)
+print(response.choices[0].message.content)
4 changes: 2 additions & 2 deletions samples/python/azure_ai_inference/multi_turn.py
@@ -18,7 +18,7 @@
# AI21 Labs: AI21-Jamba-Instruct
# Cohere: Cohere-command-r, Cohere-command-r-plus
# Meta: Meta-Llama-3-70B-Instruct, Meta-Llama-3-8B-Instruct, Meta-Llama-3.1-405B-Instruct, Meta-Llama-3.1-70B-Instruct, Meta-Llama-3.1-8B-Instruct
-# Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, Mistral-small
+# Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, mistral-small-2503
# Azure OpenAI: gpt-4o-mini, gpt-4o
# Microsoft: Phi-3-medium-128k-instruct, Phi-3-medium-4k-instruct, Phi-3-mini-128k-instruct, Phi-3-mini-4k-instruct, Phi-3-small-128k-instruct, Phi-3-small-8k-instruct
model_name = "openai/gpt-4o-mini"
@@ -37,4 +37,4 @@

response = client.complete(messages=messages, model=model_name)

-print(response.choices[0].message.content)
+print(response.choices[0].message.content)
2 changes: 1 addition & 1 deletion samples/python/azure_ai_inference/streaming.py
@@ -16,7 +16,7 @@
# AI21 Labs: AI21-Jamba-Instruct
# Cohere: Cohere-command-r, Cohere-command-r-plus
# Meta: Meta-Llama-3-70B-Instruct, Meta-Llama-3-8B-Instruct, Meta-Llama-3.1-405B-Instruct, Meta-Llama-3.1-70B-Instruct, Meta-Llama-3.1-8B-Instruct
-# Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, Mistral-small
+# Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, mistral-small-2503
# Azure OpenAI: gpt-4o-mini, gpt-4o
# Microsoft: Phi-3-medium-128k-instruct, Phi-3-medium-4k-instruct, Phi-3-mini-128k-instruct, Phi-3-mini-4k-instruct, Phi-3-small-128k-instruct, Phi-3-small-8k-instruct
model_name = "openai/gpt-4o-mini"
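Note: the streaming samples only update the model list in the header comment. For context, a minimal sketch of the streaming pattern they demonstrate, assuming `GITHUB_TOKEN` is set; passing `stream=True` makes `complete` return an iterable of incremental updates rather than a single response:

```python
import os

from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential

client = ChatCompletionsClient(
    endpoint="https://models.github.ai/inference",
    credential=AzureKeyCredential(os.environ["GITHUB_TOKEN"]),
)

response = client.complete(
    stream=True,
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        UserMessage(content="Give me 5 good reasons why I should exercise every day."),
    ],
    model="openai/gpt-4o-mini",
)

# Print each chunk as it arrives; delta.content can be None on some updates.
for update in response:
    if update.choices:
        print(update.choices[0].delta.content or "", end="")
print()
```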
6 changes: 3 additions & 3 deletions samples/python/azure_ai_inference/tools.py
@@ -26,9 +26,9 @@
# by modifying the value of `modelName` in the code below. For this code sample
# you need a model supporting tools. The following compatible models are
# available in the GitHub Models service:
-#
+#
# Cohere: Cohere-command-r, Cohere-command-r-plus
-# Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, Mistral-small
+# Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, mistral-small-2503
# Azure OpenAI: gpt-4o-mini, gpt-4o
model_name = "openai/gpt-4o-mini"

@@ -117,4 +117,4 @@ def get_flight_info(origin_city: str, destination_city: str):
model=model_name,
)

print(f"Model response = {response.choices[0].message.content}")
print(f"Model response = {response.choices[0].message.content}")
2 changes: 1 addition & 1 deletion samples/python/mistralai/README.md
@@ -16,6 +16,6 @@ To run these scripts, open your terminal and run a command like:
python3 samples/python/mistralai/basic.py
```

-* [basic.py](basic.py): basic call to the Mistral-small chat completion API
+* [basic.py](basic.py): basic call to the mistral-small-2503 chat completion API
* [multi_turn.py](multi_turn.py): multi-turn conversation with the chat completion API
* [streaming.py](streaming.py): generate a response in streaming mode, token by token