Files changed (1) hide show
  1. README.md +5 -5
README.md CHANGED
@@ -26,14 +26,14 @@ datasets:
26
 
27
  <img src="./calme_3.png" alt="Calme-3 Models" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/>
28
 
29
- # MaziyarPanahi/calme-3.1-instruct-3b
30
 
31
  This model is an advanced iteration of the powerful `Qwen/Qwen2.5-3B`, specifically fine-tuned to enhance its capabilities in general-purpose domains.
32
 
33
 
34
  # ⚡ Quantized GGUF
35
 
36
- All GGUF models are available here: [MaziyarPanahi/calme-3.1-instruct-3b-GGUF](https://huggingface.co/MaziyarPanahi/calme-3.1-instruct-3b-GGUF)
37
 
38
 
39
  # 🏆 [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
@@ -68,7 +68,7 @@ from transformers import pipeline
68
  messages = [
69
  {"role": "user", "content": "Who are you?"},
70
  ]
71
- pipe = pipeline("text-generation", model="MaziyarPanahi/calme-3.1-instruct-3b")
72
  pipe(messages)
73
 
74
 
@@ -76,8 +76,8 @@ pipe(messages)
76
 
77
  from transformers import AutoTokenizer, AutoModelForCausalLM
78
 
79
- tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/calme-3.1-instruct-3b")
80
- model = AutoModelForCausalLM.from_pretrained("MaziyarPanahi/calme-3.1-instruct-3b")
81
  ```
82
 
83
 
 
26
 
27
  <img src="./calme_3.png" alt="Calme-3 Models" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/>
28
 
29
+ # MaziyarPanahi/calme-3.1-baguette-3b
30
 
31
  This model is an advanced iteration of the powerful `Qwen/Qwen2.5-3B`, specifically fine-tuned to enhance its capabilities in general-purpose domains.
32
 
33
 
34
  # ⚡ Quantized GGUF
35
 
36
+ All GGUF models are available here: [MaziyarPanahi/calme-3.1-baguette-3b-GGUF](https://huggingface.co/MaziyarPanahi/calme-3.1-baguette-3b-GGUF)
37
 
38
 
39
  # 🏆 [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
 
68
  messages = [
69
  {"role": "user", "content": "Who are you?"},
70
  ]
71
+ pipe = pipeline("text-generation", model="MaziyarPanahi/calme-3.1-baguette-3b")
72
  pipe(messages)
73
 
74
 
 
76
 
77
  from transformers import AutoTokenizer, AutoModelForCausalLM
78
 
79
+ tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/calme-3.1-baguette-3b")
80
+ model = AutoModelForCausalLM.from_pretrained("MaziyarPanahi/calme-3.1-baguette-3b")
81
  ```
82
 
83