Robzy committed
Commit 56659bb · 1 Parent(s): 9f76391

local version done

Files changed (5)
  1. .gitignore +2 -0
  2. app.py +19 -74
  3. local-requirements.txt +2 -1
  4. local.ipynb +575 -17
  5. requirements.txt +2 -2
.gitignore ADDED
@@ -0,0 +1,2 @@
+ .venv
+ .venv2
app.py CHANGED
@@ -1,86 +1,31 @@
+ from llama_cpp import Llama
  import gradio as gr
- from huggingface_hub import InferenceClient
- from unsloth import FastLanguageModel
- from transformers import TextStreamer
 
- model = 1
- tokenizer = 1
-
- FastLanguageModel.for_inference(model) # Enable native 2x faster inference
-
- messages = [
-     {"role": "user", "content": "Continue the fibonnaci sequence: 1, 1, 2, 3, 5, 8,"},
- ]
- inputs = tokenizer.apply_chat_template(
-     messages,
-     tokenize = True,
-     add_generation_prompt = True, # Must add for generation
-     return_tensors = "pt",
- ).to("cuda")
-
- text_streamer = TextStreamer(tokenizer, skip_prompt = True)
- _ = model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = 128,
-                    use_cache = True, temperature = 1.5, min_p = 0.1)
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
+ llm = Llama.from_pretrained(
+     repo_id="Robzy/Llama-3.2-1B-Instruct-Finetuned-q4_k_m",
+     filename="unsloth.Q4_K_M.gguf",
+ )
+
+ def predict(message, history):
+     messages = [{"role": "system", "content": "You are a helpful assistant."}]
+     for user_message, bot_message in history:
+         if user_message:
+             messages.append({"role": "user", "content": user_message})
+         if bot_message:
+             messages.append({"role": "assistant", "content": bot_message})
      messages.append({"role": "user", "content": message})
 
      response = ""
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
+     for chunk in llm.create_chat_completion(
          stream=True,
-         temperature=temperature,
-         top_p=top_p,
+         messages=messages,
      ):
-         token = message.choices[0].delta.content
-
-         response += token
+         part = chunk["choices"][0]["delta"].get("content", None)
+         if part:
+             response += part
          yield response
 
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
+ demo = gr.ChatInterface(predict)
 
  if __name__ == "__main__":
      demo.launch()
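
The streaming call that the new predict() wraps can also be exercised outside the Gradio UI. The snippet below is a minimal, illustrative sketch (it assumes the same repo_id and filename committed in app.py and the stock llama-cpp-python chat-completion API); it drains one streamed completion and prints the deltas as they arrive.

# Illustrative sketch only: exercise the same llama-cpp-python streaming call
# that app.py's predict() wraps, without going through gr.ChatInterface.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="Robzy/Llama-3.2-1B-Instruct-Finetuned-q4_k_m",  # same GGUF checkpoint as app.py
    filename="unsloth.Q4_K_M.gguf",
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Continue the fibonacci sequence: 1, 1, 2, 3, 5, 8,"},
]

# With stream=True, create_chat_completion yields chunks whose "delta" field
# carries the incremental text, exactly as consumed in predict().
for chunk in llm.create_chat_completion(messages=messages, stream=True):
    part = chunk["choices"][0]["delta"].get("content")
    if part:
        print(part, end="", flush=True)
print()
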
local-requirements.txt CHANGED
@@ -1,3 +1,4 @@
  huggingface_hub==0.25.2
  transformers
- ipykernel
+ ipykernel
+ llama-cpp-python
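
The added llama-cpp-python dependency is what local.ipynb and the new app.py rely on to load the GGUF checkpoint. A minimal sanity check, assuming this file has been installed into the active environment:

# Minimal check that the new dependency resolves after installing
# local-requirements.txt; prints the installed llama-cpp-python version.
import llama_cpp
print(llama_cpp.__version__)
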
local.ipynb CHANGED
@@ -9,8 +9,7 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
- "/home/robert/Documents/scalable-ml/llm/.venv/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
- " from .autonotebook import tqdm as notebook_tqdm\n"
 ]
 }
 ],
@@ -20,33 +19,592 @@
 },
 {
 "cell_type": "code",
- "execution_count": 2,
 "metadata": {},
 "outputs": [
 {
- "ename": "ValueError",
- "evalue": "Unrecognized model in Robzy/lora_model. Should have a `model_type` key in its config.json, or contain one of the following strings in its name: albert, align, altclip, audio-spectrogram-transformer, autoformer, bark, bart, beit, bert, bert-generation, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot-small, blip, blip-2, bloom, bridgetower, bros, camembert, canine, chameleon, chinese_clip, chinese_clip_vision_model, clap, clip, clip_text_model, clip_vision_model, clipseg, clvp, code_llama, codegen, cohere, conditional_detr, convbert, convnext, convnextv2, cpmant, ctrl, cvt, dac, data2vec-audio, data2vec-text, data2vec-vision, dbrx, deberta, deberta-v2, decision_transformer, deformable_detr, deit, depth_anything, deta, detr, dinat, dinov2, distilbert, donut-swin, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder-decoder, ernie, ernie_m, esm, falcon, falcon_mamba, fastspeech2_conformer, flaubert, flava, fnet, focalnet, fsmt, funnel, fuyu, gemma, gemma2, git, glm, glpn, gpt-sw3, gpt2, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gptj, gptsan-japanese, granite, granitemoe, graphormer, grounding-dino, groupvit, hiera, hubert, ibert, idefics, idefics2, idefics3, imagegpt, informer, instructblip, instructblipvideo, jamba, jetmoe, jukebox, kosmos-2, layoutlm, layoutlmv2, layoutlmv3, led, levit, lilt, llama, llava, llava_next, llava_next_video, llava_onevision, longformer, longt5, luke, lxmert, m2m_100, mamba, mamba2, marian, markuplm, mask2former, maskformer, maskformer-swin, mbart, mctct, mega, megatron-bert, mgp-str, mimi, mistral, mixtral, mllama, mobilebert, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, moshi, mpnet, mpt, mra, mt5, musicgen, musicgen_melody, mvp, nat, nemotron, nezha, nllb-moe, nougat, nystromformer, olmo, olmoe, omdet-turbo, oneformer, open-llama, openai-gpt, opt, owlv2, owlvit, paligemma, patchtsmixer, patchtst, pegasus, pegasus_x, perceiver, persimmon, phi, phi3, phimoe, pix2struct, pixtral, plbart, poolformer, pop2piano, prophetnet, pvt, pvt_v2, qdqbert, qwen2, qwen2_audio, qwen2_audio_encoder, qwen2_moe, qwen2_vl, rag, realm, recurrent_gemma, reformer, regnet, rembert, resnet, retribert, roberta, roberta-prelayernorm, roc_bert, roformer, rt_detr, rt_detr_resnet, rwkv, sam, seamless_m4t, seamless_m4t_v2, segformer, seggpt, sew, sew-d, siglip, siglip_vision_model, speech-encoder-decoder, speech_to_text, speech_to_text_2, speecht5, splinter, squeezebert, stablelm, starcoder2, superpoint, swiftformer, swin, swin2sr, swinv2, switch_transformers, t5, table-transformer, tapas, time_series_transformer, timesformer, timm_backbone, trajectory_transformer, transfo-xl, trocr, tvlt, tvp, udop, umt5, unispeech, unispeech-sat, univnet, upernet, van, video_llava, videomae, vilt, vipllava, vision-encoder-decoder, vision-text-dual-encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vitdet, vitmatte, vits, vivit, wav2vec2, wav2vec2-bert, wav2vec2-conformer, wavlm, whisper, xclip, xglm, xlm, xlm-prophetnet, xlm-roberta, xlm-roberta-xl, xlnet, xmod, yolos, yoso, zamba, zoedepth",
- "output_type": "error",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
- "Cell \u001b[0;32mIn[2], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# Load model directly\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtransformers\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AutoModel\n\u001b[0;32m----> 3\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mAutoModel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfrom_pretrained\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mRobzy/lora_model\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
- "File \u001b[0;32m~/Documents/scalable-ml/llm/.venv/lib/python3.12/site-packages/transformers/models/auto/auto_factory.py:526\u001b[0m, in \u001b[0;36m_BaseAutoModelClass.from_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[1;32m 523\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquantization_config\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 524\u001b[0m _ \u001b[38;5;241m=\u001b[39m kwargs\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquantization_config\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 526\u001b[0m config, kwargs \u001b[38;5;241m=\u001b[39m \u001b[43mAutoConfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfrom_pretrained\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 527\u001b[0m \u001b[43m \u001b[49m\u001b[43mpretrained_model_name_or_path\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 528\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_unused_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 529\u001b[0m \u001b[43m \u001b[49m\u001b[43mtrust_remote_code\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtrust_remote_code\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 530\u001b[0m \u001b[43m \u001b[49m\u001b[43mcode_revision\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcode_revision\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 531\u001b[0m \u001b[43m \u001b[49m\u001b[43m_commit_hash\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcommit_hash\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 532\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mhub_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 533\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 534\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 536\u001b[0m \u001b[38;5;66;03m# if torch_dtype=auto was passed here, ensure to pass it on\u001b[39;00m\n\u001b[1;32m 537\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs_orig\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtorch_dtype\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mauto\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n",
- "File \u001b[0;32m~/Documents/scalable-ml/llm/.venv/lib/python3.12/site-packages/transformers/models/auto/configuration_auto.py:1049\u001b[0m, in \u001b[0;36mAutoConfig.from_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, **kwargs)\u001b[0m\n\u001b[1;32m 1046\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m pattern \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mstr\u001b[39m(pretrained_model_name_or_path):\n\u001b[1;32m 1047\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m CONFIG_MAPPING[pattern]\u001b[38;5;241m.\u001b[39mfrom_dict(config_dict, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39munused_kwargs)\n\u001b[0;32m-> 1049\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 1050\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUnrecognized model in \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpretrained_model_name_or_path\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m. \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 1051\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mShould have a `model_type` key in its \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mCONFIG_NAME\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m, or contain one of the following strings \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 1052\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124min its name: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m, \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(CONFIG_MAPPING\u001b[38;5;241m.\u001b[39mkeys())\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 1053\u001b[0m )\n",
- "\u001b[0;31mValueError\u001b[0m: Unrecognized model in Robzy/lora_model. Should have a `model_type` key in its config.json, or contain one of the following strings in its name: albert, align, altclip, audio-spectrogram-transformer, autoformer, bark, bart, beit, bert, bert-generation, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot-small, blip, blip-2, bloom, bridgetower, bros, camembert, canine, chameleon, chinese_clip, chinese_clip_vision_model, clap, clip, clip_text_model, clip_vision_model, clipseg, clvp, code_llama, codegen, cohere, conditional_detr, convbert, convnext, convnextv2, cpmant, ctrl, cvt, dac, data2vec-audio, data2vec-text, data2vec-vision, dbrx, deberta, deberta-v2, decision_transformer, deformable_detr, deit, depth_anything, deta, detr, dinat, dinov2, distilbert, donut-swin, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder-decoder, ernie, ernie_m, esm, falcon, falcon_mamba, fastspeech2_conformer, flaubert, flava, fnet, focalnet, fsmt, funnel, fuyu, gemma, gemma2, git, glm, glpn, gpt-sw3, gpt2, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gptj, gptsan-japanese, granite, granitemoe, graphormer, grounding-dino, groupvit, hiera, hubert, ibert, idefics, idefics2, idefics3, imagegpt, informer, instructblip, instructblipvideo, jamba, jetmoe, jukebox, kosmos-2, layoutlm, layoutlmv2, layoutlmv3, led, levit, lilt, llama, llava, llava_next, llava_next_video, llava_onevision, longformer, longt5, luke, lxmert, m2m_100, mamba, mamba2, marian, markuplm, mask2former, maskformer, maskformer-swin, mbart, mctct, mega, megatron-bert, mgp-str, mimi, mistral, mixtral, mllama, mobilebert, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, moshi, mpnet, mpt, mra, mt5, musicgen, musicgen_melody, mvp, nat, nemotron, nezha, nllb-moe, nougat, nystromformer, olmo, olmoe, omdet-turbo, oneformer, open-llama, openai-gpt, opt, owlv2, owlvit, paligemma, patchtsmixer, patchtst, pegasus, pegasus_x, perceiver, persimmon, phi, phi3, phimoe, pix2struct, pixtral, plbart, poolformer, pop2piano, prophetnet, pvt, pvt_v2, qdqbert, qwen2, qwen2_audio, qwen2_audio_encoder, qwen2_moe, qwen2_vl, rag, realm, recurrent_gemma, reformer, regnet, rembert, resnet, retribert, roberta, roberta-prelayernorm, roc_bert, roformer, rt_detr, rt_detr_resnet, rwkv, sam, seamless_m4t, seamless_m4t_v2, segformer, seggpt, sew, sew-d, siglip, siglip_vision_model, speech-encoder-decoder, speech_to_text, speech_to_text_2, speecht5, splinter, squeezebert, stablelm, starcoder2, superpoint, swiftformer, swin, swin2sr, swinv2, switch_transformers, t5, table-transformer, tapas, time_series_transformer, timesformer, timm_backbone, trajectory_transformer, transfo-xl, trocr, tvlt, tvp, udop, umt5, unispeech, unispeech-sat, univnet, upernet, van, video_llava, videomae, vilt, vipllava, vision-encoder-decoder, vision-text-dual-encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vitdet, vitmatte, vits, vivit, wav2vec2, wav2vec2-bert, wav2vec2-conformer, wavlm, whisper, xclip, xglm, xlm, xlm-prophetnet, xlm-roberta, xlm-roberta-xl, xlnet, xmod, yolos, yoso, zamba, zoedepth"
 ]
 }
 ],
 "source": [
- "# Load model directly\n",
- "from transformers import AutoModel\n",
- "model = AutoModel.from_pretrained(\"Robzy/lora_model\")"
 ]
 }
 ],
 "metadata": {
 "kernelspec": {
- "display_name": ".venv",
 "language": "python",
 "name": "python3"
 },
 
 "name": "stderr",
 "output_type": "stream",
 "text": [
+ "None of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.\n"
 ]
 }
 ],
 
 },
 {
 "cell_type": "code",
+ "execution_count": 3,
 "metadata": {},
 "outputs": [
 {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "c94c88bacc2c48cb8ce50e93d73e15eb",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "unsloth.Q4_K_M.gguf: 0%| | 0.00/808M [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "llama_model_loader: loaded meta data with 30 key-value pairs and 147 tensors from /home/robert/.cache/huggingface/hub/models--Robzy--Llama-3.2-1B-Instruct-Finetuned-q4_k_m/snapshots/49dc2f37761bb04ce3513b70087676029ccd4f20/./unsloth.Q4_K_M.gguf (version GGUF V3 (latest))\n",
44
+ "llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
45
+ "llama_model_loader: - kv 0: general.architecture str = llama\n",
46
+ "llama_model_loader: - kv 1: general.type str = model\n",
47
+ "llama_model_loader: - kv 2: general.name str = Llama 3.2 1b Instruct Bnb 4bit\n",
48
+ "llama_model_loader: - kv 3: general.organization str = Unsloth\n",
49
+ "llama_model_loader: - kv 4: general.finetune str = instruct-bnb-4bit\n",
50
+ "llama_model_loader: - kv 5: general.basename str = llama-3.2\n",
51
+ "llama_model_loader: - kv 6: general.size_label str = 1B\n",
52
+ "llama_model_loader: - kv 7: llama.block_count u32 = 16\n",
53
+ "llama_model_loader: - kv 8: llama.context_length u32 = 131072\n",
54
+ "llama_model_loader: - kv 9: llama.embedding_length u32 = 2048\n",
55
+ "llama_model_loader: - kv 10: llama.feed_forward_length u32 = 8192\n",
56
+ "llama_model_loader: - kv 11: llama.attention.head_count u32 = 32\n",
57
+ "llama_model_loader: - kv 12: llama.attention.head_count_kv u32 = 8\n",
58
+ "llama_model_loader: - kv 13: llama.rope.freq_base f32 = 500000.000000\n",
59
+ "llama_model_loader: - kv 14: llama.attention.layer_norm_rms_epsilon f32 = 0.000010\n",
60
+ "llama_model_loader: - kv 15: llama.attention.key_length u32 = 64\n",
61
+ "llama_model_loader: - kv 16: llama.attention.value_length u32 = 64\n",
62
+ "llama_model_loader: - kv 17: general.file_type u32 = 15\n",
63
+ "llama_model_loader: - kv 18: llama.vocab_size u32 = 128256\n",
64
+ "llama_model_loader: - kv 19: llama.rope.dimension_count u32 = 64\n",
65
+ "llama_model_loader: - kv 20: tokenizer.ggml.model str = gpt2\n",
66
+ "llama_model_loader: - kv 21: tokenizer.ggml.pre str = llama-bpe\n",
67
+ "llama_model_loader: - kv 22: tokenizer.ggml.tokens arr[str,128256] = [\"!\", \"\\\"\", \"#\", \"$\", \"%\", \"&\", \"'\", ...\n",
68
+ "llama_model_loader: - kv 23: tokenizer.ggml.token_type arr[i32,128256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...\n",
69
+ "llama_model_loader: - kv 24: tokenizer.ggml.merges arr[str,280147] = [\"Ġ Ġ\", \"Ġ ĠĠĠ\", \"ĠĠ ĠĠ\", \"...\n",
70
+ "llama_model_loader: - kv 25: tokenizer.ggml.bos_token_id u32 = 128000\n",
71
+ "llama_model_loader: - kv 26: tokenizer.ggml.eos_token_id u32 = 128009\n",
72
+ "llama_model_loader: - kv 27: tokenizer.ggml.padding_token_id u32 = 128004\n",
73
+ "llama_model_loader: - kv 28: tokenizer.chat_template str = {{- bos_token }}\\n{%- if custom_tools ...\n",
74
+ "llama_model_loader: - kv 29: general.quantization_version u32 = 2\n",
75
+ "llama_model_loader: - type f32: 34 tensors\n",
76
+ "llama_model_loader: - type q4_K: 96 tensors\n",
77
+ "llama_model_loader: - type q6_K: 17 tensors\n",
78
+ "llm_load_vocab: control token: 128254 '<|reserved_special_token_246|>' is not marked as EOG\n",
79
+ "llm_load_vocab: control token: 128249 '<|reserved_special_token_241|>' is not marked as EOG\n",
80
+ "llm_load_vocab: control token: 128246 '<|reserved_special_token_238|>' is not marked as EOG\n",
81
+ "llm_load_vocab: control token: 128243 '<|reserved_special_token_235|>' is not marked as EOG\n",
82
+ "llm_load_vocab: control token: 128242 '<|reserved_special_token_234|>' is not marked as EOG\n",
83
+ "llm_load_vocab: control token: 128241 '<|reserved_special_token_233|>' is not marked as EOG\n",
84
+ "llm_load_vocab: control token: 128240 '<|reserved_special_token_232|>' is not marked as EOG\n",
85
+ "llm_load_vocab: control token: 128235 '<|reserved_special_token_227|>' is not marked as EOG\n",
86
+ "llm_load_vocab: control token: 128231 '<|reserved_special_token_223|>' is not marked as EOG\n",
87
+ "llm_load_vocab: control token: 128230 '<|reserved_special_token_222|>' is not marked as EOG\n",
88
+ "llm_load_vocab: control token: 128228 '<|reserved_special_token_220|>' is not marked as EOG\n",
89
+ "llm_load_vocab: control token: 128225 '<|reserved_special_token_217|>' is not marked as EOG\n",
90
+ "llm_load_vocab: control token: 128218 '<|reserved_special_token_210|>' is not marked as EOG\n",
91
+ "llm_load_vocab: control token: 128214 '<|reserved_special_token_206|>' is not marked as EOG\n",
92
+ "llm_load_vocab: control token: 128213 '<|reserved_special_token_205|>' is not marked as EOG\n",
93
+ "llm_load_vocab: control token: 128207 '<|reserved_special_token_199|>' is not marked as EOG\n",
94
+ "llm_load_vocab: control token: 128206 '<|reserved_special_token_198|>' is not marked as EOG\n",
95
+ "llm_load_vocab: control token: 128204 '<|reserved_special_token_196|>' is not marked as EOG\n",
96
+ "llm_load_vocab: control token: 128200 '<|reserved_special_token_192|>' is not marked as EOG\n",
97
+ "llm_load_vocab: control token: 128199 '<|reserved_special_token_191|>' is not marked as EOG\n",
98
+ "llm_load_vocab: control token: 128198 '<|reserved_special_token_190|>' is not marked as EOG\n",
99
+ "llm_load_vocab: control token: 128196 '<|reserved_special_token_188|>' is not marked as EOG\n",
100
+ "llm_load_vocab: control token: 128194 '<|reserved_special_token_186|>' is not marked as EOG\n",
101
+ "llm_load_vocab: control token: 128193 '<|reserved_special_token_185|>' is not marked as EOG\n",
102
+ "llm_load_vocab: control token: 128188 '<|reserved_special_token_180|>' is not marked as EOG\n",
103
+ "llm_load_vocab: control token: 128187 '<|reserved_special_token_179|>' is not marked as EOG\n",
104
+ "llm_load_vocab: control token: 128185 '<|reserved_special_token_177|>' is not marked as EOG\n",
105
+ "llm_load_vocab: control token: 128184 '<|reserved_special_token_176|>' is not marked as EOG\n",
106
+ "llm_load_vocab: control token: 128180 '<|reserved_special_token_172|>' is not marked as EOG\n",
107
+ "llm_load_vocab: control token: 128179 '<|reserved_special_token_171|>' is not marked as EOG\n",
108
+ "llm_load_vocab: control token: 128178 '<|reserved_special_token_170|>' is not marked as EOG\n",
109
+ "llm_load_vocab: control token: 128177 '<|reserved_special_token_169|>' is not marked as EOG\n",
110
+ "llm_load_vocab: control token: 128176 '<|reserved_special_token_168|>' is not marked as EOG\n",
111
+ "llm_load_vocab: control token: 128175 '<|reserved_special_token_167|>' is not marked as EOG\n",
112
+ "llm_load_vocab: control token: 128171 '<|reserved_special_token_163|>' is not marked as EOG\n",
113
+ "llm_load_vocab: control token: 128170 '<|reserved_special_token_162|>' is not marked as EOG\n",
114
+ "llm_load_vocab: control token: 128169 '<|reserved_special_token_161|>' is not marked as EOG\n",
115
+ "llm_load_vocab: control token: 128168 '<|reserved_special_token_160|>' is not marked as EOG\n",
116
+ "llm_load_vocab: control token: 128165 '<|reserved_special_token_157|>' is not marked as EOG\n",
117
+ "llm_load_vocab: control token: 128162 '<|reserved_special_token_154|>' is not marked as EOG\n",
118
+ "llm_load_vocab: control token: 128158 '<|reserved_special_token_150|>' is not marked as EOG\n",
119
+ "llm_load_vocab: control token: 128156 '<|reserved_special_token_148|>' is not marked as EOG\n",
120
+ "llm_load_vocab: control token: 128155 '<|reserved_special_token_147|>' is not marked as EOG\n",
121
+ "llm_load_vocab: control token: 128154 '<|reserved_special_token_146|>' is not marked as EOG\n",
122
+ "llm_load_vocab: control token: 128151 '<|reserved_special_token_143|>' is not marked as EOG\n",
123
+ "llm_load_vocab: control token: 128149 '<|reserved_special_token_141|>' is not marked as EOG\n",
124
+ "llm_load_vocab: control token: 128147 '<|reserved_special_token_139|>' is not marked as EOG\n",
125
+ "llm_load_vocab: control token: 128146 '<|reserved_special_token_138|>' is not marked as EOG\n",
126
+ "llm_load_vocab: control token: 128144 '<|reserved_special_token_136|>' is not marked as EOG\n",
127
+ "llm_load_vocab: control token: 128142 '<|reserved_special_token_134|>' is not marked as EOG\n",
128
+ "llm_load_vocab: control token: 128141 '<|reserved_special_token_133|>' is not marked as EOG\n",
129
+ "llm_load_vocab: control token: 128138 '<|reserved_special_token_130|>' is not marked as EOG\n",
130
+ "llm_load_vocab: control token: 128136 '<|reserved_special_token_128|>' is not marked as EOG\n",
131
+ "llm_load_vocab: control token: 128135 '<|reserved_special_token_127|>' is not marked as EOG\n",
132
+ "llm_load_vocab: control token: 128134 '<|reserved_special_token_126|>' is not marked as EOG\n",
133
+ "llm_load_vocab: control token: 128133 '<|reserved_special_token_125|>' is not marked as EOG\n",
134
+ "llm_load_vocab: control token: 128131 '<|reserved_special_token_123|>' is not marked as EOG\n",
135
+ "llm_load_vocab: control token: 128128 '<|reserved_special_token_120|>' is not marked as EOG\n",
136
+ "llm_load_vocab: control token: 128124 '<|reserved_special_token_116|>' is not marked as EOG\n",
137
+ "llm_load_vocab: control token: 128123 '<|reserved_special_token_115|>' is not marked as EOG\n",
138
+ "llm_load_vocab: control token: 128122 '<|reserved_special_token_114|>' is not marked as EOG\n",
139
+ "llm_load_vocab: control token: 128119 '<|reserved_special_token_111|>' is not marked as EOG\n",
140
+ "llm_load_vocab: control token: 128115 '<|reserved_special_token_107|>' is not marked as EOG\n",
141
+ "llm_load_vocab: control token: 128112 '<|reserved_special_token_104|>' is not marked as EOG\n",
142
+ "llm_load_vocab: control token: 128110 '<|reserved_special_token_102|>' is not marked as EOG\n",
143
+ "llm_load_vocab: control token: 128109 '<|reserved_special_token_101|>' is not marked as EOG\n",
144
+ "llm_load_vocab: control token: 128108 '<|reserved_special_token_100|>' is not marked as EOG\n",
145
+ "llm_load_vocab: control token: 128106 '<|reserved_special_token_98|>' is not marked as EOG\n",
146
+ "llm_load_vocab: control token: 128103 '<|reserved_special_token_95|>' is not marked as EOG\n",
147
+ "llm_load_vocab: control token: 128102 '<|reserved_special_token_94|>' is not marked as EOG\n",
148
+ "llm_load_vocab: control token: 128101 '<|reserved_special_token_93|>' is not marked as EOG\n",
149
+ "llm_load_vocab: control token: 128097 '<|reserved_special_token_89|>' is not marked as EOG\n",
150
+ "llm_load_vocab: control token: 128091 '<|reserved_special_token_83|>' is not marked as EOG\n",
151
+ "llm_load_vocab: control token: 128090 '<|reserved_special_token_82|>' is not marked as EOG\n",
152
+ "llm_load_vocab: control token: 128089 '<|reserved_special_token_81|>' is not marked as EOG\n",
153
+ "llm_load_vocab: control token: 128087 '<|reserved_special_token_79|>' is not marked as EOG\n",
154
+ "llm_load_vocab: control token: 128085 '<|reserved_special_token_77|>' is not marked as EOG\n",
155
+ "llm_load_vocab: control token: 128081 '<|reserved_special_token_73|>' is not marked as EOG\n",
156
+ "llm_load_vocab: control token: 128078 '<|reserved_special_token_70|>' is not marked as EOG\n",
157
+ "llm_load_vocab: control token: 128076 '<|reserved_special_token_68|>' is not marked as EOG\n",
158
+ "llm_load_vocab: control token: 128075 '<|reserved_special_token_67|>' is not marked as EOG\n",
159
+ "llm_load_vocab: control token: 128073 '<|reserved_special_token_65|>' is not marked as EOG\n",
160
+ "llm_load_vocab: control token: 128068 '<|reserved_special_token_60|>' is not marked as EOG\n",
161
+ "llm_load_vocab: control token: 128067 '<|reserved_special_token_59|>' is not marked as EOG\n",
162
+ "llm_load_vocab: control token: 128065 '<|reserved_special_token_57|>' is not marked as EOG\n",
163
+ "llm_load_vocab: control token: 128063 '<|reserved_special_token_55|>' is not marked as EOG\n",
164
+ "llm_load_vocab: control token: 128062 '<|reserved_special_token_54|>' is not marked as EOG\n",
165
+ "llm_load_vocab: control token: 128060 '<|reserved_special_token_52|>' is not marked as EOG\n",
166
+ "llm_load_vocab: control token: 128059 '<|reserved_special_token_51|>' is not marked as EOG\n",
167
+ "llm_load_vocab: control token: 128057 '<|reserved_special_token_49|>' is not marked as EOG\n",
168
+ "llm_load_vocab: control token: 128054 '<|reserved_special_token_46|>' is not marked as EOG\n",
169
+ "llm_load_vocab: control token: 128046 '<|reserved_special_token_38|>' is not marked as EOG\n",
170
+ "llm_load_vocab: control token: 128045 '<|reserved_special_token_37|>' is not marked as EOG\n",
171
+ "llm_load_vocab: control token: 128044 '<|reserved_special_token_36|>' is not marked as EOG\n",
172
+ "llm_load_vocab: control token: 128043 '<|reserved_special_token_35|>' is not marked as EOG\n",
173
+ "llm_load_vocab: control token: 128038 '<|reserved_special_token_30|>' is not marked as EOG\n",
174
+ "llm_load_vocab: control token: 128036 '<|reserved_special_token_28|>' is not marked as EOG\n",
175
+ "llm_load_vocab: control token: 128035 '<|reserved_special_token_27|>' is not marked as EOG\n",
176
+ "llm_load_vocab: control token: 128032 '<|reserved_special_token_24|>' is not marked as EOG\n",
177
+ "llm_load_vocab: control token: 128028 '<|reserved_special_token_20|>' is not marked as EOG\n",
178
+ "llm_load_vocab: control token: 128027 '<|reserved_special_token_19|>' is not marked as EOG\n",
179
+ "llm_load_vocab: control token: 128024 '<|reserved_special_token_16|>' is not marked as EOG\n",
180
+ "llm_load_vocab: control token: 128023 '<|reserved_special_token_15|>' is not marked as EOG\n",
181
+ "llm_load_vocab: control token: 128022 '<|reserved_special_token_14|>' is not marked as EOG\n",
182
+ "llm_load_vocab: control token: 128021 '<|reserved_special_token_13|>' is not marked as EOG\n",
183
+ "llm_load_vocab: control token: 128018 '<|reserved_special_token_10|>' is not marked as EOG\n",
184
+ "llm_load_vocab: control token: 128016 '<|reserved_special_token_8|>' is not marked as EOG\n",
185
+ "llm_load_vocab: control token: 128015 '<|reserved_special_token_7|>' is not marked as EOG\n",
186
+ "llm_load_vocab: control token: 128013 '<|reserved_special_token_5|>' is not marked as EOG\n",
187
+ "llm_load_vocab: control token: 128011 '<|reserved_special_token_3|>' is not marked as EOG\n",
188
+ "llm_load_vocab: control token: 128005 '<|reserved_special_token_2|>' is not marked as EOG\n",
189
+ "llm_load_vocab: control token: 128004 '<|finetune_right_pad_id|>' is not marked as EOG\n",
190
+ "llm_load_vocab: control token: 128002 '<|reserved_special_token_0|>' is not marked as EOG\n",
191
+ "llm_load_vocab: control token: 128252 '<|reserved_special_token_244|>' is not marked as EOG\n",
192
+ "llm_load_vocab: control token: 128190 '<|reserved_special_token_182|>' is not marked as EOG\n",
193
+ "llm_load_vocab: control token: 128183 '<|reserved_special_token_175|>' is not marked as EOG\n",
194
+ "llm_load_vocab: control token: 128137 '<|reserved_special_token_129|>' is not marked as EOG\n",
195
+ "llm_load_vocab: control token: 128182 '<|reserved_special_token_174|>' is not marked as EOG\n",
196
+ "llm_load_vocab: control token: 128040 '<|reserved_special_token_32|>' is not marked as EOG\n",
197
+ "llm_load_vocab: control token: 128048 '<|reserved_special_token_40|>' is not marked as EOG\n",
198
+ "llm_load_vocab: control token: 128092 '<|reserved_special_token_84|>' is not marked as EOG\n",
199
+ "llm_load_vocab: control token: 128215 '<|reserved_special_token_207|>' is not marked as EOG\n",
200
+ "llm_load_vocab: control token: 128107 '<|reserved_special_token_99|>' is not marked as EOG\n",
201
+ "llm_load_vocab: control token: 128208 '<|reserved_special_token_200|>' is not marked as EOG\n",
202
+ "llm_load_vocab: control token: 128145 '<|reserved_special_token_137|>' is not marked as EOG\n",
203
+ "llm_load_vocab: control token: 128031 '<|reserved_special_token_23|>' is not marked as EOG\n",
204
+ "llm_load_vocab: control token: 128129 '<|reserved_special_token_121|>' is not marked as EOG\n",
205
+ "llm_load_vocab: control token: 128201 '<|reserved_special_token_193|>' is not marked as EOG\n",
206
+ "llm_load_vocab: control token: 128074 '<|reserved_special_token_66|>' is not marked as EOG\n",
207
+ "llm_load_vocab: control token: 128095 '<|reserved_special_token_87|>' is not marked as EOG\n",
208
+ "llm_load_vocab: control token: 128186 '<|reserved_special_token_178|>' is not marked as EOG\n",
209
+ "llm_load_vocab: control token: 128143 '<|reserved_special_token_135|>' is not marked as EOG\n",
210
+ "llm_load_vocab: control token: 128229 '<|reserved_special_token_221|>' is not marked as EOG\n",
211
+ "llm_load_vocab: control token: 128007 '<|end_header_id|>' is not marked as EOG\n",
212
+ "llm_load_vocab: control token: 128055 '<|reserved_special_token_47|>' is not marked as EOG\n",
213
+ "llm_load_vocab: control token: 128056 '<|reserved_special_token_48|>' is not marked as EOG\n",
214
+ "llm_load_vocab: control token: 128061 '<|reserved_special_token_53|>' is not marked as EOG\n",
215
+ "llm_load_vocab: control token: 128153 '<|reserved_special_token_145|>' is not marked as EOG\n",
216
+ "llm_load_vocab: control token: 128152 '<|reserved_special_token_144|>' is not marked as EOG\n",
217
+ "llm_load_vocab: control token: 128212 '<|reserved_special_token_204|>' is not marked as EOG\n",
218
+ "llm_load_vocab: control token: 128172 '<|reserved_special_token_164|>' is not marked as EOG\n",
219
+ "llm_load_vocab: control token: 128160 '<|reserved_special_token_152|>' is not marked as EOG\n",
220
+ "llm_load_vocab: control token: 128041 '<|reserved_special_token_33|>' is not marked as EOG\n",
221
+ "llm_load_vocab: control token: 128181 '<|reserved_special_token_173|>' is not marked as EOG\n",
222
+ "llm_load_vocab: control token: 128094 '<|reserved_special_token_86|>' is not marked as EOG\n",
223
+ "llm_load_vocab: control token: 128118 '<|reserved_special_token_110|>' is not marked as EOG\n",
224
+ "llm_load_vocab: control token: 128236 '<|reserved_special_token_228|>' is not marked as EOG\n",
225
+ "llm_load_vocab: control token: 128148 '<|reserved_special_token_140|>' is not marked as EOG\n",
226
+ "llm_load_vocab: control token: 128042 '<|reserved_special_token_34|>' is not marked as EOG\n",
227
+ "llm_load_vocab: control token: 128139 '<|reserved_special_token_131|>' is not marked as EOG\n",
228
+ "llm_load_vocab: control token: 128173 '<|reserved_special_token_165|>' is not marked as EOG\n",
229
+ "llm_load_vocab: control token: 128239 '<|reserved_special_token_231|>' is not marked as EOG\n",
230
+ "llm_load_vocab: control token: 128157 '<|reserved_special_token_149|>' is not marked as EOG\n",
231
+ "llm_load_vocab: control token: 128052 '<|reserved_special_token_44|>' is not marked as EOG\n",
232
+ "llm_load_vocab: control token: 128026 '<|reserved_special_token_18|>' is not marked as EOG\n",
233
+ "llm_load_vocab: control token: 128003 '<|reserved_special_token_1|>' is not marked as EOG\n",
234
+ "llm_load_vocab: control token: 128019 '<|reserved_special_token_11|>' is not marked as EOG\n",
235
+ "llm_load_vocab: control token: 128116 '<|reserved_special_token_108|>' is not marked as EOG\n",
236
+ "llm_load_vocab: control token: 128161 '<|reserved_special_token_153|>' is not marked as EOG\n",
237
+ "llm_load_vocab: control token: 128226 '<|reserved_special_token_218|>' is not marked as EOG\n",
238
+ "llm_load_vocab: control token: 128159 '<|reserved_special_token_151|>' is not marked as EOG\n",
239
+ "llm_load_vocab: control token: 128012 '<|reserved_special_token_4|>' is not marked as EOG\n",
240
+ "llm_load_vocab: control token: 128088 '<|reserved_special_token_80|>' is not marked as EOG\n",
241
+ "llm_load_vocab: control token: 128163 '<|reserved_special_token_155|>' is not marked as EOG\n",
242
+ "llm_load_vocab: control token: 128001 '<|end_of_text|>' is not marked as EOG\n",
243
+ "llm_load_vocab: control token: 128113 '<|reserved_special_token_105|>' is not marked as EOG\n",
244
+ "llm_load_vocab: control token: 128250 '<|reserved_special_token_242|>' is not marked as EOG\n",
245
+ "llm_load_vocab: control token: 128125 '<|reserved_special_token_117|>' is not marked as EOG\n",
246
+ "llm_load_vocab: control token: 128053 '<|reserved_special_token_45|>' is not marked as EOG\n",
247
+ "llm_load_vocab: control token: 128224 '<|reserved_special_token_216|>' is not marked as EOG\n",
248
+ "llm_load_vocab: control token: 128247 '<|reserved_special_token_239|>' is not marked as EOG\n",
249
+ "llm_load_vocab: control token: 128251 '<|reserved_special_token_243|>' is not marked as EOG\n",
250
+ "llm_load_vocab: control token: 128216 '<|reserved_special_token_208|>' is not marked as EOG\n",
251
+ "llm_load_vocab: control token: 128006 '<|start_header_id|>' is not marked as EOG\n",
252
+ "llm_load_vocab: control token: 128211 '<|reserved_special_token_203|>' is not marked as EOG\n",
253
+ "llm_load_vocab: control token: 128077 '<|reserved_special_token_69|>' is not marked as EOG\n",
254
+ "llm_load_vocab: control token: 128237 '<|reserved_special_token_229|>' is not marked as EOG\n",
255
+ "llm_load_vocab: control token: 128086 '<|reserved_special_token_78|>' is not marked as EOG\n",
256
+ "llm_load_vocab: control token: 128227 '<|reserved_special_token_219|>' is not marked as EOG\n",
257
+ "llm_load_vocab: control token: 128058 '<|reserved_special_token_50|>' is not marked as EOG\n",
258
+ "llm_load_vocab: control token: 128100 '<|reserved_special_token_92|>' is not marked as EOG\n",
259
+ "llm_load_vocab: control token: 128209 '<|reserved_special_token_201|>' is not marked as EOG\n",
260
+ "llm_load_vocab: control token: 128084 '<|reserved_special_token_76|>' is not marked as EOG\n",
261
+ "llm_load_vocab: control token: 128071 '<|reserved_special_token_63|>' is not marked as EOG\n",
262
+ "llm_load_vocab: control token: 128070 '<|reserved_special_token_62|>' is not marked as EOG\n",
263
+ "llm_load_vocab: control token: 128049 '<|reserved_special_token_41|>' is not marked as EOG\n",
264
+ "llm_load_vocab: control token: 128197 '<|reserved_special_token_189|>' is not marked as EOG\n",
265
+ "llm_load_vocab: control token: 128072 '<|reserved_special_token_64|>' is not marked as EOG\n",
266
+ "llm_load_vocab: control token: 128000 '<|begin_of_text|>' is not marked as EOG\n",
267
+ "llm_load_vocab: control token: 128223 '<|reserved_special_token_215|>' is not marked as EOG\n",
268
+ "llm_load_vocab: control token: 128217 '<|reserved_special_token_209|>' is not marked as EOG\n",
269
+ "llm_load_vocab: control token: 128111 '<|reserved_special_token_103|>' is not marked as EOG\n",
270
+ "llm_load_vocab: control token: 128203 '<|reserved_special_token_195|>' is not marked as EOG\n",
271
+ "llm_load_vocab: control token: 128051 '<|reserved_special_token_43|>' is not marked as EOG\n",
272
+ "llm_load_vocab: control token: 128030 '<|reserved_special_token_22|>' is not marked as EOG\n",
273
+ "llm_load_vocab: control token: 128117 '<|reserved_special_token_109|>' is not marked as EOG\n",
274
+ "llm_load_vocab: control token: 128010 '<|python_tag|>' is not marked as EOG\n",
275
+ "llm_load_vocab: control token: 128238 '<|reserved_special_token_230|>' is not marked as EOG\n",
276
+ "llm_load_vocab: control token: 128255 '<|reserved_special_token_247|>' is not marked as EOG\n",
277
+ "llm_load_vocab: control token: 128202 '<|reserved_special_token_194|>' is not marked as EOG\n",
278
+ "llm_load_vocab: control token: 128132 '<|reserved_special_token_124|>' is not marked as EOG\n",
279
+ "llm_load_vocab: control token: 128248 '<|reserved_special_token_240|>' is not marked as EOG\n",
280
+ "llm_load_vocab: control token: 128167 '<|reserved_special_token_159|>' is not marked as EOG\n",
281
+ "llm_load_vocab: control token: 128127 '<|reserved_special_token_119|>' is not marked as EOG\n",
282
+ "llm_load_vocab: control token: 128105 '<|reserved_special_token_97|>' is not marked as EOG\n",
283
+ "llm_load_vocab: control token: 128039 '<|reserved_special_token_31|>' is not marked as EOG\n",
284
+ "llm_load_vocab: control token: 128232 '<|reserved_special_token_224|>' is not marked as EOG\n",
285
+ "llm_load_vocab: control token: 128166 '<|reserved_special_token_158|>' is not marked as EOG\n",
286
+ "llm_load_vocab: control token: 128130 '<|reserved_special_token_122|>' is not marked as EOG\n",
287
+ "llm_load_vocab: control token: 128114 '<|reserved_special_token_106|>' is not marked as EOG\n",
288
+ "llm_load_vocab: control token: 128234 '<|reserved_special_token_226|>' is not marked as EOG\n",
289
+ "llm_load_vocab: control token: 128191 '<|reserved_special_token_183|>' is not marked as EOG\n",
290
+ "llm_load_vocab: control token: 128064 '<|reserved_special_token_56|>' is not marked as EOG\n",
291
+ "llm_load_vocab: control token: 128140 '<|reserved_special_token_132|>' is not marked as EOG\n",
292
+ "llm_load_vocab: control token: 128096 '<|reserved_special_token_88|>' is not marked as EOG\n",
293
+ "llm_load_vocab: control token: 128098 '<|reserved_special_token_90|>' is not marked as EOG\n",
294
+ "llm_load_vocab: control token: 128192 '<|reserved_special_token_184|>' is not marked as EOG\n",
295
+ "llm_load_vocab: control token: 128093 '<|reserved_special_token_85|>' is not marked as EOG\n",
296
+ "llm_load_vocab: control token: 128150 '<|reserved_special_token_142|>' is not marked as EOG\n",
297
+ "llm_load_vocab: control token: 128222 '<|reserved_special_token_214|>' is not marked as EOG\n",
298
+ "llm_load_vocab: control token: 128233 '<|reserved_special_token_225|>' is not marked as EOG\n",
299
+ "llm_load_vocab: control token: 128220 '<|reserved_special_token_212|>' is not marked as EOG\n",
300
+ "llm_load_vocab: control token: 128034 '<|reserved_special_token_26|>' is not marked as EOG\n",
301
+ "llm_load_vocab: control token: 128033 '<|reserved_special_token_25|>' is not marked as EOG\n",
302
+ "llm_load_vocab: control token: 128253 '<|reserved_special_token_245|>' is not marked as EOG\n",
303
+ "llm_load_vocab: control token: 128195 '<|reserved_special_token_187|>' is not marked as EOG\n",
304
+ "llm_load_vocab: control token: 128099 '<|reserved_special_token_91|>' is not marked as EOG\n",
305
+ "llm_load_vocab: control token: 128189 '<|reserved_special_token_181|>' is not marked as EOG\n",
306
+ "llm_load_vocab: control token: 128210 '<|reserved_special_token_202|>' is not marked as EOG\n",
307
+ "llm_load_vocab: control token: 128174 '<|reserved_special_token_166|>' is not marked as EOG\n",
308
+ "llm_load_vocab: control token: 128083 '<|reserved_special_token_75|>' is not marked as EOG\n",
309
+ "llm_load_vocab: control token: 128080 '<|reserved_special_token_72|>' is not marked as EOG\n",
310
+ "llm_load_vocab: control token: 128104 '<|reserved_special_token_96|>' is not marked as EOG\n",
311
+ "llm_load_vocab: control token: 128082 '<|reserved_special_token_74|>' is not marked as EOG\n",
312
+ "llm_load_vocab: control token: 128219 '<|reserved_special_token_211|>' is not marked as EOG\n",
313
+ "llm_load_vocab: control token: 128017 '<|reserved_special_token_9|>' is not marked as EOG\n",
314
+ "llm_load_vocab: control token: 128050 '<|reserved_special_token_42|>' is not marked as EOG\n",
315
+ "llm_load_vocab: control token: 128205 '<|reserved_special_token_197|>' is not marked as EOG\n",
316
+ "llm_load_vocab: control token: 128047 '<|reserved_special_token_39|>' is not marked as EOG\n",
317
+ "llm_load_vocab: control token: 128164 '<|reserved_special_token_156|>' is not marked as EOG\n",
318
+ "llm_load_vocab: control token: 128020 '<|reserved_special_token_12|>' is not marked as EOG\n",
319
+ "llm_load_vocab: control token: 128069 '<|reserved_special_token_61|>' is not marked as EOG\n",
320
+ "llm_load_vocab: control token: 128245 '<|reserved_special_token_237|>' is not marked as EOG\n",
321
+ "llm_load_vocab: control token: 128121 '<|reserved_special_token_113|>' is not marked as EOG\n",
322
+ "llm_load_vocab: control token: 128079 '<|reserved_special_token_71|>' is not marked as EOG\n",
323
+ "llm_load_vocab: control token: 128037 '<|reserved_special_token_29|>' is not marked as EOG\n",
324
+ "llm_load_vocab: control token: 128244 '<|reserved_special_token_236|>' is not marked as EOG\n",
325
+ "llm_load_vocab: control token: 128029 '<|reserved_special_token_21|>' is not marked as EOG\n",
326
+ "llm_load_vocab: control token: 128221 '<|reserved_special_token_213|>' is not marked as EOG\n",
327
+ "llm_load_vocab: control token: 128066 '<|reserved_special_token_58|>' is not marked as EOG\n",
328
+ "llm_load_vocab: control token: 128120 '<|reserved_special_token_112|>' is not marked as EOG\n",
329
+ "llm_load_vocab: control token: 128014 '<|reserved_special_token_6|>' is not marked as EOG\n",
330
+ "llm_load_vocab: control token: 128025 '<|reserved_special_token_17|>' is not marked as EOG\n",
331
+ "llm_load_vocab: control token: 128126 '<|reserved_special_token_118|>' is not marked as EOG\n",
332
+ "llm_load_vocab: special tokens cache size = 256\n",
333
+ "llm_load_vocab: token to piece cache size = 0.7999 MB\n",
334
+ "llm_load_print_meta: format = GGUF V3 (latest)\n",
335
+ "llm_load_print_meta: arch = llama\n",
336
+ "llm_load_print_meta: vocab type = BPE\n",
337
+ "llm_load_print_meta: n_vocab = 128256\n",
338
+ "llm_load_print_meta: n_merges = 280147\n",
339
+ "llm_load_print_meta: vocab_only = 0\n",
340
+ "llm_load_print_meta: n_ctx_train = 131072\n",
341
+ "llm_load_print_meta: n_embd = 2048\n",
342
+ "llm_load_print_meta: n_layer = 16\n",
343
+ "llm_load_print_meta: n_head = 32\n",
344
+ "llm_load_print_meta: n_head_kv = 8\n",
345
+ "llm_load_print_meta: n_rot = 64\n",
346
+ "llm_load_print_meta: n_swa = 0\n",
347
+ "llm_load_print_meta: n_embd_head_k = 64\n",
348
+ "llm_load_print_meta: n_embd_head_v = 64\n",
349
+ "llm_load_print_meta: n_gqa = 4\n",
350
+ "llm_load_print_meta: n_embd_k_gqa = 512\n",
351
+ "llm_load_print_meta: n_embd_v_gqa = 512\n",
352
+ "llm_load_print_meta: f_norm_eps = 0.0e+00\n",
353
+ "llm_load_print_meta: f_norm_rms_eps = 1.0e-05\n",
354
+ "llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
355
+ "llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
356
+ "llm_load_print_meta: f_logit_scale = 0.0e+00\n",
357
+ "llm_load_print_meta: n_ff = 8192\n",
358
+ "llm_load_print_meta: n_expert = 0\n",
359
+ "llm_load_print_meta: n_expert_used = 0\n",
360
+ "llm_load_print_meta: causal attn = 1\n",
361
+ "llm_load_print_meta: pooling type = 0\n",
362
+ "llm_load_print_meta: rope type = 0\n",
363
+ "llm_load_print_meta: rope scaling = linear\n",
364
+ "llm_load_print_meta: freq_base_train = 500000.0\n",
365
+ "llm_load_print_meta: freq_scale_train = 1\n",
366
+ "llm_load_print_meta: n_ctx_orig_yarn = 131072\n",
367
+ "llm_load_print_meta: rope_finetuned = unknown\n",
368
+ "llm_load_print_meta: ssm_d_conv = 0\n",
369
+ "llm_load_print_meta: ssm_d_inner = 0\n",
370
+ "llm_load_print_meta: ssm_d_state = 0\n",
371
+ "llm_load_print_meta: ssm_dt_rank = 0\n",
372
+ "llm_load_print_meta: ssm_dt_b_c_rms = 0\n",
373
+ "llm_load_print_meta: model type = 1B\n",
374
+ "llm_load_print_meta: model ftype = Q4_K - Medium\n",
375
+ "llm_load_print_meta: model params = 1.24 B\n",
376
+ "llm_load_print_meta: model size = 762.81 MiB (5.18 BPW) \n",
377
+ "llm_load_print_meta: general.name = Llama 3.2 1b Instruct Bnb 4bit\n",
378
+ "llm_load_print_meta: BOS token = 128000 '<|begin_of_text|>'\n",
379
+ "llm_load_print_meta: EOS token = 128009 '<|eot_id|>'\n",
380
+ "llm_load_print_meta: EOT token = 128009 '<|eot_id|>'\n",
381
+ "llm_load_print_meta: EOM token = 128008 '<|eom_id|>'\n",
382
+ "llm_load_print_meta: PAD token = 128004 '<|finetune_right_pad_id|>'\n",
383
+ "llm_load_print_meta: LF token = 128 'Ä'\n",
384
+ "llm_load_print_meta: EOG token = 128008 '<|eom_id|>'\n",
385
+ "llm_load_print_meta: EOG token = 128009 '<|eot_id|>'\n",
386
+ "llm_load_print_meta: max token length = 256\n",
387
+ "llm_load_tensors: tensor 'token_embd.weight' (q6_K) (and 162 others) cannot be used with preferred buffer type CPU_AARCH64, using CPU instead\n",
388
+ "llm_load_tensors: CPU_Mapped model buffer size = 762.81 MiB\n",
389
+ "............................................................\n",
390
+ "llama_new_context_with_model: n_seq_max = 1\n",
391
+ "llama_new_context_with_model: n_ctx = 512\n",
392
+ "llama_new_context_with_model: n_ctx_per_seq = 512\n",
393
+ "llama_new_context_with_model: n_batch = 512\n",
394
+ "llama_new_context_with_model: n_ubatch = 512\n",
395
+ "llama_new_context_with_model: flash_attn = 0\n",
396
+ "llama_new_context_with_model: freq_base = 500000.0\n",
397
+ "llama_new_context_with_model: freq_scale = 1\n",
398
+ "llama_new_context_with_model: n_ctx_per_seq (512) < n_ctx_train (131072) -- the full capacity of the model will not be utilized\n",
399
+ "llama_kv_cache_init: CPU KV buffer size = 16.00 MiB\n",
400
+ "llama_new_context_with_model: KV self size = 16.00 MiB, K (f16): 8.00 MiB, V (f16): 8.00 MiB\n",
401
+ "llama_new_context_with_model: CPU output buffer size = 0.49 MiB\n",
402
+ "llama_new_context_with_model: CPU compute buffer size = 254.50 MiB\n",
403
+ "llama_new_context_with_model: graph nodes = 518\n",
404
+ "llama_new_context_with_model: graph splits = 1\n",
405
+ "AVX = 1 | AVX_VNNI = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | AMX_INT8 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | RISCV_VECT = 0 | WASM_SIMD = 0 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | \n",
406
+ "Model metadata: {'tokenizer.chat_template': '{{- bos_token }}\\n{%- if custom_tools is defined %}\\n {%- set tools = custom_tools %}\\n{%- endif %}\\n{%- if not tools_in_user_message is defined %}\\n {%- set tools_in_user_message = true %}\\n{%- endif %}\\n{%- if not date_string is defined %}\\n {%- set date_string = \"26 July 2024\" %}\\n{%- endif %}\\n{%- if not tools is defined %}\\n {%- set tools = none %}\\n{%- endif %}\\n\\n{#- This block extracts the system message, so we can slot it into the right place. #}\\n{%- if messages[0][\\'role\\'] == \\'system\\' %}\\n {%- set system_message = messages[0][\\'content\\'] %}\\n {%- set messages = messages[1:] %}\\n{%- else %}\\n {%- set system_message = \"\" %}\\n{%- endif %}\\n\\n{#- System message + builtin tools #}\\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\\n{%- if builtin_tools is defined or tools is not none %}\\n {{- \"Environment: ipython\\n\" }}\\n{%- endif %}\\n{%- if builtin_tools is defined %}\\n {{- \"Tools: \" + builtin_tools | reject(\\'equalto\\', \\'code_interpreter\\') | join(\", \") + \"\\n\\n\"}}\\n{%- endif %}\\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\\n{%- if tools is not none and not tools_in_user_message %}\\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\\n {{- \\'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.\\' }}\\n {{- \"Do not use variables.\\n\\n\" }}\\n {%- for t in tools %}\\n {{- t | tojson(indent=4) }}\\n {{- \"\\n\\n\" }}\\n {%- endfor %}\\n{%- endif %}\\n{{- system_message }}\\n{{- \"<|eot_id|>\" }}\\n\\n{#- Custom tools are passed in a user message with some extra guidance #}\\n{%- if tools_in_user_message and not tools is none %}\\n {#- Extract the first user message so we can plug it in here #}\\n {%- if messages | length != 0 %}\\n {%- set first_user_message = messages[0][\\'content\\'] %}\\n {%- set messages = messages[1:] %}\\n {%- else %}\\n {{- raise_exception(\"Cannot put tools in the first user message when there\\'s no first user message!\") }}\\n{%- endif %}\\n {{- \\'<|start_header_id|>user<|end_header_id|>\\n\\n\\' -}}\\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\\n {{- \\'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.\\' }}\\n {{- \"Do not use variables.\\n\\n\" }}\\n {%- for t in tools %}\\n {{- t | tojson(indent=4) }}\\n {{- \"\\n\\n\" }}\\n {%- endfor %}\\n {{- first_user_message + \"<|eot_id|>\"}}\\n{%- endif %}\\n\\n{%- for message in messages %}\\n {%- if not (message.role == \\'ipython\\' or message.role == \\'tool\\' or \\'tool_calls\\' in message) %}\\n {{- \\'<|start_header_id|>\\' + message[\\'role\\'] + \\'<|end_header_id|>\\n\\n\\'+ message[\\'content\\'] + \\'<|eot_id|>\\' }}\\n {%- elif \\'tool_calls\\' in message %}\\n {%- if not message.tool_calls|length == 1 %}\\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\\n {%- endif %}\\n {%- set tool_call = message.tool_calls[0].function %}\\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\\n {{- \\'<|start_header_id|>assistant<|end_header_id|>\\n\\n\\' -}}\\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\\n {%- for arg_name, arg_val in tool_call.arguments | 
items %}\\n {{- arg_name + \\'=\"\\' + arg_val + \\'\"\\' }}\\n {%- if not loop.last %}\\n {{- \", \" }}\\n {%- endif %}\\n {%- endfor %}\\n {{- \")\" }}\\n {%- else %}\\n {{- \\'<|start_header_id|>assistant<|end_header_id|>\\n\\n\\' -}}\\n {{- \\'{\"name\": \"\\' + tool_call.name + \\'\", \\' }}\\n {{- \\'\"parameters\": \\' }}\\n {{- tool_call.arguments | tojson }}\\n {{- \"}\" }}\\n {%- endif %}\\n {%- if builtin_tools is defined %}\\n {#- This means we\\'re in ipython mode #}\\n {{- \"<|eom_id|>\" }}\\n {%- else %}\\n {{- \"<|eot_id|>\" }}\\n {%- endif %}\\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\\n {%- if message.content is mapping or message.content is iterable %}\\n {{- message.content | tojson }}\\n {%- else %}\\n {{- message.content }}\\n {%- endif %}\\n {{- \"<|eot_id|>\" }}\\n {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n {{- \\'<|start_header_id|>assistant<|end_header_id|>\\n\\n\\' }}\\n{%- endif %}\\n', 'tokenizer.ggml.eos_token_id': '128009', 'general.quantization_version': '2', 'tokenizer.ggml.model': 'gpt2', 'llama.rope.dimension_count': '64', 'llama.vocab_size': '128256', 'general.file_type': '15', 'llama.attention.value_length': '64', 'llama.attention.key_length': '64', 'llama.attention.layer_norm_rms_epsilon': '0.000010', 'llama.rope.freq_base': '500000.000000', 'general.architecture': 'llama', 'tokenizer.ggml.padding_token_id': '128004', 'general.basename': 'llama-3.2', 'tokenizer.ggml.bos_token_id': '128000', 'llama.attention.head_count': '32', 'tokenizer.ggml.pre': 'llama-bpe', 'llama.context_length': '131072', 'general.name': 'Llama 3.2 1b Instruct Bnb 4bit', 'general.organization': 'Unsloth', 'general.finetune': 'instruct-bnb-4bit', 'general.type': 'model', 'general.size_label': '1B', 'llama.embedding_length': '2048', 'llama.feed_forward_length': '8192', 'llama.block_count': '16', 'llama.attention.head_count_kv': '8'}\n",
+ "Available chat formats from metadata: chat_template.default\n",
+ "Using gguf chat template: {{- bos_token }}\n",
+ "{%- if custom_tools is defined %}\n",
+ " {%- set tools = custom_tools %}\n",
+ "{%- endif %}\n",
+ "{%- if not tools_in_user_message is defined %}\n",
+ " {%- set tools_in_user_message = true %}\n",
+ "{%- endif %}\n",
+ "{%- if not date_string is defined %}\n",
+ " {%- set date_string = \"26 July 2024\" %}\n",
+ "{%- endif %}\n",
+ "{%- if not tools is defined %}\n",
+ " {%- set tools = none %}\n",
+ "{%- endif %}\n",
+ "\n",
+ "{#- This block extracts the system message, so we can slot it into the right place. #}\n",
+ "{%- if messages[0]['role'] == 'system' %}\n",
+ " {%- set system_message = messages[0]['content'] %}\n",
+ " {%- set messages = messages[1:] %}\n",
+ "{%- else %}\n",
+ " {%- set system_message = \"\" %}\n",
+ "{%- endif %}\n",
+ "\n",
+ "{#- System message + builtin tools #}\n",
+ "{{- \"<|start_header_id|>system<|end_header_id|>\n",
+ "\n",
+ "\" }}\n",
+ "{%- if builtin_tools is defined or tools is not none %}\n",
+ " {{- \"Environment: ipython\n",
+ "\" }}\n",
+ "{%- endif %}\n",
+ "{%- if builtin_tools is defined %}\n",
+ " {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\n",
+ "\n",
+ "\"}}\n",
+ "{%- endif %}\n",
+ "{{- \"Cutting Knowledge Date: December 2023\n",
+ "\" }}\n",
+ "{{- \"Today Date: \" + date_string + \"\n",
+ "\n",
+ "\" }}\n",
+ "{%- if tools is not none and not tools_in_user_message %}\n",
+ " {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n",
+ " {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n",
+ " {{- \"Do not use variables.\n",
+ "\n",
+ "\" }}\n",
+ " {%- for t in tools %}\n",
+ " {{- t | tojson(indent=4) }}\n",
+ " {{- \"\n",
+ "\n",
+ "\" }}\n",
+ " {%- endfor %}\n",
+ "{%- endif %}\n",
+ "{{- system_message }}\n",
+ "{{- \"<|eot_id|>\" }}\n",
+ "\n",
+ "{#- Custom tools are passed in a user message with some extra guidance #}\n",
+ "{%- if tools_in_user_message and not tools is none %}\n",
+ " {#- Extract the first user message so we can plug it in here #}\n",
+ " {%- if messages | length != 0 %}\n",
+ " {%- set first_user_message = messages[0]['content'] %}\n",
+ " {%- set messages = messages[1:] %}\n",
+ " {%- else %}\n",
+ " {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n",
+ "{%- endif %}\n",
+ " {{- '<|start_header_id|>user<|end_header_id|>\n",
+ "\n",
+ "' -}}\n",
+ " {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n",
+ " {{- \"with its proper arguments that best answers the given prompt.\n",
+ "\n",
+ "\" }}\n",
+ " {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n",
+ " {{- \"Do not use variables.\n",
+ "\n",
+ "\" }}\n",
+ " {%- for t in tools %}\n",
+ " {{- t | tojson(indent=4) }}\n",
+ " {{- \"\n",
+ "\n",
+ "\" }}\n",
+ " {%- endfor %}\n",
+ " {{- first_user_message + \"<|eot_id|>\"}}\n",
+ "{%- endif %}\n",
+ "\n",
+ "{%- for message in messages %}\n",
+ " {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n",
+ " {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n",
+ "\n",
+ "'+ message['content'] + '<|eot_id|>' }}\n",
+ " {%- elif 'tool_calls' in message %}\n",
+ " {%- if not message.tool_calls|length == 1 %}\n",
+ " {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n",
+ " {%- endif %}\n",
+ " {%- set tool_call = message.tool_calls[0].function %}\n",
+ " {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n",
+ " {{- '<|start_header_id|>assistant<|end_header_id|>\n",
+ "\n",
+ "' -}}\n",
+ " {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n",
+ " {%- for arg_name, arg_val in tool_call.arguments | items %}\n",
+ " {{- arg_name + '=\"' + arg_val + '\"' }}\n",
+ " {%- if not loop.last %}\n",
+ " {{- \", \" }}\n",
+ " {%- endif %}\n",
+ " {%- endfor %}\n",
+ " {{- \")\" }}\n",
+ " {%- else %}\n",
+ " {{- '<|start_header_id|>assistant<|end_header_id|>\n",
+ "\n",
+ "' -}}\n",
+ " {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n",
+ " {{- '\"parameters\": ' }}\n",
+ " {{- tool_call.arguments | tojson }}\n",
+ " {{- \"}\" }}\n",
+ " {%- endif %}\n",
+ " {%- if builtin_tools is defined %}\n",
+ " {#- This means we're in ipython mode #}\n",
+ " {{- \"<|eom_id|>\" }}\n",
+ " {%- else %}\n",
+ " {{- \"<|eot_id|>\" }}\n",
+ " {%- endif %}\n",
+ " {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n",
+ " {{- \"<|start_header_id|>ipython<|end_header_id|>\n",
+ "\n",
+ "\" }}\n",
+ " {%- if message.content is mapping or message.content is iterable %}\n",
+ " {{- message.content | tojson }}\n",
+ " {%- else %}\n",
+ " {{- message.content }}\n",
+ " {%- endif %}\n",
+ " {{- \"<|eot_id|>\" }}\n",
+ " {%- endif %}\n",
+ "{%- endfor %}\n",
+ "{%- if add_generation_prompt %}\n",
+ " {{- '<|start_header_id|>assistant<|end_header_id|>\n",
+ "\n",
+ "' }}\n",
+ "{%- endif %}\n",
+ "\n",
+ "Using chat eos_token: <|eot_id|>\n",
+ "Using chat bos_token: <|begin_of_text|>\n"
  ]
  }
  ],
  "source": [
+ "from llama_cpp import Llama\n",
+ "\n",
+ "llm = Llama.from_pretrained(\n",
+ "\trepo_id=\"Robzy/Llama-3.2-1B-Instruct-Finetuned-q4_k_m\",\n",
+ "\tfilename=\"unsloth.Q4_K_M.gguf\",\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "llama_perf_context_print: load time = 414.62 ms\n",
+ "llama_perf_context_print: prompt eval time = 0.00 ms / 45 tokens ( 0.00 ms per token, inf tokens per second)\n",
+ "llama_perf_context_print: eval time = 0.00 ms / 288 runs ( 0.00 ms per token, inf tokens per second)\n",
+ "llama_perf_context_print: total time = 8736.94 ms / 333 tokens\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "{'id': 'chatcmpl-7b3f051b-3008-4c34-afb4-da527e07904c',\n",
+ " 'object': 'chat.completion',\n",
+ " 'created': 1733027296,\n",
+ " 'model': '/home/robert/.cache/huggingface/hub/models--Robzy--Llama-3.2-1B-Instruct-Finetuned-q4_k_m/snapshots/49dc2f37761bb04ce3513b70087676029ccd4f20/./unsloth.Q4_K_M.gguf',\n",
+ " 'choices': [{'index': 0,\n",
+ " 'message': {'role': 'assistant',\n",
+ " 'content': \"The tower is a prominent landmark in the capital of France, standing tall and proud in the heart of the city. It is a grandiose structure, with a sleek and modern design that reflects the country's rich history and architectural heritage. The tower is adorned with intricate details and ornate carvings, adding to its majestic appearance.\\n\\nThe tower is a marvel of engineering, with a sturdy foundation that allows it to stand tall for centuries. Its height is impressive, with a grand staircase that winds its way up to the top of the tower. The staircase is lined with elegant railings, providing a comfortable and safe path for visitors to ascend.\\n\\nThe tower is also home to a museum, showcasing a vast collection of art and artifacts from French history. The museum is a treasure trove of knowledge, with exhibits on everything from the Renaissance to the modern era. Visitors can explore the exhibits, learning about the country's rich cultural heritage.\\n\\nThe tower is a popular destination for tourists and locals alike, offering a unique and unforgettable experience. Visitors can take a guided tour of the tower, learning about its history and significance. The tower is also a popular spot for weddings and other special events, making it a beloved landmark in the city.\\n\\nOverall, the tower is a stunning and iconic landmark that reflects the best of French culture and architecture. Its grandeur and beauty make it a must-visit destination for anyone traveling to the capital of France.\"},\n",
+ " 'logprobs': None,\n",
+ " 'finish_reason': 'stop'}],\n",
+ " 'usage': {'prompt_tokens': 45, 'completion_tokens': 288, 'total_tokens': 333}}"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "messages = [\n",
+ " {\"role\": \"user\", \"content\": \"Describe a tall tower in the capital of France.\"},\n",
+ "]\n",
+ "llm.create_chat_completion(messages)"
  ]
  }
  ],
  "metadata": {
  "kernelspec": {
+ "display_name": ".venv2",
  "language": "python",
  "name": "python3"
  },
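Note on the long stderr block above: llama.cpp echoes the GGUF metadata and the embedded Llama 3.2 chat template because llama-cpp-python loads models verbosely by default. Below is a minimal sketch of a quieter load with explicit generation settings; it assumes, per llama-cpp-python's documented API, that from_pretrained forwards extra keyword arguments such as n_ctx and verbose to the Llama constructor and that create_chat_completion accepts max_tokens and temperature.

from llama_cpp import Llama

# Same quantized checkpoint as in the notebook cell above; verbose=False is assumed
# to suppress the metadata / chat-template dump seen in the cell output.
llm = Llama.from_pretrained(
    repo_id="Robzy/Llama-3.2-1B-Instruct-Finetuned-q4_k_m",
    filename="unsloth.Q4_K_M.gguf",
    n_ctx=2048,       # context window; the GGUF metadata advertises up to 131072
    verbose=False,
)

result = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Describe a tall tower in the capital of France."}],
    max_tokens=256,   # cap the reply length instead of relying on the library default
    temperature=0.7,
)
print(result["choices"][0]["message"]["content"])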
requirements.txt CHANGED
@@ -1,3 +1,3 @@
  huggingface_hub==0.25.2
- unsloth
- transformers
+ gradio
+ llama-cpp-python
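With unsloth and transformers dropped, the Space's runtime dependencies are reduced to huggingface_hub, gradio, and llama-cpp-python. A minimal pre-deploy smoke test is sketched below; the file name is hypothetical and not part of this commit, and the __version__ attributes are an assumption that holds for current releases of these packages.

# smoke_test.py - hypothetical helper, not included in this commit.
# Confirms the trimmed requirements import cleanly before the Space is rebuilt.
import gradio
import huggingface_hub
import llama_cpp

for name, mod in [("gradio", gradio), ("huggingface_hub", huggingface_hub), ("llama_cpp", llama_cpp)]:
    print(name, getattr(mod, "__version__", "unknown"))

# requirements.txt pins huggingface_hub==0.25.2; fail early if the pin has drifted.
assert huggingface_hub.__version__ == "0.25.2", "unexpected huggingface_hub version"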