tinybiggames's picture
Update README.md
c04b11d verified
|
raw
history blame
2.25 kB
metadata
license: other
tags:
  - generated_from_trainer
  - axolotl
  - llama-cpp
  - gguf-my-repo
  - LMEngine
base_model: meta-llama/Meta-Llama-3-8B
datasets:
  - cognitivecomputations/Dolphin-2.9
  - teknium/OpenHermes-2.5
  - m-a-p/CodeFeedback-Filtered-Instruction
  - cognitivecomputations/dolphin-coder
  - cognitivecomputations/samantha-data
  - microsoft/orca-math-word-problems-200k
  - Locutusque/function-calling-chatml
  - internlm/Agent-FLAN
model-index:
  - name: out
    results: []

tinybiggames/dolphin-2.9.1-llama-3-8b-Q4_K_M-GGUF

This model was converted to GGUF format from cognitivecomputations/dolphin-2.9.1-llama-3-8b using llama.cpp via ggml.ai's GGUF-my-repo space. Refer to the original model card for more details on the model.

Use with tinyBigGAMES's LMEngine Inference Library

How to configure LMEngine:

Config_Init(
 'C:/LLM/gguf', // path to model files
 -1             // number of GPU layers; -1 to use all available layers
);

How to define model:

Model_Define('dolphin-2.9.1-llama-3-8b.Q4_K_M.gguf',
  'dolphin-llama3:8B:Q4KM', 8000,
  '<|im_start|>{role}\n{content}<|im_end|>\n',
  '<|im_start|>assistant');

How to add a message:

Message_Add(
  ROLE_USER,    // role
 'What is AI?'  // content
);

{role} - will be substituted with the message "role"
{content} - will be substituted with the message "content"

How to do inference:

var
  LTokenOutputSpeed: Single;
  LInputTokens: Int32;
  LOutputTokens: Int32;
  LTotalTokens: Int32;
  
if Inference_Run('dolphin-llama3:8B:Q4KM', 1024) then
  begin
    Inference_GetUsage(nil, @LTokenOutputSpeed, @LInputTokens, @LOutputTokens,
      @LTotalTokens);
    Console_PrintLn('', FG_WHITE);
    Console_PrintLn('Tokens :: Input: %d, Output: %d, Total: %d, Speed: %3.1f t/s',
      FG_BRIGHTYELLOW, LInputTokens, LOutputTokens, LTotalTokens, LTokenOutputSpeed);
  end
else
  begin
    Console_PrintLn('', FG_WHITE);
    Console_PrintLn('Error: %s', FG_RED, Error_Get());
  end;