# Convert the Meta-Llama-3-70B Hugging Face checkpoint to GGUF in f16 precision.
python llm/llama.cpp/convert-hf-to-gguf.py /home/amd/workspace/Arun/data_dir/llamaCpp/ollama/models/meta-llama/Meta-Llama-3-70B --outtype f16 --outfile output/Meta-Llama-3-70B_fp16.bin

# Alternative invocations, kept for reference:
# Convert Meta-Llama-3-8B-Instruct with the same HF-to-GGUF converter.
#python llm/llama.cpp/convert-hf-to-gguf.py models/meta-llama/llama3_model/ --outtype f16 --outfile output/Meta-Llama-3-8B-Instruct_fp16.bin
# Convert a Llama 2 checkpoint with the older convert.py script.
#python llm/llama.cpp/convert.py ./llama2_model --outtype f16 --outfile output/converted_f16.bin
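
# A possible follow-up step (not part of the original commands): quantize the f16 GGUF
# with llama.cpp's quantize tool to reduce model size, assuming the binary has been built
# (e.g. via `make` in llm/llama.cpp; newer builds name it llama-quantize). Q4_K_M is one
# commonly used quantization type; the output path below is a placeholder.
#./llm/llama.cpp/quantize output/Meta-Llama-3-70B_fp16.bin output/Meta-Llama-3-70B_q4_k_m.gguf Q4_K_M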