#!/bin/bash
# LLM-Models / convert.sh
# Convert the Meta-Llama-3-70B Hugging Face checkpoint to GGUF at fp16 precision.
python llm/llama.cpp/convert-hf-to-gguf.py /home/amd/workspace/Arun/data_dir/llamaCpp/ollama/models/meta-llama/Meta-Llama-3-70B --outtype f16 --outfile output/Meta-Llama-3-70B_fp16.bin

# Earlier runs: Meta-Llama-3-8B-Instruct via the same converter, and a Llama 2 model via the legacy convert.py.
#python llm/llama.cpp/convert-hf-to-gguf.py models/meta-llama/llama3_model/ --outtype f16 --outfile output/Meta-Llama-3-8B-Instruct_fp16.bin
#python llm/llama.cpp/convert.py ./llama2_model --outtype f16 --outfile output/converted_f16.bin
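
# A minimal parameterized sketch of the same conversion step, kept commented out
# like the alternatives above. It assumes the llama.cpp checkout sits under
# llm/llama.cpp and takes the model directory as an (hypothetical) first argument;
# only the --outtype and --outfile flags already used above are relied on.
# Example usage: ./convert.sh models/meta-llama/Meta-Llama-3-8B-Instruct
#MODEL_DIR="${1:?usage: ./convert.sh <hf-model-dir>}"
#OUT_NAME="$(basename "$MODEL_DIR")_fp16.bin"
#mkdir -p output
#python llm/llama.cpp/convert-hf-to-gguf.py "$MODEL_DIR" --outtype f16 --outfile "output/$OUT_NAME"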