Arun Kumar Tiwary committed on
Upload folder using huggingface_hub

- Meta-Llama-3-8B-Instruct_fp16.bin +3 -0
- convert.sh +1 -1
Meta-Llama-3-8B-Instruct_fp16.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3d3bc5d62d889e622702a22675cfd765c2638037248c5afda74f1398cb0717d
+size 16068890848
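The added file is only a Git LFS pointer; the actual fp16 weights are stored in LFS. As a minimal sketch (assuming the binary has been pulled into the current directory under the same name), the download can be checked against the pointer's oid and size:

# Verify the downloaded GGUF against the LFS pointer above (GNU coreutils assumed).
sha256sum Meta-Llama-3-8B-Instruct_fp16.bin   # expect a3d3bc5d...cb0717d
stat -c %s Meta-Llama-3-8B-Instruct_fp16.bin  # expect 16068890848 bytes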
convert.sh
CHANGED
@@ -1,3 +1,3 @@
-python llm/llama.cpp/convert-hf-to-gguf.py models/meta-llama/Meta-Llama-3-70B --outtype f16 --outfile output/Meta-Llama-3-70B_fp16.bin
+python llm/llama.cpp/convert-hf-to-gguf.py /home/amd/workspace/Arun/data_dir/llamaCpp/ollama/models/meta-llama/Meta-Llama-3-70B --outtype f16 --outfile output/Meta-Llama-3-70B_fp16.bin
 #python llm/llama.cpp/convert-hf-to-gguf.py models/meta-llama/llama3_model/ --outtype f16 --outfile output/Meta-Llama-3-8B-Instruct_fp16.bin
 #python llm/llama.cpp/convert.py ./llama2_model --outtype f16 --outfile output/converted_f16.bin
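The change only swaps the relative 70B model path for an absolute one; the uploaded Meta-Llama-3-8B-Instruct_fp16.bin presumably comes from the commented-out second line of convert.sh. A minimal reproduction sketch, assuming the 8B-Instruct checkpoint has been downloaded into models/meta-llama/llama3_model/:

# Convert the HF checkpoint to an fp16 GGUF (directory layout is an assumption).
python llm/llama.cpp/convert-hf-to-gguf.py models/meta-llama/llama3_model/ \
    --outtype f16 --outfile output/Meta-Llama-3-8B-Instruct_fp16.bin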