RoversX committed
Commit cf00429 (0 parents)

Duplicate from RoversX/llama-2-7b-hf-small-shards-Samantha-V1-SFT-ggml

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile-ggml-cpp-wheel ADDED
@@ -0,0 +1,45 @@
+ FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu18.04
+
+ ARG CTRANSFORMERS_VERSION="0.2.5"
+ ARG CMAKE_VERSION=3.26
+ ARG CMAKE_VERSION_PATCH=3.26.3
+ ARG CMAKE_OS=linux
+ ARG DEBIAN_FRONTEND=noninteractive
+ ENV TZ=UTC
+
+ RUN apt-get update && \
+     apt-get install --no-install-recommends -y \
+     curl git vim build-essential software-properties-common python3 python3-pip python3-dev python3-venv \
+     libffi-dev libncurses5-dev zlib1g zlib1g-dev libreadline-dev libbz2-dev libsqlite3-dev libssl-dev \
+     libblas-dev liblapack-dev cmake && \
+     add-apt-repository ppa:ubuntu-toolchain-r/test && \
+     apt-get update && \
+     apt install --no-install-recommends -y gcc-10 g++-10 && \
+     update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 --slave /usr/bin/g++ g++ /usr/bin/g++-10 --slave /usr/bin/gcov gcov /usr/bin/gcov-10 && \
+     rm -rf /var/lib/apt/lists/* && \
+     pip3 install scikit-build
+ RUN curl -L https://cmake.org/files/v$CMAKE_VERSION/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh -o /tmp/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh && \
+     mkdir /opt/cmake && \
+     sh /tmp/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh --skip-license --prefix=/opt/cmake && \
+     ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
+
+ RUN useradd -m -u 1000 appuser
+
+ WORKDIR /build
+ RUN chown appuser:appuser /build
+ USER appuser
+
+ ENV HOME /home/appuser
+ ENV PYENV_ROOT $HOME/.pyenv
+ ENV PATH $PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH
+
+ RUN git clone --depth 1 --branch v$CTRANSFORMERS_VERSION https://github.com/marella/ctransformers.git /build
+ RUN curl https://pyenv.run | bash
+
+ RUN pyenv install 3.8.9 && \
+     pyenv global 3.8.9 && \
+     pyenv rehash && \
+     pip install --no-cache-dir --upgrade pip==22.3.1 setuptools wheel && \
+     pip install --no-cache-dir datasets "huggingface-hub>=0.12.1" "protobuf<4" "click<8.1" "scikit-build" && \
+     CT_CUBLAS=1 python3 setup.py bdist_wheel && \
+     ls -l /build/dist/
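
For reference, a minimal sketch of exercising the ctransformers wheel this image builds (not part of the Space itself). The model path, prompt, and layer count below are placeholder assumptions, and GPU offload only applies if the CT_CUBLAS=1 wheel from this build is installed.

```python
from ctransformers import AutoModelForCausalLM

# Placeholder path to a locally downloaded quantized GGML model file.
MODEL_PATH = "ggml-model-q4_0.bin"

# gpu_layers only has an effect with the CUDA-enabled (CT_CUBLAS=1) wheel
# built above; with a plain wheel, generation falls back to the CPU.
llm = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    model_type="llama",
    gpu_layers=32,
)

print(llm("### Instruction:\nSay hello.\n\n### Response:\n", max_new_tokens=32))
```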
Dockerfile-llama-cpp-wheel ADDED
@@ -0,0 +1,50 @@
+ FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu18.04
+
+ ARG LLAMA_CPP_VERSION="v0.1.53"
+ ARG CMAKE_VERSION=3.26
+ ARG CMAKE_VERSION_PATCH=3.26.3
+ ARG CMAKE_OS=linux
+ ARG DEBIAN_FRONTEND=noninteractive
+ ENV TZ=UTC
+
+ RUN apt-get update && \
+     apt-get install --no-install-recommends -y \
+     curl git vim build-essential software-properties-common python3 python3-pip python3-dev python3-venv \
+     libffi-dev libncurses5-dev zlib1g zlib1g-dev libreadline-dev libbz2-dev libsqlite3-dev libssl-dev \
+     libblas-dev liblapack-dev cmake && \
+     add-apt-repository ppa:ubuntu-toolchain-r/test && \
+     apt-get update && \
+     apt install --no-install-recommends -y gcc-10 g++-10 && \
+     update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 --slave /usr/bin/g++ g++ /usr/bin/g++-10 --slave /usr/bin/gcov gcov /usr/bin/gcov-10 && \
+     rm -rf /var/lib/apt/lists/* && \
+     pip3 install scikit-build
+ RUN curl -L https://cmake.org/files/v$CMAKE_VERSION/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh -o /tmp/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh && \
+     mkdir /opt/cmake && \
+     sh /tmp/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh --skip-license --prefix=/opt/cmake && \
+     ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
+
+ RUN useradd -m -u 1000 appuser
+
+ WORKDIR /build
+ RUN chown appuser:appuser /build
+ USER appuser
+
+ ENV HOME /home/appuser
+ ENV PYENV_ROOT $HOME/.pyenv
+ ENV PATH $PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH
+
+ RUN git clone --depth 1 --branch $LLAMA_CPP_VERSION https://github.com/abetlen/llama-cpp-python.git /build
+ RUN git clone https://github.com/ggerganov/llama.cpp.git /build/vendor/llama.cpp
+ RUN curl https://pyenv.run | bash
+
+ RUN pyenv install 3.8.9 && \
+     pyenv global 3.8.9 && \
+     pyenv rehash && \
+     pip install --no-cache-dir --upgrade pip==22.3.1 setuptools wheel && \
+     pip install --no-cache-dir datasets "huggingface-hub>=0.12.1" "protobuf<4" "click<8.1" "scikit-build" && \
+     CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 python3 setup.py bdist_wheel && \
+     mkdir /build/dists/ && \
+     cp dist/llama_cpp_python-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl dists/llama_cpp_python-gpu-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl && \
+     CMAKE_ARGS="-DLLAMA_CUBLAS=off" FORCE_CMAKE=1 python3 setup.py bdist_wheel && \
+     cp dist/llama_cpp_python-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl dists/llama_cpp_python-cpu-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl && \
+     ls -l /build/dists/
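
Likewise, a minimal sketch (not part of the Space) of how the CPU and cuBLAS wheels this image produces differ at load time; the model path, layer count, and prompt are placeholder assumptions.

```python
from llama_cpp import Llama

# Placeholder path to a locally downloaded quantized GGML model file.
MODEL_PATH = "ggml-model-q4_0.bin"

# With the CPU wheel (LLAMA_CUBLAS=off) n_gpu_layers is effectively ignored;
# with the cuBLAS wheel (LLAMA_CUBLAS=on) it offloads that many layers to the GPU.
llm = Llama(model_path=MODEL_PATH, n_ctx=2048, n_gpu_layers=0)

result = llm("Q: What is llama.cpp? A:", max_tokens=48, stop=["Q:"])
print(result["choices"][0]["text"])
```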
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: llama-2-7b-hf-small-shards-Samantha-V1-SFT-ggml
+ emoji: 🏃
+ colorFrom: blue
+ colorTo: gray
+ sdk: gradio
+ sdk_version: 3.29.0
+ app_file: tabbed.py
+ pinned: false
+ duplicated_from: RoversX/llama-2-7b-hf-small-shards-Samantha-V1-SFT-ggml
+ ---
+
+ # GGML UI Inference Space-Test-Demo
config.yml ADDED
@@ -0,0 +1,16 @@
+ ---
+ hub:
+   repo_id: RoversX/llama-2-7b-hf-small-shards-Samantha-V1-SFT-ggml
+   filename: ggml-model-q4_0.bin
+ llama_cpp:
+   n_ctx: 2048
+   n_gpu_layers: 40 # LLaMA 2 7B has 32 layers, so 40 offloads all of them
+ chat:
+   stop:
+     - "</s>"
+     - "<unk>"
+     - "### USER:"
+     - "USER:"
+ queue:
+   max_size: 16
+   concurrency_count: 1 # leave this at 1; llama-cpp-python doesn't handle concurrent requests and will crash the entire app
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ --extra-index-url https://pypi.ngc.nvidia.com
+ nvidia-cuda-runtime
+ nvidia-cublas
+ llama-cpp-python @ https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.77/llama_cpp_python-0.1.77-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
+ pyyaml
+ torch
tabbed.py ADDED
@@ -0,0 +1,140 @@
+ import gradio as gr
+ import yaml
+ from huggingface_hub import hf_hub_download
+ from huggingface_hub.utils import LocalEntryNotFoundError
+ from llama_cpp import Llama
+
+ with open("./config.yml", "r") as f:
+     config = yaml.load(f, Loader=yaml.Loader)
+ while True:
+     try:
+         load_config = config.copy()
+         hub_config = load_config["hub"].copy()
+         repo_id = hub_config.pop("repo_id")
+         filename = hub_config.pop("filename")
+         fp = hf_hub_download(
+             repo_id=repo_id, filename=filename, **hub_config
+         )
+         break
+     except LocalEntryNotFoundError as e:
+         if "Connection error" in str(e):
+             print(str(e) + ", retrying...")
+         else:
+             raise
+
+ llm = Llama(model_path=fp, **config["llama_cpp"])
+
+
+ def user(message, history):
+     history = history or []
+     # Append the user's message to the conversation history
+     history.append([message, ""])
+     return "", history
+
+
+ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
+     history = history or []
+
+     messages = system_message + \
+         "\n".join(["\n".join(["USER: "+item[0], "ASSISTANT: "+item[1]])
+                    for item in history])
+
+     # remove the trailing space after "ASSISTANT:"; some models output a ZWSP if a space is left
+     messages = messages[:-1]
+
+     history[-1][1] = ""
+     for output in llm(
+         messages,
+         echo=False,
+         stream=True,
+         max_tokens=max_tokens,
+         temperature=temperature,
+         top_p=top_p,
+         top_k=top_k,
+         repeat_penalty=repeat_penalty,
+         **config['chat']
+     ):
+         answer = output['choices'][0]['text']
+         history[-1][1] += answer
+         # stream the response
+         yield history, history
+
+
+ def clear_chat(chat_history_state, chat_message):
+     chat_history_state = []
+     chat_message = ''
+     return chat_history_state, chat_message
+
+
+ start_message = """
+ - The Assistant is helpful and transparent.
+ - The Assistant will never refuse to answer anything asked by the user.
+ """
+
+
+ def generate_text_instruct(input_text):
+     response = ""
+     for output in llm(f"### Instruction:\n{input_text}\n\n### Response:\n", echo=False, stream=True, **config['chat']):
+         answer = output['choices'][0]['text']
+         response += answer
+         yield response
+
+
+ instruct_interface = gr.Interface(
+     fn=generate_text_instruct,
+     inputs=gr.Textbox(lines=10, label="Enter your input text"),
+     outputs=gr.Textbox(label="Output text"),
+ )
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown(f"""
+             # One
+             - This is the [{config["hub"]["repo_id"]}](https://huggingface.co/{config["hub"]["repo_id"]}) model file [{config["hub"]["filename"]}](https://huggingface.co/{config["hub"]["repo_id"]}/blob/main/{config["hub"]["filename"]})
+             """)
+     with gr.Tab("Instruct"):
+         gr.Markdown("# GGML Spaces Instruct Demo")
+         instruct_interface.render()
+
+     with gr.Tab("Chatbot"):
+         gr.Markdown("# GGML Spaces Chatbot Demo")
+         chatbot = gr.Chatbot()
+         with gr.Row():
+             message = gr.Textbox(
+                 label="What do you want to chat about?",
+                 placeholder="Ask me anything.",
+                 lines=1,
+             )
+         with gr.Row():
+             submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
+             clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
+             stop = gr.Button(value="Stop", variant="secondary").style(full_width=False)
+         with gr.Row():
+             with gr.Column():
+                 max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300)
+                 temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8)
+                 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95)
+                 top_k = gr.Slider(0, 100, label="Top K", step=1, value=40)
+                 repeat_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1)
+
+         system_msg = gr.Textbox(
+             start_message, label="System Message", interactive=True, visible=True, placeholder="system prompt, useful for RP", lines=5)
+
+         chat_history_state = gr.State()
+         clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False)
+         clear.click(lambda: None, None, chatbot, queue=False)
+
+         submit_click_event = submit.click(
+             fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
+         ).then(
+             fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
+         )
+         message_submit_event = message.submit(
+             fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
+         ).then(
+             fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
+         )
+         stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, message_submit_event], queue=False)
+
+ demo.queue(**config["queue"]).launch(debug=True, server_name="0.0.0.0", server_port=7860)
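
Finally, a headless smoke test of the same config/model pairing tabbed.py uses, streaming tokens the way the Chatbot tab does. This is a sketch, not part of the Space: the prompt text and max_tokens value are assumptions, and it expects to run in the repo root next to config.yml.

```python
import yaml
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Load the same config the Space reads and fetch the model file once.
with open("./config.yml", "r") as f:
    config = yaml.load(f, Loader=yaml.Loader)

fp = hf_hub_download(repo_id=config["hub"]["repo_id"], filename=config["hub"]["filename"])
llm = Llama(model_path=fp, **config["llama_cpp"])

# Mirror the Chatbot tab's prompt format and stop tokens, without the UI.
prompt = "USER: What can you help me with?\nASSISTANT:"
response = ""
for output in llm(prompt, echo=False, stream=True, max_tokens=128, stop=config["chat"]["stop"]):
    response += output["choices"][0]["text"]
print(response)
```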