s3nh committed on
Commit 519adf1
1 Parent(s): b0cf564

Update app.py

Files changed (1)
  1. app.py +126 -3
app.py CHANGED
@@ -1,3 +1,6 @@
+"""Run codes."""
+# pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring
+# ruff: noqa: E501
 import os
 import platform
 import random
@@ -13,11 +16,131 @@ from ctransformers import AutoModelForCausalLM
 from dl_hf_model import dl_hf_model
 from loguru import logger
 
-LLM = AutoModelForCausalLM.from_pretrained("s3nh/mamba-gpt-3b-GGML/mamba-gpt-3b.ggmlv3.q8_0.bin",
-                                           model_type="llama")
+filename_list = [
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q2_K.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_L.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_M.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_S.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_S.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_0.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_1.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_M.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_S.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q6_K.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q8_0.bin",
+]
+
+URL = "https://huggingface.co/s3nh/mamba-gpt-3b-GGML/resolve/main/mamba-gpt-3b.ggmlv3.q8_0.bin"  # 4.05G
+
+_ = (
+    "golay" in platform.node()
+    or "okteto" in platform.node()
+    or Path("/kaggle").exists()
+    # or psutil.cpu_count(logical=False) < 4
+    or 1  # run 7b in hf
+)
+
+if _:
+    # url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q2_K.bin"
+    url = "https://huggingface.co/s3nh/mamba-gpt-3b-GGML/resolve/main/mamba-gpt-3b.ggmlv3.q8_0.bin"  # 2.87G
+    # url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin"  # 2.87G
+    # url = "https://huggingface.co/TheBloke/llama2_7b_chat_uncensored-GGML/blob/main/llama2_7b_chat_uncensored.ggmlv3.q4_K_M.bin"  # 4.08G
+
+
+prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction: {user_prompt}
+
+### Response:
+"""
+
+prompt_template = """System: You are a helpful,
+respectful and honest assistant. Always answer as
+helpfully as possible, while being safe. Your answers
+should not include any harmful, unethical, racist,
+sexist, toxic, dangerous, or illegal content. Please
+ensure that your responses are socially unbiased and
+positive in nature. If a question does not make any
+sense, or is not factually coherent, explain why instead
+of answering something not correct. If you don't know
+the answer to a question, please don't share false
+information.
+User: {prompt}
+Assistant: """
+
+prompt_template = """System: You are a helpful assistant.
+User: {prompt}
+Assistant: """
+
+prompt_template = """Question: {question}
+Answer: Let's work this out in a step by step way to be sure we have the right answer."""
+
+prompt_template = """[INST] <<SYS>>
+You are a helpful, respectful and honest assistant. Always answer as helpfully as possible assistant. Think step by step.
+<</SYS>>
+
+What NFL team won the Super Bowl in the year Justin Bieber was born?
+[/INST]"""
+
+prompt_template = """[INST] <<SYS>>
+You are an unhelpful assistant. Always answer as helpfully as possible. Think step by step. <</SYS>>
+
+{question} [/INST]
+"""
+
+prompt_template = """[INST] <<SYS>>
+You are a helpful assistant.
+<</SYS>>
+
+{question} [/INST]
+"""
+
+prompt_template = """### HUMAN:
+{question}
+
+### RESPONSE:"""
+
+_ = [elm for elm in prompt_template.splitlines() if elm.strip()]
+stop_string = [elm.split(":")[0] + ":" for elm in _][-2]
+
+logger.debug(f"{stop_string=} not used")
+
+_ = psutil.cpu_count(logical=False) - 1
+cpu_count: int = int(_) if _ else 1
+logger.debug(f"{cpu_count=}")
+
+LLM = None
+
+try:
+    model_loc, file_size = dl_hf_model(url)
+except Exception as exc_:
+    logger.error(exc_)
+    raise SystemExit(1) from exc_
+
+LLM = AutoModelForCausalLM.from_pretrained(
+    model_loc,
+    model_type="llama",
+    # threads=cpu_count,
+)
+
+logger.info(f"done load llm {model_loc=} {file_size=}G")
+
+os.environ["TZ"] = "Asia/Shanghai"
+try:
+    time.tzset()  # type: ignore # pylint: disable=no-member
+except Exception:
+    # Windows
+    logger.warning("Windows, cant run time.tzset()")
+
+_ = """
+ns = SimpleNamespace(
+    response="",
+    generator=(_ for _ in []),
+)
+# """
 
 @dataclass
 class GenerationConfig:
@@ -336,4 +459,4 @@ else:
     concurrency_count = 1
 logger.info(f"{concurrency_count=}")
 
-block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
+block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
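
For readers who want to try the core change in isolation: the commit replaces a hard-coded `repo/file` string passed to `from_pretrained` (which is not a valid repo id for ctransformers) with a download-then-load flow through `dl_hf_model`. Below is a minimal standalone sketch of that flow, assuming the same `ctransformers` and `dl-hf-model` packages app.py imports; the prompt text and generation parameters are illustrative, not taken from this commit.

```python
"""Minimal sketch of the download-then-load flow used in app.py (illustrative)."""
from ctransformers import AutoModelForCausalLM
from dl_hf_model import dl_hf_model

url = "https://huggingface.co/s3nh/mamba-gpt-3b-GGML/resolve/main/mamba-gpt-3b.ggmlv3.q8_0.bin"

# dl_hf_model downloads/caches the GGML file locally and returns
# (local_path, size_in_GB), matching the tuple unpacking in the commit.
model_loc, file_size = dl_hf_model(url)

# mamba-gpt-3b is an OpenLLaMA-family model, so ctransformers loads it
# with model_type="llama", as the commit does.
llm = AutoModelForCausalLM.from_pretrained(model_loc, model_type="llama")

# A ctransformers model object is callable; this reuses the
# "### HUMAN / ### RESPONSE" template that prompt_template ends up holding.
prompt = "### HUMAN:\nName three GGML quantization levels.\n\n### RESPONSE:"
print(llm(prompt, max_new_tokens=128, temperature=0.8))
```

Loading from the local path returned by `dl_hf_model` is one way to make this work; passing the repo id and filename separately to `from_pretrained` (ctransformers accepts a `model_file` argument) would likely also fix the removed call.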