import gradio as gr
import os, gc, copy, torch
from huggingface_hub import hf_hub_download
from pynvml import *
# Flag to check if GPU is present
HAS_GPU = False
# Model title and context size limit
ctx_limit = 2000
title = "RWKV-5-World-1B5-v2-Translator"
model_file = "RWKV-5-World-1B5-v2-20231025-ctx4096"
# Get the GPU count
try:
    nvmlInit()
    GPU_COUNT = nvmlDeviceGetCount()
    if GPU_COUNT > 0:
        HAS_GPU = True
        gpu_h = nvmlDeviceGetHandleByIndex(0)
except NVMLError as error:
    print(error)
os.environ["RWKV_JIT_ON"] = '1'
# Model strategy to use
MODEL_STRAT = "cpu bf16"
os.environ["RWKV_CUDA_ON"] = '0' # if '1' then use CUDA kernel for seq mode (much faster)
# Switch to GPU mode
if HAS_GPU:
    os.environ["RWKV_CUDA_ON"] = '1'
    MODEL_STRAT = "cuda bf16"
# Load the model
from rwkv.model import RWKV
model_path = hf_hub_download(repo_id="BlinkDL/rwkv-5-world", filename=f"{model_file}.pth")
model = RWKV(model=model_path, strategy=MODEL_STRAT)
from rwkv.utils import PIPELINE
pipeline = PIPELINE(model, "rwkv_vocab_v20230424")
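
# PIPELINE bundles the world-model tokenizer (encode/decode) with sampling
# helpers such as sample_logits(); all three are used by translate() below.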
# State copy
def universal_deepcopy(obj):
    if hasattr(obj, 'clone'):  # Assuming it's a tensor if it has a clone method
        return obj.clone()
    elif isinstance(obj, list):
        return [universal_deepcopy(item) for item in obj]
    else:
        return copy.deepcopy(obj)
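
# Note: RWKV states are nested lists of torch tensors; tensor.clone() keeps each
# tensor on its current device, which is why it is preferred over a generic
# deepcopy for the tensor leaves.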
# For debugging mostly
def inspect_structure(obj, depth=0):
    indent = "  " * depth
    obj_type = type(obj).__name__
    if isinstance(obj, list):
        print(f"{indent}List (length {len(obj)}):")
        for item in obj:
            inspect_structure(item, depth + 1)
    elif isinstance(obj, dict):
        print(f"{indent}Dict (length {len(obj)}):")
        for key, value in obj.items():
            print(f"{indent}  Key: {key}")
            inspect_structure(value, depth + 1)
    else:
        print(f"{indent}{obj_type}")
# Precomputation of the state
def precompute_state(text):
    state = None
    text_encoded = pipeline.encode(text)
    _, state = model.forward(text_encoded, state)
    return state
# Precomputing the base instruction set
INSTRUCT_PREFIX = '''
You are a translator bot that can translate text to any language.
And will respond only with the translated text, without additional comments.

## From English:
It is not enough to know, we must also apply; it is not enough to will, we must also do.

## To Polish:
Nie wystarczy wiedzieć, trzeba także zastosować; nie wystarczy chcieć, trzeba też działać.

## From Spanish:
La muerte no nos concierne, porque mientras existamos, la muerte no está aquí. Y cuando llega, ya no existimos.

## To English:
Death does not concern us, because as long as we exist, death is not here. And when it does come, we no longer exist.
'''
# Get the prefix state
PREFIX_STATE = precompute_state(INSTRUCT_PREFIX)
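
# Design note: the instruction prefix is evaluated through the model exactly once
# at startup; each request then copies PREFIX_STATE (via universal_deepcopy)
# instead of re-processing the instructions, so only the user's text costs
# inference time per call.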
# Translation logic
def translate(text, source_language, target_language, inState=PREFIX_STATE):
    prompt = f"## From {source_language}:\n{text}\n\n## To {target_language}:\n"
    ctx = prompt.strip()
    all_tokens = []
    out_last = 0
    out_str = ''
    state = None
    if inState is not None:
        state = universal_deepcopy(inState)

    # Clear GC
    gc.collect()
    if HAS_GPU:
        torch.cuda.empty_cache()

    # Markers that indicate the model has generated past the translation itself
    stop_markers = [
        "\nHuman:",
        "\nAssistant:",
        "\n#",
        f"\n{source_language}:",
        f"\n{target_language}:",
    ]

    # Generate the output token by token
    for i in range(ctx_limit):
        out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
        token = pipeline.sample_logits(out, temperature=0.1, top_p=0.5)
        if token == 0:  # EOS token
            break
        all_tokens += [token]

        tmp = pipeline.decode(all_tokens[out_last:])
        if '\ufffd' in tmp:
            continue  # incomplete multi-byte character, wait for more tokens
        out_str += tmp
        out_last = i + 1

        # Trim the output at the first stop marker, then finish
        stopped = False
        for marker in stop_markers:
            if marker in out_str:
                out_str = out_str.split("\n" + marker)[0].split(marker)[0]
                stopped = True
        if stopped:
            break

        # Yield for streaming
        yield out_str.strip()

    del out
    del state

    # Yield the final (trimmed) result
    yield out_str.strip()
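
# Minimal usage sketch (outside Gradio), assuming the model loaded above:
#
#   for partial in translate("Tout est bien qui finit bien.", "French", "English"):
#       print(partial)  # streams progressively longer prefixes of the translation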
# Languages
LANGUAGES = [
"English",
"Zombie Speak",
"Chinese",
"Spanish",
"Bengali",
"Hindi",
"Portuguese",
"Russian",
"Japanese",
"German",
"Chinese (Wu)",
"Javanese",
"Korean",
"French",
"Vietnamese",
"Telugu",
"Chinese (Yue)",
"Marathi",
"Tamil",
"Turkish",
"Urdu",
"Chinese (Min Nan)",
"Chinese (Jin Yu)",
"Gujarati",
"Polish",
"Arabic (Egyptian Spoken)",
"Ukrainian",
"Italian",
"Chinese (Xiang)",
"Malayalam",
"Chinese (Hakka)",
"Kannada",
"Oriya",
"Panjabi (Western)",
"Panjabi (Eastern)",
"Sunda",
"Romanian",
"Bhojpuri",
"Azerbaijani (South)",
"Farsi (Western)",
"Maithili",
"Hausa",
"Arabic (Algerian Spoken)",
"Burmese",
"Serbo-Croatian",
"Chinese (Gan)",
"Awadhi",
"Thai",
"Dutch",
"Yoruba",
"Sindhi",
"Arabic (Moroccan Spoken)",
"Arabic (Saidi Spoken)",
"Uzbek, Northern",
"Malay",
"Amharic",
"Indonesian",
"Igbo",
"Tagalog",
"Nepali",
"Arabic (Sudanese Spoken)",
"Saraiki",
"Cebuano",
"Arabic (North Levantine Spoken)",
"Thai (Northeastern)",
"Assamese",
"Hungarian",
"Chittagonian",
"Arabic (Mesopotamian Spoken)",
"Madura",
"Sinhala",
"Haryanvi",
"Marwari",
"Czech",
"Greek",
"Magahi",
"Chhattisgarhi",
"Deccan",
"Chinese (Min Bei)",
"Belarusan",
"Zhuang (Northern)",
"Arabic (Najdi Spoken)",
"Pashto (Northern)",
"Somali",
"Malagasy",
"Arabic (Tunisian Spoken)",
"Rwanda",
"Zulu",
"Latin",
"Bulgarian",
"Swedish",
"Lombard",
"Oromo (West-central)",
"Pashto (Southern)",
"Kazakh",
"Ilocano",
"Tatar",
"Fulfulde (Nigerian)",
"Arabic (Sanaani Spoken)",
"Uyghur",
"Haitian Creole French",
"Azerbaijani, North",
"Napoletano-calabrese",
"Khmer (Central)",
"Farsi (Eastern)",
"Akan",
"Hiligaynon",
"Kurmanji",
"Shona"
]
# Example data
EXAMPLES = [
    # More people would learn from their mistakes if they weren't so busy denying them.
    ["Többen tanulnának a hibáikból, ha nem lennének annyira elfoglalva, hogy tagadják azokat.", "Hungarian", "English"],
    ["La mejor venganza es el éxito masivo.", "Spanish", "English"],
    ["Tout est bien qui finit bien.", "French", "English"],
    ["Lasciate ogne speranza, voi ch'intrate.", "Italian", "English"],
    ["Errare humanum est.", "Latin", "English"],
]
# Gradio interface
with gr.Blocks(title=title) as demo:
    gr.HTML(f"<div style=\"text-align: center;\"><h1>RWKV-5 World v2 - {title}</h1></div>")
    gr.Markdown("This is the RWKV-5 World v2 1B5 model, tailored for translation tasks.")

    # Input and output components
    text = gr.Textbox(lines=5, label="Source Text", placeholder="Enter the text you want to translate...", value=EXAMPLES[0][0])
    source_language = gr.Dropdown(choices=LANGUAGES, label="Source Language", value=EXAMPLES[0][1])
    target_language = gr.Dropdown(choices=LANGUAGES, label="Target Language", value=EXAMPLES[0][2])
    output = gr.Textbox(lines=5, label="Translated Text")

    # Submission
    submit = gr.Button("Translate", variant="primary")

    # Example data (headers must match the three dataset components)
    data = gr.Dataset(components=[text, source_language, target_language], samples=EXAMPLES, label="Example Translations", headers=["Source Text", "Source Language", "Target Language"])

    # Button actions
    submit.click(translate, [text, source_language, target_language], [output])
    data.click(lambda x: x, [data], [text, source_language, target_language])
# Gradio launch
demo.queue(concurrency_count=1, max_size=10)
demo.launch(share=False, debug=True) |