akhil2808 committed
Commit 8172b8c
1 Parent(s): 36a8d65

Update app.py

Files changed (1):
  1. app.py +123 -6
app.py CHANGED
@@ -1,13 +1,130 @@
+import os
+from threading import Thread
+from typing import Iterator
+
 import gradio as gr
 import spaces
 import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+
+MAX_MAX_NEW_TOKENS = 2048
+DEFAULT_MAX_NEW_TOKENS = 1024
+MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))  # prompt budget, overridable via env var
+
+DESCRIPTION = "# Mistral-7B-Instruct-v0.2"  # assumed title: referenced below but never defined in this commit
+LICENSE = ""  # assumed placeholder: referenced below but never defined in this commit
+
+if not torch.cuda.is_available():
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+
+if torch.cuda.is_available():
+    model_id = "mistralai/Mistral-7B-Instruct-v0.2"
+    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    tokenizer.use_default_system_prompt = False
 
-zero = torch.Tensor([0]).cuda()
-print(zero.device) # <-- 'cpu' 🤔
 
 @spaces.GPU
-def greet(n):
-    print(zero.device) # <-- 'cuda:0' 🤗
-    return f"Hello {zero + n} Tensor"
+def generate(
+    message: str,
+    chat_history: list[tuple[str, str]],
+    system_prompt: str,
+    max_new_tokens: int = 1024,
+    temperature: float = 0.6,
+    top_p: float = 0.9,
+    top_k: int = 50,
+    repetition_penalty: float = 1.2,
+) -> Iterator[str]:
+    conversation = []
+    if system_prompt:
+        conversation.append({"role": "system", "content": system_prompt})
+    for user, assistant in chat_history:
+        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+    conversation.append({"role": "user", "content": message})
+
+    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]  # keep only the most recent tokens
+        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+    input_ids = input_ids.to(model.device)
+
+    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        {"input_ids": input_ids},
+        streamer=streamer,
+        max_new_tokens=max_new_tokens,
+        do_sample=True,
+        top_p=top_p,
+        top_k=top_k,
+        temperature=temperature,
+        num_beams=1,
+        repetition_penalty=repetition_penalty,
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)  # generate off the main thread so tokens can stream
+    t.start()
+
+    outputs = []
+    for text in streamer:
+        outputs.append(text)
+        yield "".join(outputs)
+
+
+chat_interface = gr.ChatInterface(
+    fn=generate,
+    additional_inputs=[
+        gr.Textbox(label="System prompt", lines=6),
+        gr.Slider(
+            label="Max new tokens",
+            minimum=1,
+            maximum=MAX_MAX_NEW_TOKENS,
+            step=1,
+            value=DEFAULT_MAX_NEW_TOKENS,
+        ),
+        gr.Slider(
+            label="Temperature",
+            minimum=0.1,
+            maximum=4.0,
+            step=0.1,
+            value=0.6,
+        ),
+        gr.Slider(
+            label="Top-p (nucleus sampling)",
+            minimum=0.05,
+            maximum=1.0,
+            step=0.05,
+            value=0.9,
+        ),
+        gr.Slider(
+            label="Top-k",
+            minimum=1,
+            maximum=1000,
+            step=1,
+            value=50,
+        ),
+        gr.Slider(
+            label="Repetition penalty",
+            minimum=1.0,
+            maximum=2.0,
+            step=0.05,
+            value=1.2,
+        ),
+    ],
+    stop_btn=None,
+    examples=[
+        ["Hello there! How are you doing?"],
+        ["Can you explain briefly to me what is the Python programming language?"],
+        ["Explain the plot of Cinderella in a sentence."],
+        ["How many hours does it take a man to eat a Helicopter?"],
+        ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
+    ],
+)
+
+with gr.Blocks(css="style.css") as demo:
+    gr.Markdown(DESCRIPTION)
+    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
+    chat_interface.render()
+    gr.Markdown(LICENSE)
 
-gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text()).launch()
+if __name__ == "__main__":
+    demo.queue(max_size=20).launch()
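
Since MAX_INPUT_TOKEN_LENGTH is read with os.getenv at import time, the prompt budget can be raised without editing app.py; on a Space it would normally be set in the Space settings. A minimal sketch of the ordering that matters, assuming app.py is imported as a module (the value 8192 is only an example):

    import os

    # Must happen before app.py is imported, because the
    # int(os.getenv(...)) call runs at import time.
    os.environ["MAX_INPUT_TOKEN_LENGTH"] = "8192"

    import app  # app.MAX_INPUT_TOKEN_LENGTH is now 8192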
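Passing load_in_4bit=True directly to from_pretrained works on the transformers releases this Space was written against, but newer releases favor an explicit quantization config. A sketch of the equivalent load, assuming bitsandbytes is installed (this commit pins neither package):

    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    # Equivalent 4-bit load expressed through BitsAndBytesConfig.
    model = AutoModelForCausalLM.from_pretrained(
        "mistralai/Mistral-7B-Instruct-v0.2",
        device_map="auto",
        quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    )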
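Because generate is an ordinary generator that yields the accumulated reply after each streamed token, it can be smoke-tested without launching the Gradio UI. A hypothetical check on a GPU machine (the prompt and token budget are arbitrary):

    # Drain the stream, keeping only the final accumulated string.
    reply = ""
    for reply in generate(
        message="Hello there! How are you doing?",
        chat_history=[],
        system_prompt="",
        max_new_tokens=64,
    ):
        pass  # each yield is the full text generated so far
    print(reply)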