909ahmed committed
Commit 5dcacdd
1 Parent(s): f2c757e

few changes added

Files changed (1)
  1. app.py +145 -40
app.py CHANGED
@@ -1,21 +1,139 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+n_embd = 64
+dropout = 0.0
+block_size = 32
+vocab_size = 65
+n_head = 4
+n_layer = 4
+
+class Head(nn.Module):
+
+    def __init__(self, head_size):
+        super().__init__()
+        self.key = nn.Linear(n_embd, head_size, bias=False)
+        self.query = nn.Linear(n_embd, head_size, bias=False)
+        self.value = nn.Linear(n_embd, head_size, bias=False)
+        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))  # causal mask
+
+        self.dropout = nn.Dropout(dropout)
+
+    def forward(self, x):
+        B, T, C = x.shape
+        k = self.key(x)
+        q = self.query(x)
+        wei = q @ k.transpose(-2, -1) * C**-0.5  # scaled dot-product attention scores
+        wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))  # block attention to future positions
+        wei = F.softmax(wei, dim=-1)
+        wei = self.dropout(wei)
+
+        v = self.value(x)
+        out = wei @ v
+        return out
+
+class MultiHeadAttention(nn.Module):
+
+    def __init__(self, num_heads, head_size):
+        super().__init__()
+        self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
+        self.proj = nn.Linear(n_embd, n_embd)
+        self.dropout = nn.Dropout(dropout)
+
+    def forward(self, x):
+        out = torch.cat([h(x) for h in self.heads], dim=-1)  # concatenate the per-head outputs
+        out = self.dropout(self.proj(out))
+        return out
+
+class FeedFoward(nn.Module):
+
+    def __init__(self, n_embd):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.Linear(n_embd, 4 * n_embd),
+            nn.ReLU(),
+            nn.Linear(4 * n_embd, n_embd),
+            nn.Dropout(dropout),
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+class Block(nn.Module):
+
+    def __init__(self, n_embd, n_head):
+        super().__init__()
+        head_size = n_embd // n_head
+        self.sa = MultiHeadAttention(n_head, head_size)
+        self.ffwd = FeedFoward(n_embd)
+        self.ln1 = nn.LayerNorm(n_embd)
+        self.ln2 = nn.LayerNorm(n_embd)
+
+    def forward(self, x):
+        x = x + self.sa(self.ln1(x))  # pre-norm residual attention
+        x = x + self.ffwd(self.ln2(x))  # pre-norm residual feed-forward
+        return x
+
+class BigramLanguageModel(nn.Module):
+
+    def __init__(self):
+        super().__init__()
+        self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
+        self.position_embedding_table = nn.Embedding(block_size, n_embd)
+        self.blocks = nn.Sequential(*[Block(n_embd, n_head=n_head) for _ in range(n_layer)])
+        self.ln_f = nn.LayerNorm(n_embd)
+        self.lm_head = nn.Linear(n_embd, vocab_size)
+
+    def forward(self, idx, targets=None):
+        B, T = idx.shape
+
+        tok_emb = self.token_embedding_table(idx)
+        pos_emb = self.position_embedding_table(torch.arange(T))
+        x = tok_emb + pos_emb
+        x = self.blocks(x)
+        x = self.ln_f(x)
+        logits = self.lm_head(x)
+
+        if targets is None:
+            loss = None
+        else:
+            B, T, C = logits.shape
+            logits = logits.view(B*T, C)
+            targets = targets.view(B*T)
+            loss = F.cross_entropy(logits, targets)
+
+        return logits, loss
+
+    def generate(self, idx, max_new_tokens):
+        for _ in range(max_new_tokens):
+
+            idx_cond = idx[:, -block_size:]  # crop context to the last block_size tokens
+            logits, loss = self(idx_cond)
+            logits = logits[:, -1, :]  # keep only the final time step
+            probs = F.softmax(logits, dim=-1)
+            idx_next = torch.multinomial(probs, num_samples=1)
+            idx = torch.cat((idx, idx_next), dim=1)
+
+        return idx
+
+
+chars = "\n !$&',-.3:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+itos = { i:ch for i,ch in enumerate(chars) }
+stoi = { ch:i for i,ch in enumerate(chars) }
+
+decode = lambda l: ''.join([itos[i] for i in l])
+encode = lambda s: [stoi[c] for c in s]
+
+model = BigramLanguageModel()  # note: no trained weights are loaded
 
 
 def respond(
     message,
     history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
+    messages = [{"role": "system", "content": "Cocaine"}]
 
     for val in history:
         if val[0]:
@@ -27,36 +145,23 @@ def respond(
 
     response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
+    input_txt = encode(message)  # character-level encode of the user message
+    context = torch.tensor(input_txt).unsqueeze(0)  # add a batch dimension: (1, T)
+    # response = decode(model.generate(context, max_new_tokens=2000)[0].tolist())
+
+    idx = context
+    for _ in range(2000):
+
+        idx_cond = idx[:, -block_size:]  # crop to the last block_size tokens
+        logits, _ = model(idx_cond)  # BigramLanguageModel.forward returns (logits, loss)
+        logits = logits[:, -1, :]
+        probs = F.softmax(logits, dim=-1)
+        idx_next = torch.multinomial(probs, num_samples=1)
+        idx = torch.cat((idx, idx_next), dim=1)
+
+        yield decode(idx[0].tolist())  # stream the full text so far, not just the newest character
+
+demo = gr.ChatInterface(respond)
 
 
 if __name__ == "__main__":
 
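For reference, a minimal way to exercise the model this commit adds. This is a sketch, not part of the commit: it assumes `app.py` is importable as a module, and since no trained weights are loaded, the generated characters are effectively random draws from the 65-character vocabulary.

import torch
from app import model, encode, decode  # assumption: app.py importable as a module

prompt = "ROMEO:"  # every character must appear in the chars vocabulary
context = torch.tensor([encode(prompt)], dtype=torch.long)  # (1, T) token ids
with torch.no_grad():
    out = model.generate(context, max_new_tokens=50)[0].tolist()
print(decode(out))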