xzuyn committed
Commit c9f41a3
Parent(s): 0afb719

Update app.py

Files changed (1)
1. app.py: +5 -2
app.py CHANGED
@@ -22,6 +22,7 @@ def tokenize(input_text):
     phi2_tokens = len(phi2_tokenizer(input_text, add_special_tokens=True)["input_ids"])
     t5_tokens = len(t5_tokenizer(input_text, add_special_tokens=True)["input_ids"])
     gemma_tokens = len(gemma_tokenizer(input_text, add_special_tokens=True)["input_ids"])
+    command_r_tokens = len(command_r_tokenizer(input_text, add_special_tokens=True)["input_ids"])
 
     results = {
         "LLaMa": llama_tokens,
@@ -32,7 +33,8 @@ def tokenize(input_text):
         "Falcon": falcon_tokens,
         "Phi-2": phi2_tokens,
         "T5": t5_tokens,
-        "Gemma": gemma_tokens
+        "Gemma": gemma_tokens,
+        "Command-R": command_r_tokens
     }
 
     # Sort the results in descending order based on token length
@@ -51,6 +53,7 @@ if __name__ == "__main__":
     phi2_tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
    t5_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xxl")
     gemma_tokenizer = AutoTokenizer.from_pretrained("alpindale/gemma-2b")
+    command_r_tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-plus")
 
-    iface = gr.Interface(fn=tokenize, inputs=gr.Textbox(lines=9), outputs="text")
+    iface = gr.Interface(fn=tokenize, inputs=gr.Textbox(lines=10), outputs="text")
     iface.launch()
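
A minimal standalone sanity check of the tokenizer added in this commit. The repo id and the add_special_tokens=True call mirror app.py; the sample text and the sort-and-print step are illustrative assumptions, since the diff shows only the "# Sort the results..." comment, not the code beneath it.

from transformers import AutoTokenizer

# Load the tokenizer added by this commit and count tokens for a sample string,
# mirroring the call used in app.py's tokenize().
command_r_tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-plus")

sample_text = "Hello, world!"
results = {
    "Command-R": len(command_r_tokenizer(sample_text, add_special_tokens=True)["input_ids"]),
}

# Rank tokenizers from most to fewest tokens, as the comment in app.py describes.
ranked = sorted(results.items(), key=lambda item: item[1], reverse=True)
print("\n".join(f"{name}: {count} tokens" for name, count in ranked))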