abdulmalek9 committed
Commit
74d1991
1 Parent(s): ea3db07

Update app.py

Files changed (1)
  1. app.py +28 -16
app.py CHANGED
@@ -1,39 +1,51 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+# from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import transformers
 import torch
 from huggingface_hub import login
 from langchain.llms import HuggingFacePipeline
-
+from transformers import AutoTokenizer, AutoModelForCausalLM
 # login(token=token)
 
 def greet(name):
     return str(int(name)+10)
 
 
+
+tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
+model = AutoModelForCausalLM.from_pretrained("google/gemma-7b")
+
+input_text = "Write me a poem about Machine Learning."
+input_ids = tokenizer(input_text, return_tensors="pt")
+
+outputs = model.generate(**input_ids)
+print(tokenizer.decode(outputs[0]))
+
+
+
 # Load model directly
 # Load model directly
-from transformers import AutoTokenizer, AutoModelForCausalLM
+# from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
-model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
+# tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
+# model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
 
 
 # model = "meta-llama/Llama-2-13b-chat-hf" # meta-llama/Llama-2-7b-hf
 #
 # tokenizer = AutoTokenizer.from_pretrained(model, use_auth_token=True)
 
-pipe = pipeline(
-    "text2text-generation",
-    model=model,
-    tokenizer=tokenizer,
-    max_length=512,
-    temperature=0.5,
-    top_p=0.95,
-    repetition_penalty=1.15
-)
-
-local_llm = HuggingFacePipeline(pipeline=pipe)
+# pipe = pipeline(
+#     "text2text-generation",
+#     model=model,
+#     tokenizer=tokenizer,
+#     max_length=512,
+#     temperature=0.5,
+#     top_p=0.95,
+#     repetition_penalty=1.15
+# )
+
+# local_llm = HuggingFacePipeline(pipeline=pipe)
 
 # def get_llama_response(prompt: str) -> None:
 #     """