Corame committed on
Commit
ccda82a
·
verified ·
1 Parent(s): 16bcae7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -15
app.py CHANGED
@@ -1,10 +1,6 @@
1
  #!/usr/bin/env python
2
  # coding: utf-8
3
 
4
-
5
- # In[48]:
6
-
7
-
8
  import gradio as gr
9
  from llama_cpp import Llama
10
  from langchain_community.llms import LlamaCpp
@@ -18,25 +14,39 @@ import pandas as pd
18
  import re
19
  import os
20
  from sklearn.metrics.pairwise import cosine_similarity
 
 
21
 
22
  model = SentenceTransformer('sentence-transformers/paraphrase-multilingual-mpnet-base-v2',device='cpu')
23
 
24
 
25
- llm = LlamaCpp(
26
- model_path=r"MediaTek-Research/Breeze-7B-Instruct-v0_1",
27
- n_gpu_layers=100,
28
- n_batch=512,
29
- n_ctx=3000,
30
- f16_kv=True,
31
- callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
32
- verbose=False,
33
- )
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
  embedd_bk=pd.read_pickle(r".\bk_description1_角色形容詞_677.pkl")
36
  df_bk=pd.read_excel(r".\bk_description1_角色形容詞短文.xlsx")
37
 
38
- def invoke_with_temperature(prompt, temperature=0.4):
39
- return llm.invoke(prompt, temperature=temperature)
40
 
41
  def process_user_input(message):
42
  user_mental_state4= PromptTemplate(
 
1
  #!/usr/bin/env python
2
  # coding: utf-8
3
 
 
 
 
 
4
  import gradio as gr
5
  from llama_cpp import Llama
6
  from langchain_community.llms import LlamaCpp
 
14
  import re
15
  import os
16
  from sklearn.metrics.pairwise import cosine_similarity
17
+ from transformers import AutoTokenizer, AutoModelForCausalLM
18
+
19
 
20
  model = SentenceTransformer('sentence-transformers/paraphrase-multilingual-mpnet-base-v2',device='cpu')
21
 
22
 
23
+ model1 = AutoModelForCausalLM.from_pretrained("MediaTek-Research/Breeze-7B-Instruct-v0_1")
24
+
25
+ def invoke_with_temperature(prompt, temperature=0.4):
26
+ # 將 prompt 編碼為模型的輸入格式
27
+ inputs = tokenizer(prompt, return_tensors="pt")
28
+
29
+ # 使用生成方法生成文本,設置溫度參數
30
+ output = model1.generate(inputs["input_ids"], max_length=200, temperature=temperature, num_return_sequences=1)
31
+
32
+ # 解碼並返回生成的文本
33
+ return tokenizer.decode(output[0], skip_special_tokens=True)
34
+
35
+ # llm = LlamaCpp(
36
+ # model_path=r"MediaTek-Research/Breeze-7B-Instruct-v0_1",
37
+ # n_gpu_layers=100,
38
+ # n_batch=512,
39
+ # n_ctx=3000,
40
+ # f16_kv=True,
41
+ # callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
42
+ # verbose=False,
43
+ # )
44
 
45
  embedd_bk=pd.read_pickle(r".\bk_description1_角色形容詞_677.pkl")
46
  df_bk=pd.read_excel(r".\bk_description1_角色形容詞短文.xlsx")
47
 
48
+ # def invoke_with_temperature(prompt, temperature=0.4):
49
+ # return llm.invoke(prompt, temperature=temperature)
50
 
51
  def process_user_input(message):
52
  user_mental_state4= PromptTemplate(