Update app.py
app.py
CHANGED
@@ -1,147 +1,140 @@
-# This file is .....
-# Author: Hanbin Wang
-# Date: 2023/4/18
-import transformers
 import streamlit as st
-# The heading will be on the right.
-with c2:
-    st.caption("")
-    st.title("MaMaL-Gen(代码生成)")
-st.sidebar.markdown("---")
 """
 """
-st.
-2)您可以下载[MaMaL-Gen](https://huggingface.co/hanbin/MaMaL-Gen)模型,本地测试。(无需科学上网)
-"""
 )
-#
-#
-# "Enter your HuggingFace API key",
-# help="Once you created you HuggingFace account, you can get your free API token in your settings page: https://huggingface.co/settings/tokens",
-# type="password",
-# )
 #
-#
-#
 #
-#
-st.sidebar.markdown("---")
-st.write(
-"> **Tip:** 首次运行需要加载模型,可能需要一定的时间!"
-)
-st.write(
-"> **Tip:** 左侧栏给出了一些good case 和 bad case,you can try it!"
-)
-st.write(
-"> **Tip:** 只支持英文输入,输入过长,效果会变差。只支持Python语言"
-)
-st.sidebar.write(
-"> **Good case:**"
-)
-code_good = """1)Convert a SVG string to a QImage
-2)Try to seek to given offset"""
-st.sidebar.code(code_good, language='python')
-st.sidebar.write(
-"> **Bad cases:**"
-)
-code_bad = """Read an OpenAPI binary file ."""
-st.sidebar.code(code_bad, language='python')
-# Let's add some info about the app to the sidebar.
-st.sidebar.write(
-"""
-App使用 [Streamlit](https://streamlit.io/)🎈 和 [HuggingFace](https://huggingface.co/inference-api)'s [MaMaL-Gen](https://huggingface.co/hanbin/MaMaL-Gen) 模型.
-"""
-)
-# model, tokenizer = load_model("hanbin/MaMaL-Gen")
-st.write("### 输入:")
-input = st.text_area("", height=100)
-button = st.button('生成')
-tokenizer,model = get_model("hanbin/MaMaL-Gen")
-input_ids = tokenizer(input, return_tensors="pt").input_ids
-generated_ids = model.generate(input_ids, max_length=100)
-output = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
-# generator = pipeline('text-generation', model="E:\DenseRetrievalGroup\CodeT5-base")
-# output = generator(input)
-# code = '''def hello():
-#     print("Hello, Streamlit!")'''
-if button:
-    st.write("### 输出:")
-    st.code(output, language='python')
 else:
 if __name__ == '__main__':
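The removed lines above generated code locally from the hanbin/MaMaL-Gen checkpoint, but the `get_model()` helper they call is not visible in this view. Purely as an illustration (not the author's original code), a Streamlit-cached loader for a CodeT5-style sequence-to-sequence checkpoint, an assumption based on the commented-out `CodeT5-base` pipeline, might look like the sketch below. The added lines that follow drop this local-generation flow and replace it with an OpenAI-backed chat agent.

```python
# Hypothetical sketch only: the original get_model() definition is not shown in this diff.
# Assumes MaMaL-Gen is a CodeT5-style seq2seq model and that the installed Streamlit
# version (>= 1.18) provides st.cache_resource.
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

@st.cache_resource  # load the checkpoint once and reuse it across Streamlit reruns
def get_model(checkpoint: str):
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
    return tokenizer, model

tokenizer, model = get_model("hanbin/MaMaL-Gen")
input_ids = tokenizer("Convert a SVG string to a QImage", return_tensors="pt").input_ids
generated_ids = model.generate(input_ids, max_length=100)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
```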
 import streamlit as st
+import os
+import openai
+import backoff
+# lucaslane5h8a@hotmail.com----hK4H0M64ihK4H0M64i----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
+# os.environ["http_proxy"]="127.0.0.1:7890"
+# os.environ["https_proxy"]="127.0.0.1:7890"
+openai.api_key="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"
+
+st.set_page_config(
+    page_title="首页",
+    page_icon="🚀",
+    layout="centered",
+    initial_sidebar_state="auto",
+)
+
+# set_page_config sets up the Streamlit page: customize the app's title, icon, layout, and so on for a better user experience.
+# Note: set_page_config must be called before any other Streamlit element, otherwise an exception is raised.
+# Parameters:
+# page_title: optional; the app title, usually shown on the browser tab.
+# page_icon: optional; the app icon, shown on the browser tab and in the bookmarks bar.
+# layout: optional; the page layout, either "centered" or "wide".
+# initial_sidebar_state: optional; the initial sidebar state, "auto" (expand automatically) or "collapsed".
+
+
+def init_sidebar():
+    """
+    Initialize the sidebar
+    :return:
+    """

+    st.sidebar.title("关于我们")

+    markdown = """
+    汇报人:高洺策
+
+    其他小组成员:周小渲(组长)、王瑞琪、杨畔、宣乐卓、雷友素、单宁、王钦、刘亭秀、吴林泽、武俊呈
+    """
+    st.sidebar.info(markdown)

+    logo = "./image/laomo.png"
+    st.sidebar.image(logo)
+    st.sidebar.title("劳模风范")
+    st.sidebar.image("./image/laomo1.png", use_column_width=True)
+    st.sidebar.image("./image/laomo2.png", use_column_width=True)
+    st.sidebar.image("./image/laomo3.png", use_column_width=True)
+    st.sidebar.image("./image/gongjiang1.png", use_column_width=True)
+    st.sidebar.image("./image/gongjiang2.png", use_column_width=True)
+    st.sidebar.image("./image/gongjiang3.png", use_column_width=True)


+def init_content():
     """
+    Initialize the page content
+    :return:
     """
+    # Customize page title
+    st.title("劳模智能体(Agent)")

+    st.markdown(
+        """
+        劳模Agent,即劳模智能体,该智能体可以讲述相关劳模的事迹以及与人类进行沟通,可以作为劳模学习和教学的辅助工具。
+        """
     )
+    # Insert an image and let it scale automatically

+    st.image("./image/title.png",use_column_width=True)

+    # st.header("Instructions")
     #
+    # markdown = """
+    # 1. For the [GitHub repository](https://github.com/giswqs/geemap-apps) or [use it as a template](https://github.com/new?template_name=geemap-apps&template_owner=giswqs) for your own project.
+    # 2. Customize the sidebar by changing the sidebar text and logo in each Python files.
+    # 3. Find your favorite emoji from https://emojipedia.org.
+    # 4. Add a new app to the `pages/` directory with an emoji in the file name, e.g., `1_🚀_Chart.py`.
+    # """
     #
+    # st.markdown(markdown)
+
+    # Build an interactive app: the user enters some text and the app responds to it accordingly.
+    # Input box for the user's text
+    st.header("输入--")
+    text_area = st.text_area("", "在这里输入你的需求~~~~~~~~比如 你是谁?")
+
+    # If the input equals "你是谁?", output the canned self-introduction below.
+    # Add a header
+
+    st.header("输出--")
+    # Define an output placeholder (default text: "在这里输出模型回复~~~~~~~~")
+    text = st.empty()
+    # Change the output box to a multi-line text area
+    # output_area = st.text_area("", "在这里输出模型回复~~~~~~~~")
+    # text.text("在这里输出模型回复~~~~~~~~")
+    if text_area == "你是谁?":
+        # st.success("我是劳模智能体,我可以讲述相关劳模的事迹以及与人类进行沟通,可以作为劳模学习和教学的辅助工具。")
+        # Display the introduction text in the output_area box
+        st.write("我是劳模智能体,我可以讲述相关劳模的事迹以及与人类进行沟通,可以作为劳模学习和教学的辅助工具。你可以随意向我提出问题,我会尽力回答你的问题。")

     else:
+        @backoff.on_exception(
+            backoff.fibo,
+            # https://platform.openai.com/docs/guides/error-codes/python-library-error-types
+            (
+                openai.error.APIError,
+                openai.error.Timeout,
+                openai.error.RateLimitError,
+                openai.error.ServiceUnavailableError,
+                openai.error.APIConnectionError,
+                KeyError,
+            ),
+        )
+        def call_lm(model,messages,max_tokens,temperature,stop_words):
+            response = openai.ChatCompletion.create(
+                model=model,
+                messages=messages,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                stop=stop_words,
+            )
+            return response.choices[0].message["content"].strip()
+        model = "gpt-3.5-turbo-0613"
+        messages=[
+            {"role": "system", "content": "你是一个劳模智能体,了解中国的劳模事迹。下面你需要回答用户提出的问题"},
+            {"role": "user", "content": text_area},
+        ]
+        print("messages",messages)
+        max_tokens = 256
+        temperature = 0.9
+        stop_words = []
+        response = call_lm(model,messages,max_tokens,temperature,stop_words)
+        print("response",response)
+        st.write(response)


 if __name__ == '__main__':
+    init_sidebar()
+    init_content()
+    pass
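The new `call_lm` wrapper retries the ChatCompletion request with Fibonacci backoff whenever the OpenAI client raises a transient error. Below is a minimal standalone sketch of that pattern, assuming the pre-1.0 `openai` SDK (which still exposes `openai.error.*` and `openai.ChatCompletion`) and the `backoff` package. The `OPENAI_API_KEY` environment variable and the `max_tries` bound are assumptions added for the sketch; the committed file hardcodes the key and retries without a limit.

```python
# Standalone sketch of the retry pattern in the new app.py; assumes openai<1.0 and backoff.
import os

import backoff
import openai

# Assumption for this sketch: the key comes from the environment instead of being hardcoded.
openai.api_key = os.environ.get("OPENAI_API_KEY")

@backoff.on_exception(
    backoff.fibo,  # Fibonacci wait times between retries
    (
        openai.error.APIError,
        openai.error.Timeout,
        openai.error.RateLimitError,
        openai.error.ServiceUnavailableError,
        openai.error.APIConnectionError,
    ),
    max_tries=5,  # assumption: bound the retries instead of retrying indefinitely
)
def call_lm(model, messages, max_tokens=256, temperature=0.9, stop_words=None):
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        stop=stop_words,  # None disables stop sequences
    )
    return response.choices[0].message["content"].strip()

if __name__ == "__main__":
    print(call_lm(
        "gpt-3.5-turbo-0613",
        [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello"},
        ],
    ))
```

Reading the key from the environment keeps it out of the repository, and bounding the retries keeps a prolonged outage from stalling the Streamlit request indefinitely.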