Tokymin committed
Commit b406631
Parent: 15742ca

https://blog.csdn.net/weixin_40425640/article/details/131850846

Files changed (3)
  1. README.md +1 -1
  2. app.py +53 -2
  3. app_history.py +0 -54
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: indigo
  colorTo: purple
  sdk: streamlit
  sdk_version: 1.31.1
- app_file: app_history.py
+ app_file: app.py
  pinned: false
  license: mit
  ---
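In a Space's README front matter, `app_file` tells the Hugging Face Spaces runtime which script to launch, so this one-line change switches the Space's entry point from the old `app_history.py` to the rewritten `app.py`.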
app.py CHANGED
@@ -1,3 +1,54 @@
- import gradio as gr
+ from pathlib import Path
+ import streamlit as st
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import torch
+ import os
+ from transformers import AutoTokenizer, AutoModel
+ import requests
 
- gr.load("models/Tokymin/Mood_Anxiety_Disorder_Classify_Model").launch()
+ # Assuming you have set the HF_TOKEN environment variable with your Hugging Face token
+ huggingface_token = os.getenv('HF_TOKEN')
+ # Set up the token to use with the Hugging Face API
+ if huggingface_token is not None:
+     os.environ['HUGGINGFACE_CO_API_TOKEN'] = huggingface_token
+     API_URL = "https://api-inference.huggingface.co/models/Tokymin/Mood_Anxiety_Disorder_Classify_Model"
+     headers = {"Authorization": f"Tokymin {huggingface_token}"}
+ else:
+     print("error, no token")
+     exit(0)
+
+ # def query(payload):
+ #     response = requests.post(API_URL, headers=headers, json=payload)
+ #     return response.json()
+ # data = query("Can you please let us know more details about your ")
+ path: Path = Path('Tokymin/Mood_Anxiety_Disorder_Classify_Model')
+ tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=path, cache_dir='/home/user', token=huggingface_token)
+
+ # tokenizer = AutoTokenizer.from_pretrained('Tokymin/Mood_Anxiety_Disorder_Classify_Model')
+ model = AutoModelForSequenceClassification.from_pretrained("Tokymin/Mood_Anxiety_Disorder_Classify_Model", num_labels=8)
+ model.eval()
+
+
+ def predict(text):
+     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
+     with torch.no_grad():
+         outputs = model(**inputs)
+     logits = outputs.logits
+     probabilities = torch.softmax(logits, dim=1).squeeze()
+     # Assume each label group (SAS_Class and SDS_Class) has 4 probability values
+     sas_probs = probabilities[:4]  # probabilities for SAS_Class
+     sds_probs = probabilities[4:]  # probabilities for SDS_Class
+     return sas_probs, sds_probs
+
+
+ # Build the Streamlit app
+ st.title("Multi-label Classification App")
+
+ # Text input from the user
+ user_input = st.text_area("Enter text here", "Type something...")
+
+ if st.button("Predict"):
+     # Display the prediction results
+     sas_probs, sds_probs = predict(user_input)
+     st.write("SAS_Class probabilities:", sas_probs.numpy())
+     st.write("SDS_Class probabilities:", sds_probs.numpy())
app_history.py DELETED
@@ -1,54 +0,0 @@
- from pathlib import Path
- import streamlit as st
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
- import torch
- import os
- from transformers import AutoTokenizer, AutoModel
- import requests
-
- # Assuming you have set the HF_TOKEN environment variable with your Hugging Face token
- huggingface_token = os.getenv('HF_TOKEN')
- # Set up the token to use with the Hugging Face API
- if huggingface_token is not None:
-     os.environ['HUGGINGFACE_CO_API_TOKEN'] = huggingface_token
-     API_URL = "https://api-inference.huggingface.co/models/Tokymin/Mood_Anxiety_Disorder_Classify_Model"
-     headers = {"Authorization": f"Tokymin {huggingface_token}"}
- else:
-     print("error, no token")
-     exit(0)
-
- # def query(payload):
- #     response = requests.post(API_URL, headers=headers, json=payload)
- #     return response.json()
- # data = query("Can you please let us know more details about your ")
- path: Path = Path('Tokymin/Mood_Anxiety_Disorder_Classify_Model')
- tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=path, cache_dir='/home/user', token=huggingface_token)
-
- # tokenizer = AutoTokenizer.from_pretrained('Tokymin/Mood_Anxiety_Disorder_Classify_Model')
- model = AutoModelForSequenceClassification.from_pretrained("Tokymin/Mood_Anxiety_Disorder_Classify_Model", num_labels=8)
- model.eval()
-
-
- def predict(text):
-     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
-     with torch.no_grad():
-         outputs = model(**inputs)
-     logits = outputs.logits
-     probabilities = torch.softmax(logits, dim=1).squeeze()
-     # Assume each label group (SAS_Class and SDS_Class) has 4 probability values
-     sas_probs = probabilities[:4]  # probabilities for SAS_Class
-     sds_probs = probabilities[4:]  # probabilities for SDS_Class
-     return sas_probs, sds_probs
-
-
- # Build the Streamlit app
- st.title("Multi-label Classification App")
-
- # Text input from the user
- user_input = st.text_area("Enter text here", "Type something...")
-
- if st.button("Predict"):
-     # Display the prediction results
-     sas_probs, sds_probs = predict(user_input)
-     st.write("SAS_Class probabilities:", sas_probs.numpy())
-     st.write("SDS_Class probabilities:", sds_probs.numpy())