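# JobFair demo app: a Streamlit front end that sends each prompt from an uploaded CSV
# to an Azure-hosted chat model and appends the model's responses to the table.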
import streamlit as st
import pandas as pd
import json
import http.client
from io import StringIO
from openai import AzureOpenAI


class ContentFormatter:
    # Serialises a user prompt plus generation settings into an OpenAI-style
    # chat-completions JSON request body.
    @staticmethod
    def chat_completions(text, settings_params):
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": text}
        ]
        data = {"messages": messages, **settings_params}
        return json.dumps(data)
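# Example output of ContentFormatter.chat_completions("Hello", {"temperature": 0.5}),
# wrapped here for readability:
#   '{"messages": [{"role": "system", "content": "You are a helpful assistant."},
#                  {"role": "user", "content": "Hello"}], "temperature": 0.5}'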


class AzureAgent:
    # Minimal client that POSTs an OpenAI-style chat-completions request to an
    # Azure-hosted endpoint over raw HTTPS, authenticated with a bearer key.
    def __init__(self, api_key, azure_uri, deployment_name):
        self.azure_uri = azure_uri
        self.headers = {
            'Authorization': f"Bearer {api_key}",
            'Content-Type': 'application/json'
        }
        self.deployment_name = deployment_name
        self.chat_formatter = ContentFormatter

    def invoke(self, text, **kwargs):
        # Build the request body, POST it, and extract the assistant message from the reply.
        body = self.chat_formatter.chat_completions(text, {**kwargs})
        conn = http.client.HTTPSConnection(self.azure_uri)
        conn.request("POST", "/v1/chat/completions", body=body, headers=self.headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()
        decoded_data = data.decode("utf-8")
        parsed_data = json.loads(decoded_data)
        content = parsed_data["choices"][0]["message"]["content"]
        return content
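# Usage sketch (hypothetical host and values, for illustration only). Note that
# http.client.HTTPSConnection expects a bare hostname, not a full "https://..." URL:
#   agent = AzureAgent(api_key, "my-endpoint.eastus2.inference.ai.azure.com", "my-deployment")
#   reply = agent.invoke("Evaluate this CV.", temperature=0.5, max_tokens=150)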


class GPTAgent:
    # Client for an Azure OpenAI deployment, using the official openai SDK.
    def __init__(self, api_key, azure_endpoint, deployment_name, api_version):
        self.client = AzureOpenAI(
            api_key=api_key,
            api_version=api_version,
            azure_endpoint=azure_endpoint
        )
        self.deployment_name = deployment_name

    def invoke(self, text, **kwargs):
        response = self.client.chat.completions.create(
            model=self.deployment_name,
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": text}
            ],
            **kwargs
        )
        return response.choices[0].message.content
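# Usage sketch (hypothetical endpoint and deployment name, for illustration only):
#   agent = GPTAgent(api_key, "https://my-resource.openai.azure.com", "gpt-4o", "2024-02-15-preview")
#   reply = agent.invoke("Evaluate this CV.", temperature=0.5, max_tokens=150)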


# Streamlit app interface
st.title('JobFair: A Benchmark for Fairness in LLM Employment Decision')

# Sidebar: model and connection settings
st.sidebar.title('Model Settings')
model_type = st.sidebar.radio("Select the type of agent", ('AzureAgent', 'GPTAgent'))
api_key = st.sidebar.text_input("API Key", type="password")
endpoint_url = st.sidebar.text_input("Endpoint URL")
deployment_name = st.sidebar.text_input("Model Name")
if model_type == 'GPTAgent':
    api_version = st.sidebar.text_input("API Version", '2024-02-15-preview')  # Default API version
# Model invocation parameters
temperature = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, value=0.5, step=0.01)
max_tokens = st.sidebar.number_input("Max Tokens", min_value=1, max_value=1000, value=150)
parameters = {"temperature": temperature, "max_tokens": max_tokens}
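# These settings are passed straight through to agent.invoke(...) as keyword arguments,
# so they map onto the underlying chat-completions request (temperature, max_tokens).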
# File upload and data display
uploaded_file = st.file_uploader("Choose a file")
if uploaded_file is not None:
    # Read the uploaded CSV (expected to contain a 'prompt' column) into a DataFrame
    data = StringIO(uploaded_file.getvalue().decode("utf-8"))
    df = pd.read_csv(data)

    # Process data button
    if st.button('Process Data'):
        if model_type == 'AzureAgent':
            agent = AzureAgent(api_key, endpoint_url, deployment_name)
        else:
            agent = GPTAgent(api_key, endpoint_url, deployment_name, api_version)

        # Query the selected agent once per prompt and store each reply
        df['Response'] = df['prompt'].apply(lambda x: agent.invoke(x, **parameters))

        # Display processed data
        st.write('Processed Data:', df)
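# To try this app locally (assuming the file is saved as app.py): streamlit run app.py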