import json
import http.client
import streamlit as st
from openai import AzureOpenAI


class ContentFormatter:
    """Builds the JSON request body for a chat-completions call."""

    @staticmethod
    def chat_completions(text, settings_params):
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": text}
        ]
        data = {"messages": messages, **settings_params}
        return json.dumps(data)


class AzureAgent:
    """Calls a serverless Azure endpoint directly over HTTPS with a bearer token."""

    def __init__(self, api_key, azure_uri):
        # http.client expects a bare host name, so strip any scheme or trailing
        # slash the user may have pasted along with the endpoint.
        self.azure_uri = azure_uri.replace("https://", "").replace("http://", "").rstrip("/")
        self.headers = {
            'Authorization': f"Bearer {api_key}",
            'Content-Type': 'application/json'
        }
        self.chat_formatter = ContentFormatter

    def invoke(self, text, **kwargs):
        body = self.chat_formatter.chat_completions(text, kwargs)
        conn = http.client.HTTPSConnection(self.azure_uri)
        conn.request("POST", '/v1/chat/completions', body=body, headers=self.headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()
        parsed_data = json.loads(data.decode("utf-8"))
        if "choices" not in parsed_data:
            # Surface error payloads (e.g. auth failures) instead of raising a KeyError.
            raise RuntimeError(f"Unexpected response from endpoint: {parsed_data}")
        return parsed_data["choices"][0]["message"]["content"]


class GPTAgent:
    """Calls an Azure OpenAI deployment through the official SDK client."""

    def __init__(self, api_key, azure_endpoint):
        self.client = AzureOpenAI(
            api_key=api_key,
            api_version='2024-02-01',  # GA API version; adjust to match your resource
            azure_endpoint=azure_endpoint
        )
        self.deployment_name = 'your-model-deployment-name'  # Update to your actual deployment name

    def invoke(self, text, **kwargs):
        response = self.client.chat.completions.create(
            model=self.deployment_name,
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": text}
            ],
            **kwargs
        )
        return response.choices[0].message.content


# Streamlit app interface
st.title('Model Invocation with User Credentials')

model_type = st.radio("Select the type of agent", ('AzureAgent', 'GPTAgent'))
api_key = st.text_input("API Key", type="password")
endpoint_url = st.text_input("Endpoint URL")
input_text = st.text_area('Enter your text')

if st.button('Invoke Model'):
    if not api_key or not endpoint_url:
        st.warning('Please provide both an API key and an endpoint URL.')
    else:
        if model_type == 'AzureAgent':
            agent = AzureAgent(api_key, endpoint_url)
        else:
            agent = GPTAgent(api_key, endpoint_url)
        # max_tokens is kept small here for quick testing; raise it for longer responses.
        output = agent.invoke(input_text, temperature=0, max_tokens=5)
        st.write('Response:', output)