Spaces:
Runtime error
Runtime error
File size: 2,006 Bytes
37219a0 dd37550 37219a0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 |
import streamlit as st
import requests
import json
# Page chrome must be configured before any other Streamlit UI call.
st.set_page_config(
    page_title="Generate Therapy Answers🤖",
    page_icon="🤖",
    layout="centered",
    initial_sidebar_state="collapsed",
)
## NOTE(review): dead code below — earlier local CTransformers-based LLaMA 2 loader, kept for reference only; the live path now POSTs to the HTTP API defined after it.
# def getLLamaresponse(input_text):
# ### LLama2 model
# llm=CTransformers(model='models/openorca_7b_chat_uncensored_FT_GGUF.gguf',
# model_type='llama',
# config={'max_new_tokens':256,
# 'temperature':0.01,
# 'gpu_layers':25,
# })
# ## Prompt Template
# Endpoint of the local model-serving API and the request headers sent
# with every generation call.
url = "https://localhost/api/generate"
headers = {"Content-Type": "application/json"}
def generate_text(text=None):
    """Send the user's input to the local LLM endpoint and return its reply.

    Parameters
    ----------
    text : str | None
        Text to respond to. When ``None`` (the default), falls back to the
        module-level ``input_text`` captured from the Streamlit text area,
        preserving the original zero-argument call style.

    Returns
    -------
    str | None
        The model's ``"response"`` field on success; ``None`` on HTTP
        failure (an error is shown in the Streamlit UI instead of raising).
    """
    if text is None:
        # Backward-compatible fallback to the global text-area value.
        text = input_text
    prompt = f"PRovide Response on the below text \n\n{text}"
    payload = {
        "model": "openorca_FT_medical",
        "stream": False,
        "prompt": prompt,
    }
    # json= serializes the payload for us; timeout= prevents the request
    # from hanging the Streamlit app forever if the model server is down.
    response = requests.post(url, headers=headers, json=payload, timeout=120)
    if response.status_code == 200:
        # Parse the JSON body and pull out the generated text.
        return response.json()["response"]
    st.error(f"Error: {response.status_code}, {response.text}")
    return None
# --- Streamlit UI: collect the user's text and show the model's reply. ---
st.header("Therapy Provider 🤖")
input_text = st.text_area("Enter your Problem/Emotions")
if st.button("Generate Response"):
    generated_response = generate_text()
    # Only render output when generation succeeded; on failure
    # generate_text() already surfaced an st.error for the user.
    # (Label fixed: it previously read "Generated Legal Notice", a
    # copy-paste leftover from a legal-notice app.)
    if generated_response:
        st.text_area("Generated Response", generated_response)
|