# Source: Hugging Face Space file view (author kaborg15, commit f2baf1b,
# "Added temperature parameter to query", 864 bytes). Web-page chrome
# converted to a comment so the file parses as Python.
import streamlit as st
import requests
# Hugging Face Inference Endpoint that serves the model.
API_URL = "https://g8u06j6fqi4vyi5i.eu-west-1.aws.endpoints.huggingface.cloud"
# JSON in / JSON out. NOTE(review): no Authorization header is sent, so the
# endpoint is presumably public or secured elsewhere — TODO confirm.
headers = {
"Accept" : "application/json",
"Content-Type": "application/json"
}
def query(payload, timeout=30):
    """POST *payload* as JSON to the inference endpoint and return the parsed JSON reply.

    Args:
        payload: JSON-serializable request body for the endpoint.
        timeout: Seconds to wait for the server; ``requests`` blocks
            indefinitely when no timeout is given, which would freeze the UI.

    Returns:
        The decoded JSON response body.

    Raises:
        requests.HTTPError: on a 4xx/5xx response, instead of silently
            returning an error object that callers would mis-index.
        requests.Timeout: if the endpoint does not answer within *timeout*.
    """
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    response.raise_for_status()
    return response.json()
def get_completion(prompt, temperature=0.6):
    """Fetch a text completion for *prompt* from the inference endpoint.

    Args:
        prompt: The prompt string to complete (comes from the UI text area).
        temperature: Sampling temperature forwarded to the model. Defaults
            to 0.6, the previously hard-coded value, so existing callers
            keep identical behavior.

    Returns:
        The ``generated_text`` of the first candidate in the response.

    Raises:
        KeyError, IndexError, TypeError: if the endpoint returns an
            unexpected shape (e.g. an error object rather than a list of
            candidates).
    """
    output = query({
        # prompt is already a str from the UI; no f-string coercion needed.
        "inputs": prompt,
        "parameters": {
            "temperature": temperature,
        },
    })
    return output[0]["generated_text"]
def main():
    """Render the Streamlit page: a prompt box, a Generate button, and an output box."""
    st.title('LLM Text Completion Interface')
    # Prompt entry widget.
    prompt = st.text_area("Enter your prompt:", height=300)
    # Only hit the endpoint on an explicit button press.
    if st.button('Generate'):
        result = get_completion(prompt)
        # Separate key keeps this widget distinct from the input area.
        st.text_area("Model Completion:", value=result, height=300, key="2")
# Script entry point.
if __name__ == '__main__':
    main()