import streamlit as st
from translate_app import tr
import numpy as np
import openai
from dotenv import load_dotenv
import os
import warnings

warnings.filterwarnings('ignore')

title = "Playground ChatGPT"
sidebar_name = "Playground"

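# Note: load_dotenv / os are imported but not used in this page; the OpenAI key
# is read from st.session_state['OPENAI_API_KEY'] in run() below. As an
# assumption (not part of the original flow), a .env file could be used instead:
#   load_dotenv()
#   openai.api_key = os.getenv("OPENAI_API_KEY")
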
def display_proba_next_token(prompt, temperature, max_token):
    """Query the Completions endpoint and display the top-5 candidate tokens
    for the next position, along with their probabilities."""

    stop_sequences = [".", "\n", "FIN", "Merci"]
    # Legacy (pre-1.0) OpenAI SDK call; logprobs=5 asks for the log-probabilities
    # of the 5 most likely tokens at each generated position.
    response = openai.Completion.create(
        engine="davinci-002",
        prompt=prompt,
        max_tokens=max_token,
        logprobs=5,
        temperature=temperature,
        stop=stop_sequences
    )

    if response['choices'][0]['text'] != "":
        # Top-5 log-probabilities for the first generated token
        logprobs = response['choices'][0]['logprobs']['top_logprobs'][0]
        logprobs_dict = dict(logprobs)

        # Convert log-probabilities back to probabilities (exp(logprob))
        probabilities = {key: np.exp(value) for key, value in logprobs_dict.items()}

        st.write("**Token suivant :** ->**:red[" + list(probabilities.keys())[0] + "]**<-")
        st.write("")

        st.write("**" + tr("Probabilité d'apparition des token:") + "**")
        for i, (token, proba) in enumerate(probabilities.items()):
            st.write(f" {i+1} - Token: ->**:red[{token}]**<- ; proba: {proba:.0%}")

        st.write("")
        try:
            text = response['choices'][0]['text']
            st.write("**" + tr("Texte de la réponse complète") + ":**")
            st.write("->**:red[" + str(text) + "]**<-")
        except KeyError:
            st.error(tr("La réponse ne contient pas de texte supplémentaire."))
    else:
        st.error(tr("La réponse ne contient pas de texte supplémentaire."))

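# Example call (hypothetical values, assuming openai.api_key has already been set):
#   display_proba_next_token("La souris est mangée par le", temperature=0.0, max_token=100)
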
def run():
    global temperature, max_token

    st.write("")
    st.write("")

    st.title(tr(title))
    st.markdown('''
                ---
                ''')

    st.header("**" + tr("Prédiction du token suivant") + "**")
    st.markdown(tr(
        """
        Cet espace a pour objectif d'observer la génération de token par ChatGPT
        """), unsafe_allow_html=True)
    st.write("")

    if 'OPENAI_API_KEY' in st.session_state:
        try:
            openai.api_key = st.session_state['OPENAI_API_KEY']
            col1, col2 = st.columns([3, 1])
            with col2:
                # Generation parameters
                temperature = st.slider(
                    label=tr("Temperature"),
                    min_value=0.0,
                    max_value=1.0,
                    value=0.0
                )
                max_token = st.slider(
                    label=tr("Max tokens"),
                    min_value=1,
                    max_value=500,
                    value=100
                )
            with col1:
                prompt = st.text_area(label=tr("Prompt:"), value="La souris est mangée par le", height=100)
            display_proba_next_token(prompt, temperature, max_token)
        except Exception as e:
            st.error(f"An unexpected error occurred: {e}")
    else:
        st.write(tr("Saisissez d'abord votre clé API OpenAI !"))
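
# Minimal entry point sketch (assumption): in the full app this page is normally
# imported and its run() called from the main Streamlit script, but the guard
# below also allows launching this file directly with `streamlit run`.
if __name__ == "__main__":
    run()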