File size: 2,048 Bytes
7ce578b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
# main.py

import os
import pprint

from dotenv import load_dotenv
from halo import Halo
from openai import OpenAI

# Load environment variables (e.g. OPENAI_API_KEY) from a .env file BEFORE
# constructing the client, so the key can actually come from the environment.
load_dotenv()

# SECURITY: never hard-code API keys in source control — read from the
# environment instead. Set OPENAI_API_KEY in your shell or .env file.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Function to generate a response from the model
def generate_response(messages):
    """Send the chat history to the model and return the reply message.

    Shows a terminal spinner while waiting for the API, then logs the
    request and token-usage statistics to stdout.

    Args:
        messages: List of chat message dicts ({"role": ..., "content": ...})
            plus any message objects returned by earlier completions.

    Returns:
        The first choice's message object from the completion response.
    """
    # Create a loading spinner so the user sees progress during the API call
    spinner = Halo(text='Loading...', spinner='dots')
    spinner.start()

    # Model name is fixed here; only the API key comes from the environment
    model_name = 'gpt-3.5-turbo-0301'

    try:
        # Create a chat completion with the provided messages
        response = client.chat.completions.create(
            model=model_name,
            messages=messages,
            temperature=0.5,
            max_tokens=250,
        )
    finally:
        # Stop the spinner even if the API call raises, so the terminal is
        # not left with a spinner still animating after an error.
        spinner.stop()

    # Pretty-print the messages sent to the model
    pp = pprint.PrettyPrinter(indent=4)
    print("Request:")
    pp.pprint(messages)

    # Print the usage statistics for the completion
    print(f"Completion tokens: {response.usage.completion_tokens}, Prompt tokens: {response.usage.prompt_tokens}, Total tokens: {response.usage.total_tokens}")

    # Return the message part of the response
    return response.choices[0].message

# Main function to run the chatbot
def main():
    """Run an interactive chat loop with the wizard persona until "quit"."""
    # Initialize the conversation with a system message (the persona)
    messages = [
        {"role": "system", "content": "You are a kind and wise wizard"}
    ]

    # Continue chatting until the user types "quit" or closes stdin
    while True:
        try:
            input_text = input("You: ")
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C at the prompt: exit cleanly, not a traceback
            break
        if input_text.lower() == "quit":
            break

        # Add the user's message to the conversation history
        messages.append({"role": "user", "content": input_text})

        # Get a response and keep it in the history so the model sees the
        # full conversation on the next turn
        response = generate_response(messages)
        messages.append(response)

        # Print the assistant's response
        print(f"Wizard: {response.content}")

# Run the chat loop only when executed as a script (not when imported)
if __name__ == "__main__":
    main()