import os

import streamlit as st
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

def create_prompt(name1: str, name2: str, persona_style: str):
    """Create the chat prompt template for a two-person conversation.

    name1 (e.g., Alice) always speaks first, and turns alternate:
        Alice: <message>
        Bob: <message>
        ...
    """

    prompt_template_str = f"""
    You are simulating a conversation between two people: {name1} and {name2}. Both are human individuals. The conversation should embody the style and characteristics defined as {persona_style}. They are talking to each other naturally. There are no 'Human' or 'AI' roles here, just {name1} and {name2} speaking alternately. {name1} starts the conversation. Each message should be in the format:
    {name1}: <message>
    {name2}: <message>

    Characteristics and style:
    - Both {name1} and {name2} communicate in a friendly, human-like manner.
    - They can be curious, ask questions, share opinions.
    - Their responses should be brief and natural, like texting a friend.
    - They can use everyday language, show feelings and opinions.
    - Keep each response to about 1-2 short sentences.
    - Use emojis sparingly (1-2 per message at most), and only where they fit the persona_style.

    Make sure that each turn is clearly designated as {name1} or {name2}. The conversation should continue for a total of 15 messages. Start with {name1} speaking first. Alternate between {name1} and {name2}.

    Once the 15th message is given (by {name1}, since the conversation starts with {name1}), the conversation ends. After that, a summary and a title of the conversation will be requested separately.

    Current partial conversation (if any):
    {{chat_history}}

    {{input}}
    """
    # The doubled braces survive the f-string, so the resulting template has
    # exactly two input variables: {chat_history} and {input}. Without the
    # {{input}} slot, the `input=` value passed to chain.run() would be
    # silently ignored (LangChain only validates missing keys, not extras),
    # and the summarization request below would never reach the model.
    return ChatPromptTemplate.from_template(prompt_template_str)
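
# A minimal sanity check of the template above (hypothetical usage; nothing
# beyond what create_prompt defines is assumed):
#
#   prompt = create_prompt("Alice", "Bob", "friendly, curious")
#   assert set(prompt.input_variables) == {"chat_history", "input"}
#   print(prompt.format(chat_history="Alice: Hi!", input="Continue the conversation."))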

def simulate_conversation(chain: LLMChain, name1: str, name2: str, total_messages: int = 15):
    """
    Simulate a conversation of exactly total_messages turns.
    name1 starts the conversation (message 1), then name2 (message 2), etc., alternating.
    """
    conversation_lines = []
    st.write("**Starting conversation simulation...**")
    print("Starting conversation simulation...")

    try:
        for i in range(total_messages):
            # Join the conversation so far into the history string. With only
            # 15 short messages, the full history fits comfortably in context,
            # so no truncation is applied.
            history = "\n".join(conversation_lines)

            # Determine whose turn it is:
            # i=0 (first message), i even => name1 speaks, i odd => name2 speaks
            current_speaker = name1 if i % 2 == 0 else name2
            st.write(f"**[Message {i+1}/{total_messages}] {current_speaker} is speaking...**")
            print(f"[Message {i+1}/{total_messages}] {current_speaker} is speaking...")

            # Ask the model for the next line of the conversation. It should
            # produce a single line like "Alice: <message>".
            response = chain.run(chat_history=history, input="Continue the conversation.").strip()

            # Keep only the line for the current speaker. Ideally the model
            # emits a single line, but if it produces both speakers we take
            # the first line that starts with the current speaker's name.
            lines = response.split("\n")
            chosen_line = None
            for line in lines:
                line = line.strip()
                if line.startswith(f"{current_speaker}:"):
                    chosen_line = line
                    break

            if not chosen_line:
                # Fallback: If not found, just use the first line
                chosen_line = lines[0] if lines else f"{current_speaker}: (No response)"

            st.write(chosen_line)
            print(chosen_line)

            conversation_lines.append(chosen_line)

        final_conversation = "\n".join(conversation_lines)
        return final_conversation
    except Exception as e:
        st.error(f"Error during conversation simulation: {e}")
        print(f"Error during conversation simulation: {e}")
        return None
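
# The transcript returned above is a plain newline-joined string, e.g.
# (illustrative only, not real model output):
#
#   Alice: Hey Bob, how was your weekend? 😄
#   Bob: Pretty good! I finally tried that hiking trail you mentioned.
#   Alice: No way, was it as steep as everyone says?
#   ...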

def summarize_conversation(chain: LLMChain, conversation: str, name1: str, name2: str):
    """Use the LLM to summarize the completed conversation and provide a title."""
    st.write("**Summarizing the conversation...**")
    print("Summarizing the conversation...")

    summary_prompt = f"""
    The following is a conversation between {name1} and {name2}:
    {conversation}

    Provide a short descriptive title for their conversation and then summarize it in a few short sentences highlighting the main points, tone, and conclusion.
    Format your answer as:
    Title: <your conversation title>
    Summary: <your summary here>
    """
    try:
        # Reuse the conversation chain: chat_history is left empty and the
        # whole summary request goes through the template's {input} slot.
        response = chain.run(chat_history="", input=summary_prompt)
        return response.strip()
    except Exception as e:
        st.error(f"Error summarizing conversation: {e}")
        print(f"Error summarizing conversation: {e}")
        return "No summary available due to error."

def main():
    st.title("LLM Conversation Simulation")

    model_names = [
        "meta-llama/Llama-3.3-70B-Instruct",
        "meta-llama/Llama-3.1-405B-Instruct",
        "lmsys/vicuna-13b-v1.5"
    ]
    selected_model = st.selectbox("Select a model:", model_names)

    # Two user names
    name1 = st.text_input("Enter the first user's name:", value="Alice")
    name2 = st.text_input("Enter the second user's name:", value="Bob")
    persona_style = st.text_area("Enter the persona style characteristics:", 
                                 value="friendly, curious, and a bit sarcastic")

    if st.button("Start Conversation Simulation"):
        st.write("**Loading model...**")
        print("Loading model...")

        with st.spinner("Starting simulation..."):
            endpoint_url = f"https://api-inference.huggingface.co/models/{selected_model}"

            try:
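                # HuggingFaceEndpoint calls the hosted Inference API; the
                # token is read from the environment (assumed setup):
                #   export HUGGINGFACEHUB_API_TOKEN=hf_...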
                llm = HuggingFaceEndpoint(
                    endpoint_url=endpoint_url,
                    huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
                    task="text-generation",
                    temperature=0.7,
                    max_new_tokens=512
                )
                st.write("**Model loaded successfully!**")
                print("Model loaded successfully!")
            except Exception as e:
                st.error(f"Error initializing HuggingFaceEndpoint: {e}")
                print(f"Error initializing HuggingFaceEndpoint: {e}")
                return

            prompt = create_prompt(name1, name2, persona_style)
            chain = LLMChain(llm=llm, prompt=prompt)
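            # Note: LLMChain and chain.run() are deprecated in recent LangChain
            # releases; this file targets the classic API. On langchain >= 0.2
            # an equivalent sketch (untested here) would be:
            #   chain = prompt | llm
            #   result = chain.invoke({"chat_history": "", "input": "..."})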

            st.write("**Simulating the conversation...**")
            print("Simulating the conversation...")

            # Total messages = 15
            conversation = simulate_conversation(chain, name1, name2, total_messages=15)
            if conversation:
                st.subheader("Final Conversation:")
                st.text(conversation)
                print("Conversation Simulation Complete.\n")
                print("Full Conversation:\n", conversation)

                # Summarize conversation
                st.subheader("Summary and Title:")
                summary = summarize_conversation(chain, conversation, name1, name2)
                st.write(summary)
                print("Summary:\n", summary)

if __name__ == "__main__":
    main()