sidmanale643 committed on
Commit adf70a1 · verified · Parent: 9c444ce

Upload 9 files


"initial"

Files changed (9)
  1. .gitattributes +35 -35
  2. .gitignore +1 -0
  3. Dockerfile +20 -0
  4. README.md +10 -10
  5. api.py +52 -0
  6. app.py +34 -0
  7. docker-compose.yml +8 -0
  8. requirements.txt +98 -0
  9. utils.py +396 -0
.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text

+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ main.py
Dockerfile ADDED
@@ -0,0 +1,20 @@
+ # Use the official Python image
+ FROM python:3.12-slim
+
+ # Set environment variables
+ ENV PYTHONUNBUFFERED=1
+ WORKDIR /app
+
+ # Copy and install dependencies
+ COPY requirements.txt ./
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy application code
+ COPY . /app
+
+ # Expose the FastAPI (8000) and Streamlit (8501) ports used by the CMD below
+ EXPOSE 8000 8501
+
+ # Start both services; the Streamlit entry point is app.py
+ # (main.py is listed in .gitignore and is not part of this upload)
+ CMD ["sh", "-c", "uvicorn api:app --host 0.0.0.0 --port 8000 & streamlit run app.py --server.port 8501 --server.address 0.0.0.0"]
README.md CHANGED
@@ -1,10 +1,10 @@
- ---
- title: Company Sentiment
- emoji: 🚀
- colorFrom: pink
- colorTo: gray
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

+ ---
+ title: Company Sentiment
+ emoji: 🚀
+ colorFrom: pink
+ colorTo: gray
+ sdk: docker
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
api.py ADDED
@@ -0,0 +1,52 @@
+ from fastapi import FastAPI
+
+ from utils import (
+     fetch_from_web,
+     analyze_sentiment,
+     generate_comparative_sentiment,
+     generate_final_report,
+     get_summaries_by_sentiment,
+     translate,
+     text_to_speech,
+ )
+
+ app = FastAPI()
+
+
+ @app.get("/home")
+ def main(company_name: str, model_provider: str):
+     web_results = fetch_from_web(company_name)
+
+     if "sources" not in web_results:
+         return {"error": "No sources found."}
+
+     # analyze_sentiment returns None on a parsing error; skip failed articles
+     sentiment_output = []
+     for article in web_results["sources"][:5]:
+         result = analyze_sentiment(article, model_provider)
+         if result is not None:
+             sentiment_output.append(result)
+
+     comparative_sentiment = generate_comparative_sentiment(sentiment_output)
+
+     positive_summary, negative_summary, neutral_summary = get_summaries_by_sentiment(
+         sentiment_output
+     )
+
+     final_report = generate_final_report(
+         positive_summary,
+         negative_summary,
+         neutral_summary,
+         comparative_sentiment,
+         model_provider,
+     )
+
+     hindi_translation = translate(final_report, model_provider)
+     # text_to_speech saves output.mp3 locally and returns its path (None on failure)
+     audio_path = text_to_speech(hindi_translation)
+
+     return {
+         "company_name": company_name,
+         "articles": sentiment_output,
+         "comparative_sentiment": comparative_sentiment,
+         "final_report": final_report,
+         "hindi_translation": hindi_translation,
+         "audio_url": audio_path,
+     }
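
For reference, a minimal sketch of calling this endpoint from Python once the API is running; the localhost:8000 address follows the uvicorn command in the Dockerfile, and the parameter values ("Tesla", "Groq") are only examples:

import requests

# Assumes the FastAPI service from the Dockerfile is listening on localhost:8000.
response = requests.get(
    "http://localhost:8000/home",
    params={"company_name": "Tesla", "model_provider": "Groq"},
    timeout=600,  # the pipeline runs a web search, several LLM calls, and TTS
)
response.raise_for_status()
data = response.json()
print(data["comparative_sentiment"]["Sentiment Distribution"])
print(data["final_report"][:200])
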
app.py ADDED
@@ -0,0 +1,34 @@
+ import os
+
+ import requests
+ import streamlit as st
+
+ st.title("Company Sentiment Analyzer")
+
+ company_name = st.text_input("Enter Company Name", "Tesla")
+ model_provider = st.selectbox("Model Provider", options=["Ollama", "Groq"])
+
+ if st.button("Fetch Sentiment Data"):
+     # Let requests URL-encode the query parameters
+     api_url = "http://localhost:8000/home"
+     params = {"company_name": company_name, "model_provider": model_provider}
+
+     try:
+         response = requests.get(api_url, params=params)
+         response.raise_for_status()
+
+         data = response.json()
+
+         st.subheader("Company Name")
+         st.write(data.get("company_name"))
+
+         st.subheader("Final Report")
+         st.write(data.get("final_report"))
+
+         st.subheader("🔊 Audio Output")
+         audio_file = "output.mp3"
+         if os.path.exists(audio_file):
+             st.audio(audio_file)
+
+     except requests.exceptions.RequestException as e:
+         st.error(f"Error fetching data: {e}")
docker-compose.yml ADDED
@@ -0,0 +1,8 @@
+ version: '3'
+ services:
+   web:
+     build: .
+     ports:
+       - "8000:8000"   # FastAPI (uvicorn)
+       - "8501:8501"   # Streamlit
+
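
Once the stack is up via `docker compose up --build`, a quick smoke test from the host can confirm both services answer. This is a sketch under two assumptions: FastAPI's auto-generated /docs page is enabled (the framework default), and /_stcore/health is the Streamlit health endpoint for this pinned release:

import requests

# Hypothetical smoke test; ports match the compose file above.
assert requests.get("http://localhost:8000/docs", timeout=5).ok            # FastAPI auto-docs
assert requests.get("http://localhost:8501/_stcore/health", timeout=5).ok  # Streamlit health check
print("Both services are reachable.")
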
requirements.txt ADDED
@@ -0,0 +1,98 @@
+ aiohappyeyeballs==2.6.1
+ aiohttp==3.11.14
+ aiosignal==1.3.2
+ altair==5.5.0
+ annotated-types==0.7.0
+ anyio==4.9.0
+ asttokens==3.0.0
+ attrs==25.3.0
+ blinker==1.9.0
+ cachetools==5.5.2
+ certifi==2025.1.31
+ charset-normalizer==3.4.1
+ click==8.1.8
+ colorama==0.4.6
+ comm==0.2.2
+ debugpy==1.8.13
+ decorator==5.2.1
+ distro==1.9.0
+ docstring-parser==0.16
+ # dotenv==0.9.9 removed: the code imports load_dotenv from python-dotenv (pinned below)
+ executing==2.2.0
+ fastapi==0.115.11
+ frozenlist==1.5.0
+ gitdb==4.0.12
+ gitpython==3.1.44
+ groq==0.20.0
+ h11==0.14.0
+ httpcore==1.0.7
+ httpx==0.28.1
+ idna==3.10
+ instructor==1.7.7
+ ipykernel==6.29.5
+ ipython==9.0.2
+ ipython-pygments-lexers==1.1.1
+ jedi==0.19.2
+ jinja2==3.1.6
+ jiter==0.8.2
+ jsonschema==4.23.0
+ jsonschema-specifications==2024.10.1
+ jupyter-client==8.6.3
+ jupyter-core==5.7.2
+ markdown-it-py==3.0.0
+ markupsafe==3.0.2
+ matplotlib-inline==0.1.7
+ mdurl==0.1.2
+ multidict==6.2.0
+ narwhals==1.31.0
+ nest-asyncio==1.6.0
+ numpy==2.2.4
+ ollama==0.4.7
+ openai==1.66.5
+ packaging==24.2
+ pandas==2.2.3
+ parso==0.8.4
+ pillow==11.1.0
+ platformdirs==4.3.6
+ prompt-toolkit==3.0.50
+ propcache==0.3.0
+ protobuf==5.29.3
+ psutil==7.0.0
+ pure-eval==0.2.3
+ pyarrow==19.0.1
+ pydantic==2.10.6
+ pydantic-core==2.27.2
+ pydeck==0.9.1
+ pygments==2.19.1
+ python-dateutil==2.9.0.post0
+ python-dotenv==1.0.1
+ pytz==2025.1
+ pywin32==310; sys_platform == "win32"  # Windows-only; skipped in the Linux Docker image
+ pyzmq==26.3.0
+ referencing==0.36.2
+ regex==2024.11.6
+ requests==2.32.3
+ rich==13.9.4
+ rpds-py==0.23.1
+ shellingham==1.5.4
+ six==1.17.0
+ smmap==5.0.2
+ sniffio==1.3.1
+ stack-data==0.6.3
+ starlette==0.46.1
+ streamlit==1.43.2
+ tavily-python==0.5.1
+ tenacity==9.0.0
+ tiktoken==0.9.0
+ toml==0.10.2
+ tornado==6.4.2
+ tqdm==4.67.1
+ traitlets==5.14.3
+ typer==0.15.2
+ typing-extensions==4.12.2
+ tzdata==2025.1
+ urllib3==2.3.0
+ uvicorn==0.34.0
+ watchdog==6.0.0
+ wcwidth==0.2.13
+ yarl==1.18.3
utils.py ADDED
@@ -0,0 +1,396 @@
+ import os
+ from collections import Counter
+ from typing import Literal, List
+
+ import instructor
+ import requests
+ from dotenv import load_dotenv
+ from groq import Groq
+ from ollama import chat
+ from pydantic import BaseModel
+ from tavily import TavilyClient
+
+ # Load .env first, then read the API keys from the environment;
+ # never commit secrets to the repository.
+ load_dotenv()
+
+ GROQ_API_KEY = os.getenv("GROQ_API_KEY")
+ ELEVEN_LABS_API_KEY = os.getenv("ELEVEN_LABS_API_KEY")
+ TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
+
+
+ def fetch_from_web(query):
+     tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
+     response = tavily_client.search(
+         query,
+         include_raw_content=True,
+         max_results=10,
+         topic="news",
+         search_depth="basic",
+     )
+     return {"sources": response["results"]}
+
+
+ class Sentiment(BaseModel):
+     summary: str
+     reasoning: str
+     topics: List[str]
+     sentiment: Literal['positive', 'negative', 'neutral']
+
+
+ def analyze_sentiment(article, model_provider):
+     sentiment_prompt = f"""
+     Analyze the following news article about a company:
+
+     1. **Summary**: Provide a comprehensive summary of the article's key points.
+
+     2. **Sentiment Analysis**:
+        - Classify the overall sentiment toward the company as: POSITIVE, NEGATIVE, or NEUTRAL
+        - Support your classification with specific quotes, tone analysis, and factual evidence from the article
+        - Explain your reasoning for this sentiment classification in 2 to 3 lines.
+
+     3. **Key Topics**:
+        - Identify 3-5 main topics discussed in the article
+        - Only give the names of the topics
+
+     Be as detailed and objective as possible in your reasoning.
+
+     Article Title: {article['title']}
+
+     Article: {article['raw_content']}
+     """
+
+     try:
+         if model_provider == "Ollama":
+             # Constrain the local model's output to the Sentiment JSON schema
+             response = chat(
+                 messages=[{'role': 'user', 'content': sentiment_prompt}],
+                 model='llama3.2:3b',
+                 format=Sentiment.model_json_schema(),
+             )
+             sentiment_output = Sentiment.model_validate_json(response.message.content)
+
+             final_dict = {
+                 "title": article["title"],
+                 "summary": sentiment_output.summary,
+                 "reasoning": sentiment_output.reasoning,
+                 "topics": sentiment_output.topics,
+                 "sentiment": sentiment_output.sentiment,
+             }
+         else:
+             # Use instructor to get a validated Sentiment object back from Groq
+             llm = instructor.from_groq(Groq(api_key=GROQ_API_KEY), mode=instructor.Mode.TOOLS)
+
+             resp = llm.chat.completions.create(
+                 model="llama-3.3-70b-versatile",
+                 messages=[{"role": "user", "content": sentiment_prompt}],
+                 response_model=Sentiment,
+             )
+             sentiment_output = resp.model_dump()
+
+             final_dict = {
+                 "title": article["title"],
+                 "summary": sentiment_output.get("summary"),
+                 "reasoning": sentiment_output.get("reasoning"),
+                 "topics": sentiment_output.get("topics"),
+                 "sentiment": sentiment_output.get("sentiment"),
+             }
+
+         return final_dict
+
+     except Exception as e:
+         print(f"Error parsing sentiment output: {e}")
+         return None
+
+
+ def generate_comparative_sentiment(articles):
+     sentiment_counts = {"Positive": 0, "Negative": 0, "Neutral": 0}
+
+     for article in articles:
+         sentiment = article.get("sentiment", "").lower()
+         if sentiment == "positive":
+             sentiment_counts["Positive"] += 1
+         elif sentiment == "negative":
+             sentiment_counts["Negative"] += 1
+         elif sentiment == "neutral":
+             sentiment_counts["Neutral"] += 1
+
+     all_topics = []
+     for article in articles:
+         all_topics.extend(article.get("topics", []))
+
+     # Topics mentioned in more than one article
+     topic_counts = Counter(all_topics)
+     common_topics = [topic for topic, count in topic_counts.items() if count > 1]
+
+     # Topics that appear in a given article and in none of the others
+     unique_topics_by_article = {}
+     for i, article in enumerate(articles):
+         article_topics = set(article.get("topics", []))
+         other_topics = set()
+         for j, other_article in enumerate(articles):
+             if i != j:
+                 other_topics |= set(other_article.get("topics", []))
+         unique_topics_by_article[f"Unique Topics in Article {i + 1}"] = list(
+             article_topics - other_topics
+         )
+
+     comparative_sentiment = {
+         "Sentiment Distribution": sentiment_counts,
+         # Placeholder: coverage-difference analysis is not implemented yet
+         "Coverage Differences": "coverage_differences",
+         "Topic Overlap": {
+             "Common Topics": common_topics,
+             **unique_topics_by_article,
+         },
+     }
+
+     return comparative_sentiment
+
+
+ def get_summaries_by_sentiment(articles):
+     pos_sum = []
+     neg_sum = []
+     neutral_sum = []
+
+     for article in articles:
+         sentiment = article.get("sentiment", "").lower()
+         title = article.get("title", "No Title")
+         summary = article.get("summary", "No Summary")
+
+         article_text = f'Title: {title}\nSummary: {summary}'
+
+         if sentiment == "positive":
+             pos_sum.append(article_text)
+         elif sentiment == "negative":
+             neg_sum.append(article_text)
+         elif sentiment == "neutral":
+             neutral_sum.append(article_text)
+
+     pos_sum = "\n\n".join(pos_sum) if pos_sum else "No positive articles available."
+     neg_sum = "\n\n".join(neg_sum) if neg_sum else "No negative articles available."
+     neutral_sum = "\n\n".join(neutral_sum) if neutral_sum else "No neutral articles available."
+
+     return pos_sum, neg_sum, neutral_sum
+
+
+ def comparative_analysis(pos_sum, neg_sum, neutral_sum, model_provider):
+     prompt = f"""
+     Perform a detailed comparative analysis of the sentiment across three categories of articles (Positive, Negative, and Neutral) about a specific company. Address the following aspects:
+
+     1. **Sentiment Breakdown**: Identify how each category (positive, negative, and neutral) portrays the company. Highlight the language, tone, and emotional cues that shape the sentiment.
+
+     2. **Key Themes and Topics**: Compare the primary themes and narratives within each sentiment group. What aspects of the company's operations, performance, or reputation does each category focus on?
+
+     3. **Perceived Company Image**: Analyze how each sentiment type influences public perception of the company. What impression is created by positive vs. negative vs. neutral coverage?
+
+     4. **Bias and Framing**: Evaluate whether any of the articles reflect explicit biases or specific agendas regarding the company. Are there patterns in how the company is framed across different sentiments?
+
+     5. **Market or Stakeholder Impact**: Discuss potential effects on stakeholders (e.g., investors, customers, regulators) based on the sentiment of each article type.
+
+     6. **Comparative Insights**: Provide a concise summary of the major differences and commonalities between the three sentiment groups. What overall narrative emerges about the company?
+
+     ### Positive Articles:
+     {pos_sum}
+
+     ### Negative Articles:
+     {neg_sum}
+
+     ### Neutral Articles:
+     {neutral_sum}
+     """
+
+     if model_provider == "Ollama":
+         response = chat(
+             messages=[{'role': 'user', 'content': prompt}],
+             model='llama3.2:3b',
+         )
+         response = response.message.content
+     else:
+         llm = Groq(api_key=GROQ_API_KEY)
+         chat_completion = llm.chat.completions.create(
+             messages=[
+                 {
+                     "role": "user",
+                     # Truncate to keep the request within a conservative size budget
+                     "content": prompt[:5000],
+                 }
+             ],
+             model="llama-3.3-70b-versatile",
+         )
+         response = chat_completion.choices[0].message.content
+
+     return response
+
+
+ def generate_final_report(pos_sum, neg_sum, neutral_sum, comparative_sentiment, model_provider):
+     final_report_prompt = f"""
+     Corporate News Sentiment Analysis Report:
+
+     ### 1. Executive Summary
+     - Overview of sentiment distribution: {comparative_sentiment["Sentiment Distribution"]['Positive']} positive, {comparative_sentiment["Sentiment Distribution"]['Negative']} negative, {comparative_sentiment["Sentiment Distribution"]['Neutral']} neutral.
+     - Highlight the dominant narrative shaping the company's perception.
+     - Summarize key drivers behind positive and negative sentiments.
+
+     ### 2. Media Coverage Analysis
+     - Identify major news sources covering the company.
+     - Highlight patterns in coverage across platforms (e.g., frequency, timing).
+     - Identify whether media sentiment shifts over time.
+
+     ### 3. Sentiment Breakdown
+     - **Positive Sentiment:**
+       * Titles and sources: {pos_sum}
+       * Key themes, notable quotes, and focal areas (e.g., product, leadership).
+     - **Negative Sentiment:**
+       * Titles and sources: {neg_sum}
+       * Key themes, notable quotes, and areas of concern.
+     - **Neutral Sentiment:**
+       * Titles and sources: {neutral_sum}
+       * Key themes and neutral narratives.
+
+     ### 4. Narrative Analysis
+     - Identify primary storylines about the company.
+     - Analyze how the company is positioned (positive, neutral, negative).
+     - Detect shifts or emerging narratives over time.
+
+     ### 5. Key Drivers of Sentiment
+     - Identify specific events, announcements, or actions driving media sentiment.
+     - Evaluate sentiment linked to industry trends vs. company-specific factors.
+     - Highlight company strengths and weaknesses based on media portrayal.
+
+     ### 6. Competitive Context
+     - Identify competitor comparisons.
+     - Analyze how media sentiment about the company compares to industry standards.
+     - Highlight competitive advantages or concerns raised by the media.
+
+     ### 7. Stakeholder Perspective
+     - Identify how key stakeholders (e.g., investors, customers, regulators) are represented.
+     - Analyze stakeholder concerns and reputation risks/opportunities.
+
+     ### 8. Recommendations
+     - Suggest strategies to mitigate negative sentiment.
+     - Recommend approaches to amplify positive narratives.
+     - Provide messaging suggestions for future announcements.
+
+     ### 9. Appendix
+     - Full article details (title, publication, date, author, URL).
+     - Sentiment scoring methodology.
+     - Media monitoring metrics (reach, engagement, etc.).
+     """
+
+     if model_provider == "Ollama":
+         final_report = chat(
+             messages=[{'role': 'user', 'content': final_report_prompt}],
+             model='llama3.2:3b',
+         )
+         response = final_report.message.content
+     else:
+         llm = Groq(api_key=GROQ_API_KEY)
+         chat_completion = llm.chat.completions.create(
+             messages=[{"role": "user", "content": final_report_prompt[:5000]}],
+             model="llama-3.3-70b-versatile",
+         )
+         response = chat_completion.choices[0].message.content
+
+     return response
+
+
+ def translate(report, model_provider):
+     translation_prompt = f"""
+     Translate the following corporate sentiment analysis report into Hindi:
+
+     {report}
+
+     Ensure the translation maintains a professional tone and structure while accurately conveying key insights and details.
+     """
+
+     if model_provider == "Ollama":
+         translation = chat(
+             messages=[{'role': 'user', 'content': translation_prompt}],
+             model='llama3.2:3b',
+         )
+         response = translation.message.content
+     else:
+         translation_llm = Groq(api_key=GROQ_API_KEY)
+         chat_completion = translation_llm.chat.completions.create(
+             messages=[{"role": "user", "content": translation_prompt[:5000]}],
+             model="llama-3.3-70b-versatile",
+         )
+         response = chat_completion.choices[0].message.content
+
+     return response
+
+
+ def text_to_speech(text):
+     url = "https://api.elevenlabs.io/v1/text-to-speech/JBFqnCBsd6RMkjVDRZzb?output_format=mp3_44100_128"
+
+     model_id = "eleven_multilingual_v2"
+     output_file = "output.mp3"
+
+     headers = {
+         # Read the key from the environment instead of hardcoding it
+         "xi-api-key": ELEVEN_LABS_API_KEY,
+         "Content-Type": "application/json",
+     }
+
+     payload = {
+         "text": text,
+         "model_id": model_id,
+     }
+
+     response = requests.post(url, headers=headers, json=payload)
+
+     if response.status_code == 200:
+         with open(output_file, "wb") as f:
+             f.write(response.content)
+         print(f"Audio saved to {output_file}")
+         # Return the saved path so api.py can expose it as audio_url
+         return output_file
+
+     print(f"Error: {response.status_code} - {response.text}")
+     return None
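
To make the data contract between these helpers concrete, here is a small sketch that exercises the pure-Python parts of utils.py with hand-made article dicts; no network or model calls are made, and every value below is invented for illustration:

from utils import Sentiment, generate_comparative_sentiment, get_summaries_by_sentiment

# Hand-made stand-ins for analyze_sentiment() output; all values are invented.
articles = [
    {"title": "A", "summary": "Record deliveries.", "reasoning": "Upbeat tone.",
     "topics": ["EVs", "earnings"], "sentiment": "positive"},
    {"title": "B", "summary": "Recall announced.", "reasoning": "Critical tone.",
     "topics": ["EVs", "recall"], "sentiment": "negative"},
]

# The schema both model providers are constrained to emit.
sample = Sentiment(summary="Record deliveries.", reasoning="Upbeat tone.",
                   topics=["EVs", "earnings"], sentiment="positive")
print(sample.model_dump()["sentiment"])          # -> positive

result = generate_comparative_sentiment(articles)
print(result["Sentiment Distribution"])          # -> {'Positive': 1, 'Negative': 1, 'Neutral': 0}
print(result["Topic Overlap"]["Common Topics"])  # -> ['EVs']

pos, neg, neu = get_summaries_by_sentiment(articles)
print(neu)                                       # -> No neutral articles available.

Because analyze_sentiment normalizes both providers' output to the same dict shape, the downstream helpers can be tested this way without any LLM in the loop.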