# Meta AI Query Analysis — a free SEO tool by WordLift (Streamlit app).
import streamlit as st
from meta_ai_api import MetaAI
from urllib.parse import urlparse
import pandas as pd
import plotly.express as px
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
import json
# Initialize Meta AI API
# NOTE(review): MetaAI() appears to need no credentials here — confirm against meta_ai_api docs.
ai = MetaAI()
# Streamlit page settings, unpacked into st.set_page_config below.
PAGE_CONFIG = {
    "page_title": "Meta AI Query Analysis - a Free SEO Tool by WordLift",
    "page_icon": "img/fav-ico.png",
    "layout": "centered"
}
def local_css(file_name):
    """Inject a local CSS file into the Streamlit page.

    Args:
        file_name: Path to a CSS file, relative to the app's working directory.

    Raises:
        FileNotFoundError: If the stylesheet does not exist.
    """
    # Explicit encoding avoids platform-dependent default decoding of the file.
    with open(file_name, encoding="utf-8") as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
# Apply the page configuration, then inject the custom stylesheet, at import time.
st.set_page_config(**PAGE_CONFIG)
local_css("style.css")
def fetch_response(query):
    """Send *query* to the Meta AI client and return its raw response dict."""
    return ai.prompt(message=query)
def display_sources(sources):
    """Render each source as a markdown bullet: bold domain plus a clickable title.

    Shows a fallback message when *sources* is empty or None.
    """
    if not sources:
        st.write("No sources available.")
        return
    for src in sources:
        # Extract just the host part of the source URL for display.
        domain = urlparse(src['link']).netloc
        st.markdown(f"- **{domain}**: [{src['title']}]({src['link']})", unsafe_allow_html=True)
# ---------------------------------------------------------------------------- #
# Sentiment Analysis Function
# ---------------------------------------------------------------------------- #
# Download the VADER lexicon for sentiment analysis
# NOTE(review): this downloads on every import; consider caching/guarding it.
nltk.download('vader_lexicon')
# Initialize the Sentiment Intensity Analyzer (module-level singleton).
sid = SentimentIntensityAnalyzer()
def sentiment_analysis(text):
    """Score each sentence of *text* with VADER sentiment analysis.

    Args:
        text: Free-form text; split naively on '.' into sentences.

    Returns:
        DataFrame with columns: content, neg, neu, pos, compound,
        dominant_sentiment ('neg'/'neu'/'pos'), and confidence (that
        sentiment's score). Empty (but well-formed) for blank input.
    """
    # Strip BEFORE filtering so whitespace-only fragments (e.g. after "end. ")
    # are dropped instead of becoming empty-content rows.
    sentences = [s.strip() for s in text.split('.') if s.strip()]
    if not sentences:
        # Guard: with no rows, the column expansion below would fail and
        # idxmax would raise on missing columns.
        return pd.DataFrame(columns=['content', 'neg', 'neu', 'pos', 'compound',
                                     'dominant_sentiment', 'confidence'])
    df = pd.DataFrame(sentences, columns=['content'])
    # polarity_scores returns {'neg': .., 'neu': .., 'pos': .., 'compound': ..};
    # expand that dict into one column per score.
    scores = df['content'].apply(sid.polarity_scores).apply(pd.Series)
    df = pd.concat([df, scores], axis=1)
    # Dominant sentiment is the largest of the three component scores.
    df['dominant_sentiment'] = df[['neg', 'neu', 'pos']].idxmax(axis=1)
    df['confidence'] = df[['neg', 'neu', 'pos']].max(axis=1)
    return df
# ---------------------------------------------------------------------------- #
# Advanced Analysis
# ---------------------------------------------------------------------------- #
def fetch_advanced_analysis(query, msg):
    """Ask the AI to analyze a query/response pair and return structured JSON.

    Args:
        query: The user's original query text.
        msg: The AI's first-pass response message.

    Returns:
        The raw response dict from ai.prompt(); its 'message' field is
        expected (not guaranteed) to contain the JSON described in the prompt.
    """
    # The prompt embeds both texts and shows the model the exact JSON shape
    # we want back: user_intent, follow_up_questions, entities.
    analysis_prompt = f"""
    Analyze the user's request: '{query}', and the response: '{msg}'.
    Based on this analysis, generate a detailed JSON response including:
    1. The user's intent,
    2. Up to four follow-up questions,
    3. The main entities mentioned in the response.
    Example of expected JSON format:
    {{
      "user_intent": "Identify the effects of climate change on polar bears",
      "follow_up_questions": [
        "What are the primary threats to polar bears today?",
        "How does the melting ice affect their habitat?",
        "What conservation efforts are in place for polar bears?",
        "How can individuals contribute to these efforts?"
      ],
      "entities": {{
        "animal": ["polar bears"],
        "issue": ["climate change"],
        "actions": ["conservation efforts"]
      }}
    }}
    """
    # Assume ai is an initialized MetaAI instance that can send prompts to the AI service
    advanced_response = ai.prompt(message=analysis_prompt)
    return advanced_response
def parse_analysis(analysis_message):
    """Extract and decode the JSON object embedded in an AI reply.

    Takes the span from the first '{' to the last '}' and parses it.

    Args:
        analysis_message: Raw reply text expected to contain a JSON object.

    Returns:
        The decoded dict on success; otherwise a dict with an 'error' key
        (plus 'details' when JSON decoding itself failed).
    """
    start = analysis_message.find('{')
    end = analysis_message.rfind('}')
    # BUG FIX: the original computed end = rfind('}') + 1, so a missing '}'
    # (rfind -> -1) became end == 0 and the `end != -1` guard never fired,
    # sending an empty/garbage slice into json.loads. Check the raw indices.
    if start == -1 or end == -1 or end < start:
        return {"error": "Valid JSON data not found in the response"}
    try:
        return json.loads(analysis_message[start:end + 1])
    except json.JSONDecodeError as e:
        return {"error": "Failed to decode JSON", "details": str(e)}
# ---------------------------------------------------------------------------- #
# Main Function
# ---------------------------------------------------------------------------- #
def main():
    """Streamlit entry point: collect a query, display Meta AI's answer, then
    advanced analysis, per-sentence sentiment, and sources in expanders."""
    # Path to the image
    image_path = 'img/meta-ai-logo.png'  # Replace with your image's filename and extension
    # Two-column header: logo on the left, title on the right.
    col1, col2 = st.columns([1, 2])  # Adjust the ratio as needed for your layout
    # Use the first column to display the image
    with col1:
        st.image(image_path, width=60)
    # Use the second column to display the title and other content
    with col2:
        st.title("Meta AI SEO Tool")
    # User input
    user_query = st.text_area("Enter your query:", height=150)
    submit_button = st.button("Analyze Query")
    # Only run the pipeline when the button was clicked AND text was entered.
    if submit_button and user_query:
        # Fetching response from Meta AI
        response = fetch_response(user_query)
        msg = response.get('message', 'No response message.')
        # Write response
        st.write(msg)
        # Run sentiment analysis
        df_sentiment = sentiment_analysis(msg)
        # Fetch advanced analysis (intent / follow-ups / entities as JSON)
        advanced_response = fetch_advanced_analysis(user_query, msg)
        advanced_msg = advanced_response.get('message', 'No advanced analysis available.')
        # Parse the advanced analysis response
        analysis_data = parse_analysis(advanced_msg)
        if "error" in analysis_data:
            st.error("Error in analysis: " + analysis_data["error"])
            # 'details' is only present on JSON decode failures.
            if "details" in analysis_data:
                st.error("Details: " + analysis_data["details"])
        else:
            # Display parsed data in a collapsible section
            with st.expander("Show Advanced Analysis"):
                st.write("### User Intent")
                st.write(analysis_data['user_intent'])
                st.write("### Follow-up Questions")
                for question in analysis_data['follow_up_questions']:
                    st.write("- " + question)
                st.write("### Identified Entities")
                for entity_type, entities in analysis_data['entities'].items():
                    st.write(f"**{entity_type.capitalize()}**: {', '.join(entities)}")
        # Display the sentiment in a collapsible section
        with st.expander("Show Sentiment"):
            # Scatter of per-sentence dominant sentiment, point size = confidence.
            fig = px.scatter(df_sentiment, y='dominant_sentiment', color='dominant_sentiment', size='confidence',
                             hover_data=['content'],
                             color_discrete_map={"neg": "firebrick", "neu": "navajowhite", "pos": "darkgreen"},
                             labels={'dominant_sentiment': 'Sentiment'},
                             title='Sentiment Analysis of the Response')
            fig.update_layout(width=800, height=300)
            st.plotly_chart(fig)
        # Display the AI response in a collapsible section
        with st.expander("Show Sources"):
            # Display sources with clickable links in a collapsible section
            display_sources(response.get('sources', []))
# Standard script entry guard: run the app only when executed directly.
if __name__ == "__main__":
    main()