import streamlit as st
from meta_ai_api import MetaAI
from urllib.parse import urlparse
import pandas as pd
import plotly.express as px
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
import json  # used by parse_analysis() to decode the JSON returned by Meta AI
# Initialize Meta AI API
ai = MetaAI()
PAGE_CONFIG = {
    "page_title": "Meta AI Query Analysis - a Free SEO Tool by WordLift",
    "page_icon": "img/fav-ico.png",
    "layout": "centered"
}
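# Inject a local CSS file into the Streamlit page to customize its styling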
def local_css(file_name):
    with open(file_name) as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
st.set_page_config(**PAGE_CONFIG)
local_css("style.css")
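# Send the user's query to Meta AI and return the raw response dictionary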
def fetch_response(query):
    response = ai.prompt(message=query)
    return response
def display_sources(sources):
    if sources:
        for source in sources:
            # Parse the domain from the URL
            domain = urlparse(source['link']).netloc
            # Format and display the domain and title
            st.markdown(f"- **{domain}**: [{source['title']}]({source['link']})", unsafe_allow_html=True)
    else:
        st.write("No sources available.")
# ---------------------------------------------------------------------------- #
# Sentiment Analysis Function
# ---------------------------------------------------------------------------- #
# Download the VADER lexicon for sentiment analysis
nltk.download('vader_lexicon')
# Initialize the Sentiment Intensity Analyzer
sid = SentimentIntensityAnalyzer()
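# Score each sentence of the response with VADER and return a DataFrame holding
# the sentence text, its neg/neu/pos/compound scores, the dominant sentiment and
# the confidence (the highest of the three class scores)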
def sentiment_analysis(text):
    # Split the text into sentences
    sentences = [sentence.strip() for sentence in text.split('.') if sentence]
    # Create a DataFrame to hold the content and sentiment scores
    df = pd.DataFrame(sentences, columns=['content'])
    # Calculate sentiment scores for each sentence
    df['sentiment_scores'] = df['content'].apply(lambda x: sid.polarity_scores(x))
    # Split sentiment_scores into separate columns
    df = pd.concat([df.drop(['sentiment_scores'], axis=1), df['sentiment_scores'].apply(pd.Series)], axis=1)
    # Determine the dominant sentiment and its confidence
    df['dominant_sentiment'] = df[['neg', 'neu', 'pos']].idxmax(axis=1)
    df['confidence'] = df[['neg', 'neu', 'pos']].max(axis=1)
    return df
# ---------------------------------------------------------------------------- #
# Advanced Analysis
# ---------------------------------------------------------------------------- #
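# Ask Meta AI to analyze the query/response pair and return a structured JSON
# with the user's intent, up to four follow-up questions and the entities found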
def advanced_analysis(query, response_message):
    analysis_prompt = f"Analyze the question of the user '{query}', analyze the response '{response_message}' and provide a JSON that includes: the user's intent, up to four follow-up questions, the entities in the response."
    analysis_response = ai.prompt(message=analysis_prompt)
    return analysis_response
def parse_analysis(analysis_message):
    # Assuming the response message contains a JSON string starting with 'Here is the JSON:'
    json_str = analysis_message.split('Here is the JSON:\n')[1].strip()
    analysis_data = json.loads(json_str)
    return analysis_data
# ---------------------------------------------------------------------------- #
# Main Function
# ---------------------------------------------------------------------------- #
def main():
    # Path to the image
    image_path = 'img/meta-ai-logo.png'  # Replace with your image's filename and extension
    # Create two columns
    col1, col2 = st.columns([1, 2])  # Adjust the ratio as needed for your layout
    # Use the first column to display the image
    with col1:
        st.image(image_path, width=60)
    # Use the second column to display the title and other content
    with col2:
        st.title("Meta AI SEO Tool")
    # User input
    user_query = st.text_area("Enter your query:", height=150)
    submit_button = st.button("Analyze Query")
    if submit_button and user_query:
        # Fetching response from Meta AI
        response = fetch_response(user_query)
        msg = response.get('message', 'No response message.')
        # Write response
        st.write(msg)
        # Run sentiment analysis
        df_sentiment = sentiment_analysis(msg)
        # Advanced analysis with second AI call
        advanced_response = advanced_analysis(user_query, msg)
        advanced_msg = advanced_response.get('message', 'No advanced analysis available.')
        # Parse the advanced analysis response
        analysis_data = parse_analysis(advanced_msg)
        # Display parsed data in a collapsible section
        with st.expander("Show Advanced Analysis"):
            st.write("### User Intent")
            st.write(analysis_data['user_intent'])
            st.write("### Follow-up Questions")
            for question in analysis_data['follow_up_questions']:
                st.write("- " + question)
            st.write("### Identified Entities")
            for entity_type, entities in analysis_data['entities'].items():
                st.write(f"**{entity_type.capitalize()}**: {', '.join(entities)}")
        # Display the sentiment in a collapsible section
        with st.expander("Show Sentiment"):
            # Plot each sentence's dominant sentiment, sized by confidence
            fig = px.scatter(df_sentiment, y='dominant_sentiment', color='dominant_sentiment', size='confidence',
                             hover_data=['content'],
                             color_discrete_map={"neg": "firebrick", "neu": "navajowhite", "pos": "darkgreen"},
                             labels={'dominant_sentiment': 'Sentiment'},
                             title='Sentiment Analysis of the Response')
            fig.update_layout(width=800, height=300)
            st.plotly_chart(fig)
        # Display the sources in a collapsible section
        with st.expander("Show Sources"):
            # Render each source as a clickable link
            display_sources(response.get('sources', []))

if __name__ == "__main__":
    main()
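# To run this app locally you would need Streamlit plus the libraries imported
# above (meta_ai_api, pandas, plotly, nltk) and the img/ and style.css assets;
# assuming the file is saved as app.py, launching it would look like:
#   streamlit run app.py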