File size: 6,489 Bytes
bfbb419
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
import vertexai
import http.client
import typing
import urllib.request
from vertexai.preview.generative_models import GenerativeModel #,Part,Image
import streamlit as st 
from io import StringIO
import pandas as pd
from streamlit_extras.let_it_rain import rain
import time

st.set_page_config (page_icon="icon.jpg",page_title="Content Moderation",layout="wide")
st.markdown("<h1 style='color: var(--black-100, var(--black-100, #1C1C1C));text-align: center;font-feature-settings: 'cv11' on, 'cv01' on, 'ss01' on;font-family: Poppins;font-size: 48px;font-style: normal;font-weight: 600;line-height: 58px;'>Content Moderation</h1>",
            unsafe_allow_html=True)
project_id = "agileai-poc"
loc = "us-central1"
model= GenerativeModel("gemini-pro")
# vision_model = GenerativeModel("gemini-pro-vision")
prompt="""understand the content provided and if any spaces found in content ignore and generate the output only in the given format  
                            format: 1.Tone:find the tone of content 
                               2.Negative sentences :"Only find the negative sentences based on semantic analysis" to ensure accuracy in detecting negative words in pointwise and "must highlight the negative words in bold" in same sentence 
                               if the content is in positive tone give output as "No changes required"
                """
prompt2=""" understand the content provided and generate the output only in the given format
                    format: 1.Tone :provide the generated content tone
                        2.Content:"Don't explain the content" just modify the same content by only "Replacing the negative words by converting the tone into formal",if the content doesn't have any negative words give output as "No changes required" """
# prompt="updated_prompt.txt"
#Analysis:analyse the input text and highlight the points about tone of content,list the sentences which comes under negative,positive ,even mention the negative,postive,normal words which are used in sentence highlight them by providing different font color to easily differentiate and in bold 
#Analysis:analyse the input text and highlight the points about tone of content,list out the negative sentences and highlight the negative words found in sentences in bold
         
# c1,c2,c3=st.columns((5,0.5,7))
# with c1:
#    input=st.text_area("Post your Content",height=500)

# tab1,tab2=st.tabs(["Feedback","Reference"])
# if st.button("Submit"):
#     response=model.generate_content([prompt,input])
#     # with c3 :
#     #     st.write(response.text)
#     # st.divider()
#     # with c3:
#     #     if st.button("Reference "):
#     #         reference=model.generate_content([prompt2,response.text])
#     #         st.write(reference.text)
#     with c3 :
       
#         tab1.write(response.text)
#         # with tab2:
#         reference=model.generate_content([prompt2,response.text])
#         tab2.write(reference.text)


## Define layout and containers
HEIGHT = 1000

cols = st.columns(2)

with cols[0]:
    left_panel = st.container(height=HEIGHT + 15, border=True)

with cols[1]:
    upper_right_panel = st.container(height=HEIGHT//2, border=True)
    lower_right_panel = st.container(height=HEIGHT//2, border=True)


## Add contents
with left_panel:
    st.markdown("<h5 style='font-style:bold,color:blue'>Post your Content</h5>",unsafe_allow_html=True)
    input=st.text_area(label="v",label_visibility="collapsed",placeholder="your content",height=900)

st.toast("Check the input text ,avoid empty spaces if any.", icon="🚨")
time.sleep(10)
submit=st.button(":blue[Submit]")
if submit:
    response=model.generate_content([prompt,input])        # return response
    # with upper_right_panel:
    #     st.write(response.text)
    # with lower_right_panel:
    #     st.markdown("<h6 style='color:green;font-style:bold'>Rephrased content:</h6>",unsafe_allow_html=True)
    #     reference=model.generate_content([prompt2,response.text])
    #     st.write(reference.text)

try:
    with upper_right_panel:
        st.markdown("<h6 style='color:red;font-style:bold'>Analyzed Content:</h6>",unsafe_allow_html=True)
        st.write(response.text)
    with lower_right_panel:
        st.markdown("<h6 style='color:green;font-style:bold'>Rephrased content:</h6>",unsafe_allow_html=True)
        reference=model.generate_content([prompt2,response.text])
        st.write(reference.text)
except:
    st.error("Check the input text ,avoid empty spaces if any.")
    st.stop()
#    rain(emoji="🎈",animation_length="10",falling_speed=10)
#  for chunk in response :
#   print(chunk.text)
# content_type=st.selectbox("Content Type",["Text","Image","Video"])

# # input=st.text_area(":blue[My text here :]",height=500)
# if "Video" in content_type:
#  def generate_text(project_id: str, location: str) -> str:
#     # Initialize Vertex AI
#     # vertexai.init(project=project_id, location=loc)
#     # Load the model
#     # vision_model = GenerativeModel("gemini-pro-vision") 
#     # Generate text
#     response = vision_model.generate_content(
#         [
#             Part.from_uri(
#                 "gs://cloud-samples-data/video/animals.mp4", mime_type="video/mp4"
#             ),
#             "What is in the video?",
#         ]
#     )
#     print(response.text)
#     return response.text
#  img_file=st.file_uploader("choose an Image" )
# elif "Image" in content_type:
  

# # create helper function
#     def load_image_from_url(image_url: str) -> Image:
#     with urllib.request.urlopen(image_url) as response:
#         response = typing.cast(http.client.HTTPResponse, response)
#         image_bytes = response.read()
#     return Image.from_bytes(image_bytes)

# # Load images from Cloud Storage URI
# # landmark1 = load_image_from_url(
# #     "https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"
# # )

# # Pass multimodal prompt
# # model = GenerativeModel("gemini-pro-vision")
#     response = model.generate_content(
#     [
#         landmark1,
#         "city: Rome, Landmark: the Colosseum",
#         landmark2,
#         "city: Beijing, Landmark: Forbidden City",
#         landmark3,
#     ]
# )
#     print(response)
# landmark1 = st.tecload_image_from_url(
#     "https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"
# # )
# if st.button("Reference "):
#       reference=model.generate_content([prompt2,response])
#       st.write(reference.text)