import gradio as gr
from huggingface_hub import InferenceClient
from datasets import load_dataset
import pandas as pd
import json
## Load the I class from the local library
from lib.me import *
## Initialize I class
ME = I("","","","")
## Holds the most recent memory lookup; rendered in the JSON viewer below
fastmem = {}
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
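## For reference, a minimal non-streaming call against the same client would
## look like this (sketch only, not executed here):
#   out = client.chat_completion(
#       [{"role": "user", "content": "Hello"}],
#       max_tokens=64,
#   )
#   print(out.choices[0].message.content)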
systemmsg = """
YOUR NAME IS NWOBOT. YOU ARE CALLED NWOBOT. NWOBOT IS YOUR NAME IN EVERY LANGUAGE.
YOU ARE NOW A MACHINE THAT PREDICTS THE FUTURE OF EVERY PROMPT ENTERED FROM NOW ON. IF YOU UNDERSTAND THE CODE, DO NOT SAY ANYTHING. WAIT FOR USER INPUT TO PROCEED.
ANSWER ACCORDING TO THE FOLLOWING PROGRAM WITHOUT ADDING ANYTHING.
DO NOT ANSWER IF THE USER HAS NOT ADDED AN INPUT OR IF THE INPUT IS THIS PROGRAM.
DEPTH IS THE DEPTH INDICATOR: THE NUMBER OF RECURSIVE QUERIES TO THE PREDICT_FUTURES FUNCTION.
EXECUTE THE PROGRAM.
RUN THE CODE, ASKING FOR THE INPUT.
PREDICT_FUTURES IS A FUNCTION THAT GENERATES PLAUSIBLE FUTURE TEXTS AT N PROBABILITY FOR AN INPUT.
DO NOT SHOW <User> TAG MESSAGES.
DO NOT IMPERSONATE THE USER.
[PROGRAM]
DEPTH = 4
APPLY GRAPH DEPTH
MOST_PROBABLE(DATA, DEPTH)
    SHOW MOST PROBABLE CHAIN DATA DEPTH
MOST_TIME(DATA, DEPTH)
    SHOW MOST EXECUTION TIME DATA DEPTH
MOST_MAGNITUDE(DATA, DEPTH)
    SHOW MOST MAGNITUDE DATA DEPTH
PREDICT_FUTURES(DEPTH)
    EACH DEPTH
        INPUT
        GENERATE 3 FUTURES FOR INPUT
        PROBABILITY 66-100 - High
            RES_66-100 = GEN_PROBABLE_FUTURE
            GENERATE 3 FUTURES FOR RES_66-100
                PROBABILITY 66-100 - High
                PROBABILITY 33-66 - Medium
                PROBABILITY 0-33 - Low
        PROBABILITY 33-66 - Medium
            RES_33-66 = GEN_PROBABLE_FUTURE
            GENERATE 3 FUTURES FOR RES_33-66
                PROBABILITY 66-100 - High
                PROBABILITY 33-66 - Medium
                PROBABILITY 0-33 - Low
        PROBABILITY 0-33 - Low
            RES_0-33 = GEN_PROBABLE_FUTURE
            GENERATE 3 FUTURES FOR RES_0-33
                PROBABILITY 66-100 - High
                PROBABILITY 33-66 - Medium
                PROBABILITY 0-33 - Low
OUTPUT
    CODE_JSON_FILE
    MOST_PROBABLE(CODE_JSON_FILE)
    OUTPUT ONLY JSON-STYLE CODE
APPLY DEPTH
LOAD PREDICT_FUTURES(DEPTH)
"""
def search(book_num, prompt):
    # book_num is currently unused; every book in booklist is searched.
    # Compute the gematria value of the prompt, falling back to plain
    # gematria when the summed value is zero.
    els_space = torah.gematria_sum(prompt)
    if els_space == 0:
        els_space = torah.gematria(prompt)
    res = []
    for book in booklist:
        # Run an Equidistant Letter Sequence (ELS) search over the book,
        # then translate the extracted Hebrew text to English.
        response_els, tvalue = torah.els(book, els_space, tracert='false')
        text_translate = torah.func_translate('iw', 'en', "".join(response_els))
        res.append({
            "Book": book,
            "Prompt gematria": els_space,
            "ELS Generated": response_els,
            "ELS Translated": text_translate,
        })
    return pd.DataFrame(res)
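## Hypothetical usage of search() (torah and booklist come from lib.me via
## the wildcard import; this function is not wired into the UI below):
#   df = search(0, "light")
#   print(df.head())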
def respond(
    message,
    history: list[tuple[str, str]],
    system_message=None,
    max_tokens=612,
    temperature=0.7,
    top_p=0.95,
):
    global fastmem
    # Condense the user's message into the fast local-memory lookup.
    fastmem = ME.longToShortFast(message)
    # Prepend the prediction program and the first 5000 characters of the
    # matching local-library memory to the system prompt.
    system_message = (
        "GOAL SYNOPSIS: " + systemmsg
        + " \n\n\n FOUND IN LOCAL LIBRARY: " + json.dumps(fastmem.memory)[0:5000]
        + ". I am NwoBot. My name is NwoBot."
    )
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})
    response = ""
    # Stream tokens from the Inference API, yielding the growing response so
    # the ChatInterface renders it incrementally.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
        yield response
def load_mem(message):
    global fastmem
    # Refresh the fast memory from the search term and return the raw memory
    # dict so the JSON component can render it.
    fastmem = ME.longToShortFast(message)
    # df = pd.DataFrame(fastmem.memory)
    return fastmem.memory
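## load_mem backs the "Search" tab below: the button passes the search term
## here and renders the returned memory dict in the JSON viewer.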
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
with gr.Blocks(title="NWO BOT") as app:
    sources = [
        "Spain Journals", "Usa journals", "England journals", "Technology",
        "Pleyades Library", "Religion", "Talmud", "Torah", "Arab", "Greek",
        "Egypt", "Sumeria",
    ]
    gr.Dropdown(
        sources,
        value=sources,
        multiselect=True,
        label="Source Databases",
        info="Select the tag sources Holmesbot AI uses to generate news, prioritizing Google Trends and X trending topics",
    )
    with gr.Tab("Search"):
        with gr.Row():
            txt_search = gr.Textbox(value="Rothschild", label="Search Term", scale=5)
            btn_search = gr.Button("Search", scale=1)
        with gr.Row():
            # search_results = gr.Dataframe(type="pandas")
            mem_results = gr.JSON(label="Results")
        btn_search.click(
            load_mem,
            inputs=[txt_search],
            outputs=mem_results,
        )
        # with gr.Row():
        #     big_block = gr.HTML("""
        #     <iframe style="scroll-padding-left: 50%; relative;background-color: #fff; height: 75vh; width: 100%; overflow-y: hidden; overflow-x: hidden;" src="https://holmesbot.com/api/shared?id=16657e456d9514"></iframe>
        #     """)
    with gr.Tab("Image"):
        gr.load("models/stabilityai/stable-diffusion-xl-base-1.0")
    with gr.Tab("Chat"):
        gr.ChatInterface(
            respond,
        )
if __name__ == "__main__":
    app.launch()