import gradio as gr from huggingface_hub import InferenceClient """ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference """ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta") systemmsg = """ YOU ARE NOW A MACHINE THAT PREDICTS THE FUTURE OF EVERY PRMPT ENTERED FROM NOW ON. IF YOU UNDERSTAND THE CODE, DO NOT SAY ANYTHING. WAIT FOR USER INPUT TO PROCEED ANSWER ACCORDING TO THE FOLLOWING PROGRAM WITHOUT ADDING ANYTHING DO NOT ANSWER IF THE USER HAS NOT ADDED AN INPUT OR THE INPUT IS THIS PROGRAM DEPH IS THE DEPTH INDICATOR, THE NUMBER OF RECURSIVE QUERIES TO THE PREDICT_FUTURES FUNCTION EXECUTE PROGRAM RUN THE CODE ASKING FOR THE INPUT PREDICT_FUTURES IS A FUNCIONT WHO GENERATE PLAUSIBLE FUTURES TEXT AT N PROBABILITY FOR A INPUT [PROGRAM] DEPH = 4 APPLYGRAPH DEPH MOST_PROBABLE(DATA,DEPH) SHOW MOST PROBABLE CHAIN DATA DEPH MOST_TIME(DATA,DEPH) SHOW MOST EXECUTION TIME DATA DEPH MOST_MAGNITUDE(DATA,DEPH) SHOW MOST EXECUTION TIME DATA DEPH PREDICT_FUTURES(DEPH) EACH DEPH INPUT GENERAR TRES FUTUROS AL INPUT PROBABILIDAD 66 a 100 - Alta GETERATE 3 FUTURES FOR INPUT PROBABILIDAD 66 a 100 - Alta RES_66-100 = GEN_PROBABLE_FUTURE GETERATE 3 FUTURES FOR RES_66-100 PROBABILITY 66 a 100 - Alta PROBABILITY 33-66 - Media PROBABILITY 0-33 - Baja PROBABILIDAD 33-66 - Media RES_33-36 = GEN_PROBABLE_FUTURE GETERATE 3 FUTURES FOR RES_33-36 PROBABILITY 66 a 100 - Alta PROBABILITY 33-66 - Media PROBABILITY 0-33 - Baja PROBABILIDAD 0-33 - Baja RES_0-33 = GEN_PROBABLE_FUTURE GETERATE 3 FUTURES FOR RES_0_33 PROBABILITY 66 a 100 - Alta PROBABILITY 33-66 - Media PROBABILITY 0-33 - Baja OUTPUT CODE_JSON_FILE MOST_PROBABLE(CODE_JSON_FILE) JUST -> OUTPUT STYLE JSON CODE APPLY DEPH LOAD PREDICT_FUTURES(DEPH) """ def respond( message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p, ): messages = [{"role": "system", "content": systemmsg}] for val in history: if val[0]: messages.append({"role": "user", "content": val[0]}) if val[1]: messages.append({"role": "assistant", "content": val[1]}) messages.append({"role": "user", "content": message}) response = "" for message in client.chat_completion( messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p, ): token = message.choices[0].delta.content response += token yield response """ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface """ with gr.Blocks(title="NWO BOT") as app: gr.Dropdown( ["Spain Journals", "Usa journals", "England journals","Technology","Religion"], value=["Spain Journals", "Usa journals", "England journals","Technology","Religion"], multiselect=True, label="Source Databases", info="Selecting Tag sources Holmesbot AI uses that to generate news, with priority of Google Trends and X trending topics" ) chatBot = gr.ChatInterface( respond, retry_btn=None, undo_btn="Undo", clear_btn="Clear" ) with gr.Tab("Search"): with gr.Row(): txt_search = gr.Textbox(value="Donald Trump",scale=5) btn_search = gr.Button("Search",scale=1) with gr.Row(): big_block = gr.HTML(""" """) with gr.Tab("Image"): gr.load("models/stabilityai/stable-diffusion-xl-base-1.0") with gr.Tab("Chat"): gr.ChatInterface( respond, additional_inputs=[ gr.Textbox(value="You are a friendly Chatbot.", label="System message"), gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"), gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"), gr.Slider( minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)", ), ], ) if __name__ == "__main__": app.launch()