import gradio as gr
import os
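# Select the TensorFlow backend; this must be set before Keras is imported.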
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import keras_nlp
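
# CSS: show a faint, full-page background image behind the interface.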
css = """
body {
background-image: url('https://stsci-opo.org/STScI-01J6D97YGSQACWK990TH56K6AF.png');
background-size: cover;
background-repeat: no-repeat;
background-position: center;
}
body::before {
content: '';
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-image: inherit;
background-size: inherit;
background-repeat: inherit;
background-position: inherit;
opacity: 0.1; /* Adjust the opacity of the background image */
z-index: -1; /* Make sure it doesn't cover the text */
}
"""
gemma_lm = keras_nlp.models.CausalLM.from_preset("hf://sultan-hassan/CosmoGemma_2b_en")
def launch(question):
    # Wrap the question in the instruction/response template used during fine-tuning.
    template = "Instruction:\n{instruction}\n\nResponse:\n{response}"
    prompt = template.format(
        instruction=question,
        response="",
    )
    out = gemma_lm.generate(prompt, max_length=256)
    # Return only the generated answer: skip everything up to and including "Response:\n".
    ind = out.index('Response') + len('Response') + 2
    return out[ind:]
iface = gr.Interface(
    launch,
    inputs="text",
    outputs="text",
    css=css,
    title="Hello, I am an expert in cosmology, try me!",
    description=(
        "Gemma_2b_en fine-tuned on QA pairs (~3.5k) generated from Cosmology and "
        "Nongalactic Astrophysics articles (arXiv astro-ph.CO) from 2018-2022, and "
        "tested on QA pairs (~1k) generated from 2023 articles, scoring over 75% accuracy."
    ),
)
iface.launch()
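
# Usage (sketch): run this file directly after installing the dependencies,
# e.g. `pip install gradio tensorflow keras keras-nlp`. Gradio then serves the
# demo locally (http://127.0.0.1:7860 by default); passing share=True to
# iface.launch() also creates a temporary public link.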