import streamlit as st
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

# Example dataset from the Hugging Face Hub (loaded here but not used below).
dataset = load_dataset("diylocals/TestData")
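
# The returned DatasetDict can be inspected before use, e.g.:
#     print(dataset)                 # lists the available splits
#     print(dataset["train"][0])     # first record, assuming a "train" split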

# Download the Granite 3.0 8B Instruct model and its tokenizer from the Hub.
model_name = "ibm-granite/granite-3.0-8b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
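
# Streamlit reruns this script on every interaction, which would reload the
# 8B model each time. A minimal caching sketch (assuming Streamlit >= 1.18,
# which provides st.cache_resource):
#
#     @st.cache_resource
#     def load_granite(name):
#         tok = AutoTokenizer.from_pretrained(name)
#         mdl = AutoModelForCausalLM.from_pretrained(name)
#         return tok, mdl
#
#     tokenizer, model = load_granite(model_name)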

st.title("IBM Granite Model Analysis")

user_input = st.text_area("Enter text for analysis (e.g., voltage readings):", "")

if st.button("Analyze"):
    if user_input:
        # Tokenize the input and run generation. The max_new_tokens budget is
        # an assumption; without it, generate() falls back to a short default.
        inputs = tokenizer(user_input, return_tensors="pt")
        outputs = model.generate(**inputs, max_new_tokens=256)
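
        # Granite 3.0 Instruct is chat-tuned, so wrapping the prompt in the
        # model's chat template usually beats feeding raw text. A sketch using
        # the standard transformers API:
        #
        #     messages = [{"role": "user", "content": user_input}]
        #     ids = tokenizer.apply_chat_template(
        #         messages, add_generation_prompt=True, return_tensors="pt"
        #     )
        #     outputs = model.generate(ids, max_new_tokens=256)
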
        # Decode and display the result (the decoded sequence includes the
        # prompt, since outputs[0] covers input and generated tokens).
        output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        st.write("Model Output:")
        st.write(output_text)
    else:
        st.warning("Please enter some text for analysis.")
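
# To try the app locally (assuming this file is saved as app.py):
#     streamlit run app.py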