from unsloth import FastLanguageModel
import torch
import gradio as gr

# --- Model configuration ----------------------------------------------------
max_seq_length = 2048  # Choose any! We auto support RoPE Scaling internally!
dtype = None  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = True  # Use 4bit quantization to reduce memory usage. Can be False.

# Alpaca-style prompt template (Indonesian). The three {} slots are filled
# with (instruction, input, response) in that order; the response slot is
# left empty at generation time.
alpaca_prompt = """Berikut adalah instruksi yang deskripsikan tugas dan sepasang input dan konteksnya. Tulis response sesuai dengan permintaan.

### Instruction:
{}

### Input:
{}

### Response:
{}"""

# Load the fine-tuned LoRA model and its tokenizer once at startup.
# (The original wrapped this in a redundant `if True:` and re-imported
# FastLanguageModel a second time; both removed.)
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "abdfajar707/llama3_8B_lora_model_rkp_pn2025_v3",  # YOUR MODEL YOU USED FOR TRAINING
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)
FastLanguageModel.for_inference(model)  # Enable native 2x faster inference


# Function to generate a response from the model.
def generate_response(prompt, max_length=1000):
    """Format *prompt* with the Alpaca template and generate a completion.

    NOTE(review): ``max_length`` caps prompt + completion tokens together;
    if answers get truncated, switch to ``max_new_tokens``. Also assumes a
    CUDA device is available — TODO confirm deployment target.
    """
    inputs = tokenizer(
        [
            alpaca_prompt.format(
                prompt,  # instruction
                "",      # input
                "",      # output - leave this blank for generation!
            )
        ],
        return_tensors="pt",
    ).to("cuda")
    outputs = model.generate(
        **inputs,
        max_length=max_length,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Running conversation log shared across calls: a flat list of
# {"role": ..., "content": ...} dicts, appended to by `converse`.
history = []


def wrapper_chat_history(chat_history, history):
    # Returns history minus its first entry. Currently unused by the UI
    # below; kept for backward compatibility with any external callers.
    return history[1:]


def converse(message, chat_history):
    """Gradio ChatInterface callback: record the turn, return the reply.

    ``chat_history`` (Gradio's own per-session history) is ignored; the
    module-level ``history`` list is used instead.
    """
    response = generate_response(message)
    print(response)
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": response})
    return history[-1]["content"]


DESCRIPTION = '''
AI-Interlinked System/Bappenas GPT
'''

LICENSE = """
---
Dibangun dari Meta Llama 3
"""

PLACEHOLDER = """
Asisten Virtual Perencana

Silakan mulai tanya...
"""

css = """
h1 {
  text-align: center;
  display: block;
}
#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
"""

chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER, label='Interlinked Sytem ChatInterface')

with gr.Blocks(css=css) as interface:
    # NOTE(review): the original contained a stray no-op `chatbot=chatbot,`
    # statement here (it only built a one-element tuple). Removed. The
    # `chatbot` component above is still never passed to gr.ChatInterface —
    # pass `chatbot=chatbot` below if the custom height/placeholder/label
    # were intended to take effect; left unwired to preserve behavior.
    with gr.Row():
        with gr.Column(scale=1):
            gr.HTML('Image')
    with gr.Row():
        with gr.Column(scale=1, elem_id='col'):
            gr.ChatInterface(
                fn=converse,
                title=("""
KemenPPN/Bappenas
AI-Interlinked System/Bappenas GPT
"""),
            )

interface.launch()