# inference.py — load a fine-tuned Qwen2.5-Coder model (via Unsloth) and stream
# its refinement of a C# snippet. (Removed GitLab web-UI paste residue.)
from unsloth import FastLanguageModel

# Runtime configuration for model loading.
max_seq_length = 32768 # Choose any! We auto support RoPE Scaling internally!
dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.

# Load the locally fine-tuned checkpoint (uncomment the second model_name to
# fall back to the base instruct model instead).
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "../fine-tuning/models/finetuned_model_with_three_epochs_eval",
    #model_name = "unsloth/Qwen2.5-Coder-7B-Instruct",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)
FastLanguageModel.for_inference(model) # Enable native 2x faster inference

# C# source the model is asked to refine.
# NOTE: the original pasted version opened this literal with """ but "closed" it
# with ''' eight lines later, fusing the code and the prompt into one unclosed
# string (a SyntaxError) and leaving `content` undefined. Reconstructed here as
# two separate strings with the {code} placeholder actually substituted.
code = """
using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; using ConsignmentCompanyProject.com.app.dataobjects; using ConsignmentCompanyProject.com.app.model; namespace ConsignmentCompanyProject.com.app.business { class CustomerMainWindowHandler { OrderDBProcessHandler customerOrderDataHandler = new OrderDBProcessHandler(); public List<OrderProperties> getOrders(string vendorId,string orderStatus) { if (orderStatus==null) { return customerOrderDataHandler.getMultipleOrdersInfo(vendorId, null); } else { return customerOrderDataHandler.getMultipleOrdersInfo(vendorId, orderStatus); } } public bool cancelExistingOrder(OrderProperties cancelOrderProperties) { return customerOrderDataHandler.cancelOrder(cancelOrderProperties); } } }
"""

# User prompt: instruction plus the C# snippet embedded between [C#] tags.
content = f"""Refine the C# code enclosed within tags [C#] and [/C#]. 
Provide the refined code enclosed within tags [refined_C#] and [/refined_C#] and summary of changes enclosed within tags [code_changes] and [/code_changes].
[C#]
{code}
[/C#]
"""

messages = [
    {"role": "user", "content": content},
]
# Render the chat template around the message list and tokenize it in one step;
# returns a tensor of input ids moved to the GPU.
# NOTE(review): assumes a CUDA device is available — confirm before running on CPU-only hosts.
inputs = tokenizer.apply_chat_template(
    messages,
    tokenize = True,
    add_generation_prompt = True, # Must add for generation
    return_tensors = "pt",
).to("cuda")

from transformers import TextStreamer

# Stream the model's reply token-by-token to stdout as it is generated,
# skipping the echoed prompt.
text_streamer = TextStreamer(tokenizer, skip_prompt = True)
_ = model.generate(input_ids = inputs,
                   streamer = text_streamer,
                   max_new_tokens = 10000,
                   # Fix: without do_sample=True, transformers uses greedy decoding
                   # and silently ignores temperature, so the original temperature=0.2
                   # had no effect.
                   do_sample = True,
                   temperature = 0.2
                   )