Compare revisions: atharvavjadhav21/codebud
Commits on Source (8)
Showing with 1035 additions and 0 deletions
data_generation/logs
fine-tuning/logs
fine-tuning/models
fine-tuning/outputs
fine-tuning/wandb
data_standardization/logs
inference/logs
inference/logs_old
inference/unsloth_compiled_cache
evaluation/logs
evaluation/__pycache__/prometheus_eval.cpython-311.pyc
import re
import datetime

import torch
import transformers
from datasets import load_dataset, Dataset, concatenate_datasets
from transformers import AutoTokenizer


def data_generator(dataset):
    # Yield the buffered rows so Dataset.from_generator can rebuild the batch.
    for row in dataset:
        yield row


start_model_loading = datetime.datetime.now()
model = "meta-llama/Llama-3.1-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)
end_model_loading = datetime.datetime.now()
print(f'Model: {model} loaded successfully in {(end_model_loading - start_model_loading).total_seconds()} seconds.')

original_dataset = load_dataset(
    "codeparrot/github-code",
    streaming=True,
    split="train",
    licenses=["mit", "isc"],
    languages=["C#"],
    filter_languages=True,
    trust_remote_code=True,
)

BATCH_SIZE = 100
instance_number = 0
batch_dataset = []
is_dataset_created = False
batches_pushed = 0

for example in original_dataset:
    prompt = f"""
<|begin_of_text|><|start_header_id|>system<|end_header_id|>
You are a C# expert.
Your task is to refine the C# code enclosed within tags [C#] and [/C#].
Refined code should be enclosed with tags [refined_C#] and [/refined_C#].
Summary of changes should be enclosed with [code_changes] and [/code_changes].
You do not do anything more than the user asks you to do.
You do not generate any additional text.
<|eot_id|><|start_header_id|>user<|end_header_id|>
You have to refine the code based on these principles:
1. Class name: Refine the C# class name by making sure:
a. It is PascalCase.
b. It is logical. By logical, I mean it should be a noun and it should denote what the class does.
2. Property name: Refine the properties in the class by making sure:
a. Private properties have _ before their names and public properties are PascalCase.
b. It is logical. By logical, I mean there should be no unnecessary repetition. For example, a property named 'empId' in class Employee is not clean, as 'emp' is unnecessary: the class name already sets the context. The correct name would be 'id'. But make sure that the property name still makes sense.
3. Object name: Refine the objects instantiated by making sure:
a. It is logical. By logical, I mean the names should be expressive. For example, 'Employee e' is not as expressive as 'Employee employee'.
4. Method name: Refine the methods in the class by making sure:
a. It is logical. By logical, I mean the name of a method should express what it does.
b. It is PascalCase.
[C#]
{example["code"]}
[/C#]<|eot_id|><|start_header_id|>assistant<|end_header_id|>[refined_C#]
"""
    no_of_retries = 0
    should_try = True
    while should_try:
        sequences = pipeline(
            prompt,
            temperature=0.2,
            top_p=0.9,
            num_return_sequences=1,
            eos_token_id=tokenizer.eos_token_id,
            max_length=5000,
            repetition_penalty=1.1,
        )
        output = ""
        for seq in sequences:
            output += seq["generated_text"]
        # The generated_text includes the prompt, which mentions each tag pair
        # once, so a successful generation yields two regex matches; the second
        # match is the model's actual answer.
        code_pattern = r'\[refined_C#\](.*?)\[/refined_C#\]'
        code_matches = re.findall(code_pattern, output, re.DOTALL)
        is_code_okay = False
        if len(code_matches) == 2:
            refined_code = code_matches[1]
            is_code_okay = True
        is_summary_okay = False
        summary_pattern = r'\[code_changes\](.*?)\[/code_changes\]'
        summary_matches = re.findall(summary_pattern, output, re.DOTALL)
        if len(summary_matches) == 2:
            summary = summary_matches[1]
            is_summary_okay = True
        if is_code_okay and is_summary_okay:
            batch_dataset.append({'code': example["code"], 'refined code': refined_code, 'summary': summary})
            instance_number += 1
        no_of_retries += 1
        if (is_code_okay and is_summary_okay) or no_of_retries == 5:
            should_try = False
    if instance_number == BATCH_SIZE:
        if not is_dataset_created:
            # Materialize the buffered rows into a regular Dataset so the
            # batch can be pushed to the Hub.
            new_dataset = Dataset.from_generator(data_generator, gen_kwargs={"dataset": batch_dataset})
            new_dataset.push_to_hub('llama-3-1-refined-code')
            is_dataset_created = True
            print('Pushed data for first time')
        else:
            refined_code_dataset = load_dataset('llama-3-1-refined-code', split='train')
            # concatenate_datasets expects a list of datasets.
            new_dataset = concatenate_datasets([refined_code_dataset, Dataset.from_generator(data_generator, gen_kwargs={"dataset": batch_dataset})])
            new_dataset.push_to_hub('llama-3-1-refined-code')
            print('Pushed data again')
        instance_number = 0
        batch_dataset = []
        batches_pushed += 1
        # Stop after three pushed batches.
        if batches_pushed == 3:
            break
\ No newline at end of file
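A quick way to see why the extraction above expects exactly two matches: the instructions inside the prompt mention the tag pair once, and the model's completion supplies the second occurrence. A minimal sketch with toy strings (hypothetical sample text, no model needed):

import re

# Toy stand-in for prompt + completion; the real output is far longer.
output = (
    "Refined code should be enclosed with tags [refined_C#] and [/refined_C#].\n"
    "[refined_C#]\npublic class Employee { }\n[/refined_C#]"
)
matches = re.findall(r'\[refined_C#\](.*?)\[/refined_C#\]', output, re.DOTALL)
assert len(matches) == 2  # instruction mention + actual answer
print(matches[1].strip())  # -> public class Employee { }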
import re
import datetime

from datasets import load_dataset, Dataset, concatenate_datasets
from transformers import AutoTokenizer, AutoModelForCausalLM


def data_generator(dataset):
    # Yield the buffered rows so Dataset.from_generator can rebuild the batch.
    for row in dataset:
        yield row


SKIP_INDEX = 2000
print(f'Starting skipping of dataset at {datetime.datetime.now()}', flush=True)
original_dataset = load_dataset(
    "codeparrot/github-code",
    streaming=True,
    split="train",
    licenses=["mit", "isc"],
    languages=["C#"],
    filter_languages=True,
    trust_remote_code=True,
)
# Skip ahead in the stream so this run starts after the first SKIP_INDEX samples.
original_dataset = original_dataset.skip(SKIP_INDEX)
print(f'Skipped the dataset for {SKIP_INDEX} samples at {datetime.datetime.now()}', flush=True)

start_model_loading = datetime.datetime.now()
model_name = "Qwen/Qwen2.5-Coder-32B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",
)
end_model_loading = datetime.datetime.now()
print(f'Model: {model_name} loaded successfully in {(end_model_loading - start_model_loading).total_seconds()} seconds. Current Time: {datetime.datetime.now()}', flush=True)

BATCH_SIZE = 100
TOTAL_SAMPLES = 4500
NO_OF_BATCHES = TOTAL_SAMPLES // BATCH_SIZE
instance_number = 0
batch_number = 0
batch_dataset = []
is_dataset_created = False
DATASET_NAME = f'qwen-refined-code-{SKIP_INDEX}'
DATASET_NAME_WITH_USERNAME = f'atharva2721/{DATASET_NAME}'
MAX_MODEL_TOKENS = 32768

system_prompt = """
You are Qwen, created by Alibaba Cloud. You are a C# expert.
Your task is to refine the C# code enclosed within tags [C#] and [/C#].
Refined code should be enclosed with tags [refined_C#] and [/refined_C#]. It should only contain executable code and no additional text.
Summary of changes should be enclosed with [code_changes] and [/code_changes].
You do not do anything more than the user asks you to do.
You do not generate any additional text.
"""
for example in original_dataset:
    user_prompt = f"""
Refine C# Code Based on Clean Code and Design Principles.
The goal of this task is to improve the quality, readability, and maintainability of the provided C# code. Apply the following principles step by step, ensuring that the resulting code is clean, modular, and adheres to object-oriented design best practices.
You have been provided with the principles and some examples wrapped inside [example_code] and [/example_code] to illustrate their meaning. Study them, then refine the code.
1. Class Naming
a. Use PascalCase for all class names (e.g., Invoice, Employee).
b. Ensure class names are logical and represent a clear purpose:
- A class name should be a noun that describes its role or entity.
- Avoid vague or overly generic names like ManagerClass. Instead, use meaningful names like InvoiceManager or PayrollCalculator.
- Ensure the name reflects what the class does or represents.
Example:
Before:
[example_code]
public class ManagerClass
{{
public void ManageInvoice()
{{
Console.WriteLine("Managing invoices..");
}}
}}
[/example_code]
After:
[example_code]
public class InvoiceManager
{{
public void Manage()
{{
Console.WriteLine("Managing invoices...");
}}
}}
[/example_code]
2. Property Naming
a. Public Properties:
- Use PascalCase (e.g., FirstName, Salary).
- Ensure names are concise yet descriptive. Avoid redundant prefixes that repeat the class context.
- Example: In a class Employee, name a property Id, not EmpId, since the class name already provides context.
b. Private Fields:
- Use a leading underscore `_` followed by camelCase (e.g., _firstName, _salary).
c. Logical Naming:
- Ensure property names clearly describe what they hold.
- Avoid abbreviations unless widely understood (e.g., use DateOfBirth, not DOB).
Example:
Before:
[example_code]
public class Employee
{{
public string empName {{ get; set; }}
private int emp_age;
}}
[/example_code]
After:
[example_code]
public class Employee
{{
public string Name {{ get; set; }}
private int _age;
public void SetAge(int age)
{{
_age = age;
}}
public int GetAge()
{{
return _age;
}}
}}
[/example_code]
3. Object Naming
a. Use expressive names for instantiated objects to make the code self-explanatory:
- Example: Employee employee is more meaningful than Employee e.
b. Ensure names reflect their role or purpose in the code:
- Example: If an object calculates totals, name it totalCalculator instead of calcObj.
Example:
Before:
[example_code]
Employee e = new Employee();
e.Name = "John Doe";
[/example_code]
After:
[example_code]
Employee employee = new Employee();
employee.Name = "John Doe";
[/example_code]
4. Method Naming
a. Use PascalCase for all method names (e.g., CalculateSalary, GetEmployeeDetails).
b. Ensure method names describe what the method does:
- Example: Use GenerateReport instead of GenReport or Process.
c. Avoid vague or overly generic names. Each method name should immediately convey its functionality.
Example:
Before:
[example_code]
public void GenReport()
{{
Console.WriteLine("Report Generated.");
}}
[/example_code]
After:
[example_code]
public void GenerateMonthlyReport()
{{
Console.WriteLine("Monthly report has been generated.");
}}
[/example_code]
5. Method Modularity
a. Ensure that methods follow the Single Responsibility Principle:
- Break down methods that perform multiple tasks into smaller, logically focused methods.
- Example: A method ProcessPayroll that calculates totals, generates reports, and updates the database should be split into:
1. CalculatePayrollTotals
2. GeneratePayrollReport
3. UpdatePayrollDatabase
- Use meaningful names for all extracted methods to reflect their specific task.
b. Consolidate repeated logic into reusable utility methods or helper functions.
Example:
Before:
[example_code]
public void ProcessPayroll()
{{
Calculate();
Generate();
Update();
}}
[/example_code]
After:
[example_code]
public void ProcessPayroll()
{{
CalculatePayrollTotals();
GeneratePayrollReport();
UpdatePayrollDatabase();
}}
private void CalculatePayrollTotals()
{{
Console.WriteLine("Calculating payroll totals...");
}}
private void GeneratePayrollReport()
{{
Console.WriteLine("Generating payroll report...");
}}
private void UpdatePayrollDatabase()
{{
Console.WriteLine("Updating payroll database...");
}}
[/example_code]
6. Single Responsibility Principle (SRP)
a. Ensure each class is responsible for only one distinct task or purpose:
- Example: If a class Invoice has methods for printing, calculating, and saving, split it into:
- Invoice (business logic, such as calculating totals).
- InvoicePrinter (handles formatting and output).
- InvoiceRepository (handles database operations).
b. Make classes precise and cohesive:
- If a class like Employee has methods like ApproveTimeOff, consider whether subclasses such as Manager or Intern would better represent specialized roles.
- Use inheritance to maintain logical separation of behavior.
Example:
Before:
[example_code]
public class Employee
{{
public void ApplyForVacation(){{/* ... */}}
public void ApproveTimeOff() {{ /* ... */ }}
}}
public static void Main(string[] args){{
Employee intern = new Employee();
Employee manager = new Manager();
intern.ApplyForVacation();
manager.ApproveTimeOff();
}}
[/example_code]
After:
[example_code]
public class Employee
{{
public void ApplyForVacation(){{/* ... */}}
}}
public class Intern: Employee
{{
}}
public class Manager: Employee
{{
public void ApproveTimeOff() {{ /* ... */ }}
}}
public static void Main(string[] args)
{{
Intern intern = new Intern();
Manager manager = new Manager();
intern.ApplyForVacation();
manager.ApproveTimeOff();
}}
[/example_code]
7. Code Clean-Up
a. Remove unused imports, variables, and comments to reduce clutter and improve readability.
b. Ensure the code is free of dead or redundant logic.
Example:
Before:
[example_code]
using System;
using System.Collections.Generic;
// Unused import
using System.Linq;
public class Employee
{{
public string Name {{ get; set; }}
// Commented-out code
// public int Age {{ get; set; }}
}}
[/example_code]
After:
[example_code]
using System;
public class Employee
{{
public string Name {{ get; set; }}
}}
[/example_code]
End Goal
The refined code should:
1. Adhere to C# naming conventions and clean code principles.
2. Be modular and easy to maintain, with a clear separation of concerns.
3. Follow the Single Responsibility Principle, ensuring each class and method has a well-defined purpose.
4. Be expressive, making it easy for any developer to understand the code's intent at a glance.
[C#]
{example["code"]}
[/C#]
"""
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    no_of_input_tokens = model_inputs.input_ids.shape[1]
    max_output_tokens = MAX_MODEL_TOKENS - no_of_input_tokens
    no_of_retries = 0
    should_try = True
    # Skip samples whose prompt leaves less generation budget than its own length.
    if max_output_tokens < no_of_input_tokens:
        print(f'Number of input tokens is very large [{no_of_input_tokens}]. Skipping this code refinement for {example["path"]}', flush=True)
        should_try = False
    while should_try:
        try:
            generated_ids = model.generate(
                **model_inputs,
                max_new_tokens=max_output_tokens,
            )
            # Drop the prompt tokens so only the completion is decoded.
            generated_ids = [
                output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
            ]
            output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
            code_pattern = r'\[refined_C#\](.*?)\[/refined_C#\]'
            summary_pattern = r'\[code_changes\](.*?)\[/code_changes\]'
            code_matches = re.search(code_pattern, output, re.DOTALL)
            summary_matches = re.search(summary_pattern, output, re.DOTALL)
            if code_matches and summary_matches:
                refined_code = code_matches.group(1)
                summary = summary_matches.group(1)
                batch_dataset.append({'code': example["code"], 'refined code': refined_code, 'summary': summary})
                instance_number += 1
                should_try = False
            no_of_retries += 1
            if no_of_retries == 3:
                print(f'Could not clean the code. The final try output is: {output}', flush=True)
                should_try = False
        except Exception as error:
            print(f'Error is: {error}', flush=True)
            # Count failed attempts as retries too, so a persistent error
            # cannot loop forever.
            no_of_retries += 1
            if no_of_retries == 3:
                should_try = False
    if instance_number == BATCH_SIZE:
        try:
            if not is_dataset_created:
                new_dataset = Dataset.from_generator(data_generator, gen_kwargs={"dataset": batch_dataset})
                new_dataset.push_to_hub(DATASET_NAME)
                is_dataset_created = True
            else:
                # Append the new batch to the copy already on the Hub.
                refined_code_dataset = load_dataset(DATASET_NAME_WITH_USERNAME, split="train", keep_in_memory=False)
                new_dataset = concatenate_datasets([refined_code_dataset, Dataset.from_generator(data_generator, gen_kwargs={"dataset": batch_dataset})])
                new_dataset.push_to_hub(DATASET_NAME)
        except Exception as push_error:
            print(f'Error during push to hub: {push_error}', flush=True)
        instance_number = 0
        batch_dataset = []
        batch_number += 1
        print(f'Pushed batch number {batch_number} to hub. Current Time {datetime.datetime.now()}', flush=True)
        if batch_number == NO_OF_BATCHES:
            break
print('Dataset generation completed.')
\ No newline at end of file
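A sketch of the token-budget arithmetic used above, in plain Python with hypothetical token counts: the script reserves whatever is left of the 32,768-token context for generation and skips any sample whose prompt takes up more than half of the context.

MAX_MODEL_TOKENS = 32768

def output_budget(no_of_input_tokens):
    # Remaining context after the prompt; None means the sample is skipped.
    max_output_tokens = MAX_MODEL_TOKENS - no_of_input_tokens
    if max_output_tokens < no_of_input_tokens:  # prompt over half the context
        return None
    return max_output_tokens

assert output_budget(10000) == 22768  # plenty of room for the refined code
assert output_budget(20000) is None   # completion budget would be too small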
#!/usr/bin/zsh
### Add basic configuration for job
#SBATCH --job-name=dataset_generation
#SBATCH --output=logs/dataset_generation_%j.log
#SBATCH --error=logs/dataset_generation_error_%j.log
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=6
#SBATCH --gres=gpu:1
#SBATCH --time=20:00:00
###------------------------------------------------------------------------------------------------------------------------------
### Run the project in the work directory of the cluster (configure based on need!)
### RWTH File System : https://help.itc.rwth-aachen.de/en/service/rhr4fjjutttf/article/da307ec2c60940b29bd42ac483fc3ea7/
cd $HPCWORK
cd codebud/data_generation
###------------------------------------------------------------------------------------------------------------------------------
### JOB SCRIPT RUN
module load GCCcore/.13.2.0
module load Python/3.11.5
module load CUDA
source ../../venvs/codebud/bin/activate
echo $VIRTUAL_ENV
python --version
python main.py
module unload CUDA
module unload Python/3.11.5
deactivate
echo "Script ran successfully"
\ No newline at end of file
#!/usr/bin/zsh
### Add basic configuration for job
#SBATCH --job-name=dataset_generation
#SBATCH --output=logs/dataset_generation_%j.log
#SBATCH --error=logs/dataset_generation_error_%j.log
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
#SBATCH --gres=gpu:1
#SBATCH --time=2-15:00:00
###------------------------------------------------------------------------------------------------------------------------------
### Run the project in the work directory of the cluster (configure based on need!)
### RWTH File System : https://help.itc.rwth-aachen.de/en/service/rhr4fjjutttf/article/da307ec2c60940b29bd42ac483fc3ea7/
cd $HPCWORK
cd codebud/data_generation
###------------------------------------------------------------------------------------------------------------------------------
### JOB SCRIPT RUN
module load GCCcore/.13.2.0
module load Python/3.11.5
module load CUDA
source ../../venvs/codebud/bin/activate
echo $VIRTUAL_ENV
python --version
python main_qwen.py
module unload CUDA
module unload Python/3.11.5
deactivate
echo "Script ran successfully"
\ No newline at end of file
from datasets import load_dataset
from unsloth import FastLanguageModel


def formatting_prompts_func(examples):
    # Render each conversation into a flat training string using the
    # module-level tokenizer's chat template.
    convos = examples["conversations"]
    texts = [tokenizer.apply_chat_template(convo, tokenize=False, add_generation_prompt=False) for convo in convos]
    return {"text": texts}


def format_to_conversations(examples):
    # Turn (code, refined code, summary) rows into user/assistant message pairs.
    conversations = []
    codes = examples["code"]
    refined_codes = examples["refined code"]
    summaries = examples["summary"]
    for i in range(len(refined_codes)):
        user_content = f'''Refine the C# code enclosed within tags [C#] and [/C#].
[C#]
{codes[i]}
[/C#]
'''
        assistant_content = f'''
[refined_C#]
{refined_codes[i]}
[/refined_C#]
[code_changes]
{summaries[i]}
[/code_changes]
'''
        conversation = [
            {'content': user_content, 'role': 'user'},
            {'content': assistant_content, 'role': 'assistant'},
        ]
        conversations.append(conversation)
    return {"conversations": conversations}


max_seq_length = 32768  # Choose any! We auto support RoPE Scaling internally!
dtype = None  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = True  # Use 4bit quantization to reduce memory usage. Can be False.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/Qwen2.5-Coder-7B-Instruct",
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)
dataset = load_dataset("atharva2721/refined-test-aggregated", split="train")
dataset = dataset.map(format_to_conversations, batched=True)
dataset = dataset.map(formatting_prompts_func, batched=True)
dataset.push_to_hub('standardized-refined-test-aggregated')
print('Dataset pushed to hub')
\ No newline at end of file
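A tiny smoke test for format_to_conversations that runs without loading any model (hypothetical one-row batch shaped like the Hub dataset's columns; paste it after the function definition):

toy_batch = {
    "code": ["class a { }"],
    "refined code": ["class A { }"],
    "summary": ["Renamed the class to PascalCase."],
}
convos = format_to_conversations(toy_batch)["conversations"]
assert convos[0][0]["role"] == "user"
assert "[refined_C#]" in convos[0][1]["content"]
print(convos[0][1]["content"])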
from datasets import load_dataset
from unsloth import FastLanguageModel
from unsloth.chat_templates import get_chat_template


def formatting_prompts_func(examples):
    # Render each conversation into a flat training string using the
    # module-level tokenizer's chat template.
    convos = examples["conversations"]
    texts = [tokenizer.apply_chat_template(convo, tokenize=False, add_generation_prompt=False) for convo in convos]
    return {"text": texts}


def format_to_conversations(examples):
    # Turn (code, refined code, summary) rows into user/assistant message pairs.
    conversations = []
    codes = examples["code"]
    refined_codes = examples["refined code"]
    summaries = examples["summary"]
    for i in range(len(refined_codes)):
        user_content = f'''Refine the C# code enclosed within tags [C#] and [/C#].
[C#]
{codes[i]}
[/C#]
'''
        assistant_content = f'''
[refined_C#]
{refined_codes[i]}
[/refined_C#]
[code_changes]
{summaries[i]}
[/code_changes]
'''
        conversation = [
            {'content': user_content, 'role': 'user'},
            {'content': assistant_content, 'role': 'assistant'},
        ]
        conversations.append(conversation)
    return {"conversations": conversations}


max_seq_length = 32768  # Choose any! We auto support RoPE Scaling internally!
dtype = None  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = True  # Use 4bit quantization to reduce memory usage. Can be False.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/Meta-Llama-3.1-8B-Instruct",
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)
tokenizer = get_chat_template(
    tokenizer,
    chat_template="llama-3.1",
)
dataset = load_dataset("atharva2721/refined-train-aggregated", split="train")
dataset = dataset.map(format_to_conversations, batched=True)
dataset = dataset.map(formatting_prompts_func, batched=True)
dataset.push_to_hub('llama-standardized-refined-test-aggregated')
print('Dataset pushed to hub')
\ No newline at end of file
#!/usr/bin/zsh
### Add basic configuration for job
#SBATCH --job-name=dataset_standardization
#SBATCH --output=logs/dataset_standardization_%j.log
#SBATCH --error=logs/dataset_standardization_error_%j.log
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=5
#SBATCH --gres=gpu:1
#SBATCH --time=00:30:00
###------------------------------------------------------------------------------------------------------------------------------
### Run the project in the work directory of the cluster (configure based on need!)
### RWTH File System : https://help.itc.rwth-aachen.de/en/service/rhr4fjjutttf/article/da307ec2c60940b29bd42ac483fc3ea7/
cd $HPCWORK
cd codebud/data_standardization
###------------------------------------------------------------------------------------------------------------------------------
### JOB SCRIPT RUN
module load GCCcore/.13.2.0
module load Python/3.11.5
module load CUDA
source ../../venvs/codebud/bin/activate
echo $VIRTUAL_ENV
python --version
python llama_dataset_standardization.py
module unload CUDA
module unload Python/3.11.5
deactivate
echo "Script ran successfully"
\ No newline at end of file
File added (x5: binary or empty files; no content shown in the diff)
# Absolute Grading: Outputs score of 1 to 5
import csv
from datasets import load_dataset
from prometheus_eval.vllm import VLLM
from prometheus_eval import PrometheusEval
from prometheus_eval.prompts import ABSOLUTE_PROMPT, SCORE_RUBRIC_TEMPLATE

model = VLLM(model="prometheus-eval/prometheus-7b-v2.0")
judge = PrometheusEval(model=model, absolute_grade_template=ABSOLUTE_PROMPT)
reference_dataset = load_dataset("atharva2721/llama_inference_output_complete", split="train", trust_remote_code=True)

# Run the judge ten times so per-sample scores can be averaged across passes.
for eval_no in range(10):
    with open(f'eval_reports/llama-finetuned-responses-evaluation-pass-{eval_no + 1}.csv', 'w') as f:
        fieldnames = ['original code', 'model inference', 'reference inference', 'evaluation feedback', 'score']
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        for example in reference_dataset:
            instruction = "A C# developer is struggling to develop high quality C# code. The developer explains a high-quality C# program as a program that should adhere to proper naming conventions, maintain modularity for methods, and ensure that classes follow the Single Responsibility Principle. Additionally, the code should be clean, free from unnecessary comments, imports, and unused code. Classes and methods should be named using PascalCase and should have logical, meaningful names that clearly indicate their purpose. Similarly, object names should be intuitive and relevant to their function. Public properties should follow PascalCase naming, while private properties should be prefixed with an underscore (_) for clarity and distinction. Each method should be designed to perform a single logical task, ensuring better maintainability and readability. Code duplication should be avoided by utilizing reusable methods instead of repeating logic. Likewise, classes should encapsulate only related data and functions, focusing on a single responsibility. If a method within a class performs an unrelated function, it violates the Single Responsibility Principle. To maintain code cleanliness, unnecessary comments, unused imports, and redundant code should be removed. The codebase should remain efficient and streamlined by eliminating any unused variables or functions. Following these principles ensures that the code is well-structured, readable, maintainable, and efficient. It is also important that these principles are applied only where applicable in the code. They ask a model to generate the high-quality code."
            # No trailing commas here: they would silently turn these strings into tuples.
            response = f"{example['finetuned inference']}"
            reference_answer = f"{example['reference inference']}"
            rubric_data = {
                "criteria": "Does the model generate high quality C# code?",
                "score1_description": "The model neglects to incorporate the mentioned coding principles in the generated code. The code rather violates the coding principles mentioned above.",
                "score2_description": "The model intermittently understands the coding principles but fails to apply them to the program it generates.",
                "score3_description": "The model typically understands the high code-quality standards and attempts to apply them. However, it might miss applying some of the principles.",
                "score4_description": "The model consistently understands the stated coding principles but still sporadically misses them while generating the C# code.",
                "score5_description": "The model excels in understanding the coding principles and persistently applies them to the code it generates. The final code reflects all applicable principles wherever necessary."
            }
            score_rubric = SCORE_RUBRIC_TEMPLATE.format(**rubric_data)
            feedback, score = judge.single_absolute_grade(
                instruction=instruction,
                response=response,
                rubric=score_rubric,
                reference_answer=reference_answer,
            )
            print(f'Score: {score}')
            writer.writerow({'original code': example['code'], 'model inference': example['finetuned inference'], 'reference inference': example['reference inference'], 'evaluation feedback': feedback, 'score': score})
    print(f'Evaluation {eval_no + 1} done.')
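A possible follow-up, sketched under the assumption that all ten per-pass CSVs were written to eval_reports/ as above: average the scores across passes to get a more stable per-model number.

import glob
import pandas as pd

# Hypothetical aggregation over the per-pass reports written above.
paths = sorted(glob.glob('eval_reports/llama-finetuned-responses-evaluation-pass-*.csv'))
scores = pd.concat([pd.read_csv(path) for path in paths], ignore_index=True)['score']
print(f'Mean score over {len(paths)} passes: {scores.mean():.3f} (n={len(scores)})')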
# Relative Grading: Outputs A or B
from datasets import load_dataset
from prometheus_eval.vllm import VLLM
from prometheus_eval import PrometheusEval
from prometheus_eval.prompts import RELATIVE_PROMPT

model = VLLM(model="prometheus-eval/prometheus-7b-v2.0")
judge = PrometheusEval(model=model, relative_grade_template=RELATIVE_PROMPT)
reference_dataset = load_dataset("atharva2721/qwen_inference_output_complete", split="train", trust_remote_code=True)

for example in reference_dataset:
    data = {
        "instruction": "A couple of senior C# developers are arguing about the quality of a C# program. A high-quality C# program should adhere to proper naming conventions, maintain modularity, and ensure that classes follow the Single Responsibility Principle. Additionally, the code should be clean, free from unnecessary comments, imports, and unused code. Classes and methods should be named using PascalCase and should have logical, meaningful names that clearly indicate their purpose. Similarly, object names should be intuitive and relevant to their function. Public properties should follow PascalCase naming, while private properties should be prefixed with an underscore (_) for clarity and distinction. Each method should be designed to perform a single logical task, ensuring better maintainability and readability. Code duplication should be avoided by utilizing reusable methods instead of repeating logic. Likewise, classes should encapsulate only related data and functions, focusing on a single responsibility. If a method within a class performs an unrelated function, it violates the Single Responsibility Principle. To maintain code cleanliness, unnecessary comments, unused imports, and redundant code should be removed. The codebase should remain efficient and streamlined by eliminating any unused variables or functions. Following these principles ensures that the code is well-structured, readable, maintainable, and efficient. How should the developers arguing about quality choose the better code?",
        "response_A": f"{example['finetuned inference']}",
        "response_B": f"{example['base inference']}",
        "reference_answer": f"{example['reference inference']}",
        "rubric": "Is the developed C# program of high quality?"
    }
    feedback, score = judge.single_relative_grade(**data)
    print("Feedback:", feedback)
    print("Score:", score)

# Sample output (this is the prometheus-eval documentation example; note it
# refers to a different task, not this C# evaluation):
# Feedback: Both Response A and Response B correctly identify economic troubles and overreliance on slave labor as significant contributing factors to the fall of the Roman Empire. However, Response B is more effective in presenting the historian's argument due to its inclusion of scholarly sources to back up its claims. Specifically, it references works by Harper, Scheidel, and Temin, which adds credibility to the historian's argument and aligns well with the score rubric's emphasis on evidence and citations. While Response A provides a similar argument, it lacks any form of citations or attributions, which lessens the strength of the evidence presented. Therefore, based on the provided rubric, Response B is the superior response due to its use of scholarly evidence to support the historian's claims.
# Score: B
\ No newline at end of file
import csv
from datasets import load_dataset
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity


def create_dataframe(matrix, tokens):
    # Label rows as doc_1, doc_2, ... for readable similarity matrices.
    doc_names = [f"doc_{i + 1}" for i in range(len(matrix))]
    return pd.DataFrame(data=matrix, index=doc_names, columns=tokens)


reference_dataset = load_dataset("atharva2721/llama_inference_output_complete", split="train", trust_remote_code=True)
code_number = 0

with open('eval_reports/cosine-similarity-llama-finetuned-responses-evaluation-pass.csv', 'w') as f:
    fieldnames = ['code number', 'cosine similarity score (CountVectorizer)', 'cosine similarity score (TfidfVectorizer)']
    writer = csv.DictWriter(f, fieldnames=fieldnames)
    writer.writeheader()
    for example in reference_dataset:
        code_number += 1
        doc_1 = f"{example['finetuned inference']}"
        doc_2 = f"{example['reference inference']}"
        data = [doc_1, doc_2]

        # Similarity over raw term counts.
        count_vectorizer = CountVectorizer()
        vector_matrix_count = count_vectorizer.fit_transform(data)
        cosine_similarity_matrix_count = cosine_similarity(vector_matrix_count)
        df_cosine_count = create_dataframe(cosine_similarity_matrix_count, ["doc_1", "doc_2"])
        print(f"Cosine Similarity (CountVectorizer) for code number {code_number} :: {df_cosine_count}")

        # Similarity over TF-IDF-weighted counts, which downweights common tokens.
        tfidf_vectorizer = TfidfVectorizer()
        vector_matrix_tfidf = tfidf_vectorizer.fit_transform(data)
        cosine_similarity_matrix_tfidf = cosine_similarity(vector_matrix_tfidf)
        df_cosine_tfidf = create_dataframe(cosine_similarity_matrix_tfidf, ["doc_1", "doc_2"])
        print(f"Cosine Similarity (TfidfVectorizer) for code number {code_number} :: {df_cosine_tfidf}")

        writer.writerow({'code number': code_number, 'cosine similarity score (CountVectorizer)': df_cosine_count.loc["doc_1", "doc_2"], 'cosine similarity score (TfidfVectorizer)': df_cosine_tfidf.loc["doc_1", "doc_2"]})

print('Calculated cosine similarity')
code number,cosine similarity score (CountVectorizer),cosine similarity score (TfidfVectorizer)
1,0.6635617048365998,0.5175115774478412
2,0.4624659777674932,0.34891617720323037
3,0.48275302027392886,0.3365447884706486
4,0.2631904377971031,0.16992238879945065
5,0.7471964071392845,0.627610109269567
6,0.4319530092924749,0.3618426077304704
7,0.7468146161930583,0.6610217990595632
8,0.5272497301964962,0.38113553688994545
9,0.7817918835970099,0.6818448573992538
10,0.8698947202590355,0.7846867347988806
11,0.9574096853400555,0.9400233869493119
12,0.9707335044986306,0.9497438105627173
13,0.5930406760107875,0.4676494580754359
14,0.6161965943865132,0.48708540828517854
15,0.6639315337465327,0.5260893348304845
16,0.7791222395271432,0.6749531762482429
17,0.7953074226315868,0.7112337750835673
18,0.8299976529461417,0.7560245778382114
19,0.9823737260340384,0.9794154180740418
20,0.45217842491591503,0.3509552632035376
21,0.9125391570657638,0.8605622455591271
22,0.8360323840871062,0.7827823535542533
23,0.5901647500688745,0.46889884722309216
24,0.5943801378652676,0.466758704481132
25,0.6945265082348545,0.5670149128946722
26,0.2635508178956107,0.19112103528237545
27,0.5338577136668144,0.431153814049387
28,0.6829688998461585,0.5582812276374248
29,0.7463347491223283,0.6704908954645881
30,0.806146942142962,0.7352308286364446
31,0.6820364658025417,0.5964265397975019
32,0.7456301140290179,0.657253459507972
33,0.7616899366015265,0.6535414330250102
34,0.9596720522286895,0.9370120550250337
35,0.691962016974302,0.5877266899667245
code number,cosine similarity score (CountVectorizer),cosine similarity score (TfidfVectorizer)
1,0.9999999999999991,0.9999999999999997
2,0.6359347937451568,0.5433685643841156
3,0.8178403591911844,0.6993118470199522
4,0.5100695466415388,0.38848676215325034
5,1.0000000000000004,1.0000000000000002
6,0.9454031224825896,0.8982825026290794
7,0.9711669972861515,0.9458347391544176
8,0.8015273407421913,0.701077861299189
9,0.9213443080915618,0.9017210224593273
10,0.9826348693449453,0.9787346762841795
11,0.938690304935082,0.9016529199745136
12,0.901410335203223,0.8572330905046887
13,0.8058475314241763,0.6869497614360619
14,0.8574002187631717,0.7643391472385281
15,1.0000000000000007,1.0
16,0.8789826663110892,0.831075438865755
17,0.9154145260165719,0.8636550075200284
18,0.7264831572567789,0.6175047667368999
19,0.9528104465265231,0.9214158903600982
20,0.5520668413544675,0.3914624758911717
21,0.9534883720930234,0.9121111022894451
22,0.7647829247492465,0.6980874038556475
23,0.9933025063732357,0.9872323358593544
24,0.9999999999999998,1.0000000000000002
25,0.9744805811819828,0.9690013158466692
26,0.953125,0.9114544897450435
27,0.9090909090909088,0.8482830591874028
28,0.9946034541093071,0.9907193189663245
29,0.9543229514680195,0.9227020932364982
30,0.9468506844943757,0.918617283108967
31,0.9253319320603061,0.89851723509004
32,0.9388673189171574,0.8997300610464697
33,0.9223938808289259,0.9005727600596343
34,0.9564600005663547,0.94182527428758
35,0.9686292243461511,0.9429699330715006
code number,cosine similarity score (CountVectorizer),cosine similarity score (TfidfVectorizer)
1,0.9829590656993242,0.9669012267796804
2,0.9905181941480535,0.9881702658468443
3,0.9579932440261575,0.9360869024484828
4,0.6642038652847895,0.5672351996087892
5,0.5179533782291896,0.40291367040422665
6,0.4319312868010303,0.31829582177823007
7,0.5896482075326526,0.4382943894216728
8,0.35163633764743635,0.22466601519503562
9,0.967150520150034,0.9492452906461634
10,0.9859071344374633,0.9803953956218493
11,0.9529220779220777,0.9110880036477345
12,0.7519060529531866,0.6711803046733371
13,0.9562166326624674,0.9303368295815007
14,0.5721208860316485,0.4145838273203879
15,0.6168762782575283,0.5083145112170189
16,0.8044818277847895,0.6756519867952987
17,0.995727575352705,0.9918137796456394
18,0.8690955388663847,0.7718155973688722
19,0.8406891761549491,0.7652744089784883
20,0.8640699102191518,0.801415428263494
21,0.9833092087209556,0.9689895110588486
22,0.9515953832852092,0.9367795707002863
23,0.49169966286739647,0.38166351453176733
24,0.9635293180817429,0.9304757734604177
25,0.9461817959669848,0.9239677244327656
26,0.404623693746184,0.2906781234742853
27,0.8574719598590294,0.809708060160666
28,0.7348469228349535,0.6306777057184418
29,0.6929202119807368,0.5853719913353513
30,0.34829888285959554,0.23780907592894035
31,0.9565692372573213,0.9288467058303025
32,0.969166010214795,0.9460906606477772
33,0.9122455416873133,0.8650673921100038
34,0.9676633447888103,0.9461259532586188
35,0.6259698357701571,0.5026188038264596
36,0.9899486164622846,0.9874566875086238
37,0.8963720835433008,0.8206547886714061
38,0.828618615335137,0.7686977099905108
39,0.8960787490449202,0.8266887304757253
40,0.46483484010068415,0.3252126926673298
41,0.9424949289491302,0.9112053490715386
42,0.7729715070359924,0.6574654448019361
43,0.7008262319629648,0.5834363407606968
44,0.799168959218739,0.7918816341208937
45,0.33887715959556886,0.24967743734456235
46,0.655671427128544,0.5336658704807009
47,0.9442278164425926,0.9082977098881965
48,0.9985945179280502,0.9972293990417763
49,0.7884449104233787,0.6770542320965478
50,0.7467528773091743,0.6305537845363173
51,0.7155668192482633,0.5916404085815423
52,0.8214486599543218,0.7260953596418699
53,0.9644917517533788,0.9542247492020246
54,0.5612091839337283,0.44493742743571235
55,0.8885542168674707,0.8014403316244239
56,0.9656040244825144,0.9441698275152057
57,0.9852675084097874,0.9805833006882317
58,0.9130854212497931,0.8463744693755527
59,0.8314876315471741,0.7838899214351002
60,0.46245444833152677,0.3421745944258118
61,0.9920588282895633,0.989943710795231
62,0.8556199275317793,0.762263661274845
63,0.5120537441696312,0.372159750190583
64,0.27383278566830965,0.1987313324015373
65,0.8796894875233187,0.8670708710428576
66,0.9285255785984343,0.8830059870247231
67,0.7206804901169632,0.5972163537537634
68,0.5521610645599694,0.41810575047485454
69,0.5318442867591466,0.44176882006729185
70,0.7980378792007151,0.7028409227791071
71,0.9067452960520181,0.864664550178747
72,0.9490407233483938,0.916263112180336
73,0.8657059739530922,0.825247299380852
74,0.7419604810332548,0.6480269660878143
75,0.9334226886211596,0.8922630189032867
76,0.6763264125613534,0.5165066071060762
77,0.005713912572995367,0.00406552862555413
78,0.8409727994175388,0.764123742925926
79,0.8244661756273934,0.7418756052644963
80,0.7499540005176346,0.6517902018541523
81,0.9227053833742914,0.9026126945641325
82,0.9279609284904535,0.8693826593938924
83,0.8485281374238571,0.7478786017823759
84,0.7617025894181404,0.6506611788881385
85,0.9930317775253296,0.9891589943448454
86,0.9620736927999172,0.938520117613679
87,0.41478067789217016,0.31077653333922794
88,0.7311587354999313,0.6181338057867134
89,0.41045953246761585,0.2882055468618911
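The three tables above share one header row, so they can be split apart and summarized in a few lines (a sketch; the file path is hypothetical and assumes the tables were saved as one concatenated CSV):

import io
import pandas as pd

raw_text = open('eval_reports/concatenated_similarity_tables.csv').read()  # hypothetical path
header = 'code number,cosine similarity score (CountVectorizer),cosine similarity score (TfidfVectorizer)'
tables = [pd.read_csv(io.StringIO(header + chunk)) for chunk in raw_text.split(header)[1:]]
for i, table in enumerate(tables, start=1):
    print(f'Table {i}: n={len(table)}, mean Count={table.iloc[:, 1].mean():.3f}, mean TF-IDF={table.iloc[:, 2].mean():.3f}')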