
# Makefile for TextFooler fine-tuning and adversarial-attack experiments.
#
# NOTE(review): this file was previously committed fully commented out,
# leaving `make` with no targets at all. Restored as an active Makefile —
# confirm the disable was not intentional before merging.

REPO_NAME := textfooler
VENV := ../envs/$(REPO_NAME)
# Resolve pyenv's python3 once at parse time (:= so the shell-out runs once).
PYENV_PYTHON := $(shell pyenv which python3)
PYTHON := $(VENV)/bin/python
PIP := $(VENV)/bin/pip

# Token-truncation length shared by all training and attack runs.
MAX_SEQ_LEN := 256

# None of these targets produce a file named after themselves.
.PHONY: set_pyenv setup one attack ft ft_head_only lora eval_model \
	attack_with_budget_ft attack_with_budget_ft_head_only \
	attack_with_budget_lora check

# Remove a half-written target file if its recipe fails mid-way.
.DELETE_ON_ERROR:

# Set pyenv local version (writes .python-version file).
set_pyenv:
	pyenv local 3.11.0

# Create virtualenv using pyenv's python3 (idempotent), then sync deps.
setup: set_pyenv
	if [ ! -d "$(VENV)" ]; then \
		$(PYENV_PYTHON) -m venv $(VENV); \
	fi
	$(PIP) install --upgrade -r requirements.txt

# Convenience aggregates: `make one` trains, `make attack` runs all attacks.
one: ft lora
attack: attack_with_budget_ft attack_with_budget_ft_head_only attack_with_budget_lora

# models tested: bert-base-uncased, roberta-base, gpt2, mistralai/Mistral-7B-v0.1
# datasets tested: ag_news, yelp_polarity

# Full fine-tune (all weights trainable).
ft:
	$(PYTHON) ./nlp_training/finetune.py \
	--dataset_name ag_news \
	--model_name roberta-base \
	--output_dir ../data_files/TextFooler/saved_model/ft \
	--num_epochs 1 \
	--batch_size 16 \
	--learning_rate 2e-5 \
	--seed 42 \
	--max_seq_length $(MAX_SEQ_LEN)

# Classifier-head-only fine-tune (base encoder frozen via --freeze_base).
ft_head_only:
	$(PYTHON) ./nlp_training/finetune.py \
	--dataset_name ag_news \
	--model_name bert-base-uncased \
	--output_dir ../data_files/TextFooler/saved_model/ft_head_only \
	--num_epochs 5 \
	--batch_size 16 \
	--learning_rate 2e-5 \
	--seed 42 \
	--freeze_base \
	--max_seq_length $(MAX_SEQ_LEN)

# LoRA fine-tune (rank 8, alpha 16).
lora:
	$(PYTHON) ./nlp_training/lora.py \
	--dataset_name ag_news \
	--model_name bert-base-uncased \
	--output_dir ../data_files/TextFooler/saved_model/lora \
	--num_epochs 1 \
	--batch_size 16 \
	--learning_rate 2e-5 \
	--lora_r 8 \
	--lora_alpha 16 \
	--seed 42 \
	--max_seq_length $(MAX_SEQ_LEN)

# Accuracy evaluation of a saved model.
eval_model:
	$(PYTHON) ./eval_helper.py

# TextFooler attack on the fully fine-tuned model (budget: 10 word changes).
# NOTE(review): the saved-model path says model_gpt2_... while the ft target
# above trains roberta-base — verify this path points at the intended run.
# NOTE(review): unlike attack_with_budget_lora, no --output_dir is passed
# here — confirm the script's default output location is intended.
attack_with_budget_ft:
	$(PYTHON) attack_classification_with_budget.py \
	--dataset_name ag_news \
	--max_attack_changes 10 \
	--target_model seq_classifier \
	--attack_sample_size 200 \
	--target_model_path ../data_files/TextFooler/saved_model/ft/model_gpt2_ds_ag_news_train_epoch_1_run_20250717_224152_seed_42/final_model \
	--counter_fitting_embeddings_path ../data_files/TextFooler/embeddings/counter-fitted-vectors.txt \
	--counter_fitting_cos_sim_path ../data_files/TextFooler/vocab_cosine_sim/ag_cosine_sim_ag.npy \
	--USE_cache_path ../data_files/TextFooler/USE \
	--device mps \
	--seed 42 \
	--use_amp \
	--batch_size 16 \
	--max_seq_length $(MAX_SEQ_LEN)

# Attack on the head-only fine-tuned model (budget: 5 word changes).
# NOTE(review): no --output_dir and no --attack_sample_size here, unlike the
# sibling targets — confirm the script defaults are what you want.
attack_with_budget_ft_head_only:
	$(PYTHON) attack_classification_with_budget.py \
	--dataset_name ag_news \
	--max_attack_changes 5 \
	--target_model seq_classifier \
	--target_model_path ../data_files/from_dataland/saved_model/ft_head_only/model_gpt2_ds_ag_news_train_epoch_5run_20250717_085117_seed_42/final_model \
	--counter_fitting_embeddings_path ../data_files/TextFooler/embeddings/counter-fitted-vectors.txt \
	--counter_fitting_cos_sim_path ../data_files/TextFooler/vocab_cosine_sim/ag_cosine_sim_ag.npy \
	--USE_cache_path ../data_files/TextFooler/USE \
	--device mps \
	--seed 42 \
	--use_amp \
	--batch_size 16 \
	--max_seq_length $(MAX_SEQ_LEN)

# Attack on the LoRA fine-tuned model (budget: 5 word changes).
attack_with_budget_lora:
	$(PYTHON) attack_classification_with_budget.py \
	--dataset_name ag_news \
	--max_attack_changes 5 \
	--target_model seq_classifier \
	--target_model_path ../data_files/from_dataland/saved_model/lora/model_gpt2_ds_ag_news_train_epoch_5run_20250716_235514_seed_42/final_model \
	--output_dir ../data_files/TextFooler/results/attack_bert_lora \
	--counter_fitting_embeddings_path ../data_files/TextFooler/embeddings/counter-fitted-vectors.txt \
	--counter_fitting_cos_sim_path ../data_files/TextFooler/vocab_cosine_sim/ag_cosine_sim_ag.npy \
	--USE_cache_path ../data_files/TextFooler/USE \
	--device mps \
	--seed 42 \
	--use_amp \
	--batch_size 16 \
	--max_seq_length $(MAX_SEQ_LEN)

# Sanity check: which interpreter will the recipes use?
check:
	@echo "Using Python from: $(PYTHON)"
	@$(PYTHON) --version