chore: Also add datapaths to job

Tibo De Peuter 2025-11-30 21:58:57 +01:00
parent 81c767371e
commit b74ae7083a
Signed by: tdpeuter
GPG key ID: 38297DE43F75FFE2
2 changed files with 86 additions and 45 deletions


@@ -7,58 +7,95 @@ from torch.utils.data import DataLoader
 from dataset_loaders import dataset_called
 from trainers import OptunaTrainer, Trainer, FullTrainer
 
-BATCH_SIZE = 64
-
-if torch.accelerator.is_available():
-    DEVICE = torch.accelerator.current_accelerator().type
-else:
-    DEVICE = "cpu"
-print(f"Running on device: {DEVICE}...")
-
-# hyper parameters
-context_length = 128
-
-parser = ArgumentParser()
-parser.add_argument("--method", choices=["optuna", "train"], required=True)
-parser.add_argument("--model-path", type=str, required=False)
-parser.add_argument_group("Data", "Data files or dataset to use")
-parser.add_argument("--data-root", type=str, required=False)
-parser.add_argument("dataset")
-args = parser.parse_args()
-
-print("Loading in the dataset...")
-if args.dataset in dataset_called:
-    dataset = dataset_called[args.dataset](root=args.data_root, transform=lambda x: x.to(DEVICE))
-else:
-    # TODO Allow to import arbitrary files
-    raise NotImplementedError(f"Importing external datasets is not implemented yet")
-
-dataset_length = len(dataset)
-print(f"Dataset size = {dataset_length}")
-training_size = ceil(0.8 * dataset_length)
-print(f"Training set size = {training_size}, Validation set size {dataset_length - training_size}")
-
-train_set, validate_set = torch.utils.data.random_split(dataset, [training_size, dataset_length - training_size])
-training_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
-validation_loader = DataLoader(validate_set, batch_size=BATCH_SIZE, shuffle=False)
-
-loss_fn = torch.nn.CrossEntropyLoss()
-
-model = None
-if args.model_path is not None:
-    print("Loading the model...")
-    model = torch.load(args.model_path)
-
-trainer: Trainer = OptunaTrainer() if args.method == "optuna" else FullTrainer()
-trainer.execute(
-    model=model,
-    train_loader=training_loader,
-    validation_loader=validation_loader,
-    loss_fn=loss_fn,
-    n_epochs=200,
-    device=DEVICE
-)
+
+def parse_arguments():
+    parser = ArgumentParser(prog="NeuralCompression")
+    parser.add_argument("--verbose", "-v", action="store_true", required=False,
+                        help="Enable verbose mode")
+
+    dataparser = ArgumentParser(add_help=False)
+    dataparser.add_argument("--data-root", type=str, required=False)
+    dataparser.add_argument("--dataset", choices=dataset_called.keys(), required=True)
+
+    modelparser = ArgumentParser(add_help=False)
+    modelparser.add_argument("--model-path", type=str, required=True,
+                             help="Path to the model to load/save")
+
+    fileparser = ArgumentParser(add_help=False)
+    fileparser.add_argument("--input-file", "-i", required=False, type=str)
+    fileparser.add_argument("--output-file", "-o", required=False, type=str)
+
+    subparsers = parser.add_subparsers(dest="mode", required=True,
+                                       help="Mode to run in")
+    # TODO
+    fetch_parser = subparsers.add_parser("fetch", parents=[dataparser],
+                                         help="Only fetch the dataset, then exit")
+    train_parser = subparsers.add_parser("train", parents=[dataparser, modelparser])
+    # TODO
+    compress_parser = subparsers.add_parser("compress", parents=[modelparser, fileparser])
+    # TODO
+    decompress_parser = subparsers.add_parser("decompress", parents=[modelparser, fileparser])
+
+    return parser.parse_args()
+
+
+def main():
+    BATCH_SIZE = 64
+    # hyper parameters
+    context_length = 128
+
+    args = parse_arguments()
+
+    if torch.accelerator.is_available():
+        DEVICE = torch.accelerator.current_accelerator().type
+    else:
+        DEVICE = "cpu"
+    print(f"Running on device: {DEVICE}...")
+
+    print("Loading in the dataset...")
+    if args.dataset in dataset_called:
+        dataset = dataset_called[args.dataset](root=args.data_root, transform=lambda x: x.to(DEVICE))
+    else:
+        # TODO Allow to import arbitrary files
+        raise NotImplementedError(f"Importing external datasets is not implemented yet")
+
+    dataset_length = len(dataset)
+    print(f"Dataset size = {dataset_length}")
+
+    if args.mode == 'fetch':
+        # TODO Move to earlier in chain, because now everything is converted into tensors as well?
+        exit(0)
+
+    training_size = ceil(0.8 * dataset_length)
+    print(f"Training set size = {training_size}, Validation set size {dataset_length - training_size}")
+
+    train_set, validate_set = torch.utils.data.random_split(dataset,
+                                                            [training_size, dataset_length - training_size])
+    training_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
+    validation_loader = DataLoader(validate_set, batch_size=BATCH_SIZE, shuffle=False)
+
+    loss_fn = torch.nn.CrossEntropyLoss()
+
+    model = None
+    if args.model_path is not None:
+        print("Loading the model...")
+        model = torch.load(args.model_path)
+
+    trainer: Trainer = OptunaTrainer() if args.method == "optuna" else FullTrainer()
+    trainer.execute(
+        model=model,
+        train_loader=training_loader,
+        validation_loader=validation_loader,
+        loss_fn=loss_fn,
+        n_epochs=200,
+        device=DEVICE
+    )
+
+
+if __name__ == "__main__":
+    main()
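With the parser refactored into subcommands, invocations change from "--method train" to a positional mode. A minimal sketch of the new command lines, using only the flags defined in parse_arguments() above (the dataset name and data root mirror the job script below; the model path is a hypothetical local example, and the sketch assumes the trainer-selection line is adapted to the new namespace, since the added code still reads args.method):

    # Only fetch the dataset, then exit (the 'fetch' mode)
    python main_cnn.py fetch --dataset=enwik9 --data-root="${VSC_DATA}/datasets"

    # Train; --model-path is required by the 'train' subparser
    python main_cnn.py train --dataset=enwik9 --data-root="${VSC_DATA}/datasets" \
        --model-path=./results/model.pt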


@@ -7,7 +7,9 @@
 CACHE_DIR="${VSC_SCRATCH}/.cache" # Directory to use as cache
 UV_DIR="${VSC_SCRATCH}/uv" # Directory to install packages
 #HF_DIR="${CACHE_DIR}/huggingface" # Directory to save models
+DATA_DIR="${VSC_DATA}/datasets"
+RESULTS_DIR="${VSC_DATA}/neural-compression/$( date +%Y%m%d-%H%M-%S%N)-results"
 
 module purge
 module load PyTorch-bundle/2.1.2-foss-2023a-CUDA-12.1.1
 
@@ -28,4 +30,6 @@ UV_PYTHON_INSTALL_DIR="${UV_DIR}/python"
 cd "${PBS_O_WORKDIR}/CNN-model"
 
-python main_cnn.py --method train
+python main_cnn.py train \
+    --dataset=enwik9 --data-root="${DATA_DIR}" \
+    --model-path="${RESULTS_DIR}/model.pt"
 
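The $( date +%Y%m%d-%H%M-%S%N) stamp in RESULTS_DIR expands to something like 20251130-2158-57123456789 (date, hour-minute, then seconds plus nanoseconds), so each submission writes into a fresh results directory and never overwrites an earlier model.pt. Nothing in the hunks shown creates that directory; unless an earlier, unshown part of the script does, a guard along these lines would presumably be needed before the model is saved:

    mkdir -p "${RESULTS_DIR}"  # hypothetical guard: saving model.pt fails if the directory is missing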