chore: Start compression path

Tibo De Peuter 2025-12-05 12:38:10 +01:00
parent f32f4678e1
commit 9dff723bba
Signed by: tdpeuter
GPG key ID: 38297DE43F75FFE2
4 changed files with 147 additions and 94 deletions

src/train.py (new file)

@@ -0,0 +1,61 @@
import torch
from torch.utils.data import DataLoader

from src.dataset_loaders import dataset_called
from src.trainers import FullTrainer, OptunaTrainer, Trainer


def train(
    device,
    dataset: str,
    data_root: str,
    n_trials: int | None = None,
    size: int | None = None,
    mode: str = "train",
    method: str = 'optuna',
    model_path: str | None = None,
):
    batch_size = 2

    # Every sample is moved to the target device as part of the dataset transform.
    dataset_common_args = {
        'root': data_root,
        'transform': lambda x: x.to(device),
    }
    if size:
        dataset_common_args['size'] = size

    print("Loading in the dataset...")
    if dataset in dataset_called:
        training_set = dataset_called[dataset](split='train', **dataset_common_args)
        validate_set = dataset_called[dataset](split='validation', **dataset_common_args)
    else:
        # TODO Allow importing arbitrary files
        raise NotImplementedError("Importing external datasets is not implemented yet")

    if mode == 'fetch':
        # TODO Move this earlier in the chain; at this point everything has already
        # been converted to tensors as well.
        exit(0)

    print(f"Training set size = {len(training_set)}, validation set size = {len(validate_set)}")

    training_loader = DataLoader(training_set, batch_size=batch_size, shuffle=True)
    validation_loader = DataLoader(validate_set, batch_size=batch_size, shuffle=False)

    loss_fn = torch.nn.CrossEntropyLoss()

    # Optionally resume from a previously saved model; otherwise the trainer
    # is expected to construct one itself.
    model = None
    if model_path is not None:
        print("Loading the model...")
        model = torch.load(model_path)

    trainer: Trainer = OptunaTrainer(n_trials=n_trials) if method == 'optuna' else FullTrainer()

    print("Training")
    trainer.execute(
        model=model,
        train_loader=training_loader,
        validation_loader=validation_loader,
        loss_fn=loss_fn,
        n_epochs=200,
        device=device,
    )
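
For reference, a minimal sketch of how this entry point could be driven, e.g. from a CLI wrapper. The device selection, the 'mnist' dataset key, and the './data' root are illustrative assumptions, not part of this commit:

import torch

from src.train import train

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# 'mnist' is a hypothetical key in dataset_called; substitute any registered dataset.
train(
    device=device,
    dataset='mnist',
    data_root='./data',
    n_trials=20,
    method='optuna',
)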