from argparse import ArgumentParser

import torch
from torch.utils.data import DataLoader

from dataset_loaders import dataset_called
from trainers import OptunaTrainer, Trainer, FullTrainer


def parse_arguments():
    parser = ArgumentParser(prog="NeuralCompression")
    parser.add_argument("--debug", "-d", action="store_true",
                        help="Enable debug mode: smaller datasets, more information")
    parser.add_argument("--verbose", "-v", action="store_true",
                        help="Enable verbose mode")

    # Shared argument groups, attached as parents to the relevant subcommands below.
    dataparser = ArgumentParser(add_help=False)
    dataparser.add_argument("--data-root", type=str)
    dataparser.add_argument("--dataset", choices=dataset_called.keys(), required=True)

    modelparser = ArgumentParser(add_help=False)
    modelparser.add_argument("--model-path", type=str,
                             help="Path to the model to load/save")

    fileparser = ArgumentParser(add_help=False)
    fileparser.add_argument("--input-file", "-i", type=str)
    fileparser.add_argument("--output-file", "-o", type=str)

    subparsers = parser.add_subparsers(dest="mode", required=True, help="Mode to run in")
    # TODO
    subparsers.add_parser("fetch", parents=[dataparser],
                          help="Only fetch the dataset, then exit")
    train_parser = subparsers.add_parser("train", parents=[dataparser, modelparser])
    train_parser.add_argument("--method", choices=["optuna", "full"], required=True,
                              help="Method to use for training")
    # TODO
    subparsers.add_parser("compress", parents=[modelparser, fileparser])
    # TODO
    subparsers.add_parser("decompress", parents=[modelparser, fileparser])

    return parser.parse_args()


def main():
    # Hyperparameters
    BATCH_SIZE = 2
    N_EPOCHS = 200
    context_length = 128  # TODO currently unused

    args = parse_arguments()

    # Prefer an accelerator (CUDA, MPS, ...) when one is available.
    if torch.accelerator.is_available():
        DEVICE = torch.accelerator.current_accelerator().type
    else:
        DEVICE = "cpu"
    print(f"Running on device: {DEVICE}...")

    if args.mode in ("compress", "decompress"):
        # TODO These subcommands only have parser stubs so far; bail out before
        # touching dataset arguments they do not define.
        raise NotImplementedError(f"Mode '{args.mode}' is not implemented yet")

    dataset_common_args = {
        'root': args.data_root,
        'transform': lambda x: x.to(DEVICE),
    }
    if args.debug:
        # Keep debug runs fast by truncating the dataset.
        dataset_common_args['size'] = 2**10

    print("Loading in the dataset...")
    if args.dataset in dataset_called:
        training_set = dataset_called[args.dataset](split='train', **dataset_common_args)
        validate_set = dataset_called[args.dataset](split='validation', **dataset_common_args)
    else:
        # TODO Allow importing arbitrary files
        raise NotImplementedError("Importing external datasets is not implemented yet")

    if args.mode == 'fetch':
        # TODO Move to earlier in the chain, because by now everything has been
        # converted into tensors as well?
        return

    print(f"Training set size = {len(training_set)}, validation set size = {len(validate_set)}")

    training_loader = DataLoader(training_set, batch_size=BATCH_SIZE, shuffle=True)
    validation_loader = DataLoader(validate_set, batch_size=BATCH_SIZE, shuffle=False)

    loss_fn = torch.nn.CrossEntropyLoss()

    model = None
    if args.model_path is not None:
        print("Loading the model...")
        # weights_only=False because the file stores a full pickled model, not
        # just a state_dict (needed since PyTorch 2.6 changed the default).
        model = torch.load(args.model_path, weights_only=False)

    if args.method == "optuna":
        # Cap the number of Optuna trials in debug mode.
        trainer: Trainer = OptunaTrainer(n_trials=3 if args.debug else None)
    else:
        trainer = FullTrainer()

    print("Training...")
    trainer.execute(
        model=model,
        train_loader=training_loader,
        validation_loader=validation_loader,
        loss_fn=loss_fn,
        n_epochs=N_EPOCHS,
        device=DEVICE,
    )


if __name__ == "__main__":
    main()
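
# A minimal usage sketch, assuming this file is saved as main.py and that
# dataset_loaders registers a dataset under the (hypothetical) name "enwik8";
# the available names come from dataset_called.keys():
#
#   python main.py fetch --dataset enwik8 --data-root ./data
#   python main.py --debug train --dataset enwik8 --method optuna
#   python main.py train --dataset enwik8 --method full --model-path model.pt
#
# Note that --debug belongs to the top-level parser, so it must precede the
# subcommand name.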