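"""Command-line entry point for NeuralCompression.

Modes: `fetch` downloads a dataset and exits, `train` fits a compression
model on it, and `compress`/`decompress` are declared but not implemented
yet (see the TODOs below).
"""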
from argparse import ArgumentParser
from math import ceil
import os

import torch
from torch.utils.data import DataLoader

# Local modules: dataset_called maps dataset names to dataset constructors,
# and the trainers implement the actual training loops.
from dataset_loaders import dataset_called
from trainers import FullTrainer, OptunaTrainer, Trainer


def parse_arguments():
    parser = ArgumentParser(prog="NeuralCompression")
    parser.add_argument("--verbose", "-v", action="store_true", required=False,
                        help="Enable verbose mode")

    # Parent parsers bundle the options shared by several subcommands.
    dataparser = ArgumentParser(add_help=False)
    dataparser.add_argument("--data-root", type=str, required=False)
    dataparser.add_argument("--dataset", choices=dataset_called.keys(), required=True)

    modelparser = ArgumentParser(add_help=False)
    modelparser.add_argument("--model-path", type=str, required=True,
                             help="Path to the model to load/save")

    fileparser = ArgumentParser(add_help=False)
    fileparser.add_argument("--input-file", "-i", required=False, type=str)
    fileparser.add_argument("--output-file", "-o", required=False, type=str)

    subparsers = parser.add_subparsers(dest="mode", required=True,
                                       help="Mode to run in")

    # TODO
    fetch_parser = subparsers.add_parser("fetch", parents=[dataparser],
                                         help="Only fetch the dataset, then exit")

    train_parser = subparsers.add_parser("train", parents=[dataparser, modelparser])
    # main() reads args.method, so the train subcommand has to define it.
    train_parser.add_argument("--method", choices=["optuna", "full"], default="full",
                              help="Train via OptunaTrainer ('optuna') or FullTrainer ('full')")

    # TODO
    compress_parser = subparsers.add_parser("compress", parents=[modelparser, fileparser])

    # TODO
    decompress_parser = subparsers.add_parser("decompress", parents=[modelparser, fileparser])

    return parser.parse_args()


def main():
    BATCH_SIZE = 64

    # Hyperparameters
    context_length = 128  # NOTE: not used anywhere in this script yet

    args = parse_arguments()

    if args.mode in ("compress", "decompress"):
        # TODO These subcommands are declared but have no implementation yet;
        # bail out before the code below touches args.dataset, which they lack.
        raise NotImplementedError(f"Mode '{args.mode}' is not implemented yet")

    # torch.accelerator abstracts over the available accelerator back-ends
    # (CUDA, MPS, ...); it needs a recent PyTorch release.
    if torch.accelerator.is_available():
        DEVICE = torch.accelerator.current_accelerator().type
    else:
        DEVICE = "cpu"
    print(f"Running on device: {DEVICE}...")

print("Loading in the dataset...")
|
|
if args.dataset in dataset_called:
|
|
dataset = dataset_called[args.dataset](root=args.data_root, transform=lambda x: x.to(DEVICE))
|
|
else:
|
|
# TODO Allow to import arbitrary files
|
|
raise NotImplementedError(f"Importing external datasets is not implemented yet")
|
|
|
|
    if args.mode == "fetch":
        # TODO Move to earlier in the chain, because now everything is converted into tensors as well?
        return

    dataset_length = len(dataset)
    print(f"Dataset size = {dataset_length}")

    # 80/20 train/validation split.
    training_size = ceil(0.8 * dataset_length)
    print(f"Training set size = {training_size}, validation set size = {dataset_length - training_size}")

    train_set, validate_set = torch.utils.data.random_split(dataset, [training_size, dataset_length - training_size])
    training_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
    validation_loader = DataLoader(validate_set, batch_size=BATCH_SIZE, shuffle=False)
    loss_fn = torch.nn.CrossEntropyLoss()

    # Load an existing model if one was saved before; otherwise model stays
    # None and the trainer is expected to construct a fresh one.
    model = None
    if args.model_path is not None and os.path.exists(args.model_path):
        print("Loading the model...")
        # weights_only=False: the file holds a fully pickled model, not just a
        # state dict, and recent PyTorch defaults to weights_only=True.
        model = torch.load(args.model_path, weights_only=False)

    trainer: Trainer = OptunaTrainer() if args.method == "optuna" else FullTrainer()

    trainer.execute(
        model=model,
        train_loader=training_loader,
        validation_loader=validation_loader,
        loss_fn=loss_fn,
        n_epochs=200,
        device=DEVICE,
    )


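# Example invocations (the script name and the "mnist" dataset key are
# illustrative; any key registered in dataset_called works):
#   python neural_compression.py fetch --dataset mnist --data-root ./data
#   python neural_compression.py train --dataset mnist --data-root ./data \
#       --model-path model.pt --method optuna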
if __name__ == "__main__":
    main()