chore: Start compression path

Tibo De Peuter 2025-12-05 12:38:10 +01:00
parent f32f4678e1
commit 9dff723bba
Signed by: tdpeuter
GPG key ID: 38297DE43F75FFE2
4 changed files with 147 additions and 94 deletions

main.py (115 changed lines: argument parsing moves to the new src/args.py, the training loop to the new src/train.py; both are shown below)

@@ -1,108 +1,35 @@
import torch

from src.args import parse_arguments
from src.process import compress
from src.train import train


def main():
    args, print_help = parse_arguments()

    if torch.accelerator.is_available():
        device = torch.accelerator.current_accelerator().type
    else:
        device = "cpu"
    print(f"Running on device: {device}...")

    match args.mode:
        case 'train':
            train(
                device=device,
                dataset=args.dataset,
                data_root=args.data_root,
                n_trials=3 if args.debug else None,
                size=2**10 if args.debug else None,
                model_path=args.model_path,
            )
        case 'compress':
            compress(args.input_file)
        case _:
            raise NotImplementedError(f"Mode {args.mode} is not implemented yet")


if __name__ == "__main__":
    main()
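
For reference, the refactored entry point dispatches on the subcommand defined in src/args.py. A sketch of the resulting invocations, with flags taken from the parsers below (placeholder values are illustrative only):

python main.py train --dataset <name> --method optuna --data-root ./data
python main.py compress --input-file input.bin

fetch and decompress already parse, but still fall through to the NotImplementedError branch of the match statement.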

src/args.py (new file, 42 lines)

@@ -0,0 +1,42 @@
from argparse import ArgumentParser

from src.dataset_loaders import dataset_called


def parse_arguments():
    parser = ArgumentParser(prog="NeuralCompression")
    parser.add_argument("--debug", "-d", action="store_true", required=False,
                        help="Enable debug mode: smaller datasets, more information")
    parser.add_argument("--verbose", "-v", action="store_true", required=False,
                        help="Enable verbose mode")

    dataparser = ArgumentParser(add_help=False)
    dataparser.add_argument("--data-root", type=str, required=False)
    dataparser.add_argument("--dataset", choices=dataset_called.keys(), required=True)

    modelparser = ArgumentParser(add_help=False)
    modelparser.add_argument("--model-path", type=str, required=False,
                             help="Path to the model to load/save")

    fileparser = ArgumentParser(add_help=False)
    fileparser.add_argument("--input-file", "-i", required=False, type=str)
    fileparser.add_argument("--output-file", "-o", required=False, type=str)

    subparsers = parser.add_subparsers(dest="mode", required=True,
                                       help="Mode to run in")
    # TODO
    fetch_parser = subparsers.add_parser("fetch", parents=[dataparser],
                                         help="Only fetch the dataset, then exit")
    train_parser = subparsers.add_parser("train", parents=[dataparser, modelparser])
    train_parser.add_argument("--method", choices=["optuna", "full"], required=True,
                              help="Method to use for training")
    # TODO
    compress_parser = subparsers.add_parser("compress", parents=[modelparser, fileparser])
    # TODO
    decompress_parser = subparsers.add_parser("decompress", parents=[modelparser, fileparser])

    return parser.parse_args(), parser.print_help
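
parse_arguments() now returns a pair, so callers unpack both values. A hedged sketch of how the extra print_help handle could be used; main.py unpacks it but never calls it, so the error path below is an assumption, not code from this commit:

args, print_help = parse_arguments()

if args.mode in ("fetch", "decompress"):  # modes that are still TODO
    print_help()                          # remind the user what is supported
    raise SystemExit(2)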

src/process.py (new file, 23 lines)

@@ -0,0 +1,23 @@
import torch


def compress(
        input_file: str | None = None
):
    if input_file:
        with open(input_file, "rb") as file:
            byte_data = file.read()
    else:
        # Read from stdin
        text = input()
        byte_data = text.encode('utf-8', errors='replace')

    # One tensor element per byte; dtype long so it can index an embedding later
    tensor = torch.tensor(list(byte_data), dtype=torch.long)
    print(tensor)

    # TODO Feed to model for compression, store result
    return


def decompress():
    # Raise (not return) so callers fail loudly until this is implemented
    raise NotImplementedError("Decompression is not implemented yet")
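
compress() currently stops after mapping the input bytes to a tensor. A minimal, self-contained sketch of that mapping and its inverse; the inverse is an assumption about how decompress() would eventually reconstruct bytes, no model involved:

import torch

byte_data = b"hello world"

# Forward direction, as in compress(): one long per byte, values in [0, 255]
tensor = torch.tensor(list(byte_data), dtype=torch.long)

# Assumed inverse for a future decompress(): tensor back to raw bytes
restored = bytes(tensor.tolist())
assert restored == byte_data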

src/train.py (new file, 61 lines)

@@ -0,0 +1,61 @@
import torch
from torch.utils.data import DataLoader

from src.dataset_loaders import dataset_called
from src.trainers import OptunaTrainer, Trainer, FullTrainer


def train(
        device,
        dataset: str,
        data_root: str,
        n_trials: int | None = None,
        size: int | None = None,
        mode: str = "train",
        method: str = 'optuna',
        model_path: str | None = None,
):
    batch_size = 2

    dataset_common_args = {
        'root': data_root,
        'transform': lambda x: x.to(device),
    }

    if size:
        dataset_common_args['size'] = size

    print("Loading in the dataset...")
    if dataset in dataset_called:
        training_set = dataset_called[dataset](split='train', **dataset_common_args)
        validate_set = dataset_called[dataset](split='validation', **dataset_common_args)
    else:
        # TODO Allow importing arbitrary files
        raise NotImplementedError("Importing external datasets is not implemented yet")

    if mode == 'fetch':
        # TODO Move to earlier in the chain, because now everything is converted into tensors as well?
        exit(0)

    print(f"Training set size = {len(training_set)}, validation set size = {len(validate_set)}")

    training_loader = DataLoader(training_set, batch_size=batch_size, shuffle=True)
    validation_loader = DataLoader(validate_set, batch_size=batch_size, shuffle=False)

    loss_fn = torch.nn.CrossEntropyLoss()

    model = None
    if model_path is not None:
        print("Loading the model...")
        model = torch.load(model_path)

    trainer: Trainer = OptunaTrainer(n_trials=n_trials) if method == "optuna" else FullTrainer()

    print("Training...")
    trainer.execute(
        model=model,
        train_loader=training_loader,
        validation_loader=validation_loader,
        loss_fn=loss_fn,
        n_epochs=200,
        device=device,
    )
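
For context, main.py's debug path ends up calling this function roughly as follows. The dataset key "enwik8" is hypothetical (any key in dataset_called works), and note that the parsed --method flag is not forwarded yet, so method always falls back to its 'optuna' default:

train(
    device="cpu",
    dataset="enwik8",  # hypothetical key; must exist in dataset_called
    data_root="./data",
    n_trials=3,        # debug mode: only a few Optuna trials
    size=2**10,        # debug mode: cap the dataset size
    model_path=None,
)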