feat: Choose dataset with options

Commit 81c767371e (parent 20bdd4f566) — 5 changed files with 67 additions and 60 deletions.
import torch
from torch.utils.data import DataLoader

from dataset_loaders import dataset_called
from trainers import OptunaTrainer, Trainer, FullTrainer

# Number of samples per training/validation batch.
BATCH_SIZE = 64

# Select the best available accelerator backend ("cuda", "mps", "xpu", ...),
# falling back to the CPU when none is present.
# NOTE(review): torch.accelerator requires a recent PyTorch (>= 2.6) — confirm
# the project's pinned torch version supports it.
if torch.accelerator.is_available():
    DEVICE = torch.accelerator.current_accelerator().type
else:
    DEVICE = "cpu"

# hyper parameters
context_length = 128

if __name__ == "__main__":
    print(f"Running on device: {DEVICE}...")

    parser = ArgumentParser()
    parser.add_argument("--method", choices=["optuna", "train"], required=True)
    parser.add_argument("--model-path", type=str, required=False)
    # Bind the group object and add the data arguments to IT, not to the
    # parser — otherwise the "Data" section never appears in --help.
    data_group = parser.add_argument_group("Data", "Data files or dataset to use")
    data_group.add_argument("--data-root", type=str, required=False)
    data_group.add_argument("dataset")
    args = parser.parse_args()

    print("Loading in the dataset...")
    if args.dataset in dataset_called:
        # dataset_called maps a dataset name to its loader class; every sample
        # is moved to the chosen device by the transform.
        dataset = dataset_called[args.dataset](root=args.data_root,
                                               transform=lambda x: x.to(DEVICE))
    else:
        # TODO Allow to import arbitrary files
        raise NotImplementedError("Importing external datasets is not implemented yet")

    dataset_length = len(dataset)
    print(f"Dataset size = {dataset_length}")

    # 80/20 train/validation split.
    training_size = ceil(0.8 * dataset_length)

    print(f"Training set size = {training_size}, Validation set size {dataset_length - training_size}")

    train_set, validate_set = torch.utils.data.random_split(
        dataset, [training_size, dataset_length - training_size])
    training_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
    validation_loader = DataLoader(validate_set, batch_size=BATCH_SIZE, shuffle=False)
    loss_fn = torch.nn.CrossEntropyLoss()

    # Optionally resume from a previously saved model; trainers are expected
    # to create a fresh model when None is passed.
    model = None
    if args.model_path is not None:
        print("Loading the model...")
        model = torch.load(args.model_path)

    trainer: Trainer = OptunaTrainer() if args.method == "optuna" else FullTrainer()

    trainer.execute(
        model=model,
        train_loader=training_loader,
        validation_loader=validation_loader,
        loss_fn=loss_fn,
        n_epochs=200,
        device=DEVICE,
    )
|
|
|||
Reference in a new issue