Updates to spatial throughput test

parent 81bf349d
@@ -13,7 +13,7 @@ parser.add_argument('--batch_size', type=int, help='Batch size')
 parser.add_argument('--data', help='Root folder for the dataset')
 args = parser.parse_args()
-spatial_dataset = data.jpeg_dataset_map[args.dataset](args.batch_size, args.data)
+dataset = data.jpeg_dataset_map[args.dataset](args.batch_size, args.data)
 dataset_info = data.dataset_info[args.dataset]
 spatial_model = models.SpatialResNet(dataset_info['channels'], dataset_info['classes'])
 jpeg_model = models.JpegResNet(spatial_model, n_freqs=6).to(device)
@@ -21,18 +21,18 @@ optimizer = optim.Adam(jpeg_model.parameters())
 t0 = time.perf_counter()
-models.train(jpeg_model, device, spatial_dataset[0], optimizer, 0)
+models.train(jpeg_model, device, dataset[0], optimizer, 0)
 torch.cuda.synchronize()
 t1 = time.perf_counter()
 training_time = t1 - t0
 t0 = time.perf_counter()
-models.test(jpeg_model, device, spatial_dataset[1])
+models.test(jpeg_model, device, dataset[1])
 torch.cuda.synchronize()
 t1 = time.perf_counter()
 testing_time = t1 - t0
 with open('{}_jpeg_throughput.csv'.format(args.dataset), 'w') as f:
     f.write('Training, Testing\n')
-    f.write('{}, {}\n'.format(training_time / len(spatial_dataset[0]), testing_time / len(spatial_dataset[1])))
+    f.write('{}, {}\n'.format(training_time / len(dataset[0]), testing_time / len(dataset[1])))
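Both throughput scripts are driven from the command line. A hypothetical invocation, for orientation only: the script filename and dataset key below are assumptions, and the --dataset flag is implied by args.dataset but defined above the hunk shown.

    # script name and dataset key are assumed, not taken from this diff
    python jpeg_throughput.py --dataset MNIST --batch_size 128 --data ./data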
@@ -14,25 +14,25 @@ parser.add_argument('--batch_size', type=int, help='Batch size')
 parser.add_argument('--data', help='Root folder for the dataset')
 args = parser.parse_args()
-spatial_dataset = data.spatial_dataset_map[args.dataset](args.batch_size, args.data)
+dataset = data.jpeg_dataset_map[args.dataset](args.batch_size, args.data)
 dataset_info = data.dataset_info[args.dataset]
 spatial_model = models.SpatialResNet(dataset_info['channels'], dataset_info['classes']).to(device)
 optimizer = optim.Adam(spatial_model.parameters())
 t0 = time.perf_counter()
-models.train(spatial_model, device, spatial_dataset[0], optimizer, 0)
+models.train(spatial_model, device, dataset[0], optimizer, 0, do_decode=True)
 torch.cuda.synchronize()
 t1 = time.perf_counter()
 training_time = t1 - t0
 t0 = time.perf_counter()
-models.test(spatial_model, device, spatial_dataset[1])
+models.test(spatial_model, device, dataset[1], do_decode=True)
 torch.cuda.synchronize()
 t1 = time.perf_counter()
 testing_time = t1 - t0
 with open('{}_spatial_throughput.csv'.format(args.dataset), 'w') as f:
     f.write('Training, Testing\n')
-    f.write('{}, {}\n'.format(training_time / len(spatial_dataset[0]), testing_time / len(spatial_dataset[1])))
+    f.write('{}, {}\n'.format(training_time / len(dataset[0]), testing_time / len(dataset[1])))
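Note that the CSV columns record time per batch, not total wall-clock time: len() on a PyTorch DataLoader returns the number of batches, and dataset[0] / dataset[1] appear to be the train and test loaders. A minimal sketch of the normalization, with hypothetical variable names:

    # len(loader) on a torch.utils.data.DataLoader is the batch count,
    # so each CSV column is average seconds per batch
    avg_train_s_per_batch = training_time / len(dataset[0])  # train loader
    avg_test_s_per_batch = testing_time / len(dataset[1])    # test loader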
 import torch
 import torch.nn.functional as F
+from jpeg_codec import decode
-def train(model, device, train_loader, optimizer, epoch):
+def train(model, device, train_loader, optimizer, epoch, do_decode=False):
     model.train()
     for batch_idx, (data, target) in enumerate(train_loader):
-        data, target = data.to(device), target.to(device)
+        if do_decode:
+            data, target = decode(data).to(device), target.to(device)
+        else:
+            data, target = data.to(device), target.to(device)
         optimizer.zero_grad()
         output = model(data)
         loss = F.cross_entropy(output, target)
@@ -17,13 +21,16 @@ def train(model, device, train_loader, optimizer, epoch):
                 100. * batch_idx / len(train_loader), loss.item()))
-def test(model, device, test_loader):
+def test(model, device, test_loader, do_decode=False):
     model.eval()
     test_loss = 0
     correct = 0
     with torch.no_grad():
         for data, target in test_loader:
-            data, target = data.to(device), target.to(device)
+            if do_decode:
+                data, target = decode(data).to(device), target.to(device)
+            else:
+                data, target = data.to(device), target.to(device)
             output = model(data)
             test_loss += F.cross_entropy(output, target, reduction='sum').item()
             pred = output.max(1, keepdim=True)[1]
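Taken together, the new do_decode flag lets one set of train/test helpers serve both pipelines. A minimal sketch of the resulting call pattern (variable names taken from the scripts above; decode is assumed to map a JPEG-domain batch back to spatial pixels):

    # JPEG-domain model consumes batches as stored; no decode step
    models.train(jpeg_model, device, dataset[0], optimizer, 0)
    # Spatial baseline reads the same JPEG dataset but decodes each batch first,
    # so decode overhead is charged to the spatial pipeline's throughput
    models.train(spatial_model, device, dataset[0], optimizer, 0, do_decode=True)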