# project name
project_name = 'course-project-humanface-dcgan'
# install opendatasets module
!pip install opendatasets --upgrade --quiet
# downloading dataset
import opendatasets as od
dataset_url = 'https://www.kaggle.com/jessicali9530/celeba-dataset'
od.download(dataset_url)
Downloading celeba-dataset.zip to ./celeba-dataset
100%|██████████| 1.33G/1.33G [00:17<00:00, 80.8MB/s]
# checking the dataset directory
import os
DATA_DIR = '/content/celeba-dataset/img_align_celeba'
print(os.listdir(DATA_DIR))
['images', '.ipynb_checkpoints']
# listing a few images from the dataset
print(os.listdir(DATA_DIR+'/images')[:10])
['169176.jpg', '107803.jpg', '001507.jpg', '199394.jpg', '080649.jpg', '163543.jpg', '119611.jpg', '070090.jpg', '178744.jpg', '058965.jpg']
**What type of data is it?** The dataset consists of aligned and cropped celebrity face photographs (JPEG images) from the CelebA dataset.

**What type of problem is it?** It is an unsupervised generative modeling problem: we will train a DCGAN to generate new, realistic-looking face images rather than predict any labels.
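As a quick, optional sanity check (an added sketch, not part of the original notebook), we can count the image files and inspect the dimensions of one sample using PIL:

# (sketch) count the image files and check the size of one sample image
from PIL import Image
image_files = os.listdir(DATA_DIR + '/images')
print(len(image_files), 'images found')
sample = Image.open(os.path.join(DATA_DIR, 'images', image_files[0]))
print('sample image size:', sample.size)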
# import essential modules
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import torchvision.transforms as T
# define image size, batch size, and normalization parameters
image_size = 64
batch_size = 128
stats = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
# create the training dataset with resize, crop, and normalization transforms
train_ds = ImageFolder(DATA_DIR, transform=T.Compose([
    T.Resize(image_size),
    T.CenterCrop(image_size),
    T.ToTensor(),
    T.Normalize(*stats)]))
train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=3, pin_memory=True)
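Here is a small optional check (an added sketch, not from the original notebook) that pulls one batch and confirms its shape and value range; T.Normalize with mean 0.5 and std 0.5 maps pixel values from [0, 1] to [-1, 1], matching the Tanh output of the generator defined later.

# (sketch) fetch one batch and verify its shape and the normalized value range
images, _ = next(iter(train_dl))
print(images.shape)                              # expected: torch.Size([128, 3, 64, 64])
print(images.min().item(), images.max().item())  # values should lie roughly in [-1, 1]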
Let's create helper functions to denormalize the image tensors and display some sample images from a training batch.
import torch
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
%matplotlib inline
# function to denormalize image tensors (inverts T.Normalize: x * 0.5 + 0.5)
def denorm(img_tensors):
    return img_tensors * stats[1][0] + stats[0][0]
# function to display a grid of images
def show_images(images, nmax=64):
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(make_grid(denorm(images.detach()[:nmax]), nrow=8).permute(1, 2, 0))

# function to display the first batch from a data loader
def show_batch(dl, nmax=64):
    for images, _ in dl:
        show_images(images, nmax)
        break
# let's look at a few sample images from a training batch
show_batch(train_dl)
!pip install jovian --upgrade --quiet
import jovian
jovian.commit(project=project_name, environment=None)
[jovian] Detected Colab notebook...
[jovian] Uploading colab notebook to Jovian...
[jovian] Attaching records (metrics, hyperparameters, dataset etc.)
[jovian] Committed successfully! https://jovian.ai/junaidkhangec/course-project-humanface-dcgan
def get_default_device():
    """Pick GPU if available, else CPU"""
    if torch.cuda.is_available():
        return torch.device('cuda')
    else:
        return torch.device('cpu')

def to_device(data, device):
    """Move tensor(s) to chosen device"""
    if isinstance(data, (list, tuple)):
        return [to_device(x, device) for x in data]
    return data.to(device, non_blocking=True)

class DeviceDataLoader():
    """Wrap a dataloader to move data to a device"""
    def __init__(self, dl, device):
        self.dl = dl
        self.device = device

    def __iter__(self):
        """Yield a batch of data after moving it to device"""
        for b in self.dl:
            yield to_device(b, self.device)

    def __len__(self):
        """Number of batches"""
        return len(self.dl)
# get default device ('cuda' if GPU available)
device = get_default_device()
device
device(type='cuda')
We can now wrap our training data loader in DeviceDataLoader so that batches of data are automatically transferred to the GPU (if available).
# wrapping the data loader to move batches to the default device (GPU if available)
train_dl = DeviceDataLoader(train_dl, device)
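As a quick check (an added sketch, not part of the original notebook), we can pull one batch from the wrapped loader and confirm it now lives on the chosen device:

# (sketch) fetch one batch and confirm it has been moved to the device
for images, _ in train_dl:
    print(images.device)   # expected: cuda:0 when a GPU is available
    break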
import torch.nn as nn
discriminator = nn.Sequential(
    # in: 3 x 64 x 64
    nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False),
    nn.BatchNorm2d(64),
    nn.LeakyReLU(0.2, inplace=True),
    # out: 64 x 32 x 32
    nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1, bias=False),
    nn.BatchNorm2d(128),
    nn.LeakyReLU(0.2, inplace=True),
    # out: 128 x 16 x 16
    nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1, bias=False),
    nn.BatchNorm2d(256),
    nn.LeakyReLU(0.2, inplace=True),
    # out: 256 x 8 x 8
    nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1, bias=False),
    nn.BatchNorm2d(512),
    nn.LeakyReLU(0.2, inplace=True),
    # out: 512 x 4 x 4
    nn.Conv2d(512, 1, kernel_size=4, stride=1, padding=0, bias=False),
    # out: 1 x 1 x 1
    nn.Flatten(),
    nn.Sigmoid())
# moving discriminator model to GPU (if available)
discriminator = to_device(discriminator, device)
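Before training, it can help to sanity-check the discriminator the same way the generator is tested further below; this is an added sketch that feeds a random batch on the device through it:

# (sketch) pass a random batch through the discriminator and check the output shape
dummy_images = torch.randn(batch_size, 3, image_size, image_size, device=device)
preds = discriminator(dummy_images)
print(preds.shape)   # expected: torch.Size([128, 1]), one probability per image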
# defining the size of the random input noise (latent vector) fed to the generator model
latent_size = 128
generator = nn.Sequential(
    # in: latent_size x 1 x 1
    nn.ConvTranspose2d(latent_size, 512, kernel_size=4, stride=1, padding=0, bias=False),
    nn.BatchNorm2d(512),
    nn.ReLU(True),
    # out: 512 x 4 x 4
    nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=False),
    nn.BatchNorm2d(256),
    nn.ReLU(True),
    # out: 256 x 8 x 8
    nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=False),
    nn.BatchNorm2d(128),
    nn.ReLU(True),
    # out: 128 x 16 x 16
    nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1, bias=False),
    nn.BatchNorm2d(64),
    nn.ReLU(True),
    # out: 64 x 32 x 32
    nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2, padding=1, bias=False),
    nn.Tanh()
    # out: 3 x 64 x 64
)
# testing the generator model before training
xb = torch.randn(batch_size, latent_size, 1, 1) # random latent tensors
fake_images = generator(xb)
print(fake_images.shape)
show_images(fake_images)
torch.Size([128, 3, 64, 64])