MXNet Gluon: Exporting and Loading a Trained Model

import mxnet as mx
from mxnet.gluon import nn
from mxnet import gluon, nd, autograd, init
from mxnet.gluon.data.vision import datasets, transforms
from IPython import display
import matplotlib.pyplot as plt
import time
import numpy as np

# download the FashionMNIST training data
fashion_train_data = datasets.FashionMNIST(train=True)
# get the images and the corresponding labels
images, labels = fashion_train_data[:]

# transforms to convert the images to tensors and normalize them
transformer = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(0.13, 0.31)])
# apply the transform to the first element (the image) of each sample
fashion_data = fashion_train_data.transform_first(transformer)
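# sanity check (illustrative, not part of the original): ToTensor turns the
# (28, 28, 1) uint8 image into a (1, 28, 28) float32 array in [0, 1], and
# Normalize then applies (x - 0.13) / 0.31
sample, sample_label = fashion_data[0]
print(sample.shape, sample.dtype)   # (1, 28, 28) <class 'numpy.float32'>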

# set the batch size
batch_size = 256
# on Windows, set num_workers to 0 to avoid multiprocessing errors
train_data = gluon.data.DataLoader(fashion_data, batch_size=batch_size, shuffle=True, num_workers=0)
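# peek at one batch to verify the shapes (a sanity check only)
for X, y in train_data:
    print(X.shape, y.shape)   # (256, 1, 28, 28) (256,)
    break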

# load the validation data
fashion_val_data = gluon.data.vision.FashionMNIST(train=False)
val_data = gluon.data.DataLoader(fashion_val_data.transform_first(transformer),
                                 batch_size=batch_size, num_workers=0)
# define the device(s) to train on; GPUs accelerate training, and with
# multiple GPUs you can list more than one
gpu_devices = [mx.gpu(0)]
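# optional guard (an assumption, not in the original): fall back to the CPU
# when no GPU is available; mx.context.num_gpus() requires MXNet >= 1.3
if mx.context.num_gpus() == 0:
    gpu_devices = [mx.cpu()]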
# build the LeNet network structure
LeNet = nn.HybridSequential()
LeNet.add(
    nn.Conv2D(channels=6, kernel_size=5, activation="relu"),
    nn.MaxPool2D(pool_size=2, strides=2),
    nn.Conv2D(channels=16, kernel_size=3, activation="relu"),
    nn.MaxPool2D(pool_size=2, strides=2),
    nn.Flatten(),
    nn.Dense(120, activation="relu"),
    nn.Dense(84, activation="relu"),
    nn.Dense(10)
)
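# shape trace for a single (1, 28, 28) input (added for clarity):
#   Conv2D(6, k=5)  -> (6, 24, 24);  MaxPool2D -> (6, 12, 12)
#   Conv2D(16, k=3) -> (16, 10, 10); MaxPool2D -> (16, 5, 5)
#   Flatten -> 400 -> Dense(120) -> Dense(84) -> Dense(10) class scores
# hybridize to compile the network into a static graph for speed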
LeNet.hybridize()
# initialize the network weights on the GPU (Xavier initialization, a common
# choice for relu networks and what the imported init module suggests)
LeNet.collect_params().initialize(init=init.Xavier(), force_reinit=True, ctx=gpu_devices)
# softmax cross-entropy loss (combines log-softmax and cross-entropy, so the
# network can output raw scores)
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
# optimizer: stochastic gradient descent (sgd) with learning rate 0.1
trainer = gluon.Trainer(LeNet.collect_params(), "sgd", {"learning_rate": 0.1})
# compute the classification accuracy of a batch of predictions
def acc(output, label):
    return (output.argmax(axis=1) == label.astype("float32")).mean().asscalar()

# number of training epochs
epochs = 10
# training loop
for epoch in range(epochs):
    train_loss, train_acc, val_acc = 0, 0, 0
    epoch_start_time = time.time()
    for data, label in train_data:
        # split the batch across the GPU(s)
        data_list = gluon.utils.split_and_load(data, gpu_devices)
        label_list = gluon.utils.split_and_load(label, gpu_devices)
        # forward propagation
        with autograd.record():
            # get the predictions on each GPU
            pred_Y = [LeNet(x) for x in data_list]
            # compute the loss of the predictions on each GPU
            losses = [softmax_cross_entropy(pred_y, Y) for pred_y, Y in zip(pred_Y, label_list)]
        # backpropagate and update the parameters; step(batch_size) averages
        # the gradients over the batch
        for l in losses:
            l.backward()
        trainer.step(batch_size)
        # accumulate the total loss on the training set
        train_loss += sum([l.sum().asscalar() for l in losses])
        # accumulate the accuracy on the training set
        train_acc += sum([acc(output_y, y) for output_y, y in zip(pred_Y, label_list)])

    for data, label in val_data:
        data_list = gluon.utils.split_and_load(data, ctx_list=gpu_devices)
        label_list = gluon.utils.split_and_load(label, ctx_list=gpu_devices)
        # accumulate the accuracy on the validation set
        val_acc += sum(acc(LeNet(val_X), val_Y) for val_X, val_Y in zip(data_list, label_list))

    print("epoch %d,loss:%.3f,train acc:%.3f,test acc:%.3f,in %.1f sec"%
          (epoch+1,train_loss/len(labels),train_acc/len(train_data),val_acc/len(val_data),time.time()-epoch_start_time))
# export the trained model; this writes lenet-symbol.json (the network
# definition) and lenet-0001.params (the weights)
LeNet.export("lenet", epoch=1)

# load the exported model files back into a SymbolBlock
LeNet = gluon.nn.SymbolBlock.imports("lenet-symbol.json", ["data"], "lenet-0001.params")
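The imported SymbolBlock is ready for inference. Below is a minimal sketch of running it on one validation image; the sample and variable names are illustrative, not from the original, and the model loads onto the CPU by default (pass ctx to imports() to load onto a GPU instead).

# classify a single validation image with the loaded model
image, label = fashion_val_data[0]
x = transformer(image).expand_dims(axis=0)   # add a batch axis: (1, 1, 28, 28)
pred = LeNet(x).argmax(axis=1).asscalar()
print("predicted class: %d, true class: %d" % (pred, label))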
