AE (autoencoder)

An autoencoder is a neural network trained to reconstruct its own input through a narrow bottleneck; after training, the bottleneck activations form a compressed representation of the data.

This makes autoencoders useful for feature transformation. The MXNet code template below builds a five-layer stacked autoencoder, pretrains it greedily layer by layer, fine-tunes the whole stack, and then returns the bottleneck (manifold) features:

def get_manifold(X, epochs, batch_size):
    # X: pandas DataFrame of input features
    # epochs: list of epoch counts; epochs[i] pretrains layer i and
    #         epochs[-1] is reused for full-stack fine-tuning
    # batch_size: mini-batch size for training
    import numpy as np
    import pandas as pd
    from mxnet import nd
    from mxnet import ndarray as F
    from mxnet.gluon import Block, nn
    from mxnet.initializer import Uniform
    
    class Model(Block):
        def __init__(self, num_dim, **kwargs):
            super(Model, self).__init__(**kwargs)
            # Uniform(scale) draws initial weights from [-scale, scale]
            wi1 = Uniform(0.25)
            wi2 = Uniform(0.1)
            with self.name_scope():
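                # Encoder halves the width at each of five layers
                # (num_dim -> num_dim//32); the decoder mirrors it.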
                self.encoder1 = nn.Dense(num_dim//2, in_units=num_dim, weight_initializer=wi1)
                self.encoder2 = nn.Dense(num_dim//4, in_units=num_dim//2, weight_initializer=wi1)
                self.encoder3 = nn.Dense(num_dim//8, in_units=num_dim//4, weight_initializer=wi2)
                self.encoder4 = nn.Dense(num_dim//16, in_units=num_dim//8, weight_initializer=wi2)
                self.encoder5 = nn.Dense(num_dim//32, in_units=num_dim//16, weight_initializer=wi2)
                self.decoder5 = nn.Dense(num_dim//16, in_units=num_dim//32, weight_initializer=wi2)
                self.decoder4 = nn.Dense(num_dim//8, in_units=num_dim//16, weight_initializer=wi2)
                self.decoder3 = nn.Dense(num_dim//4, in_units=num_dim//8, weight_initializer=wi2)
                self.decoder2 = nn.Dense(num_dim//2, in_units=num_dim//4, weight_initializer=wi1)
                self.decoder1 = nn.Dense(num_dim, in_units=num_dim//2, weight_initializer=wi1)
            self.layers = [(self.encoder1,self.decoder1),
                        (self.encoder2,self.decoder2),
                        (self.encoder3,self.decoder3),
                        (self.encoder4,self.decoder4),
                        (self.encoder5,self.decoder5)]

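        # Pretraining helper: reconstruct x through a single
        # encoder/decoder pair.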
        def onelayer(self, x, layer):
            xx = F.tanh(layer[0](x))
            return layer[1](xx)

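        # Encode x through one pretrained layer, producing the input
        # for pretraining the next layer.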
        def oneforward(self, x, layer):
            return F.tanh(layer[0](x))

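        # Full-stack pass: encode through all layers, then decode back;
        # the outermost decoder output is left linear.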
        def forward(self, x):
            n_layer = len(self.layers)
            for i in range(n_layer):
                x = F.tanh(self.layers[i][0](x))
            for i in range(n_layer-1):
                x = F.tanh(self.layers[n_layer-i-1][1](x))
            return self.layers[0][1](x)

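        # Bottleneck features: encode through all layers and return the
        # deepest (linear) encoder output.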
        def manifold(self, x):
            n_layer = len(self.layers)
            for i in range(n_layer-1):
                x = F.tanh(self.layers[i][0](x))
            return self.layers[n_layer-1][0](x)
    from mxnet import autograd
    from mxnet import cpu
    from mxnet.gluon import Trainer
    from mxnet.gluon.loss import L2Loss

    # Stacked AutoEncoder
    model = Model(X.shape[1])
    # cpu(0)..cpu(3) are not separate devices in MXNet; a single CPU context suffices
    model.initialize(ctx=cpu())

    # Select training algorithm
    trainer = Trainer(model.collect_params(),'adam')
    loss_func = L2Loss()

    # Start Pretraining
    print('start pretraining of StackedAE...')
    loss_n = [] # for log

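    # Greedy layer-wise pretraining: each encoder/decoder pair learns to
    # reconstruct the output of the layers already trained below it.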
    buffer = nd.array(X.values)
    for layer_id, layer in enumerate(model.layers):
        print('layer %d of %d...'%(layer_id+1,len(model.layers)))
        trainer.set_learning_rate(0.02)
        for epoch in range(1, epochs[layer_id] + 1):
            # shuffle sample indices for this epoch
            indexs = np.random.permutation(buffer.shape[0])
            for bs in range(0,buffer.shape[0],batch_size):
                be = min(buffer.shape[0],bs+batch_size)
                data = buffer[indexs[bs:be]]
                # forward
                with autograd.record():
                    output = model.onelayer(data, layer)
                    # reconstruction loss against the input batch
                    loss = loss_func(output, data)
                    # record batch loss for logging
                    loss_n.append(np.mean(loss.asnumpy()))
                    del output
                # backward
                loss.backward()
                # apply one optimizer update for this batch
                trainer.step(batch_size, ignore_stale_grad=True)
                del data, loss
            # show log
            print('%d/%d epoch loss=%f...'%(epoch,epochs[layer_id],np.mean(loss_n)))
            loss_n = []
            del bs, be, indexs
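        # Encode the full dataset through this layer to build the
        # training input for the next layer.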
        buffer = model.oneforward(buffer, layer)
    del layer, loss_n, buffer

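    # Fine-tuning: train the whole encoder/decoder stack end-to-end.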
    print('start training of StackedAE...')
    loss_n = []
    buffer = nd.array(X.values)
    trainer.set_learning_rate(0.02)
    for epoch in range(1, epochs[-1] + 1):
        # shuffle sample indices for this epoch
        indexs = np.random.permutation(buffer.shape[0])
        for bs in range(0,buffer.shape[0],batch_size):
            be = min(buffer.shape[0],bs+batch_size)
            data = buffer[indexs[bs:be]]
            # forward
            with autograd.record():
                output = model(data)
                # reconstruction loss against the input batch
                loss = loss_func(output, data)
                # record batch loss for logging
                loss_n.append(np.mean(loss.asnumpy()))
                del output
            # backward
            loss.backward()
            # apply one optimizer update for this batch
            trainer.step(batch_size, ignore_stale_grad=True)
            del data, loss
        # show log
        print('%d/%d epoch loss=%f...'%(epoch,epochs[-1],np.mean(loss_n)))
        loss_n = []
        del bs, be, indexs
    del trainer, loss_func, loss_n, buffer

    print('making manifold...')
    parts = []
    for bs in range(0,X.shape[0],batch_size):
        be = min(X.shape[0],bs + batch_size)
        nx = nd.array(X.iloc[bs:be].values)
        # DataFrame.append was removed in pandas 2.x, so collect batches and concat
        parts.append(pd.DataFrame(model.manifold(nx).asnumpy()))
        del be, nx
    manifold_X = pd.concat(parts, ignore_index=True)
    del model, bs, parts
    return manifold_X
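
A minimal usage sketch (not from the original post): it assumes X is a pandas DataFrame of numeric features, and the epochs list and batch_size values below are illustrative only.

import numpy as np
import pandas as pd

# Hypothetical data: 1024 samples, 64 numeric features.
X = pd.DataFrame(np.random.rand(1024, 64))

# One pretraining epoch count per layer; epochs[-1] also drives fine-tuning.
epochs = [20, 20, 20, 20, 20, 40]
batch_size = 128

manifold_X = get_manifold(X, epochs, batch_size)
print(manifold_X.shape)  # bottleneck width is num_dim//32, so (1024, 2)
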
Original article: https://www.cnblogs.com/zhengzhe/p/9264814.html