Input

  • 1. sparse_shape = torch.LongTensor([87, 87])

  • 2. input = scn.InputBatch(2, sparse_shape)  # dimension, sparse shape

  • 3. Build the sparse input tensor:

    # One way: add a sample, then set each active location individually
    input.add_sample()
    location = torch.LongTensor([y, x])
    featureVector = torch.FloatTensor([2])
    input.set_location(location, featureVector, 0)

    # Another way: collect locations/features in lists, then set them all at once
    input.add_sample()
    locations = []
    features = []
    locations.append([y, x])
    features.append([1])
    locations = torch.LongTensor(locations)
    features = torch.FloatTensor(features)
    input.set_locations(locations, features, 0)
    
    
  • Another approach, using scn.InputLayer (see the sketch after this list):

    self.inputLayer = scn.InputLayer(2, self.spatial_size, 2)  # dimension, spatial_size, mode
    input = self.inputLayer(x)  # one question here: what does x look like? It also carries coordinates and features
    # The variable x is a tuple of (coords, features, batch_size).
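
A minimal sketch of this InputLayer route, assuming the coordinate layout is [N, dimension+1] with the batch index in the last column (the spatial size, coordinates and feature values below are arbitrary illustrations):

    import torch
    import sparseconvnet as scn

    spatial_size = torch.LongTensor([87, 87])
    input_layer = scn.InputLayer(2, spatial_size, 2)       # dimension, spatial_size, mode

    # two active sites in sample 0, one in sample 1
    coords = torch.LongTensor([[10, 20, 0],
                               [30, 40, 0],
                               [ 5,  7, 1]])
    features = torch.FloatTensor([[1.0], [2.0], [0.5]])    # one feature channel per site

    x = input_layer((coords, features, 2))                 # batch_size = 2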
    
    

Building the model

model1 = scn.Sequential().add(
    scn.SubmanifoldConvolution(2, 1, 8, 3, False)    # dimension, nIn, nOut, filter_size, bias
).add(
    scn.SubmanifoldConvolution(2, 8, 16, 3, False)
).add(
    scn.SubmanifoldConvolution(2, 16, 32, 3, False)
).add(
    scn.BatchNormalization(32)
).add(
    scn.Convolution(2, 32, 32, 3, 2, False)          # strided convolution: filter 3, stride 2
).add(
    scn.SparseToDense(2, 32)
)
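
# A minimal end-to-end sketch for model1 (assumed workflow, reusing the InputBatch calls
# from the input section; the 10x10 dense output size is an arbitrary choice).
out_spatial = torch.LongTensor([10, 10])
in_spatial = model1.input_spatial_size(out_spatial)  # input spatial size that yields a 10x10 dense output

inp = scn.InputBatch(2, in_spatial)
inp.add_sample()
inp.set_location(torch.LongTensor([5, 7]), torch.FloatTensor([2]), 0)  # one active site, 1 input channel

out = model1(inp)
print(out.shape)  # expected dense shape: [1, 32, 10, 10]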


# Sparse 2D convolutional model

import torch
import torch.nn as nn
import sparseconvnet as scn

# two-dimensional SparseConvNet
class Model(nn.Module):
    def __init__(self):
        nn.Module.__init__(self)
        self.sparseModel = scn.Sequential(
            scn.SubmanifoldConvolution(2, 3, 8, 3, False),
            scn.MaxPooling(2, 3, 2),
            scn.SparseResNet(2, 8, [           # dimension, input planes, [block type, output planes, reps, stride]
                        ['b', 8, 2, 1],
                        ['b', 16, 2, 2],
                        ['b', 24, 2, 2],
                        ['b', 32, 2, 2]]),
            scn.Convolution(2, 32, 64, 5, 1, False),
            scn.BatchNormReLU(64),
            scn.SparseToDense(2, 64))
        self.spatial_size = self.sparseModel.input_spatial_size(torch.LongTensor([1, 1]))  # input size that yields a 1x1 dense output
        self.inputLayer = scn.InputLayer(2, self.spatial_size, 2)  # dimension, spatial_size, mode
        self.linear = nn.Linear(64, 183)

    def forward(self, x):
        x = self.inputLayer(x)   # x: (coords, features, batch_size)
        x = self.sparseModel(x)  # dense tensor of shape [batch, 64, 1, 1]
        x = x.view(-1, 64)
        x = self.linear(x)
        return x

model = Model()
print('model: ', model)
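
# A hedged sketch of calling the model (assumption: scn.InputLayer takes coordinates of
# shape [N, dimension+1] with the batch index in the last column; the coordinates and
# feature values below are arbitrary).
coords = torch.LongTensor([[10, 10, 0],
                           [20, 30, 0],
                           [ 5,  7, 1]])
features = torch.rand(3, 3)            # 3 active sites, 3 input channels
output = model((coords, features, 2))  # batch_size = 2
print('output: ', output.shape)        # expected: [2, 183]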


## Sparse convolutional ResNet implementation
def SparseResNet(dimension, nInputPlanes, layers):
    """
    Pre-activated ResNet.
    e.g. layers = [['basic', 16, 2, 1], ['basic', 32, 2, 2]]
    Each entry is [block type, output planes, repetitions, stride].
    """
    nPlanes = nInputPlanes
    m = scn.Sequential()

    def residual(nIn, nOut, stride):
        # Shortcut branch: strided convolution, 1x1 projection, or identity
        if stride > 1:
            return scn.Convolution(dimension, nIn, nOut, 3, stride, False)
        elif nIn != nOut:
            return scn.NetworkInNetwork(nIn, nOut, False)
        else:
            return scn.Identity()

    for blockType, n, reps, stride in layers:
        for rep in range(reps):
            if blockType[0] == 'b':  # basic block
                if rep == 0:
                    # First block of the group: may downsample and change the channel count
                    m.add(scn.BatchNormReLU(nPlanes))
                    m.add(scn.ConcatTable()
                          .add(scn.Sequential()
                               .add(scn.SubmanifoldConvolution(dimension, nPlanes, n, 3, False)
                                    if stride == 1 else
                                    scn.Convolution(dimension, nPlanes, n, 3, stride, False))
                               .add(scn.BatchNormReLU(n))
                               .add(scn.SubmanifoldConvolution(dimension, n, n, 3, False)))
                          .add(residual(nPlanes, n, stride)))
                else:
                    # Remaining blocks: same resolution and channel count, identity shortcut
                    m.add(scn.ConcatTable()
                          .add(scn.Sequential()
                               .add(scn.BatchNormReLU(nPlanes))
                               .add(scn.SubmanifoldConvolution(dimension, nPlanes, n, 3, False))
                               .add(scn.BatchNormReLU(n))
                               .add(scn.SubmanifoldConvolution(dimension, n, n, 3, False)))
                          .add(scn.Identity()))
            nPlanes = n
            m.add(scn.AddTable())  # sum the two branches of the ConcatTable
    m.add(scn.BatchNormReLU(nPlanes))
    return m
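
# Usage sketch (arbitrary layer specification): each group stacks `reps` pre-activated
# basic blocks; scn.ConcatTable runs the main branch and the shortcut in parallel and
# scn.AddTable sums them.
trunk = SparseResNet(2, 8, [['b', 16, 2, 1], ['b', 32, 2, 2]])
print(trunk)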

Converting the sparse tensor to a dense tensor

  • scn.SparseToDense(2, 32)
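
A minimal sketch, assuming the (coords, features, batch_size) input layout described above; SparseToDense returns an ordinary dense tensor of shape [batch, nPlanes, *spatial_size]:

    import torch
    import sparseconvnet as scn

    spatial_size = torch.LongTensor([16, 16])
    input_layer = scn.InputLayer(2, spatial_size, 2)
    conv = scn.SubmanifoldConvolution(2, 1, 32, 3, False)
    to_dense = scn.SparseToDense(2, 32)                  # dimension, nPlanes

    coords = torch.LongTensor([[3, 4, 0], [8, 9, 0]])    # two active sites in sample 0
    features = torch.ones(2, 1)
    dense = to_dense(conv(input_layer((coords, features, 1))))
    print(dense.shape)                                   # expected: [1, 32, 16, 16]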

Summary

That is basically how a sparse convolutional network is put together. In my view, the difficult part is still the concrete implementation.
