GongXinyuu
3/13/2019 - 6:46 PM

pytorch version FLOPs_calculator

import numpy as np
import torch
from torch.autograd import Variable

def print_model_parm_nums(model):
    """
    Count the model's parameters.
    :param model: nn.Module object.
    :return: Formatted string with the parameter count in millions.
    """
    total = sum(param.nelement() for param in model.parameters())
    print('  + Number of params: %.2fM' % (total / 1e6))
    return '  + Number of params: %.2fM' % (total / 1e6)


def print_disc_model_flops(model, inp_h=32, inp_w=32, multiply_adds=False):
    """
    Estimate the FLOPs of a discriminative model.
    :param model: nn.Module object.
    :param inp_h: The height of the input image.
    :param inp_w: The width of the input image.
    :param multiply_adds: count a multiply-add as two operations if True, one if False.
    :return:
    """
    print('Using input shape: %d * %d * 3' % (inp_h, inp_w))
    list_conv = []
    list_linear = []
    list_bn = []
    list_relu = []
    list_pooling = []

    def conv_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()

        # Ops per output element: kernel area * input channels per group,
        # doubled when a multiply-add counts as two operations.
        kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (
            2 if multiply_adds else 1)
        bias_ops = 1 if self.bias is not None else 0

        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_height * output_width

        list_conv.append(flops)

    def linear_hook(self, input, output):
        batch_size = input[0].size(0) if input[0].dim() == 2 else 1

        weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
        bias_ops = self.bias.nelement() if self.bias is not None else 0

        flops = batch_size * (weight_ops + bias_ops)
        list_linear.append(flops)

    def bn_hook(self, input, output):
        list_bn.append(input[0].nelement())

    def relu_hook(self, input, output):
        list_relu.append(input[0].nelement())

    def pooling_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()

        k = self.kernel_size
        kernel_ops = k * k if isinstance(k, int) else k[0] * k[1]  # kernel_size may be an int or a tuple
        bias_ops = 0
        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_height * output_width

        list_pooling.append(flops)

    def adaptive_pooling_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()

        if output_height == 1 and output_width == 1:
            kernel_ops = input_height * input_width
        else:
            raise NotImplementedError('only global adaptive pooling (1x1 output) is supported')

        bias_ops = 0
        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_height * output_width

        list_pooling.append(flops)

    def foo(net):
        # Recursively register the hooks above on leaf modules.
        childrens = list(net.children())
        if not childrens:
            if isinstance(net, torch.nn.Conv2d):
                net.register_forward_hook(conv_hook)
            if isinstance(net, torch.nn.Linear):
                net.register_forward_hook(linear_hook)
            if isinstance(net, torch.nn.BatchNorm2d):
                net.register_forward_hook(bn_hook)
            if isinstance(net, (torch.nn.ReLU, torch.nn.Sigmoid, torch.nn.PReLU)):
                net.register_forward_hook(relu_hook)
            if isinstance(net, (torch.nn.MaxPool2d, torch.nn.AvgPool2d)):
                net.register_forward_hook(pooling_hook)
            if isinstance(net, torch.nn.AdaptiveAvgPool2d):
                net.register_forward_hook(adaptive_pooling_hook)
            return
        for c in childrens:
            foo(c)

    foo(model)
    # One forward pass on a dummy image triggers the hooks and fills the lists.
    input = Variable(torch.rand(3, inp_h, inp_w).unsqueeze(0), requires_grad=False)
    with torch.no_grad():
        out = model(input)
    del out
    total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling))

    print('  + Number of FLOPs: %.2fG' % (total_flops / 1e9))


def print_gen_model_flops(model, latent_dim, multiply_adds=False):
    """
    Estimate the FLOPs of a generative model.
    :param model: nn.Module object.
    :param latent_dim: The dimension of the input noise (latent) vector.
    :param multiply_adds: count a multiply-add as two operations if True, one if False.
    :return:
    """
    print('Using input latent vector of dimension %d' % latent_dim)
    list_conv = []
    list_linear = []
    list_bn = []
    list_relu = []
    list_pooling = []

    def conv_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()

        kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (
            2 if multiply_adds else 1)
        bias_ops = 1 if self.bias is not None else 0

        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_height * output_width

        list_conv.append(flops)

    def linear_hook(self, input, output):
        batch_size = input[0].size(0) if input[0].dim() == 2 else 1

        weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
        bias_ops = self.bias.nelement() if self.bias is not None else 0

        flops = batch_size * (weight_ops + bias_ops)
        list_linear.append(flops)

    def bn_hook(self, input, output):
        list_bn.append(input[0].nelement())

    def relu_hook(self, input, output):
        list_relu.append(input[0].nelement())

    def pooling_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()

        k = self.kernel_size
        kernel_ops = k * k if isinstance(k, int) else k[0] * k[1]  # kernel_size may be an int or a tuple
        bias_ops = 0
        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_height * output_width

        list_pooling.append(flops)

    def adaptive_pooling_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()

        if output_height == 1 and output_width == 1:
            kernel_ops = input_height * input_width
        else:
            raise NotImplementedError('only global adaptive pooling (1x1 output) is supported')

        bias_ops = 0
        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_height * output_width

        list_pooling.append(flops)

    def foo(net):
        # Recursively register the hooks above on leaf modules.
        childrens = list(net.children())
        if not childrens:
            if isinstance(net, torch.nn.Conv2d):
                net.register_forward_hook(conv_hook)
            if isinstance(net, torch.nn.Linear):
                net.register_forward_hook(linear_hook)
            if isinstance(net, torch.nn.BatchNorm2d):
                net.register_forward_hook(bn_hook)
            if isinstance(net, (torch.nn.ReLU, torch.nn.Sigmoid, torch.nn.PReLU)):
                net.register_forward_hook(relu_hook)
            if isinstance(net, (torch.nn.MaxPool2d, torch.nn.AvgPool2d)):
                net.register_forward_hook(pooling_hook)
            if isinstance(net, torch.nn.AdaptiveAvgPool2d):
                net.register_forward_hook(adaptive_pooling_hook)
            return
        for c in childrens:
            foo(c)

    foo(model)
    # One forward pass on a sampled noise vector triggers the hooks;
    # as written this assumes the generator lives on the GPU (torch.cuda.FloatTensor).
    input = Variable(
        torch.cuda.FloatTensor(np.random.normal(0, 1, (1, latent_dim))), requires_grad=False)
    with torch.no_grad():
        out = model(input)
    del out
    total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling))

    print('  + Number of FLOPs: %.2fG' % (total_flops / 1e9))
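
A minimal usage sketch (torchvision's resnet18 is just an example model here, not part of the snippet; any nn.Module built from the supported layer types works):

import torchvision

model = torchvision.models.resnet18()

print_model_parm_nums(model)                          # parameter count in millions
print_disc_model_flops(model, inp_h=224, inp_w=224)   # FLOPs for a 224 x 224 RGB input

# print_gen_model_flops expects a model mapping a (1, latent_dim) noise vector to an
# image and, as written, a CUDA device, e.g. print_gen_model_flops(G.cuda(), 128).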