Using self in init part of a class in Python

Is there any difference between the following two ways of initializing a class in Python?

class summation: 
    def __init__(self, f, s): 
        self.first = f 
        self.second = s 
        self.summ = self.first + self.second
    .
    .
    .

class summation: 
    def __init__(self, f, s): 
        self.first = f 
        self.second = s 
        self.summ = f + s
    .
    .
    .

If there is any difference, what is it, and which version is preferable?

Edit: I intend to write an artificial neural network in Python (with PyTorch). The two snippets above are really just examples. In real code, I have seen in various sources that when a class's __init__ contains self.input = input, the other methods then use self.input, not input.

My question: what is the difference between these two approaches, and why is using self.input preferable in my case?

Example (from https://docs.dgl.ai/en/latest/tutorials/models/1_gnn/4_rgcn.html#sphx-glr-tutorials-models-1-gnn-4-rgcn-py):

import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import DGLGraph
import dgl.function as fn
from functools import partial

class RGCNLayer(nn.Module):
    def __init__(self, in_feat, out_feat, num_rels, num_bases=-1, bias=None,
                 activation=None, is_input_layer=False):
        super(RGCNLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.num_rels = num_rels
        self.num_bases = num_bases
        self.bias = bias
        self.activation = activation
        self.is_input_layer = is_input_layer

        # sanity check
        if self.num_bases <= 0 or self.num_bases > self.num_rels:
            self.num_bases = self.num_rels

        # weight bases in equation (3)
        self.weight = nn.Parameter(torch.Tensor(self.num_bases, self.in_feat,
                                                self.out_feat))
        if self.num_bases < self.num_rels:
            # linear combination coefficients in equation (3)
            self.w_comp = nn.Parameter(torch.Tensor(self.num_rels, self.num_bases))

        # add bias
        if self.bias:
            self.bias = nn.Parameter(torch.Tensor(out_feat))

        # init trainable parameters
        nn.init.xavier_uniform_(self.weight,
                                gain=nn.init.calculate_gain('relu'))
        if self.num_bases < self.num_rels:
            nn.init.xavier_uniform_(self.w_comp,
                                    gain=nn.init.calculate_gain('relu'))
        if self.bias:
            nn.init.xavier_uniform_(self.bias,
                                    gain=nn.init.calculate_gain('relu'))

    def forward(self, g):
        if self.num_bases < self.num_rels:
            # generate all weights from bases (equation (3))
            weight = self.weight.view(self.in_feat, self.num_bases, self.out_feat)
            weight = torch.matmul(self.w_comp, weight).view(self.num_rels,
                                                        self.in_feat, self.out_feat)
        else:
            weight = self.weight

        if self.is_input_layer:
            def message_func(edges):
                # for input layer, matrix multiply can be converted to be
                # an embedding lookup using source node id
                embed = weight.view(-1, self.out_feat)
                index = edges.data['rel_type'] * self.in_feat + edges.src['id']
                return {'msg': embed[index] * edges.data['norm']}
        else:
            def message_func(edges):
                w = weight[edges.data['rel_type']]
                msg = torch.bmm(edges.src['h'].unsqueeze(1), w).squeeze()
                msg = msg * edges.data['norm']
                return {'msg': msg}

        def apply_func(nodes):
            h = nodes.data['h']
            if self.bias:
                h = h + self.bias
            if self.activation:
                h = self.activation(h)
            return {'h': h}

        g.update_all(message_func, fn.sum(msg='msg', out='h'), apply_func)

At this level of detail, there is no difference between the two approaches in your case: inside __init__, self.first and self.second hold exactly the values f and s, so both lines compute the same sum. The reason self.input is used elsewhere in classes like RGCNLayer is persistence: attributes assigned to self outlive __init__ and are visible in every other method, while plain parameters like f and s disappear as soon as __init__ returns. But could the two snippets ever behave differently inside __init__ itself? They could, with some modification to the setters/getters. Later in my answer I'll show you how.
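
To make the persistence point concrete, here is a minimal sketch (the Layer class and its names are made up for illustration, not taken from your code):

class Layer:
    def __init__(self, in_feat):
        self.in_feat = in_feat   # stored on the instance: visible in every method
        scratch = in_feat * 2    # plain local name: gone once __init__ returns

    def describe(self):
        return self.in_feat      # works, because the attribute persisted
        # return scratch         # would raise NameError: scratch was local to __init__

layer = Layer(16)
print(layer.describe())
# 16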

First of all, I would prefer to use this:

class summation:
    def __init__(self, f, s):
        self.first = f
        self.second = s

    @property
    def summ(self):
        return self.first + self.second

The implementation above computes the sum on demand, so whenever you change self.first or self.second, summ is recalculated automatically, and you can still access the sum the same way as before.

s = summation(1,9)
print(s.summ)
# 10
s.first = 2
s.second = 3
print(s.summ)
# 5
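
By contrast, your first snippet computes summ once, in __init__, so the stored value silently goes stale if the inputs change afterwards. A quick sketch of that pitfall (the class is renamed summation_eager here only to avoid clashing with the property version):

class summation_eager:
    def __init__(self, f, s):
        self.first = f
        self.second = s
        self.summ = self.first + self.second  # computed once, right here

t = summation_eager(1, 9)
t.first = 2
t.second = 3
print(t.summ)
# 10  <- still the old sum; it is never recomputed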

So, how could they possibly differ?

Let's implement them as follows. In the setters, I double the input to show you how setters can affect the result; this is just a contrived example, not something you would actually write.

class summation1:
    def __init__(self, f, s):
        self.first = f
        self.second = s
        self.summ = self.first + self.second

    @property
    def first(self):
        return self.__first

    @first.setter
    def first(self, f):
        self.__first = f * 2

    @property
    def second(self):
        return self.__second

    @second.setter
    def second(self, s):
        self.__second = s * 2


class summation2:
    def __init__(self, f, s):
        self.first = f
        self.second = s
        self.summ = f + s

    @property
    def first(self):
        return self.__first

    @first.setter
    def first(self, f):
        self.__first = f * 2

    @property
    def second(self):
        return self.__second

    @second.setter
    def second(self, s):
        self.__second = s * 2

Now let's look at the output:

a = 3
b = 2
s1 = summation1(a,b)
s2 = summation2(a,b)

print(s1.summ)
# 10
print(s2.summ)
# 5
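
Tracing the two constructors by hand for a = 3, b = 2 shows where the results diverge:

# summation1.__init__(3, 2):
#   self.first = 3   -> setter stores __first  = 6
#   self.second = 2  -> setter stores __second = 4
#   self.summ = self.first + self.second -> getters return 6 + 4 = 10
#
# summation2.__init__(3, 2):
#   the setters still store 6 and 4, but
#   self.summ = f + s -> raw arguments: 3 + 2 = 5

print(s1.first, s1.second)
# 6 4
print(s2.first, s2.second)
# 6 4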

So, if you're not sure how to choose between those two, maybe the first implementation in this answer (the property-based one) is what you actually need.
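
Coming back to your PyTorch case: RGCNLayer stores in_feat, out_feat, and the rest on self precisely because forward and message_func run long after __init__ has returned, and the instance attributes are the only way those methods can still reach the values. A minimal sketch of the same pattern (TinyLayer is a made-up example, not part of the DGL tutorial):

import torch
import torch.nn as nn

class TinyLayer(nn.Module):
    def __init__(self, in_feat, out_feat):
        super().__init__()
        self.in_feat = in_feat  # kept on self: forward() needs it later
        self.weight = nn.Parameter(torch.randn(in_feat, out_feat))

    def forward(self, x):
        # The constructor argument in_feat no longer exists here;
        # only the attribute self.in_feat survives.
        assert x.shape[-1] == self.in_feat
        return x @ self.weight

layer = TinyLayer(4, 3)
out = layer(torch.randn(2, 4))
print(out.shape)
# torch.Size([2, 3])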