Meaning of the super keyword in the parent class (Python)
I don't understand the meaning of the super keyword when it is not used in a child class.
The question comes from this class, which I found in a github project I am working on (link: https://github.com/statsmodels/statsmodels/pull/2374/files).
Look, for example, at the fit method, where the line res = super(PenalizedMixin, self).fit(method=method, **kwds) appears:
"""
+Created on Sun May 10 08:23:48 2015
+
+Author: Josef Perktold
+License: BSD-3
+"""
+
+import numpy as np
+from ._penalties import SCADSmoothed
+
+class PenalizedMixin(object):
+ """Mixin class for Maximum Penalized Likelihood
+
+
+ TODO: missing **kwds or explicit keywords
+
+ TODO: do we really need `pen_weight` keyword in likelihood methods?
+
+ """
+
+ def __init__(self, *args, **kwds):
+ super(PenalizedMixin, self).__init__(*args, **kwds)
+
+ penal = kwds.pop('penal', None)
+ # I keep the following instead of adding default in pop for future changes
+ if penal is None:
+ # TODO: switch to unpenalized by default
+ self.penal = SCADSmoothed(0.1, c0=0.0001)
+ else:
+ self.penal = penal
+
+ # TODO: define pen_weight as average pen_weight? i.e. per observation
+ # I would have prefered len(self.endog) * kwds.get('pen_weight', 1)
+ # or use pen_weight_factor in signature
+ self.pen_weight = kwds.get('pen_weight', len(self.endog))
+
+ self._init_keys.extend(['penal', 'pen_weight'])
+
+
+
+ def loglike(self, params, pen_weight=None):
+ if pen_weight is None:
+ pen_weight = self.pen_weight
+
+ llf = super(PenalizedMixin, self).loglike(params)
+ if pen_weight != 0:
+ llf -= pen_weight * self.penal.func(params)
+
+ return llf
+
+
+ def loglikeobs(self, params, pen_weight=None):
+ if pen_weight is None:
+ pen_weight = self.pen_weight
+
+ llf = super(PenalizedMixin, self).loglikeobs(params)
+ nobs_llf = float(llf.shape[0])
+
+ if pen_weight != 0:
+ llf -= pen_weight / nobs_llf * self.penal.func(params)
+
+ return llf
+
+
+ def score(self, params, pen_weight=None):
+ if pen_weight is None:
+ pen_weight = self.pen_weight
+
+ sc = super(PenalizedMixin, self).score(params)
+ if pen_weight != 0:
+ sc -= pen_weight * self.penal.grad(params)
+
+ return sc
+
+
+ def scoreobs(self, params, pen_weight=None):
+ if pen_weight is None:
+ pen_weight = self.pen_weight
+
+ sc = super(PenalizedMixin, self).scoreobs(params)
+ nobs_sc = float(sc.shape[0])
+ if pen_weight != 0:
+ sc -= pen_weight / nobs_sc * self.penal.grad(params)
+
+ return sc
+
+
+ def hessian_(self, params, pen_weight=None):
+ if pen_weight is None:
+ pen_weight = self.pen_weight
+ loglike = self.loglike
+ else:
+ loglike = lambda p: self.loglike(p, pen_weight=pen_weight)
+
+ from statsmodels.tools.numdiff import approx_hess
+ return approx_hess(params, loglike)
+
+
+ def hessian(self, params, pen_weight=None):
+ if pen_weight is None:
+ pen_weight = self.pen_weight
+
+ hess = super(PenalizedMixin, self).hessian(params)
+ if pen_weight != 0:
+ h = self.penal.deriv2(params)
+ if h.ndim == 1:
+ hess -= np.diag(pen_weight * h)
+ else:
+ hess -= pen_weight * h
+
+ return hess
+
+
+ def fit(self, method=None, trim=None, **kwds):
+ # If method is None, then we choose a default method ourselves
+
+ # TODO: temporary hack, need extra fit kwds
+ # we need to rule out fit methods in a model that will not work with
+ # penalization
+ if hasattr(self, 'family'): # assume this identifies GLM
+ kwds.update({'max_start_irls' : 0})
+
+ # currently we use `bfgs` by default
+ if method is None:
+ method = 'bfgs'
+
+ if trim is None:
+ trim = False # see below infinite recursion in `fit_constrained
+
+ res = super(PenalizedMixin, self).fit(method=method, **kwds)
+
+ if trim is False:
+ # note boolean check for "is False" not evaluates to False
+ return res
+ else:
+ # TODO: make it penal function dependent
+ # temporary standin, only works for Poisson and GLM,
+ # and is computationally inefficient
+ drop_index = np.nonzero(np.abs(res.params) < 1e-4) [0]
+ keep_index = np.nonzero(np.abs(res.params) > 1e-4) [0]
+ rmat = np.eye(len(res.params))[drop_index]
+
+ # calling fit_constrained raise
+ # "RuntimeError: maximum recursion depth exceeded in __instancecheck__"
+ # fit_constrained is calling fit, recursive endless loop
+ if drop_index.any():
+ # todo : trim kwyword doesn't work, why not?
+ #res_aux = self.fit_constrained(rmat, trim=False)
+ res_aux = self._fit_zeros(keep_index, **kwds)
+ return res_aux
+ else:
+ return res
+
+
I tried to reproduce this code with a simpler example, but it doesn't work:
class A(object):
    def __init__(self):
        return

    def funz(self, x):
        print(x)

    def funz2(self, x):
        llf = super(A, self).funz2(x)
        print(x + 1)

a = A()
a.funz(3)
a.funz2(4)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/donbeo/Desktop/prova.py", line 15, in <module>
a.funz2(4)
File "/home/donbeo/Desktop/prova.py", line 10, in funz2
llf = super(A, self).funz2(x)
AttributeError: 'super' object has no attribute 'funz2'
>>>
PenalizedMixin is a child class: it is a child of object.
However, as the name suggests, it is meant to be a mixin; that is, it is intended to be used as one of the parents in a multiple-inheritance scenario. super invokes the next class in the method resolution order (MRO), which is not necessarily the parent of the class you name.
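Here is a minimal sketch (with hypothetical class names, just for illustration) of how a mixin like PenalizedMixin is meant to be combined. Note that super inside Mixin dispatches to Model, a sibling in the MRO, even though Mixin's only declared base is object:
class Model(object):  # plays the role of a statsmodels model class
    def fit(self):
        return 'Model.fit'

class Mixin(object):  # plays the role of PenalizedMixin
    def fit(self):
        # super() resolves to the next class *after Mixin in the
        # instance's MRO* (here Model), not to Mixin's base (object)
        return 'Mixin.fit -> ' + super(Mixin, self).fit()

class PenalizedModel(Mixin, Model):  # the mixin comes first among the bases
    pass

print(PenalizedModel().fit())  # Mixin.fit -> Model.fit

# Mixin().fit() on its own would raise AttributeError, exactly like the
# funz2 example below: object, the next class in that MRO, has no fit method.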
In any case, your "simpler" example fails for a different reason. The original code works because the superclass really does have an __init__ method; object has no funz2 method.
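Your example works as soon as there is a class above A in the MRO that actually defines funz2. A minimal sketch (the Base class here is hypothetical, just for illustration):
class Base(object):
    def funz2(self, x):
        print(x - 1)

class A(Base):
    def funz2(self, x):
        super(A, self).funz2(x)  # now resolves to Base.funz2
        print(x + 1)

a = A()
a.funz2(4)  # prints 3, then 5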
You should always use super, otherwise classes can get missed out, particularly in multiple-inheritance scenarios (which are unavoidable where mixin classes are used). For example:
class BaseClass(object):
    def __init__(self):
        print('BaseClass.__init__')

class MixInClass(object):
    def __init__(self):
        print('MixInClass.__init__')

class ChildClass(BaseClass, MixInClass):
    def __init__(self):
        print('ChildClass.__init__')
        super(ChildClass, self).__init__()  # -> BaseClass.__init__

if __name__ == '__main__':
    child = ChildClass()
gives:
ChildClass.__init__
BaseClass.__init__
missing out MixInClass.__init__, whereas:
class BaseClass(object):
    def __init__(self):
        print('BaseClass.__init__')
        super(BaseClass, self).__init__()  # -> MixInClass.__init__

class MixInClass(object):
    def __init__(self):
        print('MixInClass.__init__')
        super(MixInClass, self).__init__()  # -> object.__init__

class ChildClass(BaseClass, MixInClass):
    def __init__(self):
        print('ChildClass.__init__')
        super(ChildClass, self).__init__()  # -> BaseClass.__init__

if __name__ == '__main__':
    child = ChildClass()
gives:
ChildClass.__init__
BaseClass.__init__
MixInClass.__init__
ChildClass.__mro__, the "method resolution order", is the same in both cases:
(<class '__main__.ChildClass'>, <class '__main__.BaseClass'>, <class '__main__.MixInClass'>, <type 'object'>)
Both BaseClass and MixInClass inherit only from object (i.e. they are "new-style" classes), but you still need to use super to ensure that any other implementations of the method in the classes in the MRO get called. To enable this usage, object.__init__ is implemented, but it doesn't actually do very much!
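As an aside, in Python 3 the same cooperative pattern is usually written with zero-argument super(), which is equivalent to the explicit two-argument form used above:
class BaseClass:
    def __init__(self):
        print('BaseClass.__init__')
        super().__init__()  # same as super(BaseClass, self).__init__()

class MixInClass:
    def __init__(self):
        print('MixInClass.__init__')
        super().__init__()

class ChildClass(BaseClass, MixInClass):
    def __init__(self):
        print('ChildClass.__init__')
        super().__init__()

child = ChildClass()  # ChildClass.__init__, BaseClass.__init__, MixInClass.__init__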