Returning multiple values in the input function for `tf.py_func`
I am trying to set up a custom gradient using `tf.py_func` and `tf.RegisterGradient`. Specifically, I am trying to take the gradient of an eigenvalue of the graph Laplacian with respect to the adjacency matrix it is built from. I have the basic case working, where my Python function returns a single value, the eigenvalue. But for the gradient to work, I also need to return the eigenvector. Trying to return two values, however, fails with `pyfunc_1 returns 2 values, but expects to see 1 values`. How can I fix this error?

Here is the full code of my custom gradient.
import numpy as np
import networkx as nx
from scipy import sparse
import tensorflow as tf
from tensorflow.python.framework import ops

# python function to calculate the second eigen value
def calc_second_eigval(X):
    G = nx.from_numpy_matrix(X)
    degree_dict = nx.degree(G)
    degree_list = [x[1] for x in degree_dict]
    lap_matrix = sparse.diags(degree_list, 0) - nx.adjacency_matrix(G)
    eigval, eigvec = sparse.linalg.eigsh(lap_matrix, 2, sigma=0, which='LM')
    return float(eigval[0]), eigvec[:, 0]

# define custom py_func which also takes a grad op as argument:
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)

# define custom second_eigval function for tensorflow
def custom_second_eigval(x, name=None):
    with ops.op_scope([x], name, "SecondEigValGrad") as name:
        eigval = py_func(calc_second_eigval,
                         [x],
                         [tf.float64],
                         name=name,
                         grad=_SecondEigValGrad)  # <-- here's the call to the gradient
        return eigval[0]

# actual gradient:
def _SecondEigValGrad(op, grad):
    # TODO: this should involve eigen vectors
    x = op.inputs[0]
    return grad * 20 * x

X = tf.Variable(tf.random_normal([200, 200], dtype=tf.float64))

second_eigval = custom_second_eigval(X)

optimizer = tf.train.AdamOptimizer(0.01)
update = tf.contrib.slim.learning.create_train_op(second_eigval, optimizer, summarize_gradients=True)

with tf.Session() as sess:
    tf.initialize_all_variables().run()
    print(update.eval())
Your `Tout` must be `(tf.float64, tf.float64)` instead of `[tf.float64]`:

eigval = py_func(calc_second_eigval,
                 [x],
                 (tf.float64, tf.float64),
                 name=name,
                 grad=_SecondEigValGrad)
Here is a working demo:

import tensorflow as tf

# Function in python
def dummy(x):
    return [x, x]

print(dummy([1.0, 2.0]))

tf_fun = tf.py_func(dummy, [[1.0, 2.0]], (tf.float32, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf_fun))
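Once `Tout` declares two outputs, the `py_func` call returns two tensors, and the registered gradient function receives one incoming gradient per output; the eigenvector is then also reachable inside the gradient via `op.outputs[1]`. Below is a minimal sketch of how the question's `custom_second_eigval` and `_SecondEigValGrad` could be adapted. It reuses the `py_func` wrapper and `calc_second_eigval` defined above, the actual derivative is left as a placeholder (as in the question's TODO), and the `set_shape` call is an optional assumption, not part of the original code.

# Sketch only: adapt the question's code to the two-output Tout.
# Assumes the py_func wrapper and calc_second_eigval from above are in scope.

# The gradient function now receives one incoming gradient per declared output.
def _SecondEigValGrad(op, grad_eigval, grad_eigvec):
    x = op.inputs[0]         # adjacency matrix fed to py_func
    eigvec = op.outputs[1]   # eigenvector tensor produced by the op
    # TODO: use eigvec to build the real derivative of the eigenvalue w.r.t. x;
    # the placeholder below just mirrors the original question.
    return grad_eigval * 20 * x

def custom_second_eigval(x, name=None):
    with ops.op_scope([x], name, "SecondEigValGrad") as name:
        eigval, eigvec = py_func(calc_second_eigval,
                                 [x],
                                 (tf.float64, tf.float64),  # two outputs now
                                 name=name,
                                 grad=_SecondEigValGrad)
        # py_func outputs have unknown static shape; declaring the scalar shape
        # here is optional but can help downstream ops such as the optimizer.
        eigval.set_shape([])
        return eigval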