1. Common TensorFlow functions
TensorFlow turns a graph definition into operations that can execute in a distributed fashion, making full use of the available compute resources (such as CPUs and GPUs). In general you do not need to specify CPU or GPU explicitly; TensorFlow detects the devices automatically. If a GPU is detected, TensorFlow will prefer the first detected GPU for executing operations.

Parallel execution speeds up expensive computations, and TensorFlow's implementation also optimizes complex operations. Most kernels are device-specific implementations, e.g. for the GPU. Below are some of the important operations/kernels:
| Operation group | Operations |
| --- | --- |
| Maths | Add, Sub, Mul, Div, Exp, Log, Greater, Less, Equal |
| Array | Concat, Slice, Split, Constant, Rank, Shape, Shuffle |
| Matrix | MatMul, MatrixInverse, MatrixDeterminant |
| Neural Network | SoftMax, Sigmoid, ReLU, Convolution2D, MaxPool |
| Checkpointing | Save, Restore |
| Queues and synchronization | Enqueue, Dequeue, MutexAcquire, MutexRelease |
| Flow control | Merge, Switch, Enter, Leave, NextIteration |
TensorFlow's arithmetic operations are as follows:
| Operation | Description |
| --- | --- |
| tf.add(x, y, name=None) | Addition |
| tf.sub(x, y, name=None) | Subtraction |
| tf.mul(x, y, name=None) | Multiplication |
| tf.div(x, y, name=None) | Division |
| tf.mod(x, y, name=None) | Modulo |
| tf.abs(x, name=None) | Absolute value |
| tf.neg(x, name=None) | Negation (y = -x) |
| tf.sign(x, name=None) | Sign: y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0 |
| tf.inv(x, name=None) | Reciprocal (y = 1/x) |
| tf.square(x, name=None) | Square (y = x * x = x^2) |
| tf.round(x, name=None) | Round to the nearest integer # 'a' is [0.9, 2.5, 2.3, -4.4] tf.round(a) ==> [1.0, 3.0, 2.0, -4.0] |
| tf.sqrt(x, name=None) | Square root (y = \sqrt{x} = x^{1/2}) |
| tf.pow(x, y, name=None) | Element-wise power # tensor 'x' is [[2, 2], [3, 3]] # tensor 'y' is [[8, 16], [2, 3]] tf.pow(x, y) ==> [[256, 65536], [9, 27]] |
| tf.exp(x, name=None) | e raised to the power of x |
| tf.log(x, name=None) | Natural logarithm, y = ln(x) |
| tf.maximum(x, y, name=None) | Element-wise maximum (x > y ? x : y) |
| tf.minimum(x, y, name=None) | Element-wise minimum (x < y ? x : y) |
| tf.cos(x, name=None) | Cosine |
| tf.sin(x, name=None) | Sine |
| tf.tan(x, name=None) | Tangent |
| tf.atan(x, name=None) | Arctangent |
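A minimal sketch of a few of these ops in graph mode, assuming the pre-1.0 TensorFlow API this table documents (with tf.Session available):

```python
import tensorflow as tf

x = tf.constant([4.0, 9.0, -16.0])
y = tf.constant([2.0, 3.0, 4.0])

total = tf.add(x, y)     # element-wise sum: [6.0, 12.0, -12.0]
mag = tf.abs(x)          # absolute value: [4.0, 9.0, 16.0]
root = tf.sqrt(mag)      # square root of the absolute values: [2.0, 3.0, 4.0]
power = tf.pow(y, 2.0)   # element-wise power: [4.0, 9.0, 16.0]

with tf.Session() as sess:
    print(sess.run([total, mag, root, power]))
```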
Tensor Transformations
Casting (data type conversion)
| Operation | Description |
| --- | --- |
| tf.string_to_number(string_tensor, out_type=None, name=None) | Converts a string to a number |
| tf.to_double(x, name='ToDouble') | Casts to 64-bit float (float64) |
| tf.to_float(x, name='ToFloat') | Casts to 32-bit float (float32) |
| tf.to_int32(x, name='ToInt32') | Casts to 32-bit integer (int32) |
| tf.to_int64(x, name='ToInt64') | Casts to 64-bit integer (int64) |
| tf.cast(x, dtype, name=None) | Casts x (or x.values) to dtype # tensor 'a' is [1.8, 2.2], dtype=tf.float32 tf.cast(a, tf.int32) ==> [1, 2] # dtype=tf.int32 |
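For example, casting a float tensor to int32 truncates it (a minimal sketch, same pre-1.0 graph-mode assumptions as above):

```python
import tensorflow as tf

a = tf.constant([1.8, 2.2], dtype=tf.float32)
b = tf.cast(a, tf.int32)  # truncates toward zero: [1, 2]
c = tf.to_float(b)        # back to float32: [1.0, 2.0]

with tf.Session() as sess:
    print(sess.run([b, c]))
```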
Shapes and Shaping
| Operation | Description |
| --- | --- |
| tf.shape(input, name=None) | Returns the shape of the input # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] shape(t) ==> [2, 2, 3] |
| tf.size(input, name=None) | Returns the number of elements # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] size(t) ==> 12 |
| tf.rank(input, name=None) | Returns the rank of the tensor. Note: this is not the rank of a matrix; a tensor's rank is the number of indices needed to uniquely select each element, also called its "order", "degree", or "ndims" # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] # shape of tensor 't' is [2, 2, 3] rank(t) ==> 3 |
| tf.reshape(tensor, shape, name=None) | Reshapes a tensor # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] # tensor 't' has shape [9] reshape(t, [3, 3]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9]] # if one element of shape is -1, that dimension's size is inferred so the total size is preserved # for a tensor 't' of 18 elements, -1 is inferred to be 9: reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 5, 5, 5, 6, 6, 6]] |
| tf.expand_dims(input, dim, name=None) | Inserts a dimension of size 1 into a tensor; the op requires -1 - input.dims() <= dim <= input.dims() # 't' is a tensor of shape [2] shape(expand_dims(t, 0)) ==> [1, 2] shape(expand_dims(t, 1)) ==> [2, 1] shape(expand_dims(t, -1)) ==> [2, 1] |
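A short sketch combining tf.shape, tf.reshape, tf.rank, and tf.expand_dims (same graph-mode assumptions as above):

```python
import tensorflow as tf

t = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9])  # shape [9]

r = tf.reshape(t, [3, 3])  # shape [3, 3]
e = tf.expand_dims(r, 0)   # shape [1, 3, 3]

with tf.Session() as sess:
    print(sess.run(tf.shape(t)))  # [9]
    print(sess.run(tf.rank(r)))   # 2
    print(sess.run(tf.shape(e)))  # [1, 3, 3]
```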
Slicing and Joining
| Operation | Description |
| --- | --- |
| tf.slice(input_, begin, size, name=None) | Extracts a slice from a tensor; if size[i] is -1, all remaining elements in that dimension are included, i.e. size[i] = input.dim_size(i) - begin[i]. The op requires 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n] # 'input' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], [[5, 5, 5], [6, 6, 6]]] tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]] tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3], [4, 4, 4]]] tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]], [[5, 5, 5]]] |
| tf.split(split_dim, num_split, value, name='split') | Splits a tensor into num_split tensors along one dimension # 'value' is a tensor with shape [5, 30] # split 'value' into 3 tensors along dimension 1 split0, split1, split2 = tf.split(1, 3, value) tf.shape(split0) ==> [5, 10] |
| tf.concat(concat_dim, values, name='concat') | Concatenates tensors along one dimension t1 = [[1, 2, 3], [4, 5, 6]] t2 = [[7, 8, 9], [10, 11, 12]] tf.concat(0, [t1, t2]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] tf.concat(1, [t1, t2]) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]] To concatenate the tensors along a new axis, use tf.concat(axis, [tf.expand_dims(t, axis) for t in tensors]), which is equivalent to tf.pack(tensors, axis=axis) |
| tf.pack(values, axis=0, name='pack') | Packs a list of rank-R tensors into one rank-(R+1) tensor # 'x' is [1, 4], 'y' is [2, 5], 'z' is [3, 6] pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # packs along the first dimension pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] Equivalent to numpy stacking: tf.pack([x, y, z]) = np.asarray([x, y, z]) |
| tf.reverse(tensor, dims, name=None) | Reverses the tensor along the specified dimensions; dims is a bool list whose size equals rank(tensor) # tensor 't' is [[[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]]] # tensor 't' shape is [1, 2, 3, 4] # 'dims' is [False, False, False, True] reverse(t, dims) ==> [[[[3, 2, 1, 0], [7, 6, 5, 4], [11, 10, 9, 8]], [[15, 14, 13, 12], [19, 18, 17, 16], [23, 22, 21, 20]]]] |
| tf.transpose(a, perm=None, name='transpose') | Permutes the dimensions of a tensor according to perm; if perm is not given, it defaults to (n-1, ..., 0) # 'x' is [[1 2 3], [4 5 6]] tf.transpose(x) ==> [[1 4], [2 5], [3 6]] # equivalently tf.transpose(x, perm=[1, 0]) ==> [[1 4], [2 5], [3 6]] |
| tf.gather(params, indices, validate_indices=None, name=None) | Gathers the slices of params indicated by indices |
| tf.one_hot(indices, depth, on_value=None, off_value=None, axis=None, dtype=None, name=None) | Returns a one-hot tensor indices = [0, 2, -1, 1] depth = 3 on_value = 5.0 off_value = 0.0 axis = -1 # then the output has shape [4 x 3]: output = [5.0 0.0 0.0] // one_hot(0) [0.0 0.0 5.0] // one_hot(2) [0.0 0.0 0.0] // one_hot(-1) [0.0 5.0 0.0] // one_hot(1) |
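A sketch of slicing and joining together, assuming the old argument order shown in the table (in TF >= 1.0, tf.concat and tf.split take the axis in a different position and tf.pack was renamed tf.stack):

```python
import tensorflow as tf

t1 = tf.constant([[1, 2, 3], [4, 5, 6]])
t2 = tf.constant([[7, 8, 9], [10, 11, 12]])

joined = tf.concat(1, [t1, t2])          # shape [2, 6]; old arg order (concat_dim first)
part = tf.slice(joined, [0, 1], [2, 2])  # begin at row 0 / col 1, take a [2, 2] block
left, right = tf.split(1, 2, joined)     # two [2, 3] tensors along dimension 1
stacked = tf.pack([t1, t2])              # shape [2, 2, 3]

with tf.Session() as sess:
    print(sess.run([joined, part, left, right, stacked]))
```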
Matrix operations
| Operation | Description |
| --- | --- |
| tf.diag(diagonal, name=None) | Returns a diagonal tensor with the given diagonal values # 'diagonal' is [1, 2, 3, 4] tf.diag(diagonal) ==> [[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]] |
| tf.diag_part(input, name=None) | The inverse of the above: extracts the diagonal of a tensor |
| tf.trace(x, name=None) | Computes the trace of a 2-D tensor, i.e. the sum of its diagonal elements |
| tf.transpose(a, perm=None, name='transpose') | Permutes the dimensions of a tensor according to perm (see the Slicing and Joining table above) |
| tf.matmul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None) | Matrix multiplication |
| tf.matrix_determinant(input, name=None) | Returns the determinant of a square matrix |
| tf.matrix_inverse(input, adjoint=None, name=None) | Computes the inverse of a square matrix; if adjoint is True, the inverse of the conjugate transpose is computed |
| tf.cholesky(input, name=None) | Cholesky decomposition of a square matrix: factors a symmetric positive-definite matrix A into the product of a lower-triangular matrix L and its transpose, A = LL^T |
| tf.matrix_solve(matrix, rhs, adjoint=None, name=None) | Solves systems of linear equations; matrix is square with shape [M, M], rhs has shape [M, K], and the output has shape [M, K] |
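A minimal sketch checking that a matrix times its inverse gives the identity (same graph-mode assumptions):

```python
import tensorflow as tf

a = tf.constant([[4.0, 1.0],
                 [2.0, 3.0]])

det = tf.matrix_determinant(a)  # 4*3 - 1*2 = 10.0
inv = tf.matrix_inverse(a)      # inverse of the 2x2 matrix
eye = tf.matmul(a, inv)         # a times its inverse, approximately the identity

with tf.Session() as sess:
    print(sess.run([det, inv, eye]))
```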
Complex number operations
| Operation | Description |
| --- | --- |
| tf.complex(real, imag, name=None) | Combines two real tensors into a complex tensor # tensor 'real' is [2.25, 3.25] # tensor 'imag' is [4.75, 5.75] tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] |
| tf.complex_abs(x, name=None) | Computes the absolute value (magnitude) of a complex tensor # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]] tf.complex_abs(x) ==> [5.25594902, 6.60492229] |
| tf.conj(input, name=None) | Computes the complex conjugate |
| tf.imag(input, name=None) tf.real(input, name=None) | Extract the imaginary and real parts of a complex tensor |
| tf.fft(input, name=None) | Computes the 1-D discrete Fourier transform; the input must be of type complex64 |
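A sketch of the complex ops, assuming a pre-1.0 build where tf.complex_abs still exists (in TF >= 1.0 it was folded into tf.abs):

```python
import tensorflow as tf

real = tf.constant([2.25, 3.25])
imag = tf.constant([4.75, 5.75])

z = tf.complex(real, imag)  # [2.25+4.75j, 3.25+5.75j]
mag = tf.complex_abs(z)     # magnitude of each element
cj = tf.conj(z)             # complex conjugate

with tf.Session() as sess:
    print(sess.run([z, mag, cj]))
```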
Reduction
| Operation | Description |
| --- | --- |
| tf.reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Computes the sum of the tensor's elements, or sums along the axes given by reduction_indices # 'x' is [[1, 1, 1] # [1, 1, 1]] tf.reduce_sum(x) ==> 6 tf.reduce_sum(x, 0) ==> [2, 2, 2] tf.reduce_sum(x, 1) ==> [3, 3] tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]] tf.reduce_sum(x, [0, 1]) ==> 6 |
| tf.reduce_prod(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Computes the product of the tensor's elements, or multiplies along the axes given by reduction_indices |
| tf.reduce_min(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Minimum of the tensor's elements |
| tf.reduce_max(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Maximum of the tensor's elements |
| tf.reduce_mean(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Mean of the tensor's elements |
| tf.reduce_all(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Logical AND across the tensor's elements # 'x' is # [[True, True] # [False, False]] tf.reduce_all(x) ==> False tf.reduce_all(x, 0) ==> [False, False] tf.reduce_all(x, 1) ==> [True, False] |
| tf.reduce_any(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Logical OR across the tensor's elements |
| tf.accumulate_n(inputs, shape=None, tensor_dtype=None, name=None) | Computes the element-wise sum of a list of tensors # tensor 'a' is [[1, 2], [3, 4]] # tensor 'b' is [[5, 0], [0, 6]] tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]] |
| tf.cumsum(x, axis=0, exclusive=False, reverse=False, name=None) | Cumulative sum tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c] tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b] tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c] tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0] |
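A sketch of the reductions on the same 2x3 tensor of ones used above (same graph-mode assumptions):

```python
import tensorflow as tf

x = tf.constant([[1, 1, 1],
                 [1, 1, 1]])

total = tf.reduce_sum(x)    # 6
cols = tf.reduce_sum(x, 0)  # [2, 2, 2]
rows = tf.reduce_sum(x, 1)  # [3, 3]
mean = tf.reduce_mean(tf.to_float(x))  # 1.0

with tf.Session() as sess:
    print(sess.run([total, cols, rows, mean]))
```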
Segmentation
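The segmentation ops aggregate rows of a tensor according to a vector of segment ids. A minimal sketch using tf.segment_sum (assuming the classic segment ops from the same TF era; segment ids must be sorted):

```python
import tensorflow as tf

data = tf.constant([[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9]])
ids = tf.constant([0, 0, 1])  # rows 0 and 1 belong to segment 0, row 2 to segment 1

seg = tf.segment_sum(data, ids)  # ==> [[5, 7, 9], [7, 8, 9]]

with tf.Session() as sess:
    print(sess.run(seg))
```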