Using TensorBoard
import tensorflow as tf
a=tf.constant(2)
b=tf.constant(3)
x=tf.add(a,b)
with tf.Session() as sess:
    writer = tf.summary.FileWriter('./graphs',sess.graph)
    print(sess.run(x))
writer.close()
5
(Using Anaconda3 as an example) Create the Python file and save it to the desktop.
python TFtest.py
Open the Anaconda Prompt, change to the desktop directory, and run the Python script.
tensorboard --logdir="./graphs" --port 6006
Run tensorboard; it reads the graphs folder created by the script above and then prints the address to visit in the console, e.g. http://WIN10-711010523:6006. Paste that address into a browser to see the graph.
Next, give the nodes explicit names.
import tensorflow as tf
a=tf.constant(2,name="a")  # use the name argument to name the node
b=tf.constant(3,name="b")
x=tf.add(a,b,name="add")
with tf.Session() as sess:
    writer = tf.summary.FileWriter('./graphs',sess.graph)
    print(sess.run(x))
5
As before, run the Python script and then tensorboard from the terminal. If the TensorBoard page from the previous run is still open in the browser, close it and open it again; otherwise the terminal shows a message like this:
Found more than one graph event per run, or there was a metagraph containing a graph_def, as well as one or more graph events. Overwriting the graph with the newest event.
Constants
import tensorflow as tf
a=tf.constant([2,2],name='a')  # create a constant
b=tf.constant([[0,1],[2,3]],name="b")
add=tf.add(a,b,name="add")
mul=tf.multiply(a,b,name="mul")
with tf.Session() as sess:
    x,y=sess.run([add,mul])
    print(x,y)
[[2 3]
[4 5]] [[0 2]
[4 6]]
An example of creating constants.
Tensors with special values
zeros=tf.zeros([2,3],tf.int32)
with tf.Session() as sess:
    result=sess.run(zeros)
    print(result)
[[0 0 0]
[0 0 0]]
Create a zeros matrix of the specified shape.
input_tensor=tf.constant([[0,1],[2,3],[4,5]],name="input_tensor")
zeros=tf.zeros_like(input_tensor)  # zeros with the same shape (and dtype) as input_tensor
with tf.Session() as sess:
    result=sess.run(zeros)
    print(result)
[[0 0]
[0 0]
[0 0]]
Create a zeros matrix with the same shape as the input tensor.
Other similar functions (a short sketch follows the list):
**ones**
- tf.ones(shape,dtype=tf.float32,name=None)
- tf.ones_like(input_tensor,dtype=None,name=None,optimize=True)
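For example, a quick sketch of the ones counterparts, mirroring the zeros examples above (the variable names here are just illustrative):
import tensorflow as tf

input_tensor=tf.constant([[0,1],[2,3],[4,5]],name="input_tensor")
ones=tf.ones([2,3],tf.int32)          # 2x3 matrix filled with 1
ones_like=tf.ones_like(input_tensor)  # ones with the same shape and dtype as input_tensor
with tf.Session() as sess:
    print(sess.run(ones))       # [[1 1 1] [1 1 1]]
    print(sess.run(ones_like))  # [[1 1] [1 1] [1 1]]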
matrix=tf.fill([2,3],8)
with tf.Session() as sess:
    result=sess.run(matrix)
    print(result)
[[8 8 8]
[8 8 8]]
Fill a matrix of the given shape with a specified value.
TensorFlow data types work seamlessly with NumPy.
import numpy as np
tf.int32==np.int32
True
matrix=tf.ones([2,2],np.float32)
with tf.Session() as sess:
    result=sess.run(matrix)
    print(result)
[[ 1. 1.]
[ 1. 1.]]
tf.Session.run(fetches)
Session.run(fetches) returns the fetched TensorFlow tensors as NumPy arrays.
my_const=tf.constant([1.0,2.0],name="my_const")
with tf.Session() as sess:
    print(sess.graph.as_graph_def())
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 2
}
}
}
}
node {
name: "Const_1"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 3
}
}
}
}
node {
name: "Add"
op: "Add"
input: "Const"
input: "Const_1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "a"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 2
}
}
}
}
node {
name: "b"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 3
}
}
}
}
node {
name: "add"
op: "Add"
input: "a"
input: "b"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "a_1"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
}
tensor_content: "\002\000\000\000\002\000\000\000"
}
}
}
}
node {
name: "b_1"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 2
}
}
tensor_content: "\000\000\000\000\001\000\000\000\002\000\000\000\003\000\000\000"
}
}
}
}
node {
name: "add_1"
op: "Add"
input: "a_1"
input: "b_1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "a_2"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
}
tensor_content: "\002\000\000\000\002\000\000\000"
}
}
}
}
node {
name: "b_2"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 2
}
}
tensor_content: "\000\000\000\000\001\000\000\000\002\000\000\000\003\000\000\000"
}
}
}
}
node {
name: "add_2"
op: "Add"
input: "a_2"
input: "b_2"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "mul"
op: "Mul"
input: "a_2"
input: "b_2"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "a_3"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
}
tensor_content: "\002\000\000\000\002\000\000\000"
}
}
}
}
node {
name: "b_3"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 2
}
}
tensor_content: "\000\000\000\000\001\000\000\000\002\000\000\000\003\000\000\000"
}
}
}
}
node {
name: "add_3"
op: "Add"
input: "a_3"
input: "b_3"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "mul_1"
op: "Mul"
input: "a_3"
input: "b_3"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "a_4"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
}
tensor_content: "\002\000\000\000\002\000\000\000"
}
}
}
}
node {
name: "b_4"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 2
}
}
tensor_content: "\000\000\000\000\001\000\000\000\002\000\000\000\003\000\000\000"
}
}
}
}
node {
name: "add_4"
op: "Add"
input: "a_4"
input: "b_4"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "mul_2"
op: "Mul"
input: "a_4"
input: "b_4"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "zeros"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 2
}
dim {
size: 3
}
}
float_val: 0.0
}
}
}
}
node {
name: "zeros_1"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 2
}
dim {
size: 3
}
}
float_val: 0.0
}
}
}
}
node {
name: "zeros_2"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 3
}
}
int_val: 0
}
}
}
}
node {
name: "input_tensor"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 3
}
dim {
size: 2
}
}
tensor_content: "\000\000\000\000\001\000\000\000\002\000\000\000\003\000\000\000\004\000\000\000\005\000\000\000"
}
}
}
}
node {
name: "zeros_like"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 3
}
dim {
size: 2
}
}
int_val: 0
}
}
}
}
node {
name: "input_tensor_1"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 3
}
dim {
size: 2
}
}
tensor_content: "\000\000\000\000\001\000\000\000\002\000\000\000\003\000\000\000\004\000\000\000\005\000\000\000"
}
}
}
}
node {
name: "zeros_like_1"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 3
}
dim {
size: 2
}
}
int_val: 0
}
}
}
}
node {
name: "input_tensor_2"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 3
}
dim {
size: 2
}
}
tensor_content: "\000\000\000\000\001\000\000\000\002\000\000\000\003\000\000\000\004\000\000\000\005\000\000\000"
}
}
}
}
node {
name: "zeros_like_2"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 3
}
dim {
size: 2
}
}
int_val: 0
}
}
}
}
node {
name: "input_tensor_3"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 3
}
dim {
size: 2
}
}
tensor_content: "\000\000\000\000\001\000\000\000\002\000\000\000\003\000\000\000\004\000\000\000\005\000\000\000"
}
}
}
}
node {
name: "zeros_like_3"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 3
}
dim {
size: 2
}
}
int_val: 0
}
}
}
}
node {
name: "Fill_1/dims"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
}
tensor_content: "\002\000\000\000\003\000\000\000"
}
}
}
}
node {
name: "Fill_1/value"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 8
}
}
}
}
node {
name: "Fill_1"
op: "Fill"
input: "Fill_1/dims"
input: "Fill_1/value"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "ones"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 2
}
dim {
size: 2
}
}
float_val: 1.0
}
}
}
}
node {
name: "my_const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 2
}
}
tensor_content: "\000\000\200?\000\000\000@"
}
}
}
}
versions {
producer: 24
}
Printing the graph def. Note that the dump contains every node defined so far (hence names like a_1, b_2, add_3), because all of the snippets above were run against the same default graph.
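If the printed graph_def should contain only a specific set of ops, one option is to build them in a separate tf.Graph instead of the default graph; a minimal sketch (my own illustration, not part of the original notes):
g = tf.Graph()
with g.as_default():
    my_const = tf.constant([1.0, 2.0], name="my_const")
print(g.as_graph_def())  # contains only the single "my_const" node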
Variables
A tf.Variable is a class, while a constant is an op.
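A few common ways to create Variables, as a short sketch (the names are just illustrative):
s = tf.Variable(2, name="scalar")                     # Variable holding a scalar
m = tf.Variable([[0, 1], [2, 3]], name="matrix")      # Variable holding a 2x2 matrix
W = tf.Variable(tf.zeros([784, 10]), name="weights")  # Variable initialized from a zeros op
# a Variable exposes ops such as initializer, value(), and assign()
print(W.initializer)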
Initializing variables
init=tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
Initialize all variables at once.
init_ab=tf.variables_initializer([a,b],name="init_ab")
with tf.Session() as sess:
    sess.run(init_ab)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-31-a05695c617e6> in <module>()
1
----> 2 with tf.Session(init_ab=tf.variables_initializer([a,b],name="init_ab")) as sess:
3 sess.run(init_ab)
~\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\ops\variables.py in variables_initializer(var_list, name)
1378 """
1379 if var_list and context.in_graph_mode():
-> 1380 return control_flow_ops.group(*[v.initializer for v in var_list], name=name)
1381 return control_flow_ops.no_op(name=name)
1382
~\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\ops\variables.py in <listcomp>(.0)
1378 """
1379 if var_list and context.in_graph_mode():
-> 1380 return control_flow_ops.group(*[v.initializer for v in var_list], name=name)
1381 return control_flow_ops.no_op(name=name)
1382
AttributeError: 'Tensor' object has no attribute 'initializer'
Initializing only a subset of variables; here it raises an error because a and b were defined earlier as constants (plain Tensors), which have no initializer, rather than as tf.Variable objects.
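For contrast, a minimal sketch (my own example) where a and b are redefined as actual tf.Variable objects, so tf.variables_initializer works as intended:
a = tf.Variable(2, name="a_var")
b = tf.Variable(3, name="b_var")
init_ab = tf.variables_initializer([a, b], name="init_ab")
with tf.Session() as sess:
    sess.run(init_ab)        # initializes only a and b
    print(sess.run([a, b]))  # >> [2, 3]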
W=tf.Variable(tf.zeros([784,10]))
with tf.Session() as sess:
    sess.run(W.initializer)
Initialize a single variable.
# a 700x10 matrix of random values drawn from a truncated normal distribution
W=tf.Variable(tf.truncated_normal([700,10]))
with tf.Session() as sess:
    sess.run(W.initializer)
    print(W)
    print(W.eval())
<tf.Variable 'Variable_2:0' shape=(700, 10) dtype=float32_ref>
[[-1.64373493 1.38142228 0.02620468 ..., 0.03384978 -1.18727875
-0.30884752]
[ 1.10493028 -0.39550069 -0.03230183 ..., 0.4909828 0.55792344
-1.59890783]
[-0.44164404 0.69113725 -0.86431575 ..., 0.27377507 -1.64605892
0.84873748]
...,
[-1.36063004 0.99020559 0.57447302 ..., -0.20169938 -0.09743395
-1.45789444]
[ 0.30184504 -0.51223069 -1.18259192 ..., -0.5938319 1.26349866
0.19031805]
[ 0.96660119 0.90686685 -1.39591968 ..., 0.13805443 0.88506061
1.22530198]]
How to assign values to a Variable
W=tf.Variable(10)
W.assign(100)
with tf.Session() as sess:
    sess.run(W.initializer)
    print(W.eval())
10
W.assign(100) does not actually assign 100 to W; it only creates an assign op. The op has to be run inside a session for the assignment to take effect.
W=tf.Variable(10)
assign_op=W.assign(100)
with tf.Session() as sess:
    sess.run(W.initializer)
    sess.run(assign_op)
    print(W.eval())
100
After the assign op is run inside the session, the assignment succeeds. Strictly speaking the variable does not need to be initialized first, because assign_op already writes a value; initialization itself is just a special case of assignment, an op that assigns the initial value.
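To see that the separate initializer run really can be skipped when an assign op is executed first, here is a small variation of the example above (my own sketch):
W = tf.Variable(10)
assign_op = W.assign(100)
with tf.Session() as sess:
    # no sess.run(W.initializer) here: the assign op writes the value itself
    sess.run(assign_op)
    print(W.eval())  # >> 100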
# another assignment pattern
my_var=tf.Variable(2,name="my_var")
my_var_times_two=my_var.assign(2*my_var)
with tf.Session() as sess:
    sess.run(my_var.initializer)
    sess.run(my_var_times_two)  # >> 4
    sess.run(my_var_times_two)  # >> 8
    sess.run(my_var_times_two)  # >> 16
# another assignment pattern
my_var=tf.Variable(10)
with tf.Session() as sess:
    sess.run(my_var.initializer)
    sess.run(my_var.assign_add(10))  # >> 20
    sess.run(my_var.assign_sub(2))   # >> 18
assign_add() and assign_sub() cannot initialize the variable for you; they depend on the current value of my_var, so the initializer has to be run first.
# each session keeps its own copy of the variable's value
W=tf.Variable(10)
sess1=tf.Session()
sess2=tf.Session()
sess1.run(W.initializer)
sess2.run(W.initializer)
print(sess1.run(W.assign_add(10)))
print(sess2.run(W.assign_sub(2)))
20
8
# each session keeps its own copy of the variable's value
W=tf.Variable(10)
sess1=tf.Session()
sess2=tf.Session()
sess1.run(W.initializer)
sess2.run(W.initializer)
print(sess1.run(W.assign_add(10)))
print(sess2.run(W.assign_sub(2)))
print(sess1.run(W.assign_add(100)))
print(sess2.run(W.assign_sub(50)))
sess1.close()
sess2.close()
20
8
120
-42
# initialize one Variable from another Variable
# W is a 700x10 random matrix
W=tf.Variable(tf.truncated_normal([700,10]))
U=tf.Variable(2*W)
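Because U's initial value depends on W, it is safer to make sure W is initialized before U reads it; a short sketch using initialized_value() (my own addition, not from the original notes):
W = tf.Variable(tf.truncated_normal([700, 10]))
# initialized_value() guarantees W is initialized before its value is used for U
U = tf.Variable(2 * W.initialized_value())
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(U.eval().shape)  # >> (700, 10)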
Session vs. InteractiveSession
sess = tf.InteractiveSession()
a=tf.constant(5.0)
b=tf.constant(6.0)
c=a*b
print(c.eval())  # with an InteractiveSession there is no need to pass a session explicitly
sess.close()
30.0
After creating a tf.InteractiveSession(), eval() and run() can be called directly without specifying a session.
Control Dependency
Used to specify the execution order of ops.
with g.control_dependencies([a,b,c]):
    d= ...
    e= ...
# d and e only run after a, b, and c have executed
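A runnable sketch of control dependencies (my own illustration; here g is simply the default graph, and the op values are made up):
import tensorflow as tf

a = tf.constant(1.0)
b = tf.constant(2.0)
c = tf.constant(3.0)
g = tf.get_default_graph()
with g.control_dependencies([a, b, c]):
    # d and e are only computed after a, b, and c have executed
    d = tf.add(a, b)
    e = tf.multiply(d, c)
with tf.Session() as sess:
    print(sess.run(e))  # >> 9.0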
Placeholders
Just like the formula f(x, y) = x*2 + y, we can write down the computation without knowing the actual values of x and y.
# create a placeholder
a=tf.placeholder(tf.float32,shape=[3])  # it is best to fix the dtype (and shape) up front
b=tf.constant([5,5,5],tf.float32)
c=a+b
with tf.Session() as sess:
    print(sess.run(c,{a:[1,2,3]}))  # feed values to the placeholder via a dict
    # each run feeds one set of values
[ 6. 7. 8.]
Overriding the value of an op
a=tf.add(2,5)
b=tf.multiply(a,3)
with tf.Session() as sess:
    # replace the value of a with 15
    replace_dict={a:15}
    print(sess.run(b,feed_dict=replace_dict))  # feed_dict overrides a, so b evaluates to 45
Lazy Loading
That is, defer creating an object until it is needed.
# normal loading: every op is defined up front
x=tf.Variable(10,name='x')
y=tf.Variable(20,name='y')
z=tf.add(x,y)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(10):
        sess.run(z)
# lazy loading
x=tf.Variable(10,name='x')
y=tf.Variable(20,name='y')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(10):
        sess.run(tf.add(x,y))  # the final add is defined here, so a new node is added on every iteration
Recommendations when an op will be run thousands of times:
- Separate the definition of ops from their computation/execution
- Use Python properties to structure functions so that each op is defined only once (see the sketch below)
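A common way to apply the second point is the "define once, reuse" property pattern: the op is created on first access and cached, so running it thousands of times does not keep adding nodes to the graph. A minimal sketch (the class and attribute names are my own):
import tensorflow as tf

class Model(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self._sum = None

    @property
    def sum(self):
        # the add op is created only on the first access, then cached
        if self._sum is None:
            self._sum = tf.add(self.x, self.y)
        return self._sum

x = tf.Variable(10, name='x')
y = tf.Variable(20, name='y')
model = Model(x, y)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(10):
        sess.run(model.sum)  # the graph contains a single Add node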