- A simple linear regression model
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Randomly generate 1000 points scattered around the line y = 0.1x + 0.3
num_point = 1000
vectors_set = []
for i in range(num_point):
    x1 = np.random.normal(0.0, 0.55)                    # x values
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)   # y values with noise
    vectors_set.append([x1, y1])

# Split into samples
x_data = [v[0] for v in vectors_set]
y_data = [v[1] for v in vectors_set]
plt.scatter(x_data, y_data, c='r')  # scatter plot of the samples

# 1-D variable W, initialized with a random value in [-1, 1]
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='W')
# 1-D variable b, initialized to 0
b = tf.Variable(tf.zeros([1]), name='b')
# Predicted value y
y = W * x_data + b

# Loss: mean squared error between the prediction y and the true values y_data
loss = tf.reduce_mean(tf.square(y - y_data), name='loss')
# Use gradient descent to optimize the parameters
optimizer = tf.train.GradientDescentOptimizer(0.5)
# Training minimizes this loss
train = optimizer.minimize(loss, name='train')

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

# Initial values of W and b
print('W = ', sess.run(W), 'b = ', sess.run(b), 'loss = ', sess.run(loss))

# Run 20 training steps
for step in range(20):
    sess.run(train)
    # Print W and b as they are trained
    print('W = ', sess.run(W), 'b = ', sess.run(b), 'loss = ', sess.run(loss))

# Plot the fitted line over the samples
plt.scatter(x_data, y_data, c='r')
plt.plot(x_data, sess.run(W) * x_data + sess.run(b))
plt.show()
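As a quick sanity check (not part of the original script; the names slope and intercept below are illustrative), the same samples can also be fit in closed form with NumPy and compared against the values learned by gradient descent. Both should land close to the generating parameters 0.1 and 0.3. The snippet assumes it runs at the end of the script above, so x_data and y_data are already defined.

# Hypothetical check: closed-form least-squares fit of the same samples.
slope, intercept = np.polyfit(np.asarray(x_data), np.asarray(y_data), 1)
print('least squares: W =', slope, ' b =', intercept)   # expect roughly 0.1 and 0.3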
- Iterative training of a simple logistic regression model
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

tf.logging.set_verbosity(tf.logging.ERROR)

# Load the MNIST data set
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels
print('mnist loaded...')
print(trainimg.shape)
print(trainlabel.shape)
print(testimg.shape)
print(testlabel.shape)
print(trainimg)
print(trainlabel[0])

# Placeholders; None leaves the batch dimension unspecified
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float', [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Logistic (softmax) regression model
actv = tf.nn.softmax(tf.matmul(x, W) + b)
# Cost function: cross-entropy
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(actv), reduction_indices=1))
# Optimize with gradient descent
learning_rate = 0.01
optm = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Prediction: the index of the largest value in each row
pred = tf.equal(tf.argmax(actv, 1), tf.argmax(y, 1))
# Accuracy
accr = tf.reduce_mean(tf.cast(pred, 'float'))
# Initialization
init = tf.global_variables_initializer()

training_epochs = 50   # number of epochs
batch_size = 100       # samples per mini-batch
display_step = 5

sess = tf.Session()
sess.run(init)

# Mini-batch training
for epoch in range(training_epochs):
    avg_cost = 0.
    num_batch = int(mnist.train.num_examples / batch_size)
    for i in range(num_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optm, feed_dict={x: batch_xs, y: batch_ys})   # one optimization step
        feeds = {x: batch_xs, y: batch_ys}
        avg_cost += sess.run(cost, feed_dict=feeds) / num_batch   # accumulate the loss
    if epoch % display_step == 0:
        feeds_train = {x: batch_xs, y: batch_ys}
        feeds_test = {x: mnist.test.images, y: mnist.test.labels}
        train_acc = sess.run(accr, feed_dict=feeds_train)
        test_acc = sess.run(accr, feed_dict=feeds_test)
        print('Epoch: %03d/%03d cost: %.9f train_acc: %.3f test_acc: %.3f'
              % (epoch, training_epochs, avg_cost, train_acc, test_acc))
print('DONE')
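One caveat with the hand-written cross-entropy above: tf.log(actv) can hit log(0) and turn the loss into NaN when the softmax saturates. A common, numerically safer alternative (a sketch against the same graph, not part of the original; logits and stable_cost are illustrative names) computes the loss from the pre-softmax scores instead:

# Sketch: stabler cross-entropy computed from the raw logits instead of tf.log(actv).
logits = tf.matmul(x, W) + b
stable_cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
# The optimizer could then minimize stable_cost in place of cost.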
- A simple convolutional neural network
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

tf.logging.set_verbosity(tf.logging.ERROR)  # show errors only

mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels
print('MNIST Ready...')

n_input = 784    # number of input pixels (28*28)
n_output = 10    # number of output classes

# Weight parameters
weights = {
    # First convolutional layer: 3x3 filters, input depth 1, 64 feature maps
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)),
    # Second convolutional layer: 3x3 filters, input depth 64, output depth 128
    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.1)),
    # First fully connected layer: 28*28*1 -> 14*14*64 -> 7*7*128, flattened to a 1024-dim vector
    'wd1': tf.Variable(tf.random_normal([7*7*128, 1024], stddev=0.1)),
    # Second fully connected layer: 1024-dim vector -> n_output = 10 classes
    'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=0.1))
}
# Bias parameters
biases = {
    'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)),
    'bc2': tf.Variable(tf.random_normal([128], stddev=0.1)),
    'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),
    'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1)),
}

# Convolution and pooling
def conv_basic(_input, _w, _b, _keepratio):
    # Reshape the flat input into the NHWC format TensorFlow expects
    _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])
    # First convolutional layer; strides usually only change the height and width dimensions
    _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')
    # _mean, _var = tf.nn.moments(_conv1, [0, 1, 2])
    # _conv1 = tf.nn.batch_normalization(_conv1, _mean, _var, 0, 1, 0.0001)
    _conv1 = tf.nn.relu(tf.nn.bias_add(_conv1, _b['bc1']))
    _pool1 = tf.nn.max_pool(_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Dropout: randomly drop some nodes, keep the rest
    _pool_dr1 = tf.nn.dropout(_pool1, _keepratio)
    # Second convolutional layer
    _conv2 = tf.nn.conv2d(_pool_dr1, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME')
    # _mean, _var = tf.nn.moments(_conv2, [0, 1, 2])
    # _conv2 = tf.nn.batch_normalization(_conv2, _mean, _var, 0, 1, 0.0001)
    _conv2 = tf.nn.relu(tf.nn.bias_add(_conv2, _b['bc2']))
    _pool2 = tf.nn.max_pool(_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    _pool_dr2 = tf.nn.dropout(_pool2, _keepratio)
    # Vectorize: flatten the tensor for the fully connected layers
    _dense1 = tf.reshape(_pool_dr2, [-1, _w['wd1'].get_shape().as_list()[0]])
    # First fully connected layer
    _fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
    _fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
    # Second fully connected layer
    _out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
    out = {
        'input_r': _input_r, 'conv1': _conv1, 'pool1': _pool1, 'pool_dr1': _pool_dr1,
        'conv2': _conv2, 'pool2': _pool2, 'pool_dr2': _pool_dr2, 'dense1': _dense1,
        'fc1': _fc1, 'fc_dr1': _fc_dr1, 'out': _out
    }
    return out

print('CNN Ready...')

# Placeholders for x, y and the dropout keep probability
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_output])
keepratio = tf.placeholder(tf.float32)

_pred = conv_basic(x, weights, biases, keepratio)['out']
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred, labels=y))
optm = tf.train.AdadeltaOptimizer(learning_rate=0.001).minimize(cost)
_corr = tf.equal(tf.argmax(_pred, 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(_corr, tf.float32))

# Initialize variables
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
print('Graph Ready...')

training_epochs = 15   # number of epochs
batch_size = 16        # samples per mini-batch
display_step = 1

# Parameter optimization
for epoch in range(training_epochs):
    avg_cost = 0.
    # total_batch = int(mnist.train.num_examples / batch_size)
    total_batch = 10
    # Loop over the mini-batches
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        # Train on the mini-batch
        sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keepratio: 0.7})
        # Accumulate the average loss
        avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.}) / total_batch
    # Display a log line every display_step epochs
    if epoch % display_step == 0:
        print('Epoch: %03d/%03d cost: %.9f' % (epoch, training_epochs, avg_cost))
        train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.})
        print('Training Accuracy: %.3f' % (train_acc))
        # test_acc = sess.run(accr, feed_dict={x: mnist.test.images, y: mnist.test.labels, keepratio: 1.})
        # print('Test Accuracy: %.3f' % (test_acc))
print('Optimization Finished...')
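Where the 7*7*128 input size of 'wd1' comes from: both convolutions use 'SAME' padding with stride 1, so they keep the 28x28 spatial size, while each 2x2 max pool halves it (28 -> 14 -> 7) and the channel depth grows to 128, giving 7*7*128 = 6272 features after flattening. A small sketch to confirm this by printing the intermediate shapes (it only builds graph nodes, no session is needed, and it assumes the placeholders above are already defined):

# Sketch: inspect the intermediate tensor shapes produced by conv_basic.
layers = conv_basic(x, weights, biases, keepratio)
print(layers['pool1'].get_shape())    # (?, 14, 14, 64)
print(layers['pool2'].get_shape())    # (?, 7, 7, 128)
print(layers['dense1'].get_shape())   # (?, 6272), i.e. 7*7*128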
- A simple neural network model
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

tf.logging.set_verbosity(tf.logging.ERROR)  # show errors only
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

# Network topology
n_hidden_1 = 256   # number of neurons in the first hidden layer
n_hidden_2 = 128   # number of neurons in the second hidden layer
n_input = 784      # number of input pixels
n_classes = 10     # number of output classes

# Inputs and outputs
x = tf.placeholder('float', [None, n_input])
y = tf.placeholder('float', [None, n_classes])

# Parameter initialization
stddev = 0.1
# Weights
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)),
    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=stddev))
}
# Biases
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
print('Network Ready...')

def multilayer_perceptron(_X, _weights, _biases):
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2']))
    return tf.matmul(layer_2, _weights['out']) + _biases['out']

# Prediction
pred = multilayer_perceptron(x, weights, biases)

# Loss and optimizer
# Loss function (two equivalent ways)
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y))
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
# Gradient descent
optm = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))   # correct predictions
accr = tf.reduce_mean(tf.cast(corr, 'float'))          # accuracy
# Initialization
init = tf.global_variables_initializer()
print('Function Ready...')

training_epochs = 20   # number of epochs
batch_size = 100       # samples per mini-batch
display_step = 4

sess = tf.Session()
sess.run(init)

# Optimization
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds)
        avg_cost += sess.run(cost, feed_dict=feeds)
    avg_cost = avg_cost / total_batch
    if (epoch + 1) % display_step == 0:
        print('Epoch: %03d/%03d cost: %.9f' % (epoch, training_epochs, avg_cost))
        feeds = {x: batch_xs, y: batch_ys}
        train_acc = sess.run(accr, feed_dict=feeds)
        print('Train Accuracy: %.3f' % (train_acc))
        feeds = {x: mnist.test.images, y: mnist.test.labels}
        test_acc = sess.run(accr, feed_dict=feeds)
        print('Test Accuracy: %.3f' % (test_acc))
print('Optimization Finished...')
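For comparison only (not part of the original TF 1.x script, and assuming a TensorFlow version where tf.keras is available), the same 784-256-128-10 architecture can be written much more compactly in Keras form; the softmax is folded into the loss via from_logits=True, matching softmax_cross_entropy_with_logits above:

# Sketch: the same two-hidden-layer network expressed with tf.keras.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation='sigmoid', input_shape=(784,)),
    tf.keras.layers.Dense(128, activation='sigmoid'),
    tf.keras.layers.Dense(10)  # linear output; softmax is applied inside the loss
])
model.compile(optimizer=tf.keras.optimizers.SGD(0.001),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# model.fit(mnist.train.images, mnist.train.labels, batch_size=100, epochs=20) would train it.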