Inception v1 network

# -*- coding: utf-8 -*-
# References:
# http://ju.outofmemory.cn/entry/344970      (tf.layers)
# https://my.oschina.net/u/876354/blog/1637819
# https://www.zhihu.com/question/49039504

import os
import tensorflow as tf
import 數(shù)據(jù)處理_hss as data_hss    # local data-preprocessing helper module
import shutil
import numpy as np
# import 模型2sxc as md1
import time

def conv1d_relu(X,filters,kernel_size,strides):
    # 'same'-padded 1-D convolution followed by a ReLU activation
    X_change = tf.layers.conv1d(X, filters, kernel_size, strides, 'same', use_bias=True, activation=None)
    X_change = tf.nn.relu(X_change)

    return X_change
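
# For reference, a rough tf.keras equivalent of the helper above (an aside,
# assuming tf.keras is available; this TF1-style script does not use it):
# def conv1d_relu_keras(X, filters, kernel_size, strides):
#     return tf.keras.layers.Conv1D(filters, kernel_size, strides,
#                                   padding='same', activation='relu')(X)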


if __name__ == '__main__':

    start = time.time()
    file_1 = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\N'    # data folders, one per category
    file_2 = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\S'
    file_3 = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\T'
    file_4 = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\V'
    file_5 = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\X'
    file_NO_X = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\非X'

    data_all_1,label_all_1 = data_hss.data_analysis(file_NO_X,label_def = [0,1],proportion = 800)    # labels are defined up front
    data_all_2,label_all_2 = data_hss.data_analysis(file_5,label_def = [1,0],proportion = 50)

    data_all = data_all_1 + data_all_2
    label_all = label_all_1 + label_all_2
    print("data_all = ",len(data_all))
    print("label_all = ",len(label_all))

    data_train,label_train,data_test,label_test = data_hss.Dataset_partition(data_all,label_all)
    print(data_train.shape,label_train.shape,data_test.shape,label_test.shape)



    sess = tf.InteractiveSession()

    input_data = tf.placeholder(tf.float32, [None, 75])   # 75-point 1-D segments
    input_label = tf.placeholder(tf.float32, [None, 2])   # one-hot labels for 2 classes
    inputdata = tf.reshape(input_data,[-1,75,1])          # conv1d expects [batch, length, channels]

    conv1_7x7 = conv1d_relu(inputdata, 64, 7, 2)
    pool1_3x3 = tf.layers.max_pooling1d(conv1_7x7, 3, 2, padding='same')

    # Local Response Normalization (LRN) sets up competition among nearby
    # neuron activations: relatively large responses are amplified and weaker
    # ones suppressed, which can improve the model's generalization.
    # pool1_3x3 = tf.nn.local_response_normalization(pool1_3x3)   # image (4-D) inputs
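    # Note: tf.nn.local_response_normalization only accepts 4-D NHWC tensors,
    # so applying it to this 1-D pipeline would need a dims round-trip, e.g.
    # (a sketch, left disabled):
    # pool1_3x3 = tf.squeeze(tf.nn.local_response_normalization(
    #     tf.expand_dims(pool1_3x3, axis=1)), axis=1)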
    conv2_3x3_reduce = conv1d_relu(pool1_3x3, 64, 1, 1)
    conv2_3x3 = conv1d_relu(conv2_3x3_reduce, 192, 3, 1)
    # conv2_3x3 = tf.nn.local_response_normalization(conv2_3x3)
    pool2_3x3 = tf.layers.max_pooling1d(conv2_3x3, 3, 2, padding='same')

    # 3a ###########
    inception_3a_1_1 = conv1d_relu(pool2_3x3, 64, 1, 1)
    inception_3a_3_3_reduce = conv1d_relu(pool2_3x3, 96, 1, 1)
    inception_3a_3_3 = conv1d_relu(inception_3a_3_3_reduce, 128, 3, 1)
    inception_3a_5_5_reduce = conv1d_relu(pool2_3x3, 16, 1, 1)
    inception_3a_5_5 = conv1d_relu(inception_3a_5_5_reduce, 32, 5, 1)
    inception_3a_pool = tf.layers.max_pooling1d(pool2_3x3, 3, 1, padding='same')
    inception_3a_pool_1_1 = conv1d_relu(inception_3a_pool, 32, 1, 1)

    inception_3a_output = tf.concat([inception_3a_1_1,inception_3a_3_3,inception_3a_5_5,inception_3a_pool_1_1],axis=2)
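
    # The four-branch pattern above (1x1, 1x1->3x3, 1x1->5x5, pool->1x1,
    # concatenated along the channel axis) repeats in every module below.
    # A sketch of a helper that would factor out the duplication; left unused
    # so the explicit layer-by-layer layout is preserved:
    def inception_module(x, f1, f3r, f3, f5r, f5, fp):
        b1 = conv1d_relu(x, f1, 1, 1)
        b3 = conv1d_relu(conv1d_relu(x, f3r, 1, 1), f3, 3, 1)
        b5 = conv1d_relu(conv1d_relu(x, f5r, 1, 1), f5, 5, 1)
        bp = conv1d_relu(tf.layers.max_pooling1d(x, 3, 1, padding='same'), fp, 1, 1)
        return tf.concat([b1, b3, b5, bp], axis=2)
    # e.g. the 3a module above equals inception_module(pool2_3x3, 64, 96, 128, 16, 32, 32)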
    # 3b ###########
    inception_3b_1_1 = conv1d_relu(inception_3a_output, 128, 1, 1)
    inception_3b_3_3_reduce = conv1d_relu(inception_3a_output, 128, 1, 1)
    inception_3b_3_3 = conv1d_relu(inception_3b_3_3_reduce, 192, 3, 1)
    inception_3b_5_5_reduce = conv1d_relu(inception_3a_output, 32, 1, 1)
    inception_3b_5_5 = conv1d_relu(inception_3b_5_5_reduce, 96, 5, 1)
    inception_3b_pool = tf.layers.max_pooling1d(inception_3a_output, 3, 1, padding='same')
    inception_3b_pool_1_1 = conv1d_relu(inception_3b_pool, 64, 1, 1)
    inception_3b_output = tf.concat([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5,inception_3b_pool_1_1],axis=2)
    pool3_3_3 = tf.layers.max_pooling1d(inception_3b_output, 3, 2, padding='same')

    # 4a ###########
    inception_4a_1_1 = conv1d_relu(pool3_3_3, 192, 1, 1)
    inception_4a_3_3_reduce = conv1d_relu(pool3_3_3, 96, 1, 1)
    inception_4a_3_3 = conv1d_relu(inception_4a_3_3_reduce, 208, 3, 1)
    inception_4a_5_5_reduce = conv1d_relu(pool3_3_3, 16, 1, 1)
    inception_4a_5_5 = conv1d_relu(inception_4a_5_5_reduce, 48, 5, 1)
    inception_4a_pool = tf.layers.max_pooling1d(pool3_3_3, 3, 1, padding='same')
    inception_4a_pool_1_1 = conv1d_relu(inception_4a_pool, 64, 1, 1)
    inception_4a_output = tf.concat([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1],axis=2)
    # 4b ###########
    inception_4b_1_1 = conv1d_relu(inception_4a_output, 160, 1, 1)
    inception_4b_3_3_reduce = conv1d_relu(inception_4a_output, 112, 1, 1)
    inception_4b_3_3 = conv1d_relu(inception_4b_3_3_reduce, 224, 3, 1)
    inception_4b_5_5_reduce = conv1d_relu(inception_4a_output, 24, 1, 1)
    inception_4b_5_5 = conv1d_relu(inception_4b_5_5_reduce, 64, 5, 1)
    inception_4b_pool = tf.layers.max_pooling1d(inception_4a_output, 3, 1, padding='same')
    inception_4b_pool_1_1 = conv1d_relu(inception_4b_pool, 64, 1, 1)
    inception_4b_output = tf.concat([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5,inception_4b_pool_1_1],axis=2)
    # 4c ###########
    inception_4c_1_1 = conv1d_relu(inception_4b_output, 128, 1, 1)
    inception_4c_3_3_reduce = conv1d_relu(inception_4b_output, 128, 1, 1)
    inception_4c_3_3 = conv1d_relu(inception_4c_3_3_reduce, 256, 3, 1)
    inception_4c_5_5_reduce = conv1d_relu(inception_4b_output, 24, 1, 1)
    inception_4c_5_5 = conv1d_relu(inception_4c_5_5_reduce, 64, 5, 1)
    inception_4c_pool = tf.layers.max_pooling1d(inception_4b_output, 3, 1, padding='same')
    inception_4c_pool_1_1 = conv1d_relu(inception_4c_pool, 64, 1, 1)
    inception_4c_output = tf.concat([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1],axis=2)
    # 4d ###########
    inception_4d_1_1 = conv1d_relu(inception_4c_output, 112, 1, 1)
    inception_4d_3_3_reduce = conv1d_relu(inception_4c_output, 144, 1, 1)
    inception_4d_3_3 = conv1d_relu(inception_4d_3_3_reduce, 288, 3, 1)
    inception_4d_5_5_reduce = conv1d_relu(inception_4c_output, 32, 1, 1)
    inception_4d_5_5 = conv1d_relu(inception_4d_5_5_reduce, 64, 5, 1)
    inception_4d_pool = tf.layers.max_pooling1d(inception_4c_output, 3, 1, padding='same')
    inception_4d_pool_1_1 = conv1d_relu(inception_4d_pool, 64, 1, 1)
    inception_4d_output = tf.concat([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1],axis=2)
    # 4e ###########
    inception_4e_1_1 = conv1d_relu(inception_4d_output, 256, 1, 1)
    inception_4e_3_3_reduce = conv1d_relu(inception_4d_output, 160, 1, 1)
    inception_4e_3_3 = conv1d_relu(inception_4e_3_3_reduce, 320, 3, 1)
    inception_4e_5_5_reduce = conv1d_relu(inception_4d_output, 32, 1, 1)
    inception_4e_5_5 = conv1d_relu(inception_4e_5_5_reduce, 128, 5, 1)
    inception_4e_pool = tf.layers.max_pooling1d(inception_4d_output, 3, 1, padding='same')
    inception_4e_pool_1_1 = conv1d_relu(inception_4e_pool, 128, 1, 1)
    inception_4e_output = tf.concat([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=2)
    pool4_3_3 = tf.layers.max_pooling1d(inception_4e_output, 3, 2, padding='same')
    # 5a ###########
    inception_5a_1_1 = conv1d_relu(pool4_3_3, 256, 1, 1)
    inception_5a_3_3_reduce = conv1d_relu(pool4_3_3, 160, 1, 1)
    inception_5a_3_3 = conv1d_relu(inception_5a_3_3_reduce, 320, 3, 1)
    inception_5a_5_5_reduce = conv1d_relu(pool4_3_3, 32, 1, 1)
    inception_5a_5_5 = conv1d_relu(inception_5a_5_5_reduce, 128, 5, 1)
    inception_5a_pool = tf.layers.max_pooling1d(pool4_3_3, 3, 1, padding='same')
    inception_5a_pool_1_1 = conv1d_relu(inception_5a_pool, 128, 1, 1)
    inception_5a_output = tf.concat([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1],axis=2)
    # 5b ###########
    inception_5b_1_1 = conv1d_relu(inception_5a_output, 384, 1, 1)
    inception_5b_3_3_reduce = conv1d_relu(inception_5a_output, 192, 1, 1)
    inception_5b_3_3 = conv1d_relu(inception_5b_3_3_reduce, 384, 3, 1)
    inception_5b_5_5_reduce = conv1d_relu(inception_5a_output, 48, 1, 1)
    inception_5b_5_5 = conv1d_relu(inception_5b_5_5_reduce, 128, 5, 1)
    inception_5b_pool = tf.layers.max_pooling1d(inception_5a_output, 3, 1, padding='same')
    inception_5b_pool_1_1 = conv1d_relu(inception_5b_pool, 128, 1, 1)
    inception_5b_output = tf.concat([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1],axis=2)

    ##########
    keep_prob = tf.placeholder(tf.float32)   # dropout keep probability (0.5 while training, 1.0 at test time)
    pool5_7_7 = tf.layers.average_pooling1d(inception_5b_output, 3, 1, padding='valid')   # final average pooling over the 5b output

    pool5_7_7 = tf.nn.dropout(pool5_7_7, keep_prob)   # tf.nn.dropout takes a keep probability, matching the feed_dict values below
    print("pool5_7_7 = ",pool5_7_7.shape)
    # Flatten the pooled feature map for the dense layer
    pool5_7_7_flat = tf.layers.Flatten()(pool5_7_7)
    print("pool5_7_7_flat = ",pool5_7_7_flat.shape)

    # Fully connected classifier head: y_conv holds raw logits; softmax is
    # applied inside the loss op below, and explicitly via y_prob when predicting
    y_conv = tf.layers.dense(pool5_7_7_flat,2)
    y_prob = tf.nn.softmax(y_conv)
    # print("y_conv = ",y_conv.shape)


    # Cross-entropy loss, computed from the logits
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=input_label))
    # Create the optimizer; the UPDATE_OPS dependency tells TensorFlow to refresh
    # moving mean/variance statistics during training (a no-op here, since the
    # graph contains no batch normalization)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_step = tf.train.AdamOptimizer(1e-2).minimize(cross_entropy)
    # Accuracy op
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(input_label, 1))
    accuracy_rate = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))



    ######## Training ########
    # Initialize all variables
    tf.global_variables_initializer().run()
    m_saver = tf.train.Saver(var_list=tf.global_variables(),max_to_keep = 15)    # Saver for checkpoints; keeps at most 15




    ########### Training loop (dropout keep_prob 0.5, mini-batches of 200, up to 10000 iterations)
    model_doc = r'model'  # checkpoint folder
    if not os.path.exists(model_doc):
        os.makedirs(model_doc)
    else:
        shutil.rmtree(model_doc)   # force-remove the old folder first
        os.makedirs(model_doc)

    train_accuracy_all = []   # history of logged training accuracies
    max_acc = 0
    f = open('model/acc.txt','w')

    for i in range(10000):

        batch_data_train,batch_label_train = data_hss.batch(data_train,label_train,batch_size = 200)
        # print("batch_data_train = ",batch_data_train,batch_data_train.shape)
        # print("batch_label_train = ",batch_label_train,batch_label_train.shape)

        # Log progress; i % 1 is always 0, so this prints every iteration
        # (raise the modulus, e.g. i % 100, to log less often)
        if i%1 == 0:

            train_accuracy = accuracy_rate.eval(feed_dict={input_data:batch_data_train,input_label: batch_label_train,
                                                           keep_prob: 1.0})   # disable dropout while measuring
            print ("-->step %d, training accuracy %.4f ,max_acc %.4f"%(i, train_accuracy,max_acc))
            print("cross_entropy = ",sess.run(cross_entropy,feed_dict={input_data:batch_data_train,input_label: batch_label_train,
                                                                       keep_prob: 1.0}))
            f.write(str(i)+', train_accuracy: '+str(train_accuracy)+'  '+str(max_acc) +'\n')
            # # save only the most recent checkpoints:
            # m_saver.save(sess, './model/model.ckpt', global_step=i)
            # save checkpoints whose accuracy is within 0.04 of the best so far
            # (the Saver above retains at most 15 of them)
            if train_accuracy >= max_acc :
                max_acc = train_accuracy
            if train_accuracy >= max_acc - 0.04:
                m_saver.save(sess, './model/model.ckpt', global_step=i)

            train_accuracy_all.append(train_accuracy)


        # One optimization step
        train_step_,loss = sess.run([train_step,cross_entropy], feed_dict={input_data:batch_data_train,
                                                                                   input_label: batch_label_train,
                                                                                   keep_prob: 0.5})


        # Early stopping: once training accuracy has passed its peak and
        # collapsed, quit; the full test-set evaluation happens after training
        # print(train_accuracy_all)
        if max_acc >= 0.9 and train_accuracy < 0.5:
            break
    f.close()
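
    # train_accuracy_all is collected above but not used further; a sketch for
    # visualizing the curve (assumes matplotlib is installed):
    # import matplotlib.pyplot as plt
    # plt.plot(train_accuracy_all)
    # plt.xlabel('step'); plt.ylabel('training accuracy'); plt.show()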


    ####### Restore the latest checkpoint and run predictions on the test set
    model_file=tf.train.latest_checkpoint('model/')
    m_saver.restore(sess,model_file)
    # (optional) write the predictions out to per-category text files
    # output1 = "偽差.txt"
    # if os.path.exists(output1): os.remove(output1)
    # file_name1 = open(output1, 'a+')
    # output2 = "非偽差.txt"
    # if os.path.exists(output2): os.remove(output2)
    # file_name2 = open(output2, 'a+')
    # output3 = "R_left2right.txt"
    # if os.path.exists(output3): os.remove(output3)
    # file_name3 = open(output3, 'a+')
    # output4 = "R_left2other.txt"
    # if os.path.exists(output4): os.remove(output4)
    # file_name4 = open(output4, 'a+')

    a = 0   # number of correct predictions
    TP = 0  # correctly predicted positive (disease) cases
    FN_TP = 0 # positive cases in the ground truth (TP + FN)
    TN = 0  # correctly predicted negative (normal) cases
    TN_FP = 0 # negative cases in the ground truth (TN + FP)
    sensitivity = 0  # sensitivity = TP / (TP + FN)
    specificity = 0  # specificity = TN / (TN + FP)

    single_data = np.empty([1,75])
    single_label = np.zeros([1,2],"int")
    for i in range(0,len(data_test)):

        single_data[0] = data_test[i]
        single_label = label_test[i]
        # print("single_data = ",single_data)
        # print("single_label = ",single_label)

        # y_prob was built once at graph-construction time; running it directly
        # avoids adding a new softmax op to the graph on every loop iteration
        output = sess.run(y_prob , feed_dict = {input_data:single_data, keep_prob: 1.0})
        # print("output = ",output)
        output = np.round(output)  # round to the nearest integer -> one-hot prediction
        print(i,"/",len(data_test)-1,"  output = ",output,"single_label = ",single_label)
        if single_label[0] == output[0][0] and single_label[1] == output[0][1] :
            a +=1

        if single_label[0] == output[0][0] and output[0][0] == 1:  # counts toward sensitivity
            TP += 1
        if single_label[0] == 1 :
            FN_TP += 1

        if single_label[1] == output[0][1] and output[0][1] == 1:  # counts toward specificity
            TN += 1
        if single_label[1] == 1 :
            TN_FP += 1

        # if output[0][0] == 1:     # predicted artifact
        #     file_name1.write(str(data_test[i]) + '\n')
        # if output[0][1] == 1:     # predicted non-artifact
        #     file_name2.write(str(data_test[i]) + '\n')
        # if single_label[0] == 1 and output[0][0] == 0:     # artifact misread as non-artifact
        #     file_name3.write(str(data_test[i]) + '\n')
        # if single_label[1] == 1 and output[0][0] == 1:     # non-artifact misread as artifact
        #     file_name4.write(str(data_test[i]) + '\n')

    print("len(data_test) = ",len(data_test),"a =",a)
    print("sensibility = ",TP/FN_TP,"specificity =",TN/TN_FP)

    # file_name1.close()
    # file_name2.close()
    # file_name3.close()
    # file_name4.close()

    end = time.time()
    print("program run time:",end - start)
