The inception_resnet_v2 network

# -*- coding: utf-8 -*-
# https://www.w3cschool.cn/tensorflow_python/tensorflow_python-13p92sws.html      # layers
# https://my.oschina.net/u/876354/blog/1637819     # GoogLeNet (v1 to v4)
# https://www.zhihu.com/question/49039504          # GoogLeNet implementation
# https://blog.csdn.net/m0_37987687/article/details/80241893
# http://www.reibang.com/p/cb8ebcee1b15     # saving BN-layer parameters
# https://blog.csdn.net/touch_dream/article/details/79305617      # v4
# https://blog.csdn.net/liuxiao214/article/details/81914743     # inception-resnet v2
# http://www.reibang.com/p/006248a3fd7f       # inception-resnet v2

import os
import tensorflow as tf
import 數(shù)據(jù)處理_hss as data_hss      # project-specific data-loading module ("數(shù)據(jù)處理" = data processing)
import shutil
import numpy as np
import time



def conv1d_relu(X,filters,kernel_size,strides,is_training):

    X_change = tf.layers.conv1d(X, filters, kernel_size, strides, 'same', use_bias=True, activation=None)
    X_change = tf.layers.batch_normalization(X_change,training = is_training)
    X_change = tf.nn.relu(X_change)

    return X_change

def conv1d_relu_valid(X,filters,kernel_size,strides,is_training):

    X_change = tf.layers.conv1d(X, filters, kernel_size, strides, 'valid', use_bias=True, activation=None)
    X_change = tf.layers.batch_normalization(X_change,training = is_training)
    X_change = tf.nn.relu(X_change)

    return X_change
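
# Output-length bookkeeping for the two wrappers above: tf.layers.conv1d with
# 'same' padding yields ceil(L / stride) output steps, while 'valid' padding
# yields floor((L - kernel_size) / stride) + 1, so the *_valid variant is used
# wherever a block needs to shrink the sequence.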

def inception_ResNet_A(layers_name,is_training):

    layers_name = tf.nn.relu(layers_name)
    conv1_1x1 = conv1d_relu(layers_name, 32, 1, 1 ,is_training)

    conv2_1x1 = conv1d_relu(layers_name, 32, 1, 1 ,is_training)
    conv3_3x3 = conv1d_relu(conv2_1x1, 32, 3, 1 ,is_training)

    conv4_1x1 = conv1d_relu(layers_name, 32, 1, 1 ,is_training)
    conv5_3x3 = conv1d_relu(conv4_1x1, 48, 3, 1 ,is_training)
    conv6_3x3 = conv1d_relu(conv5_3x3, 64, 3, 1 ,is_training)

    Fc_1 = tf.concat([conv1_1x1,conv3_3x3,conv6_3x3],axis=2)
    Fc_1 = tf.nn.relu(Fc_1)
    conv7_1x1 = conv1d_relu(Fc_1, 384, 1, 1 ,is_training)

    Fc = tf.nn.relu(tf.add_n([layers_name ,conv7_1x1]))

    return Fc
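
# In inception_ResNet_A the three branches concatenate to 32 + 32 + 64 = 128
# channels; conv7_1x1 projects them back to 384 so tf.add_n can sum the branch
# output with the 384-channel block input (the paper's 35x35 "A" block,
# flattened to 1-D here).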

def Reduction_A(layers_name,is_training):

    pool_max = tf.layers.max_pooling1d(layers_name, 3, 2, padding='valid')
    pool_max = tf.nn.relu(pool_max)

    conv1_3x3 = conv1d_relu_valid(layers_name, 384, 3, 2 ,is_training)

    conv2_1x1 = conv1d_relu(layers_name, 192, 1, 1 ,is_training)
    conv3_3x3 = conv1d_relu(conv2_1x1, 224, 3, 1 ,is_training)
    conv4_3x3 = conv1d_relu_valid(conv3_3x3, 256, 3, 2 ,is_training)
    Fc = tf.concat([pool_max,conv1_3x3,conv4_3x3],axis=2)
    Fc = tf.nn.relu(Fc)

    return Fc
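
# Reduction_A halves the sequence length (stride-2 'valid' ops) and widens the
# channels to 384 (pool) + 384 + 256 = 1024, which is exactly what the B blocks
# below project back to for their residual add.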

def inception_ResNet_B(layers_name,is_training):

    layers_name = tf.nn.relu(layers_name)
    conv1_1x1 = conv1d_relu(layers_name, 192, 1, 1 ,is_training)

    conv2_1x1 = conv1d_relu(layers_name, 128, 1, 1 ,is_training)
    conv3_7x7 = conv1d_relu(conv2_1x1, 160, 7, 1 ,is_training)
    conv4_7x7 = conv1d_relu(conv3_7x7, 192, 7, 1 ,is_training)

    Fc_1 = tf.concat([conv1_1x1,conv4_7x7],axis=2)
    Fc_1 = tf.nn.relu(Fc_1)
    conv5_1x1 = conv1d_relu(Fc_1, 1024, 1, 1 ,is_training)   # 1024 to match the residual input; the paper uses 1154

    Fc = tf.nn.relu(tf.add_n([layers_name ,conv5_1x1]))

    return Fc

def Reduction_B(layers_name,is_training):

    pool_max = tf.layers.max_pooling1d(layers_name, 3, 2, padding='valid')
    pool_max = tf.nn.relu(pool_max)

    conv1_1x1 = conv1d_relu(layers_name, 256, 1, 1 ,is_training)
    conv2_3x3 = conv1d_relu_valid(conv1_1x1, 384, 3, 2 ,is_training)

    conv3_1x1 = conv1d_relu(layers_name, 256, 1, 1 ,is_training)
    conv4_3x3 = conv1d_relu_valid(conv3_1x1, 288, 3, 2 ,is_training)

    conv5_1x1 = conv1d_relu(layers_name, 256, 1, 1 ,is_training)
    conv6_3x3 = conv1d_relu(conv5_1x1, 288, 3, 1 ,is_training)
    conv7_3x3 = conv1d_relu_valid(conv6_3x3, 320, 3, 2,is_training)

    Fc = tf.concat([pool_max,conv2_3x3,conv4_3x3,conv7_3x3],axis=2)
    Fc = tf.nn.relu(Fc)

    return Fc
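
# Reduction_B again halves the length and concatenates 1024 (pool) + 384 + 288
# + 320 = 2016 channels, matching the 2016-channel projection in the C blocks.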

def inception_ResNet_C(layers_name,is_training):

    layers_name = tf.nn.relu(layers_name)
    conv1_1x1 = conv1d_relu(layers_name, 192, 1, 1 ,is_training)

    conv2_1x1 = conv1d_relu(layers_name, 192, 1, 1 ,is_training)
    conv3_3x3 = conv1d_relu(conv2_1x1, 224, 3, 1 ,is_training)
    conv4_3x3 = conv1d_relu(conv3_3x3, 256, 3, 1 ,is_training)

    Fc_1 = tf.concat([conv1_1x1,conv4_3x3],axis=2)
    Fc_1 = tf.nn.relu(Fc_1)
    conv5_1x1 = conv1d_relu(Fc_1, 2016, 1, 1 ,is_training)    # 2016 to match the residual input; the paper uses 2048

    Fc = tf.nn.relu(tf.add_n([layers_name ,conv5_1x1]))

    return Fc
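
# Block C mirrors A and B: the 192 + 256 = 448 concatenated channels are
# projected to 2016 by conv5_1x1 so the residual add against the 2016-channel
# input works.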


def model_inception_v4():

    input__data = tf.placeholder(tf.float32, [None, 75*12])
    # print("input__data",input__data)
    input__label = tf.placeholder(tf.float32, [None, 2])
    inputdata = tf.reshape(input__data,[-1,75*12,1])
    is_training = tf.placeholder(tf.bool)
    # print("is_training",is_training)
    #### stem
    conv1_3x3 = conv1d_relu_valid(inputdata, 32, 3, 2 ,is_training)
    conv2_3x3 = conv1d_relu_valid(conv1_3x3, 32, 3, 1 ,is_training)
    conv3_3x3 = conv1d_relu(conv2_3x3, 64, 3, 1 ,is_training)
    pool1_3x3 = tf.layers.max_pooling1d(conv3_3x3, 3, 2, padding='valid')
    conv4_3x3 = conv1d_relu_valid(conv3_3x3, 96, 3, 2 ,is_training)
    Fc_1 = tf.concat([pool1_3x3,conv4_3x3],axis=2)
    conv5_1x1 = conv1d_relu(Fc_1, 64, 1, 1 ,is_training)
    conv6_3x3 = conv1d_relu_valid(conv5_1x1, 96, 3, 1 ,is_training)
    conv7_1x1 = conv1d_relu(Fc_1, 64, 1, 1 ,is_training)
    conv8_7x7 = conv1d_relu(conv7_1x1, 64, 7, 1 ,is_training)
    conv9_7x7 = conv1d_relu(conv8_7x7, 64, 7, 1 ,is_training)
    conv10_3x3 = conv1d_relu_valid(conv9_7x7, 96, 3, 1 ,is_training)
    Fc_2 = tf.concat([conv6_3x3,conv10_3x3],axis=2)
    conv11_3x3 = conv1d_relu_valid(Fc_2, 192, 3, 2 ,is_training)
    pool2_3x3 = tf.layers.max_pooling1d(Fc_2, 3, 2, padding='valid')
    Fc_3 = tf.concat([conv11_3x3,pool2_3x3],axis=2)
    print("Fc_3 = ",Fc_3 )    # shape=(?, 110, 384)

    ##### inception_ResNet_A * 5
    Fc_4 = inception_ResNet_A(Fc_3,is_training)
    Fc_5 = inception_ResNet_A(Fc_4,is_training)
    Fc_6 = inception_ResNet_A(Fc_5,is_training)
    Fc_7 = inception_ResNet_A(Fc_6,is_training)
    Fc_8 = inception_ResNet_A(Fc_7,is_training)
    print("Fc_8 = ",Fc_8 )
    ##### Reduction_A
    Fc_9 = Reduction_A(Fc_8,is_training)
    print("Fc_9 = ",Fc_9 )
    ##### inception_ResNet_B * 10
    Fc_10 = inception_ResNet_B(Fc_9,is_training)
    Fc_11 = inception_ResNet_B(Fc_10,is_training)
    Fc_12 = inception_ResNet_B(Fc_11,is_training)
    Fc_13 = inception_ResNet_B(Fc_12,is_training)
    Fc_14 = inception_ResNet_B(Fc_13,is_training)
    Fc_15 = inception_ResNet_B(Fc_14,is_training)
    Fc_16 = inception_ResNet_B(Fc_15,is_training)
    Fc_17 = inception_ResNet_B(Fc_16,is_training)
    Fc_18 = inception_ResNet_B(Fc_17,is_training)
    Fc_19 = inception_ResNet_B(Fc_18,is_training)
    print("Fc_19 = ",Fc_19 )
    ##### Reduction_B
    Fc_20 = Reduction_B(Fc_19,is_training)
    print("Fc_20 = ",Fc_20 )
    ##### Inception_C * 5
    Fc_21 = inception_ResNet_C(Fc_20,is_training)
    Fc_22 = inception_ResNet_C(Fc_21,is_training)
    Fc_23 = inception_ResNet_C(Fc_22,is_training)
    Fc_24 = inception_ResNet_C(Fc_23,is_training)
    Fc_25 = inception_ResNet_C(Fc_24,is_training)
    print("Fc_25 = ",Fc_25 )

    avg_temp = int(Fc_25.shape[1])
    pool_avg = tf.layers.average_pooling1d(Fc_25, avg_temp, 1, padding='valid')
    print("pool_avg = ",pool_avg )
    pool_avg = tf.nn.relu(pool_avg)
    keepprob = tf.placeholder(tf.float32)
    reshape_temp = int(Fc_25.shape[2])
    pool_avg_flat = tf.reshape(pool_avg, [-1, reshape_temp])
    pool_avg_flat = tf.layers.dropout(pool_avg_flat, rate=keepprob, training=is_training)     # rate is the fraction DROPPED (kept fraction = 1 - rate); without training=is_training the layer would be a no-op
    print("pool_avg_flat = ",pool_avg_flat)
    # fully connected layer
    yconv = tf.layers.dense(pool_avg_flat,2)   #,activation = tf.nn.softmax
    # print("y_conv = ",y_conv.shape)
    out = tf.nn.softmax(yconv,name = "out")   # named "out" so the node can be found when exporting a .pb model

    return input__data,input__label,keepprob,yconv,is_training,out

def optimization(yconv,input__label):

    # cross-entropy loss
    crossentropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=yconv, labels=input__label))
    # create the optimizer; depending on UPDATE_OPS makes TensorFlow refresh the BN moving mean/variance on every training step
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        trainstep = tf.train.AdamOptimizer(1e-4).minimize(crossentropy)
    # ops for computing accuracy
    correct_prediction = tf.equal(tf.argmax(yconv, 1), tf.argmax(input__label, 1))
    accuracyrate = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    return crossentropy,trainstep,accuracyrate
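
# Note: optimization() takes the raw logits (yconv), not the softmaxed "out"
# tensor -- tf.nn.softmax_cross_entropy_with_logits applies softmax internally.
# (In TF 1.x this op is deprecated in favour of
# softmax_cross_entropy_with_logits_v2, which behaves identically here since
# the labels come from a placeholder and are never backpropagated into.)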


if __name__ == '__main__':


    start = time.time()
    file_1 = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\N'    # data folders
    file_2 = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\S'
    file_3 = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\T'
    file_4 = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\V'
    file_5 = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\X'
    file_NO_X = r'D:\hss_python_work\resnet_cnn\數(shù)據(jù)集\非X'

    data_all_1,label_all_1 = data_hss.data_analysis(file_5,label_def = [0,1],proportion = 800)    # define the label for each class first
    data_all_2,label_all_2 = data_hss.data_analysis(file_5,label_def = [1,0],proportion = 800)    # (note that both classes are read from file_5 here)

    data_all = data_all_1 + data_all_2
    label_all = label_all_1 + label_all_2
    print("data_all = ",len(data_all))
    print("label_all = ",len(label_all))

    data_train,label_train,data_test,label_tast = data_hss.Dataset_partition(data_all,label_all)
    print(data_train.shape,label_train.shape,data_test.shape,label_tast.shape)



    sess = tf.InteractiveSession()
    input_data,input_label,keep_prob,y_conv,is_training,out = model_inception_v4()
    cross_entropy,train_step,accuracy_rate = optimization(y_conv,input_label)


    ######## training ########
    # initialize all variables and parameters
    tf.global_variables_initializer().run()
    var_list = [var for var in tf.global_variables() if "moving" in var.name]
    var_list += tf.trainable_variables()
    m_saver = tf.train.Saver(var_list=var_list, max_to_keep=5)
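    # tf.trainable_variables() does not include the batch-norm moving_mean /
    # moving_variance (they are updated via UPDATE_OPS, not gradients), so the
    # "moving" variables are added explicitly; otherwise the Saver would skip
    # them and restored models would mis-normalize at inference time.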



    ########### training loop (dropout rate fed as 0.2 during training, mini-batches of 200, up to 10000 iterations)
    model_doc = r'model'  # folder for saved checkpoints
    if not os.path.exists(model_doc):   # create the checkpoint folder
        os.makedirs(model_doc)
    else:
        shutil.rmtree(model_doc)   # force-remove the stale folder first
        os.makedirs(model_doc)

    train_accuracy_all = []   # history of all training accuracies
    max_acc = 0
    f = open('model/acc.txt','w')

    for i in range(10000):

        batch_data_train,batch_label_train = data_hss.batch(data_train,label_train,batch_size = 200)
        # print("batch_data_train = ",batch_data_train,batch_data_train.shape)
        # print("batch_label_train = ",batch_label_train,batch_label_train.shape)

        # evaluate on the current batch (i % 1 == 0 holds on every iteration; use i % 100 for sparser logging)
        if i%1 == 0:

            train_accuracy = accuracy_rate.eval(feed_dict={input_data:batch_data_train,input_label: batch_label_train,
                                                           keep_prob: 1,is_training : False})
            print ("-->step %d, training accuracy %.4f ,max_acc %.4f"%(i, train_accuracy,max_acc))
            print("cross_entropy = ",sess.run(cross_entropy,feed_dict={input_data:batch_data_train,input_label: batch_label_train,
                                                                       keep_prob: 1,is_training : False}))
            f.write(str(i)+', train_accuracy: '+str(train_accuracy)+'  '+str(max_acc) +'\n')
            # # keep the 5 most recent checkpoints
            # m_saver.save(sess, './model/model.ckpt', global_step=i)
            # keep the checkpoints whose accuracy is within 0.04 of the best so far (max_to_keep=5)
            if train_accuracy >= max_acc :
                max_acc = train_accuracy
            if train_accuracy >= max_acc - 0.04:
                m_saver.save(sess, './model/model.ckpt', global_step=i)

            train_accuracy_all.append(train_accuracy)

        if max_acc >= 0.95 and train_accuracy < 0.5:   # stop once accuracy has peaked above 0.95 and then collapses
            print("break reason 1")
            break
        if (len(train_accuracy_all) >= 5      # stop once at least 5 evaluations exist and the last 5 are all above 0.99
            and train_accuracy_all[len(train_accuracy_all) - 1] > 0.99
            and train_accuracy_all[len(train_accuracy_all) - 2] > 0.99
            and train_accuracy_all[len(train_accuracy_all) - 3] > 0.99
            and train_accuracy_all[len(train_accuracy_all) - 4] > 0.99
            and train_accuracy_all[len(train_accuracy_all) - 5] > 0.99) :
            # print(train_accuracy_all)
            print("break reason 2")
            break

        # run one training step
        train_step_,loss = sess.run([train_step,cross_entropy], feed_dict={input_data:batch_data_train,
                                                                                   input_label: batch_label_train,
                                                                                   keep_prob: 0.2,
                                                                                   is_training : True})


        # second, looser early-stop check; the full test-set evaluation happens after training
        # print(train_accuracy_all)
        if max_acc >= 0.9 and train_accuracy < 0.5:   # stop once accuracy has peaked above 0.9 and then collapses
            break
    f.close()


    ####### restore the model and predict

    model_file=tf.train.latest_checkpoint('model/')
    m_saver.restore(sess,model_file)


    a = 0   # number of correct predictions
    TP = 0  # true positives: abnormal beats predicted correctly
    FN_TP = 0 # positives in the ground truth (TP + FN)
    TN = 0  # true negatives: normal beats predicted correctly
    TN_FP = 0 # negatives in the ground truth (TN + FP)
    sensibility = 0  # sensitivity
    specificity = 0  # specificity

    #### batch prediction over all test heartbeats
    output = sess.run(y_conv , feed_dict = {input_data:data_test, keep_prob: 1.0,is_training : False})
    print("output = ",output)
    output = sess.run(tf.nn.softmax(output))
    output = np.round(output)  # round to the nearest integer: the two softmax outputs sum to 1, so this gives a one-hot vector
    print("output = ",output)
    print("label_tast = ",label_tast)

    for i in range(0,len(data_test)):
        if label_tast[i][0] == output[i][0] and label_tast[i][1] == output[i][1] :
            a +=1
        if label_tast[i][0] == output[i][0] and output[i][0] == 1:  # sensitivity count
            TP += 1
        if label_tast[i][0] == 1 :
            FN_TP += 1
        if label_tast[i][1] == output[i][1] and output[i][1] == 1:  # specificity count
            TN += 1
        if label_tast[i][1] == 1 :
            TN_FP += 1

    # ### per-beat prediction, one heartbeat at a time
    # single_data = np.empty([1,75])
    # for i in range(0,len(data_test)):
    #
    #     single_data[0] = data_test[i]
    #     # print("single_data = ",single_data)
    #     # print("single_label = ",single_label)
    #
    #     output = sess.run(y_conv , feed_dict = {input_data:single_data, keep_prob: 1.0,is_training : False})
    #     # print("output = ",output)
    #     output = sess.run(tf.nn.softmax(output))
    #     output = np.round(output)  # round to the nearest integer
    #     print(i,"/",len(data_test)-1,"  output = ",output,"single_label = ",label_tast[i])
    #     if label_tast[i][0] == output[0][0] and label_tast[i][1] == output[0][1] :
    #         a +=1
    #
    #     if label_tast[i][0] == output[0][0] and output[0][0] == 1:  # sensitivity count
    #         TP += 1
    #     if label_tast[i][0] == 1 :
    #         FN_TP += 1
    #
    #     if label_tast[i][1] == output[0][1] and output[0][1] == 1:  # specificity count
    #         TN += 1
    #     if label_tast[i][1] == 1 :
    #         TN_FP += 1



    print("len(data_test) = ",len(data_test),"a =",a)
    print("sensibility = ",TP/FN_TP,"specificity =",TN/TN_FP)


    end = time.time()
    print("程序運行時間:",end - start)
