

[TensorFlow 1.0 Learning Notes] (12) Implementing Classic CNNs, Part 2: VGGNet

2019-11-06 06:44:09
Contributed by: a reader

This installment begins the walkthrough of VGGNet.

The full code comes first; more detailed explanations will be added later.

# coding=utf-8
# Load the common libraries
from datetime import datetime
import math
import time
import tensorflow as tf

# First define a function conv_op that creates a convolutional layer and adds
# the layer's parameters to the parameter list p.
# get_shape()[-1].value reads the channel count of input_op, and
# tf.name_scope(name) sets the scope. tf.get_variable() creates the kernel
# (the convolution parameters) with shape [kh, kw, n_in, n_out], i.e.
# [kernel height, kernel width, input channels, output channels], initialized
# with tf.contrib.layers.xavier_initializer_conv2d().
# tf.Variable(tensor, trainable, name)
def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    n_in = input_op.get_shape()[-1].value  # number of input channels
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[kh, kw, n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation

# Define fc_op, the creation function for fully connected layers.
# As with conv_op, first get the channel count of input_op, then create the
# layer's parameters with tf.get_variable; here they have only two dimensions,
# the input channel count n_in and the output channel count n_out.
# Initialization again uses xavier_initializer. The bias is no longer 0 but a
# small value, 0.1, to avoid dead neurons. tf.nn.relu_layer multiplies
# input_op by kernel, adds the biases, and applies the ReLU non-linearity to
# obtain activation. Finally the layer's kernel and biases are appended to the
# parameter list p, and activation is returned as the function's result.
def fc_op(input_op, name, n_out, p):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[n_out], dtype=tf.float32),
                             name='b')
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        p += [kernel, biases]
        return activation

# Define mpool_op, the creation function for max-pooling layers
def mpool_op(input_op, name, kh, kw, dh, dw):
    return tf.nn.max_pool(input_op, ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding='SAME', name=name)
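As a quick sanity check (my own minimal sketch, not part of the original listing; the names check_conv, check_pool and params are made up for illustration), the helpers can be exercised on a dummy tensor: with padding='SAME' and stride 1 the convolution preserves the spatial size, while the 2x2, stride-2 pooling halves it.

# Sketch only: verify the output shapes produced by conv_op and mpool_op
params = []
x = tf.placeholder(tf.float32, [None, 224, 224, 3])
c = conv_op(x, name='check_conv', kh=3, kw=3, n_out=64, dh=1, dw=1, p=params)
m = mpool_op(c, name='check_pool', kh=2, kw=2, dh=2, dw=2)
print(c.get_shape())   # (?, 224, 224, 64)
print(m.get_shape())   # (?, 112, 112, 64)
print(len(params))     # 2 -- the conv layer's kernel and biases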
# Build the VGGNet-16 network structure.
# VGGNet-16 consists of 6 parts: the first 5 are convolutional segments and
# the last is the fully connected network.
def inference_op(input_op, keep_prob):
    p = []
    # first convolutional segment
    conv1_1 = conv_op(input_op, name='conv1_1', kh=3, kw=3,
                      n_out=64, dh=1, dw=1, p=p)
    conv1_2 = conv_op(conv1_1, name='conv1_2', kh=3, kw=3,
                      n_out=64, dh=1, dw=1, p=p)
    pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dh=2, dw=2)
    # second convolutional segment
    conv2_1 = conv_op(pool1, name='conv2_1', kh=3, kw=3,
                      n_out=128, dh=1, dw=1, p=p)
    conv2_2 = conv_op(conv2_1, name='conv2_2', kh=3, kw=3,
                      n_out=128, dh=1, dw=1, p=p)
    pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)
    # third convolutional segment
    conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3,
                      n_out=256, dh=1, dw=1, p=p)
    conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3,
                      n_out=256, dh=1, dw=1, p=p)
    conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3,
                      n_out=256, dh=1, dw=1, p=p)
    pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)
    # fourth convolutional segment
    conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3,
                      n_out=512, dh=1, dw=1, p=p)
    conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3,
                      n_out=512, dh=1, dw=1, p=p)
    conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3,
                      n_out=512, dh=1, dw=1, p=p)
    pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)
    # fifth convolutional segment
    conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3,
                      n_out=512, dh=1, dw=1, p=p)
    conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3,
                      n_out=512, dh=1, dw=1, p=p)
    conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3,
                      n_out=512, dh=1, dw=1, p=p)
    pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dh=2, dw=2)

    # Flatten the output of the fifth convolutional segment: tf.reshape turns
    # each sample into a one-dimensional vector of length 7*7*512 = 25088.
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

    # Attach a fully connected layer with 4096 hidden units and ReLU
    # activation, followed by a dropout layer whose keep rate is 0.5 during
    # training and 1.0 at prediction time.
    fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")

    # Finally attach a fully connected layer with 1000 output units and apply
    # softmax to obtain the classification probabilities. tf.argmax picks the
    # class with the highest probability. Return fc8, softmax, prediction and
    # the parameter list p together.
    fc8 = fc_op(fc7_drop, name="fc8", n_out=1000, p=p)
    softmax = tf.nn.softmax(fc8)
    prediction = tf.argmax(softmax, 1)
    return prediction, softmax, fc8, p
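Where the flattened length comes from: the input images are 224x224 and each of the five max-pooling layers halves the spatial resolution, so the fifth segment outputs feature maps of size 224 / 2^5 = 7 on each side with 512 channels, giving 7 x 7 x 512 = 25088 values per sample.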
# Benchmark function time_tensorflow_run
def time_tensorflow_run(session, target, feed, info_string):
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    # Run num_batches + num_steps_burn_in iterations, recording the time of
    # each with time.time() and executing it via session.run(target). After
    # the initial num_steps_burn_in warm-up iterations, print the current
    # duration every 10 rounds. total_duration and total_duration_squared are
    # accumulated every round so the per-round mean and standard deviation can
    # be computed afterwards.
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target, feed_dict=feed)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    # After the loop, compute the mean (mn) and standard deviation (sd) of
    # each iteration's time and print the result. This completes the
    # per-iteration timing function time_tensorflow_run.
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), info_string, num_batches, mn, sd))

# Main function run_benchmark.
# First use with tf.Graph().as_default() to define the default Graph for
# later use.
def run_benchmark():
    with tf.Graph().as_default():
        image_size = 224
        # Build a random tensor drawn from a normal distribution: the first
        # dimension is batch_size (the number of samples per iteration), the
        # second and third are the image size image_size, and the fourth is
        # the number of color channels.
        images = tf.Variable(tf.random_normal([batch_size, image_size,
                                               image_size, 3],
                                              dtype=tf.float32, stddev=1e-1))
        # Create the keep_prob placeholder and call inference_op to build the
        # VGGNet-16 network, obtaining prediction, softmax, fc8 and the
        # parameter list p.
        keep_prob = tf.placeholder(tf.float32)
        prediction, softmax, fc8, p = inference_op(images, keep_prob)
        # Create the session and initialize all global variables.
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        # Run with keep_prob set to 1.0 and use time_tensorflow_run to time
        # the forward pass. Then compute the l2_loss of fc8, the output of the
        # final fully connected layer, and use tf.gradients to get the
        # gradients of all model parameters with respect to this loss. Finally
        # use time_tensorflow_run to time the backward pass, with the gradient
        # op grad as the target and keep_prob at 0.5.
        time_tensorflow_run(sess, prediction, {keep_prob: 1.0}, "Forward")
        objective = tf.nn.l2_loss(fc8)
        grad = tf.gradients(objective, p)
        time_tensorflow_run(sess, grad, {keep_prob: 0.5}, "Forward-backward")

batch_size = 32
num_batches = 100
run_benchmark()
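Two details of the benchmark are worth spelling out. First, the loop accumulates both total_duration and total_duration_squared so that the standard deviation can be recovered from the identity Var(t) = E[t^2] - (E[t])^2, which is exactly what vr = total_duration_squared / num_batches - mn * mn computes. Second, the two timing runs use different dropout settings on purpose: keep_prob = 1.0 for the forward pass, matching prediction time, and keep_prob = 0.5 for the forward-backward pass, matching training. Each run finishes with a summary line of the form '<timestamp>: Forward across 100 steps, <mean> +/- <std> sec / batch'; the actual numbers depend entirely on your hardware.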

