Training MobileNet on your own samples

2023-05-16

import tensorflow as tf
#from datainput import read_and_decode
from mobileNet import mobilenet
from tf_test import read_and_decode

image_height = 224
image_width = 224
num_channels = 3
batch_size = 5
num_classes = 5

images, labels = read_and_decode("/home/henson/Desktop/mobile/test.tfrecords", batch_size)
summaries_dir = '/home/henson/Desktop/mobile/mnist_logs'
#print(images.shape, labels.shape)
print(images)
print(labels)
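
The read_and_decode helper imported above isn't listed in this post. For reference, here is a minimal sketch of what such a TF 1.x TFRecord reader could look like; the feature keys ('label', 'img_raw') and the raw-byte image encoding are assumptions about how the records were written:

def read_and_decode(filename, batch_size):
    # Queue the TFRecord file and read serialized examples from it.
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={'label': tf.FixedLenFeature([], tf.int64),
                  'img_raw': tf.FixedLenFeature([], tf.string)})
    # Decode the raw bytes back into an image tensor and rescale it.
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [image_height, image_width, num_channels])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    # One-hot labels, matching the [None, num_classes] y_ placeholder below.
    label = tf.one_hot(tf.cast(features['label'], tf.int32), num_classes)
    images, labels = tf.train.shuffle_batch(
        [img, label], batch_size=batch_size,
        capacity=200, min_after_dequeue=100)
    return images, labels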

x = tf.placeholder(tf.float32, shape=[batch_size, image_height, image_width,
                                      num_channels])
y_ = tf.placeholder(tf.float32, shape=[None, num_classes])

"""
y = tf.placeholder(tf.int32, [None])
y_ = tf.one_hot(y,2,1,0,-1)
y_ = tf.cast(y_, tf.float32)
"""
# A CNN needs 4-D input data and one-hot labels.
"""
def reformat(dataset, labels):
    dataset = dataset.reshape((-1, image_height, image_width, num_channels)).astype(np.float32)
    labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
    return dataset, labels

train_dataset, train_labels = reformat(image, label)
#test_dataset, test_labels = reformat(img, label)
print(train_dataset.shape, train_labels.shape)
#print(test_dataset.shape, test_labels.shape)
"""
sess = tf.InteractiveSession()

# dataset = tf.placeholder(tf.float32, shape = [None, image_height, image_width, 1])
# labels = tf.placeholder(tf.float32, shape = [None, 2])


if __name__ == '__main__':
    # mobilenet(x,num_classes=3,is_training=True,width_multiplier=1)
    # keep_prob is fed below, but this MobileNet implementation has no
    # dropout, so the placeholder is effectively unused.
    keep_prob = tf.placeholder("float")
    logits, end_points = mobilenet(x, num_classes=num_classes, is_training=True, width_multiplier=1)
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))

    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
    tf.summary.scalar('cross_entropy', cross_entropy)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.summary.scalar('accuracy', accuracy)
    merged = tf.summary.merge_all()
    print("accuracy shape:", accuracy)

    train_writer = tf.summary.FileWriter(summaries_dir + '/train', sess.graph)
    test_writer = tf.summary.FileWriter(summaries_dir + '/test')

    sess.run(tf.global_variables_initializer())

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print("new begin!")
    for i in range(1000):
        # Pull a fresh batch from the input queue on every training step.
        img_xs, label_xs = sess.run([images, labels])
        if i % 10 == 0:
            summary, train_acc = sess.run([merged, accuracy], feed_dict={x: img_xs, y_: label_xs, keep_prob: 0.5})
            print("step %d, training accuracy %g" % (i, train_acc))
            train_writer.add_summary(summary, i)
        train_step.run(feed_dict={x: img_xs, y_: label_xs})

    coord.request_stop()
    coord.join(threads)
    train_writer.close()
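
Once training has written summaries, the logged cross_entropy and accuracy curves can be inspected by pointing TensorBoard at the log directory used above:

tensorboard --logdir=/home/henson/Desktop/mobile/mnist_logs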




The MobileNet implementation comes from:

A tensorflow implementation of Google’s MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications

The official implementation is available at tensorflow/models.

The official object detection implementation has now been released as well; see tensorflow/models/object_detection.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import tensorflow.contrib.slim as slim

def mobilenet(inputs,
          num_classes=1000,
          is_training=True,
          width_multiplier=1,
          scope='MobileNet'):

  """ MobileNet
  More detail, please refer to Google's paper(https://arxiv.org/abs/1704.04861).

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    width_multiplier: channel-width multiplier from the paper (thins each layer).
    scope: Optional scope for the variables.
  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, `num_classes`]
    end_points: a dictionary from components of the network to the corresponding
      activation.
  """

  def _depthwise_separable_conv(inputs,
                                num_pwc_filters,
                                width_multiplier,
                                sc,
                                downsample=False):
    """ Helper function to build the depth-wise separable convolution layer.
    """
    num_pwc_filters = round(num_pwc_filters * width_multiplier)
    _stride = 2 if downsample else 1

    # skip pointwise by setting num_outputs=None
    depthwise_conv = slim.separable_convolution2d(inputs,
                                                  num_outputs=None,
                                                  stride=_stride,
                                                  depth_multiplier=1,
                                                  kernel_size=[3, 3],
                                                  scope=sc+'/depthwise_conv')

    bn = slim.batch_norm(depthwise_conv, scope=sc+'/dw_batch_norm')
    pointwise_conv = slim.convolution2d(bn,
                                        num_pwc_filters,
                                        kernel_size=[1, 1],
                                        scope=sc+'/pointwise_conv')
    bn = slim.batch_norm(pointwise_conv, scope=sc+'/pw_batch_norm')
    return bn
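
  # Cost note (from the MobileNet paper): a standard convolution costs
  # D_K*D_K*M*N*D_F*D_F multiply-adds, while the depthwise separable
  # factorization above costs D_K*D_K*M*D_F*D_F + M*N*D_F*D_F, a
  # reduction of about 1/N + 1/D_K^2 (roughly 8-9x for 3x3 kernels).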

  with tf.variable_scope(scope) as sc:
    end_points_collection = sc.name + '_end_points'
    with slim.arg_scope([slim.convolution2d, slim.separable_convolution2d],
                        activation_fn=None,
                        outputs_collections=[end_points_collection]):
      with slim.arg_scope([slim.batch_norm],
                          is_training=is_training,
                          activation_fn=tf.nn.relu):
        net = slim.convolution2d(inputs, round(32 * width_multiplier), [3, 3], stride=2, padding='SAME', scope='conv_1')
        print(net)
        net = slim.batch_norm(net, scope='conv_1/batch_norm')
        print(net)
        net = _depthwise_separable_conv(net, 64, width_multiplier, sc='conv_ds_2')
        print(net)
        net = _depthwise_separable_conv(net, 128, width_multiplier, downsample=True, sc='conv_ds_3')
        print(net)
        net = _depthwise_separable_conv(net, 128, width_multiplier, sc='conv_ds_4')
        print(net)
        net = _depthwise_separable_conv(net, 256, width_multiplier, downsample=True, sc='conv_ds_5')
        print(net)
        net = _depthwise_separable_conv(net, 256, width_multiplier, sc='conv_ds_6')
        print(net)
        net = _depthwise_separable_conv(net, 512, width_multiplier, downsample=True, sc='conv_ds_7')
        print(net)
        net = _depthwise_separable_conv(net, 512, width_multiplier, sc='conv_ds_8')
        print(net)
        net = _depthwise_separable_conv(net, 512, width_multiplier, sc='conv_ds_9')
        print(net)
        net = _depthwise_separable_conv(net, 512, width_multiplier, sc='conv_ds_10')
        print(net)
        net = _depthwise_separable_conv(net, 512, width_multiplier, sc='conv_ds_11')
        print(net)
        net = _depthwise_separable_conv(net, 512, width_multiplier, sc='conv_ds_12')
        print(net)
        net = _depthwise_separable_conv(net, 1024, width_multiplier, downsample=True, sc='conv_ds_13')
        print(net)
        net = _depthwise_separable_conv(net, 1024, width_multiplier, sc='conv_ds_14')
        print(net)
        net = slim.avg_pool2d(net, [7, 7], scope='avg_pool_15')
        print(net)

    end_points = slim.utils.convert_collection_to_dict(end_points_collection)
    net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
    print(net)
    end_points['squeeze'] = net
    logits = slim.fully_connected(net, num_classes, activation_fn=None, scope='fc_16')
    predictions = slim.softmax(logits, scope='Predictions')
    end_points['Logits'] = logits
    end_points['Predictions'] = predictions
  return logits, end_points

mobilenet.default_image_size = 224


def mobilenet_arg_scope(weight_decay=0.0):
  """Defines the default mobilenet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the MobileNet model.
  """
  with slim.arg_scope(
      [slim.convolution2d, slim.separable_convolution2d],
      weights_initializer=slim.initializers.xavier_initializer(),
      biases_initializer=slim.init_ops.zeros_initializer(),
      weights_regularizer=slim.l2_regularizer(weight_decay)) as sc:
    return sc
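
For completeness, here is a minimal sketch of wiring the two pieces together; the input shape follows mobilenet.default_image_size, and the weight decay value is just an illustrative choice:

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(mobilenet_arg_scope(weight_decay=0.0004)):
    logits, end_points = mobilenet(inputs, num_classes=5, is_training=True)
print(logits)                      # [None, 5] pre-softmax activations
print(end_points['Predictions'])   # softmax class probabilities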


I imported this MobileNet directly and trained on my own data. Why does the result come out like this, and how should the hyperparameters be tuned? My guess is that the problem lies in the network itself.

[Figure: screenshot of the training results]
