Python-TensorFlow Cats vs. Dogs Classification with 96% Accuracy

2023-11-18

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
!curl -O https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_5340.zip
 % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  786M  100  786M    0     0   118M      0  0:00:06  0:00:06 --:--:--  125M
!unzip -q kagglecatsanddogs_5340.zip
!ls
 CDLA-Permissive-2.0.pdf   __notebook_source__.ipynb    model.png
 PetImages		   kagglecatsanddogs_5340.zip  'readme[1].txt'
!ls PetImages
Cat  Dog
import os

# Filter out corrupted images: keep only files whose header contains the
# "JFIF" marker and delete everything else.
num_skipped = 0
for folder_name in ("Cat", "Dog"):
    folder_path = os.path.join("PetImages", folder_name)
    for fname in os.listdir(folder_path):
        fpath = os.path.join(folder_path, fname)
        with open(fpath, "rb") as fobj:
            is_jfif = tf.compat.as_bytes("JFIF") in fobj.peek(10)

        if not is_jfif:
            num_skipped += 1
            # Delete corrupted image
            os.remove(fpath)

print("Deleted %d images" % num_skipped)
Deleted 1590 images
image_size = (160, 160)
batch_size = 128

train_ds, val_ds = tf.keras.utils.image_dataset_from_directory(
    "PetImages",
    validation_split=0.25,
    subset="both",
    seed=1337,
    image_size=image_size,
    batch_size=batch_size,
)
Found 23410 files belonging to 2 classes.
Using 17558 files for training.
Using 5852 files for validation.
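(Not part of the original notebook.) A quick way to sanity-check the loaded data is to plot a few training samples before augmentation is applied; a minimal sketch, assuming matplotlib is installed (label 0 is Cat, 1 is Dog, following the alphabetical folder order):

import matplotlib.pyplot as plt

plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(9):
        plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(int(labels[i]))
        plt.axis("off")
plt.show()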
data_augmentation = keras.Sequential(
    [
        layers.RandomFlip("horizontal"),
        layers.RandomRotation(0.1),
    ]
)

# Apply `data_augmentation` to the training images.
train_ds = train_ds.map(
    lambda img, label: (data_augmentation(img), label),
    num_parallel_calls=tf.data.AUTOTUNE,
)
# Prefetching samples in GPU memory helps maximize GPU utilization.
train_ds = train_ds.prefetch(tf.data.AUTOTUNE)
val_ds = val_ds.prefetch(tf.data.AUTOTUNE)
inputs = keras.Input(shape=(160, 160, 3))

# Entry block: rescale pixel values to [0, 1] and downsample with a strided convolution.
x = layers.Rescaling(1.0 / 255)(inputs)
x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)

previous_block_activation = x  # Set aside residual

# Four downsampling blocks. Each block has two Conv-BN-ReLU stages followed by
# strided max pooling; a strided 1x1 convolution projects the previous block's
# output so it can be added back as a residual connection.
for size in [64, 128, 256, 512]:

    x = layers.Conv2D(size, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2D(size, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

    residual = layers.Conv2D(size, 1, strides=2, padding="same")(
        previous_block_activation
    )
    x = layers.add([x, residual])  # Add back residual
    previous_block_activation = x  # Set aside next residual

# Head: one more Conv-BN-ReLU block, global average pooling, dropout, and a
# 2-way softmax classifier (Cat vs. Dog).
x = layers.Conv2D(1024, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.GlobalAveragePooling2D()(x)

x = layers.Dropout(0.5)(x)
outputs = layers.Dense(2, activation="softmax")(x)
model = keras.Model(inputs, outputs)
model.summary()
Model: "model_1"
__________________________________________________________________________________________________
 Layer (type)                   Output Shape         Param #     Connected to                     
==================================================================================================
 input_8 (InputLayer)           [(None, 160, 160, 3  0           []                               
                                )]                                                                
                                                                                                  
 rescaling_3 (Rescaling)        (None, 160, 160, 3)  0           ['input_8[0][0]']                
                                                                                                  
 conv2d_35 (Conv2D)             (None, 80, 80, 32)   896         ['rescaling_3[0][0]']            
                                                                                                  
 batch_normalization_18 (BatchN  (None, 80, 80, 32)  128         ['conv2d_35[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 activation_18 (Activation)     (None, 80, 80, 32)   0           ['batch_normalization_18[0][0]'] 
                                                                                                  
 conv2d_36 (Conv2D)             (None, 80, 80, 64)   18496       ['activation_18[0][0]']          
                                                                                                  
 batch_normalization_19 (BatchN  (None, 80, 80, 64)  256         ['conv2d_36[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 activation_19 (Activation)     (None, 80, 80, 64)   0           ['batch_normalization_19[0][0]'] 
                                                                                                  
 conv2d_37 (Conv2D)             (None, 80, 80, 64)   36928       ['activation_19[0][0]']          
                                                                                                  
 batch_normalization_20 (BatchN  (None, 80, 80, 64)  256         ['conv2d_37[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 activation_20 (Activation)     (None, 80, 80, 64)   0           ['batch_normalization_20[0][0]'] 
                                                                                                  
 max_pooling2d_17 (MaxPooling2D  (None, 40, 40, 64)  0           ['activation_20[0][0]']          
 )                                                                                                
                                                                                                  
 conv2d_38 (Conv2D)             (None, 40, 40, 64)   2112        ['activation_18[0][0]']          
                                                                                                  
 add_7 (Add)                    (None, 40, 40, 64)   0           ['max_pooling2d_17[0][0]',       
                                                                  'conv2d_38[0][0]']              
                                                                                                  
 conv2d_39 (Conv2D)             (None, 40, 40, 128)  73856       ['add_7[0][0]']                  
                                                                                                  
 batch_normalization_21 (BatchN  (None, 40, 40, 128)  512        ['conv2d_39[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 activation_21 (Activation)     (None, 40, 40, 128)  0           ['batch_normalization_21[0][0]'] 
                                                                                                  
 conv2d_40 (Conv2D)             (None, 40, 40, 128)  147584      ['activation_21[0][0]']          
                                                                                                  
 batch_normalization_22 (BatchN  (None, 40, 40, 128)  512        ['conv2d_40[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 activation_22 (Activation)     (None, 40, 40, 128)  0           ['batch_normalization_22[0][0]'] 
                                                                                                  
 max_pooling2d_18 (MaxPooling2D  (None, 20, 20, 128)  0          ['activation_22[0][0]']          
 )                                                                                                
                                                                                                  
 conv2d_41 (Conv2D)             (None, 20, 20, 128)  8320        ['add_7[0][0]']                  
                                                                                                  
 add_8 (Add)                    (None, 20, 20, 128)  0           ['max_pooling2d_18[0][0]',       
                                                                  'conv2d_41[0][0]']              
                                                                                                  
 conv2d_42 (Conv2D)             (None, 20, 20, 256)  295168      ['add_8[0][0]']                  
                                                                                                  
 batch_normalization_23 (BatchN  (None, 20, 20, 256)  1024       ['conv2d_42[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 activation_23 (Activation)     (None, 20, 20, 256)  0           ['batch_normalization_23[0][0]'] 
                                                                                                  
 conv2d_43 (Conv2D)             (None, 20, 20, 256)  590080      ['activation_23[0][0]']          
                                                                                                  
 batch_normalization_24 (BatchN  (None, 20, 20, 256)  1024       ['conv2d_43[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 activation_24 (Activation)     (None, 20, 20, 256)  0           ['batch_normalization_24[0][0]'] 
                                                                                                  
 max_pooling2d_19 (MaxPooling2D  (None, 10, 10, 256)  0          ['activation_24[0][0]']          
 )                                                                                                
                                                                                                  
 conv2d_44 (Conv2D)             (None, 10, 10, 256)  33024       ['add_8[0][0]']                  
                                                                                                  
 add_9 (Add)                    (None, 10, 10, 256)  0           ['max_pooling2d_19[0][0]',       
                                                                  'conv2d_44[0][0]']              
                                                                                                  
 conv2d_45 (Conv2D)             (None, 10, 10, 512)  1180160     ['add_9[0][0]']                  
                                                                                                  
 batch_normalization_25 (BatchN  (None, 10, 10, 512)  2048       ['conv2d_45[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 activation_25 (Activation)     (None, 10, 10, 512)  0           ['batch_normalization_25[0][0]'] 
                                                                                                  
 conv2d_46 (Conv2D)             (None, 10, 10, 512)  2359808     ['activation_25[0][0]']          
                                                                                                  
 batch_normalization_26 (BatchN  (None, 10, 10, 512)  2048       ['conv2d_46[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 activation_26 (Activation)     (None, 10, 10, 512)  0           ['batch_normalization_26[0][0]'] 
                                                                                                  
 max_pooling2d_20 (MaxPooling2D  (None, 5, 5, 512)   0           ['activation_26[0][0]']          
 )                                                                                                
                                                                                                  
 conv2d_47 (Conv2D)             (None, 5, 5, 512)    131584      ['add_9[0][0]']                  
                                                                                                  
 add_10 (Add)                   (None, 5, 5, 512)    0           ['max_pooling2d_20[0][0]',       
                                                                  'conv2d_47[0][0]']              
                                                                                                  
 conv2d_48 (Conv2D)             (None, 5, 5, 1024)   4719616     ['add_10[0][0]']                 
                                                                                                  
 batch_normalization_27 (BatchN  (None, 5, 5, 1024)  4096        ['conv2d_48[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 activation_27 (Activation)     (None, 5, 5, 1024)   0           ['batch_normalization_27[0][0]'] 
                                                                                                  
 global_average_pooling2d_1 (Gl  (None, 1024)        0           ['activation_27[0][0]']          
 obalAveragePooling2D)                                                                            
                                                                                                  
 dropout_5 (Dropout)            (None, 1024)         0           ['global_average_pooling2d_1[0][0
                                                                 ]']                              
                                                                                                  
 dense_5 (Dense)                (None, 2)            2050        ['dropout_5[0][0]']              
                                                                                                  
==================================================================================================
Total params: 9,611,586
Trainable params: 9,605,634
Non-trainable params: 5,952
__________________________________________________________________________________________________
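The model.png file in the directory listing above suggests the architecture was also rendered as an image. A minimal sketch of how such a plot can be produced (assuming pydot and graphviz are installed):

keras.utils.plot_model(model, to_file="model.png", show_shapes=True)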
epochs = 20

model.compile(
    optimizer=keras.optimizers.Adam(1e-3),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
model.fit(
    train_ds,
    epochs=epochs,
    validation_data=val_ds,
)
Epoch 1/20
Corrupt JPEG data: 2226 extraneous bytes before marker 0xd9
 71/138 [==============>...............] - ETA: 30s - loss: 0.8919 - accuracy: 0.5826
Corrupt JPEG data: 228 extraneous bytes before marker 0xd9
 80/138 [================>.............] - ETA: 26s - loss: 0.8709 - accuracy: 0.5844
Warning: unknown JFIF revision number 0.00
 99/138 [====================>.........] - ETA: 17s - loss: 0.8284 - accuracy: 0.5950
Corrupt JPEG data: 128 extraneous bytes before marker 0xd9
Corrupt JPEG data: 65 extraneous bytes before marker 0xd9
104/138 [=====================>........] - ETA: 15s - loss: 0.8211 - accuracy: 0.5969
Corrupt JPEG data: 396 extraneous bytes before marker 0xd9
106/138 [======================>.......] - ETA: 14s - loss: 0.8178 - accuracy: 0.5980
Corrupt JPEG data: 239 extraneous bytes before marker 0xd9
138/138 [==============================] - ETA: 0s - loss: 0.7849 - accuracy: 0.6080
Corrupt JPEG data: 252 extraneous bytes before marker 0xd9
Corrupt JPEG data: 1153 extraneous bytes before marker 0xd9
Corrupt JPEG data: 162 extraneous bytes before marker 0xd9
Corrupt JPEG data: 214 extraneous bytes before marker 0xd9
Corrupt JPEG data: 99 extraneous bytes before marker 0xd9
Corrupt JPEG data: 1403 extraneous bytes before marker 0xd9
138/138 [==============================] - 81s 507ms/step - loss: 0.7849 - accuracy: 0.6080 - val_loss: 1.0104 - val_accuracy: 0.4925
Epoch 2/20

···

137/138 [============================>.] - ETA: 0s - loss: 0.0418 - accuracy: 0.9854
Corrupt JPEG data: 252 extraneous bytes before marker 0xd9
Corrupt JPEG data: 1153 extraneous bytes before marker 0xd9
Corrupt JPEG data: 162 extraneous bytes before marker 0xd9
Corrupt JPEG data: 214 extraneous bytes before marker 0xd9
Corrupt JPEG data: 99 extraneous bytes before marker 0xd9
Corrupt JPEG data: 1403 extraneous bytes before marker 0xd9
138/138 [==============================] - 69s 493ms/step - loss: 0.0418 - accuracy: 0.9854 - val_loss: 0.1075 - val_accuracy: 0.9636
# Fine-tune: continue training for 10 more epochs at a lower learning rate (1e-5).
epochs = 10

model.compile(
    optimizer=keras.optimizers.Adam(1e-5),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
model.fit(
    train_ds,
    epochs=epochs,
    validation_data=val_ds,
)
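The output of this fine-tuning run is omitted. To check the roughly 96% validation accuracy claimed in the title, the trained model can be evaluated on the validation set; a minimal sketch, not part of the original run:

val_loss, val_acc = model.evaluate(val_ds)
print(f"Validation accuracy: {val_acc:.4f}")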
img = keras.preprocessing.image.load_img(
    "PetImages/Cat/6779.jpg", target_size=image_size
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)  # Create batch axis

predictions = model.predict(img_array)
# The softmax output has one probability per class; classes follow the
# alphabetical folder order, so index 0 is "Cat" and index 1 is "Dog".
score = float(predictions[0][1])
print(f"This image is {100 * (1 - score):.2f}% cat and {100 * score:.2f}% dog.")
array([[0.9912566 , 0.00874343]], dtype=float32)
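For a hard class decision instead of probabilities, one can take the argmax of the softmax output; a minimal sketch (the class order matches the alphabetical folder order, Cat then Dog):

import numpy as np

class_names = ["Cat", "Dog"]
predicted = class_names[int(np.argmax(predictions[0]))]
print(f"Predicted class: {predicted}")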