如何在ubuntu下为caffe创建HDF5数据源?
hdf5数据源示例
layer{
name:"data"
type:"HDF5Data"
top:"data"
top:"label"
hdf5_data_param{
source:"examples/hdf5_classification/data/train.txt"
batch_size:10
}
}
### hdf5文件label的写法:
### 假设有三张人脸图片,要定位图片中人脸的眼睛、鼻子、嘴的位置
1.jpg 11 78 62 71 84 65 26 51 46 89
2.jpg 13 25 62 53 53 65 26 32 42 13
3.jpg 15 78 66 13 84 65 32 41 51 65
使用python生成数据源:
import h5py  # was misspelled "ipmort hdf5"; the HDF5 binding actually used below is h5py
import os
import cv2
import math
import numpy as np
import random
import re

# Generate Caffe HDF5 data sources from an index file that lists, per line,
# an image filename followed by 10 landmark coordinates (eyes/nose/mouth),
# given in a 256-pixel reference frame.
root_path = "/home/lichunlin/caffe_case/HDF5/image"
with open("/home/lichunlin/caffe_case/HDF5/hdf5.txt", 'r') as f:
    lines = f.readlines()
num = len(lines)
random.shuffle(lines)

imgAccu = 0
imgs = np.zeros([num, 3, 224, 224])
labels = np.zeros([num, 10])
for i in range(num):
    line = lines[i]  # was "line-lines[i]" (typo: '-' instead of '=')
    segments = re.split(r'\s+', line[:-1])
    print(segments[0])
    img = cv2.imread(os.path.join(root_path, segments[0]))
    img = cv2.resize(img, (224, 224))
    # OpenCV returns h*w*c, but Caffe expects c*h*w (h: height, w: width, c: channel)
    img = img.transpose(2, 0, 1)
    imgs[i, :, :, :] = img.astype(np.float32)
    for j in range(10):
        # The image was resized from 256 to 224, so rescale each coordinate.
        # Convert the string to float BEFORE scaling; the original multiplied
        # the string itself: float(segments[j+1]*224/256).
        labels[i, j] = float(segments[j + 1]) * 224 / 256

# Split the data into several .h5 files; keep each one small enough for HDF5/Caffe
# (the original note suggests fewer than 8000 samples per file).
batchSize = 1
batchNum = int(math.ceil(1.0 * num / batchSize))

imgsMean = np.mean(imgs, axis=0)  # per-pixel mean image, subtracted per batch below
# imgs = (imgs - imgsMean) / 255.0   # normalization is applied batch-by-batch instead
labelsMean = np.mean(labels, axis=0)  # was split across two lines ("np." / "mean(...)")
labels = (labels - labelsMean) / 10

if os.path.exists('trainlist.txt'):
    os.remove('trainlist.txt')
if os.path.exists('testlist.txt'):
    os.remove('testlist.txt')

comp_kwargs = {'compression': 'gzip', 'compression_opts': 1}
# The original transcription lost this loop; without it 'i' is stale and only
# one file would ever be considered. All batches except the last are training
# data; the last batch becomes the test file.
for i in range(batchNum):
    start = i * batchSize
    end = min((i + 1) * batchSize, num)
    if i < batchNum - 1:
        # .format() must be called on the string, not embedded inside the literal
        filename = '/home/lichunlin/caffe_case/HDF5/h5/train{0}.h5'.format(i)
    else:
        filename = '/home/lichunlin/caffe_case/HDF5/h5/test{0}.h5'.format(i - batchNum + 1)
    print(filename)
    with h5py.File(filename, 'w') as f:
        # Mean-subtract and scale to roughly [-1, 1]; the divisor is 255.0
        # (pixel range), not the original's typo 225.0.
        f.create_dataset('data',
                         data=np.array((imgs[start:end] - imgsMean) / 255.0).astype(np.float32),
                         **comp_kwargs)
        f.create_dataset('label',
                         data=np.array(labels[start:end]).astype(np.float32),  # was "lables"
                         **comp_kwargs)
    if i < batchNum - 1:
        with open('/home/lichunlin/caffe_case/HDF5/h5/trainlist.txt', 'a') as f:
            f.write(os.path.join(os.getcwd(), 'train{0}.h5').format(i) + '\n')
    else:
        # The test file must be listed in testlist.txt; the original appended a
        # 'train' entry to trainlist.txt here, so Caffe would never see the test set.
        with open('/home/lichunlin/caffe_case/HDF5/h5/testlist.txt', 'a') as f:
            f.write(os.path.join(os.getcwd(), 'test{0}.h5').format(i - batchNum + 1) + '\n')

# Collapse the mean image to one value per channel (B, G, R) for mean.txt.
imgsMean = np.mean(imgsMean, axis=(1, 2))
with open('mean.txt', 'w') as f:
    f.write(str(imgsMean[0]) + '\n' + str(imgsMean[1]) + '\n' + str(imgsMean[2]))
本文内容由网友自发贡献,版权归原作者所有,本站不承担相应法律责任。如您发现有涉嫌抄袭侵权的内容,请联系:hwhale#tublm.com(使用前将#替换为@)