My script has now been running for over 5 hours. I have 258 CSV files that I want to convert to TFRecords. I wrote the following script, and as I said, it has been running for more than 5 hours already:
import argparse
import os
import sys

import standardize_data
import tensorflow as tf

FLAGS = None
PATH = '/home/darth/GitHub Projects/gru_svm/dataset/train'


def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def _float_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))


def convert_to(dataset, name):
    """Converts a dataset to tfrecords"""
    filename_queue = tf.train.string_input_producer(dataset)

    # TF reader
    reader = tf.TextLineReader()

    # default values, in case of empty columns
    record_defaults = [[0.0] for _ in range(24)]

    key, value = reader.read(filename_queue)

    duration, service, src_bytes, dest_bytes, count, same_srv_rate, \
        serror_rate, srv_serror_rate, dst_host_count, dst_host_srv_count, \
        dst_host_same_src_port_rate, dst_host_serror_rate, dst_host_srv_serror_rate, \
        flag, ids_detection, malware_detection, ashula_detection, label, src_ip_add, \
        src_port_num, dst_ip_add, dst_port_num, start_time, protocol = \
        tf.decode_csv(value, record_defaults=record_defaults)

    # everything except `label` goes into the feature vector
    features = tf.stack([duration, service, src_bytes, dest_bytes, count, same_srv_rate,
                         serror_rate, srv_serror_rate, dst_host_count, dst_host_srv_count,
                         dst_host_same_src_port_rate, dst_host_serror_rate,
                         dst_host_srv_serror_rate, flag, ids_detection, malware_detection,
                         ashula_detection, src_ip_add, src_port_num, dst_ip_add,
                         dst_port_num, start_time, protocol])

    filename = os.path.join(FLAGS.directory, name + '.tfrecords')
    print('Writing {}'.format(filename))
    writer = tf.python_io.TFRecordWriter(filename)

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            while not coord.should_stop():
                # fetch key in the same run() call; a separate sess.run(key)
                # would trigger another read and skip a line per iteration
                example, l, k = sess.run([features, label, key])
                print('Writing {dataset} : {example}, {label}'.format(
                    dataset=k, example=example, label=l))
                example_to_write = tf.train.Example(features=tf.train.Features(feature={
                    'duration': _float_feature(example[0]),
                    'service': _int64_feature(int(example[1])),
                    'src_bytes': _float_feature(example[2]),
                    'dest_bytes': _float_feature(example[3]),
                    'count': _float_feature(example[4]),
                    'same_srv_rate': _float_feature(example[5]),
                    'serror_rate': _float_feature(example[6]),
                    'srv_serror_rate': _float_feature(example[7]),
                    'dst_host_count': _float_feature(example[8]),
                    'dst_host_srv_count': _float_feature(example[9]),
                    'dst_host_same_src_port_rate': _float_feature(example[10]),
                    'dst_host_serror_rate': _float_feature(example[11]),
                    'dst_host_srv_serror_rate': _float_feature(example[12]),
                    'flag': _int64_feature(int(example[13])),
                    'ids_detection': _int64_feature(int(example[14])),
                    'malware_detection': _int64_feature(int(example[15])),
                    'ashula_detection': _int64_feature(int(example[16])),
                    'label': _int64_feature(int(l)),
                    'src_ip_add': _float_feature(example[17]),
                    'src_port_num': _float_feature(example[18]),
                    'dst_ip_add': _float_feature(example[19]),
                    'dst_port_num': _float_feature(example[20]),
                    'start_time': _float_feature(example[21]),
                    'protocol': _int64_feature(int(example[22])),
                }))
                writer.write(example_to_write.SerializeToString())
        except tf.errors.OutOfRangeError:
            print('Done converting -- EOF reached.')
        finally:
            coord.request_stop()
            coord.join(threads)
            # close the writer once the reader threads have stopped,
            # so it also runs when OutOfRangeError ends the loop
            writer.close()


def main(unused_argv):
    files = standardize_data.list_files(path=PATH)
    convert_to(dataset=files, name='train')
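I've trimmed the argparse boilerplate from the listing above, so nothing shown here ever assigns FLAGS even though convert_to reads FLAGS.directory. It is roughly the following; take the exact --directory flag name as an assumption on my part, since only the attribute access appears above:

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # assumed flag name, chosen to match the FLAGS.directory lookup in convert_to()
    parser.add_argument('--directory', type=str, default='/tmp/data',
                        help='directory to write the .tfrecords file to')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)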
This has led me to wonder whether it is stuck in an infinite loop. What I want to do is read all the rows of each CSV file (all 258 of them) and write those rows to a TFRecords file (as features and a label, of course), then stop the loop once there are no more rows available, i.e. once the CSV files have been exhausted.
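One plausible explanation I haven't confirmed yet: tf.train.string_input_producer defaults to num_epochs=None, which cycles through the filename list indefinitely, so tf.errors.OutOfRangeError is never raised and the while loop never terminates. A minimal single-pass sketch of the change would be:

# serve each filename exactly once instead of cycling forever
filename_queue = tf.train.string_input_producer(dataset, num_epochs=1, shuffle=False)

# ... graph construction as above ...

with tf.Session() as sess:
    # num_epochs is implemented with a local variable, so locals must be initialized too
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

With num_epochs=1, the reader raises OutOfRangeError after the last row of the last file, which is exactly the stopping condition the try/except block is waiting for.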
The standardize_data.list_files(path) is a function I wrote in a different module and simply reused for this script. What it does is return a list of all the files found in PATH. Note that my PATH contains only CSV files.
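Roughly, it is equivalent to this simplified sketch (not the exact code from my module, but the same behavior):

import os

def list_files(path):
    """Return the full paths of all files found under `path`."""
    return [os.path.join(path, filename) for filename in os.listdir(path)]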