像这样编写代码会更符合惯用写法:
def ProcessLargeTextFile(infile="filepath", outfile="outfilepath"):
    """Drop the last 3 characters of each line's first three space-separated
    fields, writing the rewritten lines to *outfile*.

    Args:
        infile: path of the file to read. Default kept for backward
            compatibility with the original hard-coded path.
        outfile: path of the file to write.

    Notes:
        - ``str.replace`` substitutes *every* occurrence of a field's text
          within the line, not just the field itself.
        - When a line has exactly three fields, the third field still carries
          the trailing newline, so the newline is removed along with the last
          3 characters.
    """
    with open(infile, "r") as r, open(outfile, "w") as w:
        for line in r:
            # Only the first three fields matter; any extra fields pass
            # through unchanged.
            x, y, z = line.split(' ')[:3]
            w.write(line.replace(x, x[:-3]).replace(y, y[:-3]).replace(z, z[:-3]))
这里主要的节省在于只做一次 split,但如果 CPU 不是瓶颈,这可能不会产生什么影响。
一次保存几千行再一次性写入它们可能会有帮助,以减少硬盘驱动器的抖动。一百万行也只占 54MB 内存!
def ProcessLargeTextFile(infile="filepath", outfile="outfilepath", bunchsize=1000000):
    """Drop the last 3 characters of each line's first three space-separated
    fields, buffering output lines and writing them in batches.

    Args:
        infile: path of the file to read. Default kept for backward
            compatibility with the original hard-coded path.
        outfile: path of the file to write.
        bunchsize: number of lines to buffer before each ``writelines``
            call. Experiment with different sizes.

    Notes:
        - ``str.replace`` substitutes *every* occurrence of a field's text
          within the line, not just the field itself.
        - When a line has exactly three fields, the third field still carries
          the trailing newline, so the newline is removed along with the last
          3 characters.
    """
    bunch = []
    with open(infile, "r") as r, open(outfile, "w") as w:
        for line in r:
            x, y, z = line.split(' ')[:3]
            bunch.append(line.replace(x, x[:-3]).replace(y, y[:-3]).replace(z, z[:-3]))
            if len(bunch) == bunchsize:
                w.writelines(bunch)
                bunch = []
        # Flush whatever remains after the loop (a partial batch).
        w.writelines(bunch)
按照 @Janne 的建议,另一种生成这些行的方法:
def ProcessLargeTextFile(infile="filepath", outfile="outfilepath", bunchsize=1000000):
    """Drop the last 3 characters of each line's first three space-separated
    fields, rebuilding each line with ``join`` and writing in batches.

    Unlike the ``replace``-based variants, this rewrites only the three
    leading fields (no risk of substituting repeated text elsewhere in the
    line), but it requires every line to contain at least four
    space-separated parts; a line with fewer raises ``ValueError`` on the
    unpacking.

    Args:
        infile: path of the file to read. Default kept for backward
            compatibility with the original hard-coded path.
        outfile: path of the file to write.
        bunchsize: number of lines to buffer before each ``writelines``
            call. Experiment with different sizes.
    """
    bunch = []
    with open(infile, "r") as r, open(outfile, "w") as w:
        for line in r:
            # Split off the first three fields; ``rest`` keeps the remainder
            # of the line, including the trailing newline.
            x, y, z, rest = line.split(' ', 3)
            bunch.append(' '.join((x[:-3], y[:-3], z[:-3], rest)))
            if len(bunch) == bunchsize:
                w.writelines(bunch)
                bunch = []
        # Flush whatever remains after the loop (a partial batch).
        w.writelines(bunch)