The original asker has surely solved the problem by now, but my answer may help others who run into the same issue.
Try this solution (adapt it to your own dataset). I have used it on 50–80 GB of data; adding numpy improves performance further.
import pandas as pd
from datetime import date
from datetime import datetime
def process_flux_csv(
    input_path='sampleDataframeFlux/sampleDataframeFlux.csv',
    output_path='file_11.csv',
    bucket_seconds=5,
):
    """Load a flux-style CSV, bucket timestamps, pivot to wide form, write CSV.

    Reads the columns ``time`` (epoch nanoseconds), ``Label``, ``Server`` and
    ``value``; merges server and metric name into a single ``Server|Label``
    key, truncates timestamps to whole seconds, rounds each timestamp to the
    nearest multiple of ``bucket_seconds``, pivots to one column per
    ``Server|Label`` (missing cells filled with 0, duplicate cells averaged by
    ``pivot_table``'s default ``mean``), converts the epoch index to
    datetimes, and writes the result to ``output_path``.

    Parameters
    ----------
    input_path : str
        Path of the source CSV (defaults to the original hard-coded path).
    output_path : str
        Path the pivoted CSV is written to.
    bucket_seconds : int
        Width of the time bucket; timestamps are rounded to the nearest
        multiple of this value (default 5, as in the original script).

    Returns
    -------
    pandas.DataFrame
        The pivoted (wide) DataFrame that was written to ``output_path``.
    """
    print("1 Load rec =", datetime.now())
    df = pd.read_csv(
        input_path,
        low_memory=False,
        memory_map=True,
        engine='c',
        na_filter=False,
        index_col=False,
        usecols=["time", "Label", "Server", "value"],
    )
    print("shape", df.shape)

    print("2 Create dataframe =", datetime.now())
    # Fold the server name into the label so the pivot yields one column
    # per (server, metric) pair.
    df["Label"] = df["Server"] + "|" + df["Label"]
    df.drop(['Server'], axis=1, inplace=True)

    print("3 Time trim =", datetime.now())
    # Epoch nanoseconds -> whole epoch seconds (integer floor division).
    df['time'] = df['time'] // 1_000_000_000
    print("shape", df.shape)

    print("4 Round Epoch to nearest multiple of", bucket_seconds, "=", datetime.now())
    df['time'] = bucket_seconds * round(df['time'] / bucket_seconds)
    print("shape", df.shape)

    print("5 Pivot dataframe=", datetime.now())
    df = df.pivot_table(index='time', columns=["Label"], values="value", fill_value=0)
    print("shape", df.shape)

    print("6 Epoch to UTC =", datetime.now())
    df.index = pd.to_datetime(df.index, unit='s')

    print("7 Convert to type category to reduce memory =", datetime.now())
    # NOTE(review): casting the pivoted values to 'category' only helps when
    # many values repeat; kept for parity with the original behaviour.
    df = df.astype('category')
    print("shape", df.shape)

    print("8 Start to write to a file =", datetime.now())
    df.to_csv(output_path, header=True, chunksize=500000)
    print("9 Finish =", datetime.now())
    return df


if __name__ == "__main__":
    # Preserves the original script behaviour when run directly, while
    # keeping the pipeline importable without triggering the heavy I/O.
    process_flux_csv()