【发布时间】:2019-08-07 14:25:37
【问题描述】:
我运行这个 python 代码来读取文件并将数据上传到引擎。 它运行良好,但是突然失败并在中间抛出错误。 我做了一些研究,但找不到有效的解决方案。 以下是错误
--- Logging error --- Traceback (most recent call last):
File "C:\Python36\lib\logging\__init__.py", line 998, in emit self.flush()
File "C:\Python36\lib\logging\__init__.py", line 978, in flush self.stream.flush()
OSError: [Errno 22] Invalid argument
下面是代码:
import argparse
import httplib2
import numpy as np
import pprint
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client.client import OAuth2Credentials, HttpAccessTokenRefreshError
import pandas as pd
from datetime import date, timedelta
from dateutil.parser import parse
import time
import os
import json
import datetime
import logging
from datetime import datetime
import pysftp
import warnings
header = []
final_report = ""
logging.basicConfig(filename='Logs/DialogTech_To_DS3' + date.today().strftime("%Y.%m.%d"), level=logging.INFO)
def create_credentials(client_id, client_secret, refresh_token):
"""Create Google OAuth2 credentials.
Returns:
OAuth2Credentials
"""
return OAuth2Credentials(access_token=None,
client_id=client_id,
client_secret=client_secret,
refresh_token=refresh_token,
token_expiry=None,
token_uri=GOOGLE_TOKEN_URI,
user_agent=None)
service = build('doubleclicksearch', 'v2', http=http)
return service
for filename in os.listdir('J:/SharedFolder/Feeds/Data/'):
file = 'J:/SharedFolder/Feeds/Data/' + filename
if filename.startswith('Daily_'):
print(filename)
file_name = filename
logging.info("Uploading Conversions from " + filename)
columns = ['Timestamp', 'GCLID', 'camp', 'OrderID', 'Orders', 'Revenue',
'OrderLevelDiscount', 'Units', 'OutOfStockViews', 'ScorecardApplied', 'StoreLocator']
data = pd.read_csv(file, delimiter='\t')
data['Revenue'] = data['Revenue'].map(lambda x: '{:.2f}'.format(x))
data['OrderID'] = data['OrderID'].map(lambda x: '{:.0f}'.format(x))
#data['OrderID'] = data['OrderID'].apply(lambda x: int(x) if "." in str(x) else x)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
dir = 'J:/SharedFolder/Feeds/Data/'
# data.to_csv(dir + 'FNS_' + filename.replace('Daily_', '').replace('.txt', '') + '.csv')
print(data.head(data['Timestamp'].count()))
print(data['Timestamp'].count())
for index, row in data.iterrows():
dt = parse(row['Timestamp'])
millisecond = int(round(dt.timestamp() * 1000))
#print(row)
if row['Orders'] > 0:
order_revenue_upload(service, row['GCLID'], str(row['OrderID']) + str(index), millisecond, row['Revenue'], row['Orders'])
if row['OrderLevelDiscount'] > 0:
order_level_discount_upload(service, row['GCLID'], str(row['OrderID']) + "_OLD_" + str(index), millisecond, row['OrderLevelDiscount'])
if row['Units'] > 0:
units_upload(service, row['GCLID'], str(row['OrderID']) + "_U_" + str(index), millisecond, row['Units'])
if row['OutOfStockViews'] > 0:
out_of_stock_views_upload(service, row['GCLID'], str(row['OrderID']) + "_OOSV_" + str(index), millisecond, row['OutOfStockViews'])
if row['ScorecardApplied'] > 0:
score_card_applied_upload(service, row['GCLID'], str(row['OrderID']) + "_SCA_" + str(index), millisecond, row['ScorecardApplied'])
if row['StoreLocator'] > 0:
store_locator_upload(service, row['GCLID'], str(row['OrderID']) + "_SL_" + str(index), millisecond, row['StoreLocator'])
os.rename(file, 'J:/SharedFolder/Feeds/Data/' + file_name)
【问题讨论】:
-
是否有可能您正在记录的任何文件名都无法使用文件系统的默认编码(定义为here)进行编码?
-
我不这么认为。该文件可以访问。