Python 添加 def myfunction() 时脚本不工作:
Python Script not working when I add def myfucntion():
我有一些代码在我单独 运行 时有效,但是当我 运行 将它定义为函数时它不起作用。我没有收到任何错误,所以它 运行,但是它不会拉回最新文件或更新 CSV 文件,它只是说和前一天一样。
这是为了更新一份报告,之前同事 运行 正在使用它,但我自己无法使用它。下面的代码是有效的:
def typose():
today = datetime.today().strftime('%d%m%Y')
yesterday = datetime.now() - timedelta(1)
yesterday1 = yesterday.strftime('%d%m%Y')
###############################################################################
#################### ESTABLISH CONNECTION TO ESENDEX SFTP #####################
###############################################################################
# Open a transport
# host,port = "sftp.esendex.com",22
host,port = "10.132.0.1",22
transport = paramiko.Transport((host,port))
# Auth
username,password = "bocsurveys","lfxDmr4i"
transport.connect(None,username,password)
# Go!
sftp = paramiko.SFTPClient.from_transport(transport)
###############################################################################
######################## PICK UP THE FILE FOR THE SMS #########################
############################### FROM ESENDEX ##################################
# Download the SMS
filepathsms = "/FromEsendex/CX_Survey_SMS_output_2_"+today+".csv"
localpathsms = "C:/Users/l0ad06/Desktop/Daily Feedback from Esendex/CX_Survey_SMS_output_2_"+today+".csv"
sftp.get(filepathsms ,localpathsms)
filepathsms2 = "/FromEsendex/CX_Survey_SMS_output_1_"+yesterday1+".csv"
localpathsms2 = "C:/Users/l0ad06/Desktop/Daily Feedback from Esendex/CX_Survey_SMS_output_1_"+yesterday1+".csv"
sftp.get(filepathsms2 ,localpathsms2)
filename = "C:/Users/l0ad06/Desktop/Daily Feedback from Esendex/CX_Survey_SMS_output_2_"+today+".csv"
filename2 = "C:/Users/l0ad06/Desktop/Daily Feedback from Esendex/CX_Survey_SMS_output_1_"+yesterday1+".csv"
###############################################################################
################## CREATING ONE RECORD PER DELIVERY NUMBER ####################
###############################################################################
##df1 = pandas.read_csv(filename,
## usecols= ['Question Label','Answer Label',
## 'Answer DateTime','Delivery Number',
## 'ShipTo Number'], encoding= 'unicode_escape')
df1 = pandas.read_csv(filename, usecols =[2,4,5,12,23],
encoding= 'unicode_escape')
df1 = df1.rename(columns= {df1.columns[0]: "Question Label",
df1.columns[1]: "Answer Label",
df1.columns[2]: "Answer DateTime",
df1.columns[3]: "Delivery Number",
df1.columns[4]: "ShipTo Number"})
# Filter only the records with scores
clean_data1 = df1[df1['Question Label'] != 2]
clean_data1 = clean_data1[clean_data1["Question Label"].notnull()]
clean_data2 = clean_data1[clean_data1['Answer Label'] != 'Error']
clean_df1 = pandas.DataFrame(clean_data2,
columns = ['Answer Label',
'Answer DateTime',
'Delivery Number',
'ShipTo Number'])
# Rename the columns
cleandf1 = clean_df1.rename(columns={"Answer Label": "Score",
"Answer DateTime": "Created",
"Delivery Number": "Delivery",
"ShipTo Number": "ShipTo" })
##df2 = pandas.read_csv(filename,
## usecols= ['Question Label',
## 'Answer DateTime',
## 'Answer Text',
## 'Delivery Number',
## 'ShipTo Number'], encoding= 'unicode_escape')
df2 = pandas.read_csv(filename, usecols =[2,5,6,12,23],
encoding= 'unicode_escape')
df2 = df2.rename(columns= {df2.columns[0]: "Question Label",
df2.columns[1]: "Answer DateTime",
df2.columns[2]: "Answer Text",
df2.columns[3]: "Delivery Number",
df2.columns[4]: "ShipTo Number"})
# Filter only the records with comments
clean_data3 = df2[df2['Question Label'] != 1]
clean_data3 = clean_data3[clean_data3["Question Label"].notnull()]
clean_df2 = pandas.DataFrame(clean_data3,
columns = ['Answer Text',
'Delivery Number',
'ShipTo Number'])
# Rename the columns
cleandf2 = clean_df2.rename(columns={"Answer Text": "Comment",
"Delivery Number": "Delivery",
"ShipTo Number": "ShipTo" })
## df3 = pandas.read_csv(filename,
## usecols= ['Classification Code','Classification Text',
## 'Country Code',
## 'Customer Post Code','Delivery Number',
## 'GroupTo Code','GroupTo Name',
## 'PGI Date','Plant Code',
## 'Plant Name','Pricing Area',
## 'Pricing Area Text','Sales Organisation',
## 'ShipTo Number'], encoding= 'unicode_escape')
df3 = pandas.read_csv(filename,
usecols= [7,8,9,11,12,13,14,16,17,18,19,20,22,23], encoding= 'unicode_escape')
df3 = df3.rename(columns = {df3.columns[0]: "Classification Code",
df3.columns[1]: "Classification Text",
df3.columns[2]: "Country Code",
df3.columns[3]: "Customer Post Code",
df3.columns[4]: "Delivery Number",
df3.columns[5]: "GroupTo Code",
df3.columns[6]: "GroupTo Name",
df3.columns[7]: "PGI Date",
df3.columns[8]: "Plant Code",
df3.columns[9]: "Plant Name",
df3.columns[10]: "Pricing Area",
df3.columns[11]: "Pricing Area Text",
df3.columns[12]: "Sales Organisation",
df3.columns[13]: "ShipTo Number"})
# dropping ALL duplicte values
clean_df3 = df3.drop_duplicates()
cleandf3 = clean_df3.rename(columns={"Classification Code": "Classification_Code",
"Classification Text": "Classification_Text",
"Customer Post Code": "Customer_Postcode",
"Country Code": "Country_Code",
"Delivery Number": "Delivery",
"GroupTo Code": "Group_To",
"GroupTo Name": "Group_To_Name",
"PGI Date": "PGI_Date",
"Plant Code": "Plant",
"Plant Name": "Plant_Name",
"Pricing Area": "Pricing_Area",
"Pricing Area Text": "Pricing_Area_Description",
"Sales Organisation": "Sales_Organisation",
"ShipTo Number": "ShipTo" })
# Join the tables
result1 = pandas.merge(cleandf1, cleandf2, how='left', on=['Delivery','ShipTo'])
result2 = pandas.merge(result1, cleandf3, how='left', on=['Delivery','ShipTo'])
# Check the data types
result2.dtypes
result2['Created'] = pandas.to_datetime(yesterday)
# Change the data types
result2 = result2.astype({'Score': 'str',
'Created': 'datetime64[ns]',
'Delivery': 'int64',
'ShipTo': 'int64',
'Comment':'str',
'Classification_Code':'str',
'Classification_Text':'str',
'Country_Code':'str',
'Customer_Postcode':'str',
'Group_To': 'float',
'Group_To_Name':'str',
'PGI_Date': 'int64',
'Plant':'int64',
'Plant_Name':'str',
'Pricing_Area':'str',
'Pricing_Area_Description':'str',
'Sales_Organisation': 'str'
})
# Add a column that will give us the channel
result2['Channel'] = 'SMS'
# Export to a csv
result2.to_csv(r'C:/Users/l0ad06/Desktop/Daily Feedback from Esendex/CX_Survey_SMS_output_2_'+today+'.csv', index = False)
schedule.every().day.at("09:00").do(typose)
有谁知道为什么我添加 def typose():?
时它不起作用
您的缩进似乎不正确
您的代码:
def typose():
today = datetime.today().strftime('%d%m%Y')
试试这个:
def typose():
today = datetime.today().strftime('%d%m%Y')
有可能是以下原因造成的问题!
- 缩进
您需要而不是执行以下操作
def typose():
today = datetime.today().strftime('%d%m%Y')
- 检查调用函数
尝试检查以下命令是否确实 运行 代码,您可以
通过做这个简单的测试来检查:
改变 schedule.every().day.at("09:00").do(typose)
至 typose()
如果有效,则 schedule.every().day.at("09:00").do(typose)
不会 运行 或调用函数 typose,因此如果它是 [=14=,则可以将其更改为 if 语句] 它 运行 是代码。
- 查看功能码
输入错误值时可能会出现问题,执行时没有错误,但无法正常工作!
如果这没有帮助,请告诉我
我有一些代码在我单独 运行 时有效,但是当我 运行 将它定义为函数时它不起作用。我没有收到任何错误,所以它 运行,但是它不会拉回最新文件或更新 CSV 文件,它只是说和前一天一样。
这是为了更新一份报告,之前同事 运行 正在使用它,但我自己无法使用它。下面的代码是有效的:
def typose():
today = datetime.today().strftime('%d%m%Y')
yesterday = datetime.now() - timedelta(1)
yesterday1 = yesterday.strftime('%d%m%Y')
###############################################################################
#################### ESTABLISH CONNECTION TO ESENDEX SFTP #####################
###############################################################################
# Open a transport
# host,port = "sftp.esendex.com",22
host,port = "10.132.0.1",22
transport = paramiko.Transport((host,port))
# Auth
username,password = "bocsurveys","lfxDmr4i"
transport.connect(None,username,password)
# Go!
sftp = paramiko.SFTPClient.from_transport(transport)
###############################################################################
######################## PICK UP THE FILE FOR THE SMS #########################
############################### FROM ESENDEX ##################################
# Download the SMS
filepathsms = "/FromEsendex/CX_Survey_SMS_output_2_"+today+".csv"
localpathsms = "C:/Users/l0ad06/Desktop/Daily Feedback from Esendex/CX_Survey_SMS_output_2_"+today+".csv"
sftp.get(filepathsms ,localpathsms)
filepathsms2 = "/FromEsendex/CX_Survey_SMS_output_1_"+yesterday1+".csv"
localpathsms2 = "C:/Users/l0ad06/Desktop/Daily Feedback from Esendex/CX_Survey_SMS_output_1_"+yesterday1+".csv"
sftp.get(filepathsms2 ,localpathsms2)
filename = "C:/Users/l0ad06/Desktop/Daily Feedback from Esendex/CX_Survey_SMS_output_2_"+today+".csv"
filename2 = "C:/Users/l0ad06/Desktop/Daily Feedback from Esendex/CX_Survey_SMS_output_1_"+yesterday1+".csv"
###############################################################################
################## CREATING ONE RECORD PER DELIVERY NUMBER ####################
###############################################################################
##df1 = pandas.read_csv(filename,
## usecols= ['Question Label','Answer Label',
## 'Answer DateTime','Delivery Number',
## 'ShipTo Number'], encoding= 'unicode_escape')
df1 = pandas.read_csv(filename, usecols =[2,4,5,12,23],
encoding= 'unicode_escape')
df1 = df1.rename(columns= {df1.columns[0]: "Question Label",
df1.columns[1]: "Answer Label",
df1.columns[2]: "Answer DateTime",
df1.columns[3]: "Delivery Number",
df1.columns[4]: "ShipTo Number"})
# Filter only the records with scores
clean_data1 = df1[df1['Question Label'] != 2]
clean_data1 = clean_data1[clean_data1["Question Label"].notnull()]
clean_data2 = clean_data1[clean_data1['Answer Label'] != 'Error']
clean_df1 = pandas.DataFrame(clean_data2,
columns = ['Answer Label',
'Answer DateTime',
'Delivery Number',
'ShipTo Number'])
# Rename the columns
cleandf1 = clean_df1.rename(columns={"Answer Label": "Score",
"Answer DateTime": "Created",
"Delivery Number": "Delivery",
"ShipTo Number": "ShipTo" })
##df2 = pandas.read_csv(filename,
## usecols= ['Question Label',
## 'Answer DateTime',
## 'Answer Text',
## 'Delivery Number',
## 'ShipTo Number'], encoding= 'unicode_escape')
df2 = pandas.read_csv(filename, usecols =[2,5,6,12,23],
encoding= 'unicode_escape')
df2 = df2.rename(columns= {df2.columns[0]: "Question Label",
df2.columns[1]: "Answer DateTime",
df2.columns[2]: "Answer Text",
df2.columns[3]: "Delivery Number",
df2.columns[4]: "ShipTo Number"})
# Filter only the records with comments
clean_data3 = df2[df2['Question Label'] != 1]
clean_data3 = clean_data3[clean_data3["Question Label"].notnull()]
clean_df2 = pandas.DataFrame(clean_data3,
columns = ['Answer Text',
'Delivery Number',
'ShipTo Number'])
# Rename the columns
cleandf2 = clean_df2.rename(columns={"Answer Text": "Comment",
"Delivery Number": "Delivery",
"ShipTo Number": "ShipTo" })
## df3 = pandas.read_csv(filename,
## usecols= ['Classification Code','Classification Text',
## 'Country Code',
## 'Customer Post Code','Delivery Number',
## 'GroupTo Code','GroupTo Name',
## 'PGI Date','Plant Code',
## 'Plant Name','Pricing Area',
## 'Pricing Area Text','Sales Organisation',
## 'ShipTo Number'], encoding= 'unicode_escape')
df3 = pandas.read_csv(filename,
usecols= [7,8,9,11,12,13,14,16,17,18,19,20,22,23], encoding= 'unicode_escape')
df3 = df3.rename(columns = {df3.columns[0]: "Classification Code",
df3.columns[1]: "Classification Text",
df3.columns[2]: "Country Code",
df3.columns[3]: "Customer Post Code",
df3.columns[4]: "Delivery Number",
df3.columns[5]: "GroupTo Code",
df3.columns[6]: "GroupTo Name",
df3.columns[7]: "PGI Date",
df3.columns[8]: "Plant Code",
df3.columns[9]: "Plant Name",
df3.columns[10]: "Pricing Area",
df3.columns[11]: "Pricing Area Text",
df3.columns[12]: "Sales Organisation",
df3.columns[13]: "ShipTo Number"})
# dropping ALL duplicte values
clean_df3 = df3.drop_duplicates()
cleandf3 = clean_df3.rename(columns={"Classification Code": "Classification_Code",
"Classification Text": "Classification_Text",
"Customer Post Code": "Customer_Postcode",
"Country Code": "Country_Code",
"Delivery Number": "Delivery",
"GroupTo Code": "Group_To",
"GroupTo Name": "Group_To_Name",
"PGI Date": "PGI_Date",
"Plant Code": "Plant",
"Plant Name": "Plant_Name",
"Pricing Area": "Pricing_Area",
"Pricing Area Text": "Pricing_Area_Description",
"Sales Organisation": "Sales_Organisation",
"ShipTo Number": "ShipTo" })
# Join the tables
result1 = pandas.merge(cleandf1, cleandf2, how='left', on=['Delivery','ShipTo'])
result2 = pandas.merge(result1, cleandf3, how='left', on=['Delivery','ShipTo'])
# Check the data types
result2.dtypes
result2['Created'] = pandas.to_datetime(yesterday)
# Change the data types
result2 = result2.astype({'Score': 'str',
'Created': 'datetime64[ns]',
'Delivery': 'int64',
'ShipTo': 'int64',
'Comment':'str',
'Classification_Code':'str',
'Classification_Text':'str',
'Country_Code':'str',
'Customer_Postcode':'str',
'Group_To': 'float',
'Group_To_Name':'str',
'PGI_Date': 'int64',
'Plant':'int64',
'Plant_Name':'str',
'Pricing_Area':'str',
'Pricing_Area_Description':'str',
'Sales_Organisation': 'str'
})
# Add a column that will give us the channel
result2['Channel'] = 'SMS'
# Export to a csv
result2.to_csv(r'C:/Users/l0ad06/Desktop/Daily Feedback from Esendex/CX_Survey_SMS_output_2_'+today+'.csv', index = False)
schedule.every().day.at("09:00").do(typose)
有谁知道为什么我添加 def typose():?
您的缩进似乎不正确
您的代码:
def typose():
today = datetime.today().strftime('%d%m%Y')
试试这个:
def typose():
today = datetime.today().strftime('%d%m%Y')
有可能是以下原因造成的问题!
- 缩进
您需要而不是执行以下操作
def typose():
today = datetime.today().strftime('%d%m%Y')
- 检查调用函数
尝试检查以下命令是否确实 运行 代码,您可以
通过做这个简单的测试来检查:
改变 schedule.every().day.at("09:00").do(typose)
至 typose()
如果有效,则 schedule.every().day.at("09:00").do(typose)
不会 运行 或调用函数 typose,因此如果它是 [=14=,则可以将其更改为 if 语句] 它 运行 是代码。
- 查看功能码
输入错误值时可能会出现问题,执行时没有错误,但无法正常工作!
如果这没有帮助,请告诉我