json python 处理每个文件中具有不同 id 的两个 csv 文件时遇到问题
Having trouble processing two csv files that has different id's in each file with json python
我有两个 csv 文件,我想打开它们并从两个 csv 文件中获取唯一 ID。每个 id 都有我想要获取的特定日期并减去这些日期。问题是我需要从一个 csv 中获取一个日期,从另一个 csv 中获取另一个日期。这两个 csv 文件都有一些我不需要的数据,所以在我的代码中我创建了新的字典并从两个 csv 文件中提取了这些日期并将它们转储到一个新的 JSON 文件中。我想指出的是,在一个 csv 文件中,唯一 ID 聚集在 csv 文件的中间,而其他唯一 ID 聚集在最后,所以当运行代码时,我注意到代码不断重新回到 csv 文件的顶部,并且不会继续检查 csv 文件中后面的唯一 ID。
代码在函数 make_json() 中运行,它获取我想要的数据并减去 CSV 中的两个日期,然后将它们转储到 JSON 文件中。另一个名为 getMarginDict() 的函数来自另一个 CSV 文件,该文件中也有日期。唯一的区别是我需要减去的日期在另一个 CSV 文件中。我需要从 csv1 中获取日期并从 csv2 中减去日期,但如上所述,代码一直在顶部进行处理。我相信这是因为文件正在重新打开。有没有办法让代码继续处理唯一 ID 到它停止的地方。最后,代码在检查第一个唯一 ID 时有效,但随后重新启动。
这是最终的 JSON 文件的样子:如您所见,最后三个名为“新日期”的字段计算不正确,但前两个是。
{"Name": "1", "Unique ID": "1357", "Date Finish": "06/19/2024", "Date Completed": "06/19/2024", "Date Left": 775, "Date Completed-Today's Date": 775, "New Date": 43}
{"Name": "2", "Unique ID": "1357", "Date Finish": "06/19/2024", "Date Completed": "06/19/2024", "Date Left": 775, "Date Completed-Today's Date": 775, "New Date": 43}
{"Name": "3", "Unique ID": "7531", "Date Finish": "12/25/2024", "Date Completed": "12/25/2024", "Date Left": 964, "Date Completed-Today's Date": 964, "New Date": -146}
{"Name": "4", "Unique ID": "7531", "Date Finish": "12/25/2024", "Date Completed": "12/25/2024", "Date Left": 964, "Date Completed-Today's Date": 964, "New Date": -146}
{"Name": "5", "Unique ID": "7531", "Date Finish": "12/25/2024", "Date Completed": "12/25/2024", "Date Left": 964, "Date Completed-Today's Date": 964, "New Date": -146}
# Shared mutable module-level state used by the helper functions below.
data = {}  # working dict for the row currently being processed
newData = []  # accumulated output records, written to the JSON file at the end
marginData = {}  # holds the "New Date" value computed from the second CSV
def incrementSV(data, x):
    """Record the running record number *x* under data["Name"] and echo *x* back."""
    name_value = str(x)
    data["Name"] = name_value
    return x
def createNewDict(data, IDNum, endDate, latefinish, diffBaseline, diffLateFinish):
    """Populate *data* in place with the fields for one output record.

    NOTE: the original file defined createNewDict twice; the first definition
    (which merely returned its arguments) was dead code, immediately shadowed
    by this one, so it has been removed.

    Parameters:
        data (dict): record dict mutated in place.
        IDNum (str): the row's "Unique ID" value.
        endDate (str): the row's "End Date" string (mm/dd/YYYY).
        latefinish (str): the row's "Date Completed" string (mm/dd/YYYY).
        diffBaseline (datetime.timedelta): end date minus today.
        diffLateFinish (datetime.timedelta): completed date minus today.
    """
    data["Unique ID"] = IDNum
    data["Date Finish"] = endDate
    data["Date Completed"] = latefinish
    # NOTE(review): the sample output shows a "Date Left" key, but the code has
    # always written "Days Left" — kept as-is to preserve behavior.
    data["Days Left"] = diffBaseline.days
    data["Date Completed-Today's Date"] = diffLateFinish.days
def getMarginDict(endDateNew):
    """Scan the second CSV and store its date margin in marginData["New Date"].

    Reads csvFilePathIMS from the top and, for the first row whose
    "Unique ID" is 1234 or 4321, stores (that row's "Finish" date minus
    *endDateNew*) in days under marginData["New Date"].

    Parameters:
        endDateNew (datetime.datetime): end date parsed from the first CSV.

    Returns:
        dict | None: the module-level marginData when a matching ID is found,
        otherwise None (the file is exhausted without a match).
    """
    with open(csvFilePathIMS, encoding='utf-8-sig') as csvf:
        csvReaderGR = csv.DictReader(csvf)
        for newsRows in csvReaderGR:
            uuid = newsRows["Unique ID"]
            laterDate = newsRows["Finish"]
            laterDateNew = datetime.datetime.strptime(laterDate, '%m/%d/%Y')
            # BUG FIX: the original referenced undefined names
            # `laterDateNewNew` and `endDateNew9`, raising NameError on the
            # first row; both were typos for the variables used here.
            dateNew = laterDateNew.date() - endDateNew.date()
            if int(uuid) in (1234, 4321):
                marginData["New Date"] = dateNew.days
                return marginData
def make_json(csvFilePath, jsonFilePath):
    """Build JSON-lines output from the first CSV, merging data from the second.

    For each row of *csvFilePath* whose Unique ID is 1357 or 7531, computes
    the day differences between the row's dates and today, merges in the
    "New Date" value that getMarginDict() derives from the second CSV, and
    appends one record per "Name" counter to newData; finally writes every
    record as one JSON object per line to *jsonFilePath*.
    """
    # FIX: removed the unused `es = Elasticsearch("localhost",)` local — it was
    # never used, and no Elasticsearch import exists in this file.
    # Map each Unique ID of interest to the range of "Name" counters it emits.
    nameRanges = {1357: range(1, 3), 7531: range(3, 6)}
    with open(csvFilePath, encoding='utf-8-sig') as csvf:
        csvReader = csv.DictReader(csvf)
        for rows in csvReader:
            IDNum = rows['Unique ID']
            endDate = rows['End Date']
            latefinish = rows['Date Completed']
            endDateNew = datetime.datetime.strptime(endDate, '%m/%d/%Y')
            diffBaseline = endDateNew.date() - datetime.date.today()
            newLatefinishDate = datetime.datetime.strptime(latefinish, '%m/%d/%Y')
            diffLateFinish = newLatefinishDate.date() - datetime.date.today()
            nameRange = nameRanges.get(int(IDNum))
            if nameRange is None:
                continue  # not an ID we report on
            createNewDict(data, IDNum, endDate, latefinish, diffBaseline, diffLateFinish)
            # getMarginDict() depends only on endDateNew, so compute it once per
            # row; the original re-read the entire second CSV on every
            # inner-loop iteration (and again per duplicated if/elif branch).
            getMarginDict(endDateNew)
            for x in nameRange:
                incrementSV(data, x)
                totalIMS = dict(data, **marginData)
                newData.append(totalIMS)
    with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
        for item in newData:
            # json.dump returns None; the original pointlessly assigned it.
            json.dump(item, jsonf)
            jsonf.write('\n')
# Placeholder paths — replace with the real file locations before running.
csvFilePath = "location of 1st csv file"
jsonFilePath = "location of JSON file"
csvFilePathIMS = 'location of 2nd csv file'
# Call the make_json function
make_json(csvFilePath,jsonFilePath)
每次调用 getMarginDict 时,您都在使用 with open(csvFilePathIMS, encoding='utf-8-sig') as csvf:
打开和读取 csv 文件。由于这是一个上下文管理器,当您退出“with open...”语句的范围时,文件会自动关闭。
听起来你打算做的是只打开文件一次并逐行读取。为此,您可以使用 csvReaderGR = csv.DictReader(open(csvFilePathIMS, encoding='utf-8-sig'))
打开文件并重复使用此实例。
我有两个 csv 文件,我想打开它们并从两个 csv 文件中获取唯一 ID。每个 id 都有我想要获取的特定日期并减去这些日期。问题是我需要从一个 csv 中获取一个日期,从另一个 csv 中获取另一个日期。这两个 csv 文件都有一些我不需要的数据,所以在我的代码中我创建了新的字典并从两个 csv 文件中提取了这些日期并将它们转储到一个新的 JSON 文件中。我想指出的是,在一个 csv 文件中,唯一 ID 聚集在 csv 文件的中间,而其他唯一 ID 聚集在最后,所以当运行代码时,我注意到代码不断重新回到 csv 文件的顶部,并且不会继续检查 csv 文件中后面的唯一 ID。
代码在函数 make_json() 中运行,它获取我想要的数据并减去 CSV 中的两个日期,然后将它们转储到 JSON 文件中。另一个名为 getMarginDict() 的函数来自另一个 CSV 文件,该文件中也有日期。唯一的区别是我需要减去的日期在另一个 CSV 文件中。我需要从 csv1 中获取日期并从 csv2 中减去日期,但如上所述,代码一直在顶部进行处理。我相信这是因为文件正在重新打开。有没有办法让代码继续处理唯一 ID 到它停止的地方。最后,代码在检查第一个唯一 ID 时有效,但随后重新启动。
这是最终的 JSON 文件的样子:如您所见,最后三个名为“新日期”的字段计算不正确,但前两个是。
{"Name": "1", "Unique ID": "1357", "Date Finish": "06/19/2024", "Date Completed": "06/19/2024", "Date Left": 775, "Date Completed-Today's Date": 775, "New Date": 43}
{"Name": "2", "Unique ID": "1357", "Date Finish": "06/19/2024", "Date Completed": "06/19/2024", "Date Left": 775, "Date Completed-Today's Date": 775, "New Date": 43}
{"Name": "3", "Unique ID": "7531", "Date Finish": "12/25/2024", "Date Completed": "12/25/2024", "Date Left": 964, "Date Completed-Today's Date": 964, "New Date": -146}
{"Name": "4", "Unique ID": "7531", "Date Finish": "12/25/2024", "Date Completed": "12/25/2024", "Date Left": 964, "Date Completed-Today's Date": 964, "New Date": -146}
{"Name": "5", "Unique ID": "7531", "Date Finish": "12/25/2024", "Date Completed": "12/25/2024", "Date Left": 964, "Date Completed-Today's Date": 964, "New Date": -146}
# Shared mutable module-level state used by the helper functions below.
data = {}  # working dict for the row currently being processed
newData = []  # accumulated output records, written to the JSON file at the end
marginData = {}  # holds the "New Date" value computed from the second CSV
def incrementSV(data, x):
    """Record the running record number *x* under data["Name"] and echo *x* back."""
    name_value = str(x)
    data["Name"] = name_value
    return x
def createNewDict(data, IDNum, endDate, latefinish, diffBaseline, diffLateFinish):
    """Populate *data* in place with the fields for one output record.

    NOTE: the original file defined createNewDict twice; the first definition
    (which merely returned its arguments) was dead code, immediately shadowed
    by this one, so it has been removed.

    Parameters:
        data (dict): record dict mutated in place.
        IDNum (str): the row's "Unique ID" value.
        endDate (str): the row's "End Date" string (mm/dd/YYYY).
        latefinish (str): the row's "Date Completed" string (mm/dd/YYYY).
        diffBaseline (datetime.timedelta): end date minus today.
        diffLateFinish (datetime.timedelta): completed date minus today.
    """
    data["Unique ID"] = IDNum
    data["Date Finish"] = endDate
    data["Date Completed"] = latefinish
    # NOTE(review): the sample output shows a "Date Left" key, but the code has
    # always written "Days Left" — kept as-is to preserve behavior.
    data["Days Left"] = diffBaseline.days
    data["Date Completed-Today's Date"] = diffLateFinish.days
def getMarginDict(endDateNew):
    """Scan the second CSV and store its date margin in marginData["New Date"].

    Reads csvFilePathIMS from the top and, for the first row whose
    "Unique ID" is 1234 or 4321, stores (that row's "Finish" date minus
    *endDateNew*) in days under marginData["New Date"].

    Parameters:
        endDateNew (datetime.datetime): end date parsed from the first CSV.

    Returns:
        dict | None: the module-level marginData when a matching ID is found,
        otherwise None (the file is exhausted without a match).
    """
    with open(csvFilePathIMS, encoding='utf-8-sig') as csvf:
        csvReaderGR = csv.DictReader(csvf)
        for newsRows in csvReaderGR:
            uuid = newsRows["Unique ID"]
            laterDate = newsRows["Finish"]
            laterDateNew = datetime.datetime.strptime(laterDate, '%m/%d/%Y')
            # BUG FIX: the original referenced undefined names
            # `laterDateNewNew` and `endDateNew9`, raising NameError on the
            # first row; both were typos for the variables used here.
            dateNew = laterDateNew.date() - endDateNew.date()
            if int(uuid) in (1234, 4321):
                marginData["New Date"] = dateNew.days
                return marginData
def make_json(csvFilePath, jsonFilePath):
    """Build JSON-lines output from the first CSV, merging data from the second.

    For each row of *csvFilePath* whose Unique ID is 1357 or 7531, computes
    the day differences between the row's dates and today, merges in the
    "New Date" value that getMarginDict() derives from the second CSV, and
    appends one record per "Name" counter to newData; finally writes every
    record as one JSON object per line to *jsonFilePath*.
    """
    # FIX: removed the unused `es = Elasticsearch("localhost",)` local — it was
    # never used, and no Elasticsearch import exists in this file.
    # Map each Unique ID of interest to the range of "Name" counters it emits.
    nameRanges = {1357: range(1, 3), 7531: range(3, 6)}
    with open(csvFilePath, encoding='utf-8-sig') as csvf:
        csvReader = csv.DictReader(csvf)
        for rows in csvReader:
            IDNum = rows['Unique ID']
            endDate = rows['End Date']
            latefinish = rows['Date Completed']
            endDateNew = datetime.datetime.strptime(endDate, '%m/%d/%Y')
            diffBaseline = endDateNew.date() - datetime.date.today()
            newLatefinishDate = datetime.datetime.strptime(latefinish, '%m/%d/%Y')
            diffLateFinish = newLatefinishDate.date() - datetime.date.today()
            nameRange = nameRanges.get(int(IDNum))
            if nameRange is None:
                continue  # not an ID we report on
            createNewDict(data, IDNum, endDate, latefinish, diffBaseline, diffLateFinish)
            # getMarginDict() depends only on endDateNew, so compute it once per
            # row; the original re-read the entire second CSV on every
            # inner-loop iteration (and again per duplicated if/elif branch).
            getMarginDict(endDateNew)
            for x in nameRange:
                incrementSV(data, x)
                totalIMS = dict(data, **marginData)
                newData.append(totalIMS)
    with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
        for item in newData:
            # json.dump returns None; the original pointlessly assigned it.
            json.dump(item, jsonf)
            jsonf.write('\n')
# Placeholder paths — replace with the real file locations before running.
csvFilePath = "location of 1st csv file"
jsonFilePath = "location of JSON file"
csvFilePathIMS = 'location of 2nd csv file'
# Call the make_json function
make_json(csvFilePath,jsonFilePath)
每次调用 getMarginDict 时,您都在使用 with open(csvFilePathIMS, encoding='utf-8-sig') as csvf:
打开和读取 csv 文件。由于这是一个上下文管理器,当您退出“with open...”语句的范围时,文件会自动关闭。
听起来你打算做的是只打开文件一次并逐行读取。为此,您可以使用 csvReaderGR = csv.DictReader(open(csvFilePathIMS, encoding='utf-8-sig'))
打开文件并重复使用此实例。