Python - copies bad indent
Need a little help here. I have a Python script that copies and edits txt files, but I'm having a problem with indentation and I'm not sure how to fix it. The indent I normally use is indent=4, and in most cases it has never failed me. Now I'm working with smaller files that use a different indentation, so when the script copies a file for editing it knocks the indent down by 2. The original file starts at an indent of 6 and the indent grows and shrinks from there.
Here is the original file:
{
......"unid": 100, - indent 6
"awar": false,
"upst": [
........{ - indent 8
.........."lvls": [ - indent 10
............{ - indent 12
.............."ulpf": true, - indent 14
"fsg": [
0,
0,
0,
0,
0
]
}
]
}
],
"nupl": {
"txt": "",
"cpf": true
},
"esup": false,
"elcl": 0,
"gacp": false,
"paid": 5214,
"lzsid": 24434,
"livid": 214867,
"isra": false
}
Here is the result:
Top of Market
{
...."unid": 1, - indent 4
"awar": false,
"upst": [
........{ - indent 8
............"lvls": [ - indent 12
................{
"ulpf": true, - indent 16
"fsg": [
0,
0,
0,
0,
0
]
}
]
}
],
"nupl": {
"txt": "Marks1",
"cpf": true
},
"esup": false,
"elcl": 0,
"gacp": false,
"paid": 125,
"lzsid": 24434,
"livid": 214867,
"isra": false
},
Bottom of Market
So when I bump the indent up to 6 it puts all the code in the right spot, but now this part gets pushed over by 2 as well. Here is the result with indent 6:
Top of Market
{
"unid": 1,
"awar": false,
"upst": [
{
"lvls": [
{
"ulpf": true, --- as you can see here it's moved over a ton
"fsg": [ moved over 24 indents
0,
0,
0,
0,
0
]
}
]
}
],
"nupl": {
"txt": "Marks1",
"cpf": true
},
"esup": false,
"elcl": 0,
"gacp": false,
"paid": 125,
"lzsid": 24434,
"livid": 214867,
"isra": false
},
Bottom of Market
What is the best way to fix this? Here is the script:
import shutil
import glob
import json
import re
import asyncio
import sys
from asyncore import loop

unit_to_paid = {"(1)": 125,
                "(2)": 124}

async def update_file(file, arg):
    f = open(file, "r")
    text = f.read()
    data = json.loads(text)
    num = re.search(r"\(([^()]+)\)", file)
    data["unid"] = int(num.group(1))
    data["paid"] = unit_to_paid[num.group(0)]
    data["nupl"]["txt"] = f'Marks{num.group(1)}'
    f.close()
    f = open(file, "w")
    f.truncate(0)
    json.dump(data, f, indent=4)  # <-- here is the indent
    f.write(", \nBottom of Market")
    f.close()
    f1 = open(file, "r+")
    content = f1.read()
    f1.seek(0, 0)
    f1.write("Top of Market" + '\n' + content)
    f1.close()

async def get_update_files(arg):
    # Recursively grabs the files
    files = glob.glob('New folder 2/**/*.txt', recursive=True)
    # Creates a list of tasks to run concurrently
    tasks = []
    for file in files:
        tasks.append(asyncio.create_task(update_file(file, arg)))
    t = asyncio.gather(*tasks)
    return t

def main():
    args = sys.argv[1:]
    # Deletes New folder 2 if it exists
    shutil.rmtree('New folder 2', ignore_errors=True)
    # Copies existing files into New folder 2
    shutil.copytree('New folder 1', 'New folder 2')
    # Will get each file and update them concurrently
    if len(args) == 0:
        asyncio.get_event_loop().run_until_complete(get_update_files("all"))
    else:
        asyncio.get_event_loop().run_until_complete(get_update_files(args[0]))

main()
I've tried a few things but nothing worked. I even removed this part so that it edits the originals:
# Deletes New folder 2 if it exists
shutil.rmtree('New folder 2', ignore_errors=True)
# Copies existing files into New folder 2
shutil.copytree('New folder 1', 'New folder 2')
and updated this: files = glob.glob('New folder 1/**/*.txt', recursive=True)
Here is the result of that change:
Empty row
Empty row
Empty row
Empty row
Empty row
Empty row ......................1000 indents................ Top Market
{
......"unid": 1,
"awar": false,
"upst": [
........{
.........."lvls": [
............{
.............."ulpf": true,
"fsg": [
0,
0,
0,
0,
0
]
}
]
}
],
"nupl": {
"txt": "",
"cpf": true
},
"esup": false,
"elcl": 0,
"gacp": false,
"paid": 5214,
"lzsid": 24434,
"livid": 214867,
"isra": false
} < this shifted over with 0 indents
Python's standard json module, like most JSON encoders, doesn't give you fine-grained control over whitespace. If you need that, you probably can't use json.dump, at least not for the whole of data. Instead, write your own code to produce the JSON you want, falling back to json.dump for subcomponents where possible.
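For instance, here is a rough sketch of that idea, assuming the 6-space first level and 2-space steps from your target format (dump_shifted is a hypothetical helper, not part of your script):

import json

def dump_shifted(data, first_indent=6, inner_indent=2):
    # Serialize each top-level value with json.dumps, then put it after a
    # manually padded key so the first level lands at `first_indent` while
    # nested levels keep the normal `inner_indent` step.
    pad = " " * first_indent
    parts = []
    for key, value in data.items():
        rendered = json.dumps(value, indent=inner_indent)
        # Shift the value's continuation lines right so they line up under the key.
        rendered = rendered.replace("\n", "\n" + pad)
        parts.append(f"{pad}{json.dumps(key)}: {rendered}")
    return "{\n" + ",\n".join(parts) + "\n}"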
Maybe I wasn't clear enough in the comments. My idea is to first generate the file with indent=2, which gives this result:
{
.."unid": 100, - indent 2
"awar": false,
"upst": [
....{ - indent 4
......"lvls": [ - indent 6
........{ - indent 8
.........."ulpf": true, - indent 10
"fsg": [
0,
Then you add 4 more spaces to every line that already starts with a space (this excludes the lines that start with { and }, so they don't move); there is a small standalone sketch of this step after the example below:
{
......"unid": 100, - indent 2 + 4 = 6
"awar": false,
"upst": [
........{ - indent 4 + 4 = 8
.........."lvls": [ - indent 6 + 4 = 10
............{ - indent 8 + 4 = 12
.............."ulpf": true, - indent 10 + 4 = 14
"fsg": [
0,
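As a standalone illustration of that add-4-spaces step (using a cut-down, made-up version of your data), re.MULTILINE lets a single re.sub pad every indented line at once:

import json
import re

doc = {"unid": 100, "upst": [{"lvls": [{"ulpf": True}]}]}

s = json.dumps(doc, indent=2)
# Turn the single leading space of each indented line into 5 spaces,
# i.e. add 4 to every indent; the flush-left "{" and "}" lines stay put.
s = re.sub(r"^ ", "     ", s, flags=re.MULTILINE)
print(s)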
Update
Also, there is no point in opening/reading/closing the same file several times; it's better to read the content once, format it the way you want, and then write the result back. Here is an example of how you could rework your update_file implementation:
async def update_file(file, arg):
    f = open(file, "r+")
    text = f.read()
    data = json.loads(text)
    num = re.search(r"\(([^()]+)\)", file)
    data["unid"] = int(num.group(1))
    data["paid"] = unit_to_paid[num.group(0)]
    data["nupl"]["txt"] = f'Marks{num.group(1)}'
    # Format the json into a string with indent 2
    s = json.dumps(data, indent=2)
    # Replace the starting space on each line with 5 spaces
    # This effectively adds 4 to all indents
    s = re.sub(r'^ ', r'     ', s, 0, re.MULTILINE)
    # Add the header and the footer
    s = "Top of Market\n" + s + ", \nBottom of Market"
    # Replace the file's content with the updated json
    f.seek(0)  # rewind first so the new content starts at the top of the file
    f.truncate(0)
    f.write(s)
    f.close()
When I run this code on your input I get the following output:
Top of Market
{
"unid": 1,
"awar": false,
"upst": [
{
"lvls": [
{
"ulpf": true,
"fsg": [
0,
0,
0,
0,
0
]
}
]
}
],
"nupl": {
"txt": "Marks1",
"cpf": true
},
"esup": false,
"elcl": 0,
"gacp": false,
"paid": 125,
"lzsid": 24434,
"livid": 214867,
"isra": false
},
Bottom of Market