运行 Python 子目录脚本
Run Python Script on Subdirectories
我有一个包含若干子目录的父目录,每个子目录都包含一个 .html 文件,我想对这些文件运行我的代码。代码接收一个 html 文件,并导出一个包含表格数据的对应 csv 文件。
我尝试了两种主要方法,但都无法正常工作,因为它无法相应地找到 .html 文件(不存在)。
注意:子目录中每个文件的名称将始终为index.html
Linux命令行(基于代码1)
# Fix: the original nested loops reused "$file" for both the directory and the
# html glob, and globbed *.html in the PARENT directory, so no subdirectory
# file was ever found. Enter each subdirectory so the script's hard-coded
# 'index.html' resolves correctly.
for dir in */; do (cd "$dir" && python ../html_csv2.py index.html); done
代码 1:
# Parse an antiSMASH index.html report in the current directory and export
# its region tables to antismash_html.csv.
# Fix: the pasted snippet had lost all indentation (IndentationError as-is);
# restored proper structure without changing the logic.
name = 'index.html'
html = utils.getFileContent(name)

# Parse the same HTML twice on purpose: SimplifiedDoc drives the table
# extraction, BeautifulSoup is used only for the <title> element.
doc = SimplifiedDoc(html)
soup = bs(html, 'lxml')

# Strain name is the part of the page title before the first " -" separator.
title = soup.select_one('title').text
strain = title.split(' -')[0]

rows = []
tables = doc.selects('table.region-table')
tables = tables[:-1]  # drop the last region table (assumed summary -- TODO confirm)
for table in tables:
    for tr in table.tbody.trs:
        rows.append([td.text for td in tr.tds])

df_rows = pd.DataFrame(rows)
# NOTE(review): 'Class' appears twice because the source table has two
# class columns -- confirm this matches the report layout.
df_rows.columns = ['Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']
df_rows['Strain'] = strain
df_rows = df_rows[['Strain', 'Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']]

df_rows.to_csv(r'antismash_html.csv', index=False, header=True)
print('CSV CREATED')
在第二个片段中,我尝试使用 os 库相应地进入每个子目录。
代码 2:
import csv
from simplified_scrapy import SimplifiedDoc,req,utils
import sys
import pandas as pd
import lxml.html
from bs4 import BeautifulSoup as bs
import os
name = 'index.html'
# NOTE(review): this reads index.html from the directory the script is
# LAUNCHED in, exactly once -- it is never re-read after the os.chdir()
# calls below, so csv_create() processes this single parse for every
# subdirectory. This is the likely cause of the reported failure.
html = utils.getFileContent(name)
# Get data from file
doc = SimplifiedDoc(html)
soup = bs(html, 'lxml')
cwd = os.getcwd()
print(cwd)
directory_to_check = cwd # Which directory do you want to start with?
def directory_function(directory):
    """Print a label for *directory*, then a dashed listing of the CWD.

    Note: the listing always comes from the current working directory
    ("."), not from the *directory* argument.
    """
    print("Listing: " + directory)
    entries = os.listdir(".")  # List current working directory
    print("\t-" + "\n\t-".join(entries))
# Get all the subdirectories of directory_to_check recursively and store them in a list:
directories = [os.path.abspath(x[0]) for x in os.walk(directory_to_check)]
# os.walk yields the root directory itself first, so drop it here.
directories.remove(os.path.abspath(directory_to_check)) # Don't want it done in my main directory
def csv_create(name):
    """Parse the antiSMASH report *name* in the current working directory
    and write its region tables to antismash_html.csv (also in the CWD).

    Fix: the original ignored *name* and used module-level ``soup``/``doc``
    parsed once at startup, so every subdirectory produced identical output
    (and the launch directory's index.html had to exist). Re-reading here
    picks up the index.html of whichever directory os.chdir() moved us into.
    """
    html = utils.getFileContent(name)
    doc = SimplifiedDoc(html)
    soup = bs(html, 'lxml')

    # Strain name is the page title up to the first " -" separator.
    title = soup.select_one('title').text
    strain = title.split(' -')[0]

    rows = []
    tables = doc.selects('table.region-table')
    tables = tables[:-1]  # drop the last table (assumed summary -- TODO confirm)
    for table in tables:
        for tr in table.tbody.trs:
            rows.append([td.text for td in tr.tds])

    df_rows = pd.DataFrame(rows)
    # NOTE(review): 'Class' is intentionally duplicated to match the report.
    df_rows.columns = ['Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']
    df_rows['Strain'] = strain
    df_rows = df_rows[['Strain', 'Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']]
    df_rows.to_csv(r'antismash_html.csv', index=False, header=True)
    print('CSV CREATED')
# Process every subdirectory, then restore the starting directory.
start_dir = os.getcwd()
try:
    for i in directories:
        os.chdir(i)       # Change working Directory
        csv_create(name)  # Run your function
finally:
    os.chdir(start_dir)   # Fix: the original never chdir'd back
# Fix: the original line `directory_function` (no parentheses) merely
# referenced the function -- a no-op. Call it properly.
directory_function(start_dir)
我尝试按照此处的示例来做(原文中的链接已缺失),但无法相应地执行。
或者,您可以考虑使用 glob.glob()
。但请注意通过在 glob 表达式中指定路径或 cd 进入文件夹来从您打算搜索的文件夹进行搜索。
glob 会给你一个简单的相对路径列表。
>>> import glob
>>>
>>> files = glob.glob('**/*.py', recursive=True)
>>> len(files)
3177
>>> files[0]
'_wxWidgets-3.0.2/build/bakefiles/wxwin.py'
>>>
文档在这里有一些 glob 表达式示例:https://docs.python.org/3.5/library/glob.html
如果您从一个包含大量嵌套子文件夹的文件夹开始对您的驱动器进行递归搜索,它将使解释器停滞不前,直到它完成 - 或者您终止会话。
试试这个。
import os
from simplified_scrapy import utils
def getSubDir(name, end=None):
    """List the entries of directory *name*.

    With ``end`` given, return full paths (``name`` joined with the entry)
    of the entries whose names end with ``end``; otherwise return the bare
    entry names exactly as ``os.listdir()`` yields them.
    """
    filelist = os.listdir(name)
    if end:
        # Fix: Python str has .endswith(), not .endsWith() -- the original
        # raised AttributeError whenever `end` was supplied.
        filelist = [os.path.join(name, l) for l in filelist if l.endswith(end)]
    return filelist
subDir = getSubDir('./')  # The directory which you want to start with
for sub in subDir:  # renamed from `dir`, which shadowed the builtin
    # files = getSubDir(sub, end='index.html')
    fileName = os.path.join(sub, 'index.html')  # os.path.join over '+' concat
    if not os.path.isfile(fileName):
        continue  # entry is a plain file or has no index.html -- skip
    html = utils.getFileContent(fileName)
我有一个包含子目录的父目录,每个子目录都包含一个 .html 文件,我想在其中 运行 我的代码。这需要一个 html 文件,并将导出一个包含 table 数据的相应 csv 文件。
我尝试了两种主要方法,但都无法正常工作,因为它无法相应地找到 .html 文件(不存在)。 注意:子目录中每个文件的名称将始终为index.html
Linux命令行(基于代码1)
# Fix: the original nested loops reused "$file" for both the directory and the
# html glob, and globbed *.html in the PARENT directory, so no subdirectory
# file was ever found. Enter each subdirectory so the script's hard-coded
# 'index.html' resolves correctly.
for dir in */; do (cd "$dir" && python ../html_csv2.py index.html); done
代码 1:
# Parse an antiSMASH index.html report in the current directory and export
# its region tables to antismash_html.csv.
# Fix: the pasted snippet had lost all indentation (IndentationError as-is);
# restored proper structure without changing the logic.
name = 'index.html'
html = utils.getFileContent(name)

# Parse the same HTML twice on purpose: SimplifiedDoc drives the table
# extraction, BeautifulSoup is used only for the <title> element.
doc = SimplifiedDoc(html)
soup = bs(html, 'lxml')

# Strain name is the part of the page title before the first " -" separator.
title = soup.select_one('title').text
strain = title.split(' -')[0]

rows = []
tables = doc.selects('table.region-table')
tables = tables[:-1]  # drop the last region table (assumed summary -- TODO confirm)
for table in tables:
    for tr in table.tbody.trs:
        rows.append([td.text for td in tr.tds])

df_rows = pd.DataFrame(rows)
# NOTE(review): 'Class' appears twice because the source table has two
# class columns -- confirm this matches the report layout.
df_rows.columns = ['Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']
df_rows['Strain'] = strain
df_rows = df_rows[['Strain', 'Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']]

df_rows.to_csv(r'antismash_html.csv', index=False, header=True)
print('CSV CREATED')
在第二个片段中,我尝试使用 os 库相应地进入每个子目录。
代码 2:
import csv
from simplified_scrapy import SimplifiedDoc,req,utils
import sys
import pandas as pd
import lxml.html
from bs4 import BeautifulSoup as bs
import os
name = 'index.html'
# NOTE(review): this reads index.html from the directory the script is
# LAUNCHED in, exactly once -- it is never re-read after the os.chdir()
# calls below, so csv_create() processes this single parse for every
# subdirectory. This is the likely cause of the reported failure.
html = utils.getFileContent(name)
# Get data from file
doc = SimplifiedDoc(html)
soup = bs(html, 'lxml')
cwd = os.getcwd()
print(cwd)
directory_to_check = cwd # Which directory do you want to start with?
def directory_function(directory):
    """Print a label for *directory*, then a dashed listing of the CWD.

    Note: the listing always comes from the current working directory
    ("."), not from the *directory* argument.
    """
    print("Listing: " + directory)
    entries = os.listdir(".")  # List current working directory
    print("\t-" + "\n\t-".join(entries))
# Get all the subdirectories of directory_to_check recursively and store them in a list:
directories = [os.path.abspath(x[0]) for x in os.walk(directory_to_check)]
# os.walk yields the root directory itself first, so drop it here.
directories.remove(os.path.abspath(directory_to_check)) # Don't want it done in my main directory
def csv_create(name):
    """Parse the antiSMASH report *name* in the current working directory
    and write its region tables to antismash_html.csv (also in the CWD).

    Fix: the original ignored *name* and used module-level ``soup``/``doc``
    parsed once at startup, so every subdirectory produced identical output
    (and the launch directory's index.html had to exist). Re-reading here
    picks up the index.html of whichever directory os.chdir() moved us into.
    """
    html = utils.getFileContent(name)
    doc = SimplifiedDoc(html)
    soup = bs(html, 'lxml')

    # Strain name is the page title up to the first " -" separator.
    title = soup.select_one('title').text
    strain = title.split(' -')[0]

    rows = []
    tables = doc.selects('table.region-table')
    tables = tables[:-1]  # drop the last table (assumed summary -- TODO confirm)
    for table in tables:
        for tr in table.tbody.trs:
            rows.append([td.text for td in tr.tds])

    df_rows = pd.DataFrame(rows)
    # NOTE(review): 'Class' is intentionally duplicated to match the report.
    df_rows.columns = ['Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']
    df_rows['Strain'] = strain
    df_rows = df_rows[['Strain', 'Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']]
    df_rows.to_csv(r'antismash_html.csv', index=False, header=True)
    print('CSV CREATED')
# Process every subdirectory, then restore the starting directory.
start_dir = os.getcwd()
try:
    for i in directories:
        os.chdir(i)       # Change working Directory
        csv_create(name)  # Run your function
finally:
    os.chdir(start_dir)   # Fix: the original never chdir'd back
# Fix: the original line `directory_function` (no parentheses) merely
# referenced the function -- a no-op. Call it properly.
directory_function(start_dir)
我尝试按照此处的示例来做(原文中的链接已缺失),但无法相应地执行。
或者,您可以考虑使用 glob.glob()
。但请注意通过在 glob 表达式中指定路径或 cd 进入文件夹来从您打算搜索的文件夹进行搜索。
glob 会给你一个简单的相对路径列表。
>>> import glob
>>>
>>> files = glob.glob('**/*.py', recursive=True)
>>> len(files)
3177
>>> files[0]
'_wxWidgets-3.0.2/build/bakefiles/wxwin.py'
>>>
文档在这里有一些 glob 表达式示例:https://docs.python.org/3.5/library/glob.html
如果您从一个包含大量嵌套子文件夹的文件夹开始对您的驱动器进行递归搜索,它将使解释器停滞不前,直到它完成 - 或者您终止会话。
试试这个。
import os
from simplified_scrapy import utils
def getSubDir(name, end=None):
    """List the entries of directory *name*.

    With ``end`` given, return full paths (``name`` joined with the entry)
    of the entries whose names end with ``end``; otherwise return the bare
    entry names exactly as ``os.listdir()`` yields them.
    """
    filelist = os.listdir(name)
    if end:
        # Fix: Python str has .endswith(), not .endsWith() -- the original
        # raised AttributeError whenever `end` was supplied.
        filelist = [os.path.join(name, l) for l in filelist if l.endswith(end)]
    return filelist
subDir = getSubDir('./')  # The directory which you want to start with
for sub in subDir:  # renamed from `dir`, which shadowed the builtin
    # files = getSubDir(sub, end='index.html')
    fileName = os.path.join(sub, 'index.html')  # os.path.join over '+' concat
    if not os.path.isfile(fileName):
        continue  # entry is a plain file or has no index.html -- skip
    html = utils.getFileContent(fileName)