I have a parent directory with subdirectories, each of which contains an .html file that I want to run my code on. The script takes an HTML file and exports a corresponding CSV file with the table data.
I have tried two main approaches, but neither works: the script cannot find the .html file (it is treated as non-existent). Note: the file in each subdirectory is always named index.html.
for file in */; do for file in *.html; do python html_csv2.py "$file"; done; done
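For reference, this is the per-subdirectory traversal I'm aiming for, sketched with pathlib (just a sketch that prints the path each iteration should visit, not my actual script):

from pathlib import Path

# Each immediate subdirectory of the parent holds one index.html
for index_file in Path('.').glob('*/index.html'):
    print(index_file)  # e.g. subdir_name/index.html

The script being invoked above (html_csv2.py) is: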
from simplified_scrapy import SimplifiedDoc, utils
from bs4 import BeautifulSoup as bs
import pandas as pd

name = 'index.html'
html = utils.getFileContent(name)
#Get data from file
doc = SimplifiedDoc(html)
soup = bs(html, 'lxml')
title = soup.select_one('title').text
title = title.split(' -')
strain = title[0]
rows = []
tables = doc.selects('table.region-table')
tables = tables[:-1]
#print (type(tables))
for table in tables:
    trs = table.tbody.trs
    for tr in trs:
        rows.append([td.text for td in tr.tds])
#print(rows)
#print(type(rows))
#print("PANDAS DATAFRAME")
df_rows = pd.DataFrame(rows)
df_rows.columns = ['Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']
df_rows['Strain'] = strain
df_rows = df_rows[['Strain','Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']]
#print(df_rows)
df_rows.to_csv('antismash_html.csv', index=False, header=True)
print('CSV CREATED')
In this second snippet I'm trying to use the os library to walk into each subdirectory:
import csv
from simplified_scrapy import SimplifiedDoc,req,utils
import sys
import pandas as pd
import lxml.html
from bs4 import BeautifulSoup as bs
import os
name = 'index.html'
html = utils.getFileContent(name)
# Get data from file
doc = SimplifiedDoc(html)
soup = bs(html, 'lxml')
cwd = os.getcwd()
print(cwd)
directory_to_check = cwd # Which directory do you want to start with?
def directory_function(directory):
    print("Listing: " + directory)
    print("\t-" + "\n\t-".join(os.listdir(".")))  # List current working directory
# Get all the subdirectories of directory_to_check recursively and store them in a list:
directories = [os.path.abspath(x[0]) for x in os.walk(directory_to_check)]
directories.remove(os.path.abspath(directory_to_check))  # Don't want it run in my main directory
def csv_create(name):
    title = soup.select_one('title').text
    title = title.split(' -')
    strain = title[0]
    rows = []
    tables = doc.selects('table.region-table')
    tables = tables[:-1]
    #print(type(tables))
    for table in tables:
        trs = table.tbody.trs
        for tr in trs:
            rows.append([td.text for td in tr.tds])
    #print(rows)
    #print(type(rows))
    #print("PANDAS DATAFRAME")
    df_rows = pd.DataFrame(rows)
    df_rows.columns = ['Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']
    df_rows['Strain'] = strain
    df_rows = df_rows[['Strain', 'Region', 'Class', 'From', 'To', 'Associated Product', 'Class', 'Similarity']]
    #print(df_rows)
    df_rows.to_csv('antismash_html.csv', index=False, header=True)
    print('CSV CREATED')
    #with open(name + '.csv', 'w', encoding='utf-8') as f:
    #    csv_writer = csv.writer(f)
    #    csv_writer.writerows(rows)
for i in directories:
    os.chdir(i)  # Change working directory
    csv_create(name)  # Run your function

directory_function(directory_to_check)
#csv_create(name)
I tried using the example here: Python: run script in all subdirectories, but was not able to get it working.
Try this.
import os
from simplified_scrapy import utils

def getSubDir(name, end=None):
    filelist = os.listdir(name)
    if end:
        filelist = [os.path.join(name, l) for l in filelist if l.endswith(end)]
    return filelist

subDir = getSubDir('./')  # The directory which you want to start with
for dir in subDir:
    # files = getSubDir(dir, end='index.html')
    fileName = dir + '/index.html'
    if not os.path.isfile(fileName):
        continue
    html = utils.getFileContent(fileName)
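From here each file can be handed to the extraction code from the question. A minimal sketch of how the pieces might fit together, assuming csv_create is reworked to take the HTML string and an output path as parameters instead of reading globals (that reworked signature is an assumption, not the original function):

import os
import pandas as pd
from bs4 import BeautifulSoup as bs
from simplified_scrapy import SimplifiedDoc, utils

def csv_create(html, out_path):
    # Same extraction as in the question, parameterized on the HTML
    # string and the output path (assumed rework of the original).
    doc = SimplifiedDoc(html)
    soup = bs(html, 'lxml')
    strain = soup.select_one('title').text.split(' -')[0]
    rows = []
    for table in doc.selects('table.region-table')[:-1]:
        for tr in table.tbody.trs:
            rows.append([td.text for td in tr.tds])
    df = pd.DataFrame(rows, columns=['Region', 'Class', 'From', 'To',
                                     'Associated Product', 'Class', 'Similarity'])
    df.insert(0, 'Strain', strain)  # Strain as the first column
    df.to_csv(out_path, index=False, header=True)
    print('CSV CREATED:', out_path)

# Run from the parent directory so the relative paths resolve.
for dir in getSubDir('./'):
    fileName = os.path.join(dir, 'index.html')
    if not os.path.isfile(fileName):
        continue
    html = utils.getFileContent(fileName)
    # One CSV per subdirectory, written next to its index.html,
    # so the outputs do not overwrite each other.
    csv_create(html, os.path.join(dir, 'antismash_html.csv'))

Writing each CSV inside its own subdirectory avoids a single antismash_html.csv in the working directory being overwritten on every iteration.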