Commit
fix typos and spelling errors
Elias Howell committed May 30, 2023
1 parent 0f0efbf commit 4180ed5
Showing 21 changed files with 22 additions and 22 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/lint.yml
@@ -20,7 +20,7 @@ jobs:
python-version: "3.9"
- name: install poetry
run: pip install poetry wheel
- - name: install depdenencies
+ - name: install dependencies
run: poetry install --only=dev
- name: flake8 check
run: poetry run flake8 scrapers scrapers_next
2 changes: 1 addition & 1 deletion scrapers/az/bills.py
@@ -153,7 +153,7 @@ def scrape_actions(self, bill, page, self_chamber):

for action in utils.action_map:
if page[action] and utils.action_map[action]["name"] != "":
- # sometimes intead of a date they placeholder with True
+ # sometimes instead of a date they placeholder with True
# see 2021 SB1308
if page[action] is True:
continue
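
The hunk above guards against placeholder values; a minimal sketch of the same pattern in isolation, using a hypothetical action map rather than the scraper's real data:

# Skip entries whose value is the placeholder True instead of a date string,
# as seen on 2021 SB1308. The page and action_map dicts are hypothetical.
page = {"introduced": "2021-02-01", "senate_action": True}
action_map = {"introduced": {"name": "introduction"}, "senate_action": {"name": "passage"}}

for action in action_map:
    if page[action] and action_map[action]["name"] != "":
        if page[action] is True:  # placeholder, not a real date
            continue
        print(action, page[action])  # only "introduced 2021-02-01" prints
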
2 changes: 1 addition & 1 deletion scrapers/dc/bills.py
@@ -285,7 +285,7 @@ def scrape(self, session=None):
v.vote("other", mem_name)
other_count += 1
else:
- # Incase anything new pops up
+ # In case anything new pops up
other_count += 1
v.vote("other", mem_name)

2 changes: 1 addition & 1 deletion scrapers/de/bills.py
@@ -51,7 +51,7 @@ def scrape(self, session=None):

def filter_bills(self, items):
"""
- Read through all bills on a page. If a bill has no subsitutes,
+ Read through all bills on a page. If a bill has no substitutes,
yield it. If a bill does have substitutes, keep the highest-numbered
substitute and only yield that Bill object.
Bills may be amended (`BILL_ID w/ AMENDMENT ID` on the website),
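
A minimal sketch of the substitute-filtering rule the docstring describes, assuming a hypothetical "SS 2 for SB 50" id format; the real method works on Bill objects scraped from the page:

import re
from collections import defaultdict

def filter_bills(items):
    # Group each substitute ("SS 2 for SB 50") under its base bill id.
    groups = defaultdict(list)
    for item in items:
        match = re.match(r"SS (\d+) for (.+)", item)
        if match:
            groups[match.group(2)].append((int(match.group(1)), item))
        else:
            groups[item].append((0, item))
    # Yield only the highest-numbered substitute (or the lone original).
    for versions in groups.values():
        yield max(versions)[1]

print(list(filter_bills(["SB 50", "SS 1 for SB 50", "SS 2 for SB 50", "HB 7"])))
# ['SS 2 for SB 50', 'HB 7']
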
2 changes: 1 addition & 1 deletion scrapers/fl/bills.py
@@ -539,7 +539,7 @@ def process_page(self):
votes["no"].append(member)
else:
raise ValueError(
"Unparseable vote found for {} in {}:\n{}".format(
"Unparsable vote found for {} in {}:\n{}".format(
member, self.source.url, line
)
)
2 changes: 1 addition & 1 deletion scrapers/ia/bills.py
@@ -103,7 +103,7 @@ def scrape_prefiles(self, session):
elif ".pdf" in document_url:
media_type = "application/pdf"
bill.add_document_link(
note="Backround Statement", url=document_url, media_type=media_type
note="Background Statement", url=document_url, media_type=media_type
)

bill.add_version_link(
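
The branch above infers the media type from the file extension; a sketch using the stdlib mimetypes module as an alternative approach, not the scraper's actual logic:

import mimetypes

def guess_media_type(document_url):
    # mimetypes maps ".pdf" to "application/pdf" and so on from the extension.
    media_type, _ = mimetypes.guess_type(document_url)
    return media_type or "application/octet-stream"  # assumed fallback

print(guess_media_type("https://www.legis.iowa.gov/docs/LSB1234.pdf"))  # application/pdf
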
2 changes: 1 addition & 1 deletion scrapers/id/bills.py
@@ -218,7 +218,7 @@ def _split(string):
actor, date, row[2], session, bill_id, chamber, url
)
# bill.add_vote_event(vote)
- # some td's text is seperated by br elements
+ # some td's text is separated by br elements
if len(row[2]):
action = "".join(row[2].itertext())
action = action.replace("\xa0", " ").strip()
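
A sketch of the itertext() technique used above to recover a td's text when it is split across br elements; the markup is hypothetical:

from lxml import etree

# .text alone would return only "Read first time"; itertext() walks every
# text node, including those after the <br/>.
td = etree.fromstring("<td>Read first time<br/>Referred to State Affairs</td>")
action = "".join(td.itertext())
action = action.replace("\xa0", " ").strip()
print(action)  # "Read first timeReferred to State Affairs"

Joining with a space instead of an empty string would keep the pieces visibly separated.
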
2 changes: 1 addition & 1 deletion scrapers/ma/bills.py
@@ -402,7 +402,7 @@ def scrape_house_vote(self, vote, vurl, supplement):
self.info("No vote found in supplement for vote #%s" % supplement)
return

- # create list of independant items in vote_text
+ # create list of independent items in vote_text
rows = vote_text.splitlines()
lines = []
for row in rows:
2 changes: 1 addition & 1 deletion scrapers/md/bills.py
@@ -176,7 +176,7 @@ def parse_vote_pdf(self, vote_url, bill):
if not any(
motion_keyword in motion.lower() for motion_keyword in motion_keywords
):
- # This condition covers for the bad formating in SB 1260
+ # This condition covers for the bad formatting in SB 1260
motion = lines[page_index - 3]
if not any(
motion_keyword in motion.lower() for motion_keyword in motion_keywords
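
A sketch of the fallback in this hunk: when the line at the expected offset contains no motion keyword, back up three lines and retry. The keywords, lines, and initial offset are illustrative:

motion_keywords = ["passage", "third reading", "motion"]
lines = ["header", "junk", "Passage", "Calendar No. 12", "footer"]
page_index = 5

motion = lines[page_index - 2]  # assumed initial offset for illustration
if not any(kw in motion.lower() for kw in motion_keywords):
    # covers badly formatted pages such as SB 1260
    motion = lines[page_index - 3]
print(motion)  # "Passage"
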
2 changes: 1 addition & 1 deletion scrapers/mi/events.py
@@ -90,7 +90,7 @@ def scrape_event_page(self, url, chamber):

# The MI pages often contain broken markup for line breaks in the agenda
# like </BR>. This gets stripped in text_content and we lose the information
- # needed to seperate out agenda sections.
+ # needed to separate out agenda sections.
# So instead, pull out the raw HTML, break it, then parse it.
agenda = page.xpath("//td[contains(., 'Agenda')]/following-sibling::td")[0]
agenda_html = lxml.etree.tostring(agenda, encoding="unicode")
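
A sketch of the workaround described in this hunk: keep the raw markup, split it on the broken </BR> tags, and strip tags from each piece. The sample markup is hypothetical:

import re

agenda_html = "<td>HB 4001 Road funding</BR>HB 4002 School aid</BR>Public comment</td>"
sections = []
for chunk in re.split(r"</?br\s*/?>", agenda_html, flags=re.IGNORECASE):
    text = re.sub(r"<[^>]+>", "", chunk).strip()  # drop any remaining tags
    if text:
        sections.append(text)
print(sections)  # ['HB 4001 Road funding', 'HB 4002 School aid', 'Public comment']
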
2 changes: 1 addition & 1 deletion scrapers/mt/committees.py
@@ -113,7 +113,7 @@ def _fix_house_text(self, filename):
The best solution to this is to throw out the offending text,
and replace it with the correct text. The third and fourth
- columns are joint comittees that are scraped from the Senate
+ columns are joint committees that are scraped from the Senate
document, so the only column that needs to be inserted this way
is the second.
"""
2 changes: 1 addition & 1 deletion scrapers/pr/bills.py
@@ -20,7 +20,7 @@ class NoSuchBill(Exception):
("Enviado al Gobernador", "executive", "executive-receipt"),
("Veto", "executive", "executive-veto"),
("Veto de Bolsillo", "executive", "executive-veto"),
- # comissions give a report but sometimes they dont do any amendments and
+ # commissions give a report but sometimes they dont do any amendments and
# leave them as they are.
# i am not checking if they did or not. but it be easy just read the end and
# if it doesn't have amendments it should say 'sin enmiendas'
2 changes: 1 addition & 1 deletion scrapers/pr/votes.py
@@ -110,7 +110,7 @@ def scrape_journal(self, url, chamber, session, date):
result = "pass"
else:
result = "fail"
msg = "Voting result {} not guarenteed to be 'fail'. Take a look.".format(
msg = "Voting result {} not guaranteed to be 'fail'. Take a look.".format(
vote_result["result"]
)
self.logger.warning(msg)
2 changes: 1 addition & 1 deletion scrapers/sd/events.py
@@ -93,7 +93,7 @@ def scrape(self):
# Because the list of docs isn't ordered, We need to loop through this list multiple times.
# once to grab DocumentTypeId = 5, which are the agendas for the actual meetings
# then after we've created the events, again for DocumentTypeId = 4, which are the minutes
- # we can skip the other DocumentTypeIds becase they're included in the /Documents endpoint,
+ # we can skip the other DocumentTypeIds because they're included in the /Documents endpoint,
# or audio which is duplicated in DocumentTypeId 5
for row in documents:
if row["NoMeeting"] is True:
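
A sketch of the two-pass loop the comment describes: agendas (DocumentTypeId 5) create events on the first pass, then minutes (DocumentTypeId 4) attach on the second. The document dicts and the matching step are assumptions:

documents = [
    {"DocumentTypeId": 4, "NoMeeting": False, "Title": "Minutes 3/1"},
    {"DocumentTypeId": 5, "NoMeeting": False, "Title": "Agenda 3/1"},
    {"DocumentTypeId": 5, "NoMeeting": True, "Title": "No meeting"},
]

events = []
for row in documents:  # pass 1: agendas create the events
    if row["NoMeeting"] is True or row["DocumentTypeId"] != 5:
        continue
    events.append({"agenda": row["Title"], "minutes": None})
for row in documents:  # pass 2: minutes attach to existing events
    if row["DocumentTypeId"] == 4:
        events[0]["minutes"] = row["Title"]  # real code matches by meeting; assumed here
print(events)
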
2 changes: 1 addition & 1 deletion scrapers/usa/votes.py
@@ -243,7 +243,7 @@ def scrape_senate_vote(self, session, period, roll_call):
roll_call = page.xpath("//roll_call_vote/vote_number/text()")[0]
vote_id = "us-{}-upper-{}".format(when.year, roll_call)

- # note: not everthing the senate votes on is a bill, this is OK
+ # note: not everything the senate votes on is a bill, this is OK
# non bills include nominations and impeachments
doc_type = page.xpath("//roll_call_vote/document/document_type/text()")[0]

4 changes: 2 additions & 2 deletions scrapers/vi/bills.py
@@ -52,11 +52,11 @@
("COCHPY&R", "COMMITTEE OF CULTURE, HISTORIC PRESERVATION, YOUTH & RECREATION"),
("COEDA&P", "COMMITTEE OF ECONOMIC DEVELOPMENT, AGRICULTURE & PLANNING"),
("COE&WD", "COMMITTEE OF EDUCATION & WORKFORCE DEVELOPMENT"),
("HEALTH", "COMMITTEE OF ENERGY & ENVIROMENTAL PROTECTION"),
("HEALTH", "COMMITTEE OF ENERGY & ENVIRONMENTAL PROTECTION"),
("COF", "COMMITTEE OF FINANCE"),
("COHHHS&VA", "COMMITTEE OF HEALTH, HOSPITAL & HUMAN SERVICES"),
("COHSJ&PS", "COMMITTEE OF HOMELAND SECURITY, PUBLIC SAFETY & JUSTICE"),
("PUBLICWRKS", "COMMITTEE OF HOUSING, PUBLIC WORKS & WASTE MANAGMENT"),
("PUBLICWRKS", "COMMITTEE OF HOUSING, PUBLIC WORKS & WASTE MANAGEMENT"),
("RULJUD", "COMMITTEE OF RULES & JUDICIARY"),
("WHOLE", "COMMITTEE OF THE WHOLE"),
("GOVSERV", "COMMITTEE ON GOVERNMENT SERVICES, CONSUMER AND VETERANS AFFAIRS"),
2 changes: 1 addition & 1 deletion scrapers/wi/bills.py
@@ -268,7 +268,7 @@ def parse_sponsors(self, bill, action, chamber):
line,
)
if not match:
- # So far, the only one that doens't match is
+ # So far, the only one that doesn't match is
# http://docs.legis.wisconsin.gov/2011/proposals/ab568
# In the following format:
# Introduced by Representatives Krusick and J. Ott, by ... ;
2 changes: 1 addition & 1 deletion scrapers/wv/events.py
@@ -161,7 +161,7 @@ def scrape_meeting_page(self, url):

def clean_date(self, when):
# Remove all text after the third comma to make sure no extra text
- # is included in the date. Required to correctly parse texxt like this:
+ # is included in the date. Required to correctly parse text like this:
# "Friday, March 3, 2023, Following wrap up of morning agenda"
when = ",".join(when.split(",")[:2])

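
A sketch of the truncation trick in this hunk: split on commas and keep only the leading date segments so trailing notes don't break parsing. This version keeps three segments to retain the year (the snippet above keeps two), and the strptime format is an assumption:

from datetime import datetime

def clean_date(when):
    # "Friday, March 3, 2023, Following wrap up of morning agenda"
    # becomes "Friday, March 3, 2023"
    return ",".join(when.split(",")[:3]).strip()

when = clean_date("Friday, March 3, 2023, Following wrap up of morning agenda")
print(datetime.strptime(when, "%A, %B %d, %Y"))  # 2023-03-03 00:00:00
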
2 changes: 1 addition & 1 deletion scrapers_next/de/committees.py
@@ -77,7 +77,7 @@ def process_page(self):


class CommitteeList(HtmlPage):
- # This page is scraped before geting the json data because the current
+ # This page is scraped before getting the json data because the current
# session id needs to be extracted. This page has a <select> element where
# the first <option> inside of it has the required session id.
source = "https://legis.delaware.gov/Committees"
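
A sketch of extracting the session id from the first <option>, as the comment describes; the markup here is hypothetical:

from lxml import html

page = html.fromstring(
    "<select id='sessions'><option value='152'>152nd GA</option>"
    "<option value='151'>151st GA</option></select>"
)
session_id = page.xpath("//select/option[1]/@value")[0]
print(session_id)  # "152"
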
2 changes: 1 addition & 1 deletion scrapers_next/ny/people.py
@@ -51,7 +51,7 @@ def block_to_text(elem):

def parse_address_lines(text):
"""
- a fairly common occurence, a bunch of lines like
+ a fairly common occurrence, a bunch of lines like
addr line 1
addr line 2
addr line 3?
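
A sketch of the pattern this docstring describes: a variable number of street-address lines followed by a recognizable city/state/zip line. The regex and return shape are assumptions:

import re

def parse_address_lines(text):
    lines = [line.strip() for line in text.splitlines() if line.strip()]
    for i, line in enumerate(lines):
        # stop at the first "Albany, NY 12247"-style line
        if re.match(r".+,\s*[A-Z]{2}\s+\d{5}", line):
            return {"address": lines[:i], "city_state_zip": line}
    return {"address": lines, "city_state_zip": None}

print(parse_address_lines("Room 302\nLegislative Office Building\nAlbany, NY 12247"))
# {'address': ['Room 302', 'Legislative Office Building'], 'city_state_zip': 'Albany, NY 12247'}
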
2 changes: 1 addition & 1 deletion scripts/people-update.sh
@@ -28,7 +28,7 @@ fi
rm -rf "${SCRIPT_DIR}/../_scrapes/${TODAY}"
mkdir -p "${SCRIPT_DIR}/../_scrapes"

echo "ensuring depdendencies are up to date"
echo "ensuring dependencies are up to date"
poetry install

echo "Cloning people repo..."
