diff --git a/custom_components/waste_collection_schedule/manifest.json b/custom_components/waste_collection_schedule/manifest.json
index 3d6d03ad..9bccda7f 100644
--- a/custom_components/waste_collection_schedule/manifest.json
+++ b/custom_components/waste_collection_schedule/manifest.json
@@ -7,5 +7,5 @@
"integration_type": "hub",
"iot_class": "cloud_polling",
"requirements": ["icalendar", "recurring_ical_events", "icalevents", "bs4"],
- "version": "1.41.0"
+ "version": "1.42.0"
}
diff --git a/custom_components/waste_collection_schedule/sensor.py b/custom_components/waste_collection_schedule/sensor.py
index aed7f818..a6243ab1 100644
--- a/custom_components/waste_collection_schedule/sensor.py
+++ b/custom_components/waste_collection_schedule/sensor.py
@@ -29,6 +29,7 @@
CONF_DATE_TEMPLATE = "date_template"
CONF_COLLECTION_TYPES = "types"
CONF_ADD_DAYS_TO = "add_days_to"
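+# index of the upcoming event to report: 0 = next collection, 1 = the one after, ...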
+CONF_EVENT_INDEX = "event_index"
class DetailsFormat(Enum):
@@ -52,6 +53,7 @@ class DetailsFormat(Enum):
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_DATE_TEMPLATE): cv.template,
vol.Optional(CONF_ADD_DAYS_TO, default=False): cv.boolean,
+ vol.Optional(CONF_EVENT_INDEX, default=0): cv.positive_int,
}
)
@@ -88,6 +90,7 @@ async def async_setup_platform(hass, config, async_add_entities, discovery_info=
value_template=value_template,
date_template=date_template,
add_days_to=config.get(CONF_ADD_DAYS_TO),
+ event_index=config.get(CONF_EVENT_INDEX),
)
)
@@ -110,6 +113,7 @@ def __init__(
value_template,
date_template,
add_days_to,
+ event_index,
):
"""Initialize the entity."""
self._api = api
@@ -121,6 +125,7 @@ def __init__(
self._value_template = value_template
self._date_template = date_template
self._add_days_to = add_days_to
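+        # which upcoming collection this sensor reports (0 = next)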
+ self._event_index = event_index
self._value = None
@@ -201,6 +206,7 @@ def _update_sensor(self):
count=1,
include_types=self._collection_types,
include_today=self._include_today,
+ start_index=self._event_index,
)
self._set_state(upcoming1)
@@ -220,6 +226,7 @@ def _update_sensor(self):
leadtime=self._leadtime,
include_types=self._collection_types,
include_today=self._include_today,
+ start_index=self._event_index,
)
for collection in upcoming:
attributes[self._render_date(collection)] = self._separator.join(
@@ -229,7 +236,10 @@ def _update_sensor(self):
# show list of collections in details
for t in collection_types:
collections = self._aggregator.get_upcoming(
- count=1, include_types=[t], include_today=self._include_today
+ count=1,
+ include_types=[t],
+ include_today=self._include_today,
+ start_index=self._event_index,
)
date = (
"" if len(collections) == 0 else self._render_date(collections[0])
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/collection_aggregator.py b/custom_components/waste_collection_schedule/waste_collection_schedule/collection_aggregator.py
index f0cab999..40ab561b 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/collection_aggregator.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/collection_aggregator.py
@@ -33,6 +33,7 @@ def get_upcoming(
include_types=None,
exclude_types=None,
include_today=False,
+ start_index=None,
):
"""Return list of all entries, limited by count and/or leadtime.
@@ -47,6 +48,7 @@ def get_upcoming(
include_types=include_types,
exclude_types=exclude_types,
include_today=include_today,
+ start_index=start_index,
)
def get_upcoming_group_by_day(
@@ -56,6 +58,7 @@ def get_upcoming_group_by_day(
include_types=None,
exclude_types=None,
include_today=False,
+ start_index=None,
):
"""Return list of all entries, grouped by day, limited by count and/or leadtime."""
entries = []
@@ -73,6 +76,8 @@ def get_upcoming_group_by_day(
for key, group in iterator:
entries.append(CollectionGroup.create(list(group)))
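+        # skip the first start_index day-groups before the count limit is applied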
+ if start_index is not None:
+ entries = entries[start_index:]
if count is not None:
entries = entries[:count]
@@ -86,6 +91,7 @@ def _filter(
include_types=None,
exclude_types=None,
include_today=False,
+ start_index=None,
):
# remove unwanted waste types from include list
if include_types is not None:
@@ -115,6 +121,8 @@ def _filter(
entries.sort(key=lambda e: e.date)
# remove surplus entries
+ if start_index is not None:
+ entries = entries[start_index:]
if count is not None:
entries = entries[:count]
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/service/AbfallnaviDe.py b/custom_components/waste_collection_schedule/waste_collection_schedule/service/AbfallnaviDe.py
index 375e9327..b836b7f6 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/service/AbfallnaviDe.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/service/AbfallnaviDe.py
@@ -12,15 +12,25 @@
"service_id": "aachen",
},
{
- "title": "AWA Entsorgungs GmbH",
- "url": "https://www.awa-gmbh.de/",
- "service_id": "zew2",
+ "title": "Abfallwirtschaft Stadt Nürnberg",
+ "url": "https://www.nuernberg.de/",
+ "service_id": "nuernberg",
},
{
"title": "Abfallwirtschaftsbetrieb Bergisch Gladbach",
"url": "https://www.bergischgladbach.de/",
"service_id": "aw-bgl2",
},
+ {
+ "title": "AWA Entsorgungs GmbH",
+ "url": "https://www.awa-gmbh.de/",
+ "service_id": "zew2",
+ },
+ {
+ "title": "AWG Kreis Warendorf",
+ "url": "https://www.awg-waf.de/",
+ "service_id": "krwaf",
+ },
{
"title": "Bergischer Abfallwirtschaftverbund",
"url": "https://www.bavweb.de/",
@@ -46,6 +56,11 @@
"url": "https://www.ebd-dorsten.de/",
"service_id": "dorsten",
},
+ {
+ "title": "EGW Westmünsterland",
+ "url": "https://www.egw.de/",
+ "service_id": "wml2",
+ },
{
"title": "Gütersloh",
"url": "https://www.guetersloh.de/",
@@ -62,9 +77,9 @@
"service_id": "krhs",
},
{
- "title": "AWG Kreis Warendorf",
- "url": "https://www.awg-waf.de/",
- "service_id": "krwaf",
+ "title": "Kronberg im Taunus",
+ "url": "https://www.kronberg.de/",
+ "service_id": "kronberg",
},
{
"title": "Gemeinde Lindlar",
@@ -76,16 +91,6 @@
"url": "https://www.betriebsamt-norderstedt.de/",
"service_id": "nds",
},
- {
- "title": "Abfallwirtschaft Stadt Nürnberg",
- "url": "https://www.nuernberg.de/",
- "service_id": "nuernberg",
- },
- {
- "title": "WBO Wirtschaftsbetriebe Oberhausen",
- "url": "https://www.wbo-online.de/",
- "service_id": "oberhausen",
- },
{
"title": "Kreis Pinneberg",
"url": "https://www.kreis-pinneberg.de/",
@@ -106,27 +111,26 @@
"url": "https://www.stl-luedenscheid.de/",
"service_id": "stl",
},
- # {
- # "title": "'Stadt Straelen",
- # "url": "https://www.straelen.de/",
- # "service_id": "straelen",
- # },
{
"title": "Kreis Viersen",
"url": "https://www.kreis-viersen.de/",
"service_id": "viersen",
},
{
- "title": "EGW Westmünsterland",
- "url": "https://www.egw.de/",
- "service_id": "wml2",
+ "title": "WBO Wirtschaftsbetriebe Oberhausen",
+ "url": "https://www.wbo-online.de/",
+ "service_id": "oberhausen",
},
{
- "title": "Kronberg im Taunus",
- "url": "https://www.kronberg.de/",
- "service_id": "kronberg",
+ "title": "ZEW Zweckverband Entsorgungsregion West",
+ "url": "https://zew-entsorgung.de/",
+ "service_id": "zew2",
},
-
+ # {
+ # "title": "'Stadt Straelen",
+ # "url": "https://www.straelen.de/",
+ # "service_id": "straelen",
+ # },
]
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/service/CMCityMedia.py b/custom_components/waste_collection_schedule/waste_collection_schedule/service/CMCityMedia.py
index 1120e0eb..68e2c10b 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/service/CMCityMedia.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/service/CMCityMedia.py
@@ -38,17 +38,6 @@
"43": "mdi:package-variant", # Papiertonne
},
},
- {
- "hpid": 107,
- "realm": 10701,
- "name": "www.kressbronn.de - Müllkalender",
- "region": "Gemeinde Kressbronn am Bodensee",
- "icons": {
- "47": "mdi:trash-can", # Bio- und Restmüllabfuhr
- "46": "mdi:recycle", # Gelbe Säcke
- "48": "mdi:package-variant", # Papiertonne
- },
- },
{
"hpid": 168,
"realm": 16801,
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfall_lippe_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfall_lippe_de.py
new file mode 100644
index 00000000..881ff6a7
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfall_lippe_de.py
@@ -0,0 +1,106 @@
+import datetime
+
+import requests
+from bs4 import BeautifulSoup, Tag
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "Abfallwirtschaftsverbandes Lippe"
+DESCRIPTION = "Source for Abfallwirtschaftsverbandes Lippe."
+URL = "https://abfall-lippe.de"
+TEST_CASES = {
+ "Bad Salzuflen BB": {"gemeinde": "Bad Salzuflen", "bezirk": "BB"},
+ "Augustdorf": {"gemeinde": "Augustdorf"},
+ "Barntrup 3B": {"gemeinde": "Barntrup", "bezirk": "3-B"},
+}
+
+
+ICON_MAP = {
+ "Graue": "mdi:trash-can",
+ "Glass": "mdi:bottle-soda",
+ "Grüne": "mdi:leaf",
+ "Laubannahme": "mdi:leaf-maple",
+ "Blaue": "mdi:package-variant",
+ "Gelbe": "mdi:recycle",
+ "Schadstoffsammlung": "mdi:biohazard",
+ "Groß-Container Altpapier|Pappe": "mdi:package-variant-closed",
+}
+
+
+API_URL = "https://abfall-lippe.de/service/abfuhrkalender"
+
+
+class Source:
+ def __init__(self, gemeinde: str, bezirk: str | None = None):
+ self._gemeinde: str = gemeinde
+ self._bezirk: str = bezirk if bezirk is not None else ""
+ self._ics = ICS()
+
+ def fetch(self):
+ year = datetime.datetime.now().year
+ urls = [
+ API_URL,
+ f"{API_URL}-{year}",
+ f"{API_URL}-{year-1}",
+ f"{API_URL}-{year+1}",
+ ]
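+        # the download page sometimes moves to a year-suffixed URL: probe the
+        # plain URL first, then year variants, stopping at the first response
+        # that is not redirected back to the homepage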
+ for url in urls:
+ r = requests.get(url)
+ if r.status_code == 200 and r.request.url != "https://abfall-lippe.de":
+ break
+ if r.status_code != 200 or r.request.url == "https://abfall-lippe.de":
+            raise Exception(
+                "Failed to fetch data from Abfallwirtschaftsverband Lippe. The URL may have changed."
+            )
+ r.raise_for_status()
+
+ soup = BeautifulSoup(r.text, "html.parser")
+ headlines = soup.find_all("div", class_="elementor-widget-heading")
+
+ gemeinde_headline: Tag | None = None
+ for headline in headlines:
+ if not isinstance(headline, Tag):
+ continue
+ h3 = headline.find("h3")
+ if not isinstance(h3, Tag):
+ continue
+
+ if h3.text.lower().strip() == self._gemeinde.lower().strip():
+ gemeinde_headline = headline
+ break
+
+ if gemeinde_headline is None:
+ raise Exception("Gemeinde not found, please check spelling")
+
+ links_container = gemeinde_headline.parent
+
+ if links_container is None:
+ raise Exception(f"No links found for {self._gemeinde}")
+
+        link: str | None = None
+ for a in links_container.find_all("a"):
+ if not isinstance(a, Tag):
+ continue
+ if (
+ a.text.lower().replace("ics", "").strip()
+ == self._bezirk.lower().replace("ics", "").strip()
+ ):
+ link = a.get("href")
+ break
+
+ if link is None:
+ raise Exception("Did not found matching ICS link for gemeinde and (bezirk)")
+
+ # get ICS file
+ r = requests.get(link)
+ r.raise_for_status()
+ r.encoding = "utf-8"
+ dates = self._ics.convert(r.text)
+ entries = []
+ for d in dates:
+ icon = ICON_MAP.get(d[1].split(" ")[0])
+ if icon is None:
+ icon = ICON_MAP.get(d[1])
+ entries.append(Collection(d[0], d[1], icon=icon))
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_emsland_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_emsland_de.py
new file mode 100644
index 00000000..ed85df27
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_emsland_de.py
@@ -0,0 +1,124 @@
+# Nearly direct copy of source awn_de
+
+from html.parser import HTMLParser
+
+import requests
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "Abfallwirtschaftsbetrieb Emsland"
+DESCRIPTION = "Source for AWN (Abfallwirtschaft Neckar-Odenwald-Kreis)."
+URL = "https://www.awn-online.de"
+TEST_CASES = {
+ "Andervenne Am Gallenberg": {
+ "city": "Andervenne",
+ "street": "Am Gallenberg",
+ "house_number": "1",
+ },
+ "Neubörger Aschendorfer Straße 1 A": {
+ "city": "Neubörger",
+ "street": "Aschendorfer Straße",
+ "house_number": 1,
+ "address_suffix": "A",
+ },
+ "Lähden Ahornweg 15": {
+ "city": "Lähden",
+ "street": "Ahornweg",
+ "house_number": 15,
+ },
+}
+SERVLET = "https://portal.awb-emsland.de/WasteManagementEmsland/WasteManagementServlet"
+
+ICON_MAP = {
+ "Restabfallbehaelter": "mdi:trash-can",
+ "Papierbehaelter": "mdi:package-variant",
+ "Wertstoffbehaelter": "mdi:recycle",
+ "Bioabfallbehaelter": "mdi:leaf",
+}
+
+
+# Parser for HTML input (hidden) text
+class HiddenInputParser(HTMLParser):
+ def __init__(self):
+ super().__init__()
+ self._args = {}
+
+ @property
+ def args(self):
+ return self._args
+
+ def handle_starttag(self, tag, attrs):
+ if tag == "input":
+ d = dict(attrs)
+ if str(d["type"]).lower() == "hidden":
+ self._args[d["name"]] = d["value"] if "value" in d else ""
+
+
+class Source:
+ def __init__(
+ self, city: str, street: str, house_number: int, address_suffix: str = ""
+ ):
+ self._city = city
+ self._street = street
+ self._hnr = house_number
+ self._suffix = address_suffix
+ self._ics = ICS()
+
+ def fetch(self):
+ session = requests.session()
+
+ r = session.get(
+ SERVLET,
+ params={"SubmitAction": "wasteDisposalServices", "InFrameMode": "TRUE"},
+ )
+ r.raise_for_status()
+ r.encoding = "utf-8"
+
+ parser = HiddenInputParser()
+ parser.feed(r.text)
+
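+        # replay the servlet's hidden form fields, then walk its multi-step
+        # form: select the address, tick all containers, and download the ICS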
+ args = parser.args
+ args["Ort"] = self._city
+ args["Strasse"] = self._street
+ args["Hausnummer"] = str(self._hnr)
+ args["Hausnummerzusatz"] = self._suffix
+ args["SubmitAction"] = "CITYCHANGED"
+ r = session.post(
+ SERVLET,
+ data=args,
+ )
+ r.raise_for_status()
+
+ args["SubmitAction"] = "forward"
+ args["ContainerGewaehlt_1"] = "on"
+ args["ContainerGewaehlt_2"] = "on"
+ args["ContainerGewaehlt_3"] = "on"
+ args["ContainerGewaehlt_4"] = "on"
+ args["ContainerGewaehlt_5"] = "on"
+ args["ContainerGewaehlt_6"] = "on"
+ args["ContainerGewaehlt_7"] = "on"
+ args["ContainerGewaehlt_8"] = "on"
+ args["ContainerGewaehlt_9"] = "on"
+ args["ContainerGewaehlt_10"] = "on"
+ r = session.post(
+ SERVLET,
+ data=args,
+ )
+ r.raise_for_status()
+
+ args["ApplicationName"] = "com.athos.kd.emsland.AbfuhrTerminModel"
+ args["SubmitAction"] = "filedownload_ICAL"
+ r = session.post(
+ SERVLET,
+ data=args,
+ )
+ r.raise_for_status()
+
+ dates = self._ics.convert(r.text)
+
+ entries = []
+ for d in dates:
+ bin_type = d[1].strip()
+ entries.append(Collection(d[0], bin_type, icon=ICON_MAP.get(bin_type)))
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aylesburyvaledc_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aylesburyvaledc_gov_uk.py
new file mode 100644
index 00000000..ed760491
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aylesburyvaledc_gov_uk.py
@@ -0,0 +1,59 @@
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection
+
+TITLE = "Aylesbury Vale District Council"
+DESCRIPTION = "Source for aylesburyvaledc.gov.uk services for Aylesbury Vale District Council, UK."
+URL = "https://aylesburyvaledc.gov.uk"
+TEST_CASES = {
+ "Test_001": {"uprn": "766292368"},
+ "Test_002": {"uprn": 766310306},
+ "Test_003": {"uprn": "766029949"},
+}
+ICON_MAP = {
+ "REFUSE": "mdi:trash-can",
+ "GARDEN": "mdi:leaf",
+ "RECYCLING": "mdi:recycle",
+ "FOOD": "mdi:food",
+}
+
+
+class Source:
+ def __init__(self, uprn):
+ self._uprn = str(uprn).zfill(12)
+
+ def fetch(self):
+        # Build SOAP 1.2 request
+        # (envelope reconstructed; the operation element name is an
+        # assumption and should be checked against the RefuseApi WSDL)
+        url = "http://avdcbins.web-labs.co.uk/RefuseApi.asmx"
+        headers = {"Content-Type": "application/soap+xml; charset=utf-8"}
+        body = f"""<?xml version="1.0" encoding="utf-8"?>
+        <soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                         xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+                         xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
+          <soap12:Body>
+            <BinCollections xmlns="http://tempuri.org/">
+              <uprn>{self._uprn}</uprn>
+            </BinCollections>
+          </soap12:Body>
+        </soap12:Envelope>"""
+
+ response = requests.post(url, data=body, headers=headers)
+ soup = BeautifulSoup(response.content, "xml")
+ bins = soup.find_all("BinCollection")
+
+ entries = []
+ for item in bins:
+ dt = item.find("Date")
+ for waste in ICON_MAP:
+ w = item.find(waste.capitalize())
+ if w.text == "true":
+ entries.append(
+ Collection(
+ date=datetime.strptime(dt.text, "%Y-%m-%dT07:00:00").date(),
+ t=waste,
+ icon=ICON_MAP.get(waste),
+ )
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py
index c5100f7c..ae9bc936 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py
@@ -1,9 +1,17 @@
from datetime import datetime
import requests
+# With verify=True the POST fails due to an SSLCertVerificationError.
+# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:
+# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
+# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
+# These two lines are used to suppress the InsecureRequestWarning when using verify=False.
+import urllib3
from bs4 import BeautifulSoup
from waste_collection_schedule import Collection # type: ignore[attr-defined]
+urllib3.disable_warnings()
+
TITLE = "Basingstoke and Deane Borough Council"
DESCRIPTION = "Source for basingstoke.gov.uk services for Basingstoke and Deane Borough Council, UK."
URL = "https://basingstoke.gov.uk"
@@ -37,6 +45,7 @@ def fetch(self):
"https://www.basingstoke.gov.uk/bincollections",
headers=HEADERS,
cookies=REQUEST_COOKIES,
+ verify=False,
)
r.raise_for_status()
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bristol_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bristol_gov_uk.py
index c5f4fe9a..05db538d 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bristol_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bristol_gov_uk.py
@@ -2,10 +2,9 @@
# This is predominantly a refactoring of the Bristol City Council script from the UKBinCollectionData repo
# https://github.com/robbrad/UKBinCollectionData
-import re
-import requests
-
from datetime import datetime
+
+import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "Bristol City Council"
@@ -23,11 +22,7 @@
"180L GENERAL WASTE": "mdi:trash-can",
"45L BLACK RECYCLING BOX": "mdi:recycle",
"23L FOOD WASTE BIN": "mdi:food",
- "55L GREEN RECYCLING BOX": "mdi:recycle"
-}
-REGEX = {
- "waste": r'\"containerName\":\"([0-9]{2,3}L\s[a-zA-z\s]*)',
- "date": r'\"nextCollectionDate\":\"([\d]{4}-[\d]{2}-[\d]{2})'
+ "55L GREEN RECYCLING BOX": "mdi:recycle",
}
HEADERS = {
"Accept": "*/*",
@@ -76,18 +71,24 @@ def fetch(self):
headers=HEADERS,
json=payload,
)
- wastes = re.findall(REGEX["waste"], response.text)
- dates = re.findall(REGEX["date"], response.text)
- schedule = list(zip(wastes, dates))
+ data = response.json()["data"]
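+        # each item describes one container ("containerName") and lists its
+        # collections, each carrying nextCollectionDate/lastCollectionDate
+        # ISO timestamps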
entries = []
- for item in schedule:
- entries.append(
- Collection(
- date=datetime.strptime(item[1], "%Y-%m-%d").date(),
- t=item[0],
- icon=ICON_MAP.get(item[0].upper()),
- )
- )
+ for item in data:
+ for collection in item["collection"]:
+ for collection_date_key in ["nextCollectionDate", "lastCollectionDate"]:
+ date_string = collection[collection_date_key].replace(
+ "T00:00:00", ""
+ )
+ entries.append(
+ Collection(
+ date=datetime.strptime(
+ date_string,
+ "%Y-%m-%d",
+ ).date(),
+ t=item["containerName"],
+ icon=ICON_MAP.get(item["containerName"].upper()),
+ )
+ )
return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/burnley_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/burnley_gov_uk.py
new file mode 100644
index 00000000..e1c0c409
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/burnley_gov_uk.py
@@ -0,0 +1,99 @@
+import json
+from datetime import datetime
+from time import time_ns
+
+import requests
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "Burnley Council"
+DESCRIPTION = "Source for burnley.gov.uk services for the Burnley, UK."
+URL = "https://burnley.gov.uk"
+TEST_CASES = {
+ "Test_001": {"uprn": "100010341681"},
+ "Test_002": {"uprn": 100010358864},
+ "Test_003": {"uprn": "100010357864"},
+ "Test_004": {"uprn": 100010327003},
+}
+HEADERS = {
+ "user-agent": "Mozilla/5.0",
+}
+ICON_MAP = {
+ "REFUSE": "mdi:trash-can",
+ "RECYCLING": "mdi:recycle",
+ "GARDEN": "mdi:leaf",
+}
+MONTHS = {
+ "January": 1,
+ "February": 2,
+ "March": 3,
+ "April": 4,
+ "May": 5,
+ "June": 6,
+ "July": 7,
+ "August": 8,
+ "September": 9,
+ "October": 10,
+ "November": 11,
+ "December": 12,
+}
+
+
+class Source:
+ def __init__(self, uprn):
+ self._uprn = str(uprn).zfill(12)
+
+ def fetch(self):
+ s = requests.Session()
+
+ # Set up session
+ timestamp = time_ns() // 1_000_000 # epoch time in milliseconds
+ s.get(
+ f"https://your.burnley.gov.uk/apibroker/domain/your.burnley.gov.uk?_={timestamp}",
+ headers=HEADERS,
+ )
+
+ # This request gets the session ID
+ sid_request = s.get(
+ "https://your.burnley.gov.uk/authapi/isauthenticated?uri=https%253A%252F%252Fyour.burnley.gov.uk%252Fen%252FAchieveForms%252F%253Fform_uri%253Dsandbox-publish%253A%252F%252FAF-Process-b41dcd03-9a98-41be-93ba-6c172ba9f80c%252FAF-Stage-edb97458-fc4d-4316-b6e0-85598ec7fce8%252Fdefinition.json%2526redirectlink%253D%25252Fen%2526cancelRedirectLink%253D%25252Fen%2526consentMessage%253Dyes&hostname=your.burnley.gov.uk&withCredentials=true",
+ headers=HEADERS,
+ )
+ sid_data = sid_request.json()
+ sid = sid_data["auth-session"]
+
+ # This request retrieves the schedule
+ timestamp = time_ns() // 1_000_000 # epoch time in milliseconds
+ payload = {"formValues": {"Section 1": {"case_uprn1": {"value": self._uprn}}}}
+ schedule_request = s.post(
+ f"https://your.burnley.gov.uk/apibroker/runLookup?id=607fe757df87c&repeat_against=&noRetry=false&getOnlyTokens=undefined&log_id=&app_name=AF-Renderer::Self&_={timestamp}&sid={sid}",
+ headers=HEADERS,
+ json=payload,
+ )
+ rowdata = json.loads(schedule_request.content)["integration"]["transformed"][
+ "rows_data"
+ ]
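+        # each row's "display" value looks like "<waste> - <weekday> <day> <month>"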
+
+ # Extract bin types and next collection dates
+ # Website doesn't return a year, so compare months to deal with collection spanning a year-end
+ entries = []
+ current_month = datetime.strftime(datetime.now(), "%B")
+ current_year = int(datetime.strftime(datetime.now(), "%Y"))
+ for item in rowdata:
+ info = rowdata[item]["display"].split(" - ")
+ waste = info[0]
+ dt = info[1]
+ bin_month = dt.split(" ")[-1]
+ if MONTHS[bin_month] < MONTHS[current_month]:
+ bin_year = current_year + 1
+ dt = dt + str(bin_year)
+ else:
+ dt = dt + str(current_year)
+ entries.append(
+ Collection(
+ t=waste,
+ date=datetime.strptime(dt, "%A %d %B%Y").date(),
+ icon=ICON_MAP.get(waste.upper()),
+ )
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/calgary_ca.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/calgary_ca.py
new file mode 100644
index 00000000..099b6f76
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/calgary_ca.py
@@ -0,0 +1,100 @@
+import json
+from datetime import datetime, timedelta
+
+import requests
+from waste_collection_schedule import Collection
+
+TITLE = "Calgary (AB)"
+DESCRIPTION = "Source for Calgary waste collection"
+URL = "https://www.calgary.ca"
+
+# ADDRESSES MUST BE ALL CAPS and INCLUDE A QUADRANT
+TEST_CASES = {"42 AUBURN SHORES WY SE": {"street_address": "42 AUBURN SHORES WY SE"}}
+
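+# open-data endpoint on data.calgary.ca returning, per address, the seasonal
+# (summer/winter) schedule windows plus collection day and interval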
+SCHEDULE_LOOKUP_URL = "https://data.calgary.ca/resource/jq4t-b745.json"
+
+ICON_MAP = {
+ "Green": "mdi:leaf",
+ "Black": "mdi:trash-can",
+ "Blue": "mdi:recycle",
+}
+
+WEEKDAYS = [
+ "Monday",
+ "Tuesday",
+ "Wednesday",
+ "Thursday",
+ "Friday",
+ "Saturday",
+ "Sunday",
+]
+
+
+class Source:
+ def __init__(self, street_address):
+ self._street_address = street_address.upper()
+
+ def daterange(self, start_date, end_date):
+ for n in range(int((end_date - start_date).days)):
+ yield start_date + timedelta(n)
+
+ def collect(self, single_date, collection_day, interval):
+ # check the collection day against the day of the week
+ if WEEKDAYS[single_date.weekday()] == collection_day:
+ # check the interval (even, odd or every)
+ if interval == "EVERY":
+ return True
+
+ # get the week number of the current date
+ week = single_date.isocalendar()[1]
+ week_modulus = week % 2
+
+        # return True if the week modulus corresponds to the collection interval
+ if (interval == "EVEN") and (week_modulus == 0):
+ return True
+ elif (interval == "ODD") and (week_modulus == 1):
+ return True
+ return False
+
+ def fetch(self):
+ # lookup the schedule key for the address
+ schedule_download = requests.get(
+ SCHEDULE_LOOKUP_URL,
+ params={"address": self._street_address, "quadrant": "SE"},
+ )
+ schedule = json.loads(schedule_download.content.decode("utf-8"))
+ entries = []
+
+ for entry in schedule:
+ date_format = "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ current_date = datetime.now()
+ commodity = entry["commodity"]
+ summer_start = datetime.strptime(entry["summer_start"], date_format)
+ summer_end = datetime.strptime(entry["summer_end"], date_format)
+ winter_start = datetime.strptime(entry["winter_start"], date_format)
+ winter_end = datetime.strptime(entry["winter_end"], date_format)
+ collection_day_summer = entry["collection_day_summer"]
+ collection_day_winter = entry["collection_day_winter"]
+ clect_int_summer = entry["clect_int_summer"]
+ clect_int_winter = entry["clect_int_winter"]
+
+ # iterate over summer schedule and add entry if needed
+ for single_date in self.daterange(summer_start, summer_end):
+ # don't need to include dates already passed
+ if single_date < current_date:
+ continue
+ # if the collection interval is satisfied and if the weekday is the same as the collection day then add the entry
+ if self.collect(single_date, collection_day_summer, clect_int_summer):
+ icon = ICON_MAP.get(commodity)
+ entries.append(Collection(single_date.date(), commodity, icon=icon))
+
+ for single_date in self.daterange(winter_start, winter_end):
+ # don't need to include dates already passed
+ if single_date < current_date:
+ continue
+ # if the collection interval is satisfied and if the weekday is the same as the collection day then add the entry
+ if self.collect(single_date, collection_day_winter, clect_int_winter):
+ icon = ICON_MAP.get(commodity)
+ entries.append(Collection(single_date.date(), commodity, icon=icon))
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/camden_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/camden_gov_uk.py
new file mode 100644
index 00000000..2b0872c1
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/camden_gov_uk.py
@@ -0,0 +1,64 @@
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "London Borough of Camden"
+DESCRIPTION = "Source for London Borough of Camden."
+URL = "https://www.camden.gov.uk/"
+TEST_CASES = {
+ "Cannon Place": {"uprn": "5061647"},
+ "Red Lion Street": {"uprn": 5121151},
+}
+
+
+ICON_MAP = {
+ "rubbish": "mdi:trash-can",
+ "garden waste": "mdi:leaf",
+ "food": "mdi:food",
+ "recycling": "mdi:recycle",
+}
+
+
+API_URL = "https://environmentservices.camden.gov.uk/property/{uprn}"
+ICS_URL = "https://environmentservices.camden.gov.uk{href}"
+
+
+class Source:
+ def __init__(self, uprn: str | int):
+ self._uprn: str = str(uprn)
+ self._ics = ICS()
+
+ def fetch(self):
+ # get collection overview page
+ r = requests.get(API_URL.format(uprn=self._uprn))
+ r.raise_for_status()
+
+ soup = BeautifulSoup(r.text, "html.parser")
+
+ # get all ICS links (Add to my calendar)
+ ics_urls = []
+ for a in soup.find_all("a"):
+ if a["href"].startswith("/ical/"):
+ ics_urls.append(ICS_URL.format(href=a["href"]))
+
+ # get all collections from ICS files
+ collections = []
+ for ics_url in ics_urls:
+ r = requests.get(ics_url)
+ r.raise_for_status()
+ collections.extend(self._ics.convert(r.text))
+
+ entries = []
+ for d in collections:
+ bin_type = d[1].replace("Reminder", "").replace(" - ", "").strip()
+ icon = ICON_MAP.get(
+ bin_type.lower()
+ .replace("domestic", "")
+ .replace("collection", "")
+ .strip()
+ )
+ entries.append(Collection(date=d[0], t=bin_type, icon=icon))
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/cardiff_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/cardiff_gov_uk.py
new file mode 100644
index 00000000..b65f2e76
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/cardiff_gov_uk.py
@@ -0,0 +1,108 @@
+import datetime
+import json
+import xml.etree.ElementTree as ET
+
+import requests
+from waste_collection_schedule import Collection
+
+TITLE = "Cardiff Council"
+DESCRIPTION = "Source script for cardiff.gov.uk"
+URL = "https://cardiff.gov.uk"
+TEST_CASES = {
+ "Glass": {"uprn": "100100124569"},
+ "NoGlass": {"uprn": "100100127440"},
+}
+
+ICON_MAP = {
+ "General": "mdi:trash-can",
+ "Recycling": "mdi:recycle",
+ "Garden": "mdi:leaf",
+ "Food": "mdi:food",
+ "Glass": "mdi:glass-fragile",
+}
+
+# SOAP 1.1 envelope for the GetJWT operation (operation name and the
+# http://tempuri.org/ namespace match the response parsing below)
+PAYLOAD_GET_JWT = (
+    '<?xml version="1.0" encoding="utf-8"?>'
+    '<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+    ' xmlns:xsd="http://www.w3.org/2001/XMLSchema"'
+    ' xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">'
+    "<soap:Body>"
+    '<GetJWT xmlns="http://tempuri.org/" />'
+    "</soap:Body>"
+    "</soap:Envelope>"
+)
+
+URL_COLLECTIONS = "https://api.cardiff.gov.uk/WasteManagement/api/WasteCollection"
+URL_GET_JWT = (
+ "https://authwebservice.cardiff.gov.uk/AuthenticationWebService.asmx?op=GetJWT"
+)
+
+
+def get_headers() -> dict[str, str]:
+ """Return common headers that every request to the Cardiff API requires."""
+ return {
+ "Origin": "https://www.cardiff.gov.uk",
+ "Referer": "https://www.cardiff.gov.uk/",
+ "User-Agent": "Mozilla/5.0",
+ }
+
+
+def get_token() -> str:
+ """Get an access token."""
+ headers = get_headers()
+ headers.update(
+ {
+ "Content-Type": 'text/xml; charset="UTF-8"',
+ }
+ )
+ r = requests.post(URL_GET_JWT, headers=headers, data=PAYLOAD_GET_JWT)
+ r.raise_for_status()
+
+ tree = ET.fromstring(r.text)
+
+ jwt_result_element = tree.find(
+ ".//GetJWTResult", namespaces={"": "http://tempuri.org/"}
+ )
+
+ if jwt_result_element is None or jwt_result_element.text is None:
+ raise Exception("could not find Token")
+ jwt_result = jwt_result_element.text
+
+ token = json.loads(jwt_result)["access_token"]
+ return token
+
+
+class Source:
+ def __init__(self, uprn=None):
+ self._uprn = uprn
+
+ def fetch(self):
+ payload_waste_collections = {
+ "systemReference": "web",
+ "language": "eng",
+ "uprn": self._uprn,
+ }
+
+ entries = []
+
+ jwt = get_token()
+ client = requests.Session()
+ headers = get_headers()
+ headers.update({"Authorization": f"Bearer {jwt}"})
+
+ r = client.post(
+ URL_COLLECTIONS, headers=headers, json=payload_waste_collections
+ )
+ r.raise_for_status()
+ collections = r.json()
+
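+        # response shape: {"collectionWeeks": [{"date": "<ISO date>", "bins": [{"type": ...}, ...]}, ...]}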
+ for week in collections["collectionWeeks"]:
+ for bin in week["bins"]:
+ entries.append(
+ Collection(
+ date=datetime.datetime.fromisoformat(week["date"]).date(),
+ t=bin["type"],
+ icon=ICON_MAP.get(bin["type"]),
+ )
+ )
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/cherwell_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/cherwell_gov_uk.py
index 69b38435..e7299b92 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/cherwell_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/cherwell_gov_uk.py
@@ -13,13 +13,14 @@
"Test_001": {"uprn": "100120758315"},
"Test_002": {"uprn": "100120780449"},
"Test_003": {"uprn": 100120777153},
+ "Test_004": {"uprn": 10011931488},
}
HEADERS = {
"user-agent": "Mozilla/5.0",
}
REGEX = {
"DATES": r"([Green|Blue|Brown]+ Bin)",
- "ORDINALS": r"(st|nd|rd|th)",
+ "ORDINALS": r"(st|nd|rd|th) ",
}
ICON_MAP = {
"GREEN BIN": "mdi:trash-can",
@@ -51,9 +52,9 @@ def fetch(self):
# Get date, append year, and increment year if date is >1 month in the past.
# This tries to deal year-end dates when the YEAR is missing
date = box.find("p", {"class": "bin-collection-tasks__date"}).text.strip()
+        date = re.sub(REGEX["ORDINALS"], "", date)
date += " " + str(yr)
- dt = re.sub(REGEX["ORDINALS"],"", date)
- dt = datetime.strptime(dt, "%d %B %Y")
+ dt = datetime.strptime(date, "%d%B %Y")
if (dt - today) < timedelta(days=-31):
dt = dt.replace(year = dt.year + 1)
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/chiemgau_recycling_lk_rosenheim.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/chiemgau_recycling_lk_rosenheim.py
new file mode 100644
index 00000000..b11fe1d8
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/chiemgau_recycling_lk_rosenheim.py
@@ -0,0 +1,41 @@
+from datetime import datetime
+
+import requests
+from waste_collection_schedule import Collection
+
+TITLE = "Chiemgau Recycling - Landkreis Rosenheim" # Title will show up in README.md and info.md
+DESCRIPTION = "Source script for paper waste collection in Landkreis Rosenheim area" # Describe your source
+URL = "https://chiemgau-recycling.de" # Insert url to service homepage. URL will show up in README.md and info.md
+COUNTRY = "de"
+TEST_CASES = { # Insert arguments for test cases to be used by test_sources.py script
+ "Bruckmühl 1": {
+ "district": "Bruckmühl 1"
+ }
+}
+
+ICON_MAP = {
+ "Papier": "mdi:package-variant",
+}
+
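+# returns a plain JSON list of ISO-format collection dates for the district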
+API_URL = "https://blauetonne.stkn.org/lk_rosenheim"
+
+
+class Source:
+ def __init__(self, district):
+ self.district = district
+
+ def fetch(self):
+ entries = []
+
+ r = requests.get(f"{API_URL}", params={"district": self.district})
+ r.raise_for_status()
+
+ for date in r.json():
+ entries.append(
+ Collection(
+ date=datetime.fromisoformat(date).date(), # Collection date
+ t="Papier Tonne", # Collection type
+ icon=ICON_MAP.get("Papier"), # Collection icon
+ )
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/cmcitymedia_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/cmcitymedia_de.py
index 5617fdec..bef35ae6 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/cmcitymedia_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/cmcitymedia_de.py
@@ -19,18 +19,9 @@ def EXTRA_INFO():
return [{"title": s["region"], "url": URL} for s in SERVICE_MAP]
-API_URL = "https://sslslim.cmcitymedia.de/v1/{}/waste/{}/dates"
+API_URL = "http://slim.cmcitymedia.de/v1/{}/waste/{}/dates"
DATE_FORMAT = "%Y-%m-%d"
-
-class TLSAdapter(requests.adapters.HTTPAdapter):
- def init_poolmanager(self, *args, **kwargs):
- ctx = ssl.create_default_context()
- ctx.set_ciphers("DEFAULT@SECLEVEL=1")
- kwargs["ssl_context"] = ctx
- return super().init_poolmanager(*args, **kwargs)
-
-
class Source:
def __init__(self, hpid, realmid=None, district=None):
self.hpid = hpid
@@ -39,17 +30,13 @@ def __init__(self, hpid, realmid=None, district=None):
)
self.realmid = realmid if realmid else self.service["realm"]
self.district = district
- self.session = requests.Session()
- self.session.mount("https://", TLSAdapter())
def fetch(self):
entries = []
district_param = f"?district={self.district}" if self.district else ""
- result = self.session.get(
- API_URL.format(self.hpid, self.service["realm"]) + district_param
- )
+        result = requests.get(
+            API_URL.format(self.hpid, self.service["realm"]) + district_param
+        )
result.raise_for_status()
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/crawley_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/crawley_gov_uk.py
new file mode 100644
index 00000000..d64a0459
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/crawley_gov_uk.py
@@ -0,0 +1,84 @@
+# Credit where it's due:
+# This is predominantly a refactoring of the Crawley Borough Council script from the UKBinCollectionData repo
+# https://github.com/robbrad/UKBinCollectionData
+
+
+import re
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "Crawley Borough Council (myCrawley)"
+DESCRIPTION = "Source for Crawley Borough Council (myCrawley)."
+URL = "https://crawley.gov.uk/"
+TEST_CASES = {
+ "Feroners Cl": {"uprn": "100061775179"},
+ "Peterborough Road": {"uprn": 100061787552, "usrn": 9700731},
+}
+
+
+ICON_MAP = {
+ "Rubbish and Small Electricals Collection": "mdi:trash-can",
+ "Glass": "mdi:bottle-soda",
+ "Bio": "mdi:leaf",
+ "Paper": "mdi:package-variant",
+ "Recycling and Textiles Collection": "mdi:recycle",
+}
+
+
+API_URL = "https://my.crawley.gov.uk/en/service/check_my_bin_collection"
+
+
+class Source:
+ def __init__(self, uprn: str | int, usrn: str | int | None = None):
+ self._uprn = str(uprn)
+ self._usrn = str(usrn) if usrn else None
+
+ def fetch(self):
+ today = datetime.now().date()
+ day = today.day
+ month = today.month
+ year = today.year
+
+ api_url = (
+ f"https://my.crawley.gov.uk/appshost/firmstep/self/apps/custompage/waste?language=en&uprn={self._uprn}"
+ f"&usrn={self._usrn}&day={day}&month={month}&year={year}"
+ )
+ response = requests.get(api_url)
+
+ soup = BeautifulSoup(response.text, features="html.parser")
+
+ entries = []
+
+ titles = [title.text.strip() for title in soup.select(".block-title")]
+ collection_tag = soup.body.find_all(
+ "div",
+ {"class": "col-md-6 col-sm-6 col-xs-6"},
+ string=re.compile("Next collection|Current or last collection"),
+ )
+
+ bin_index = 0
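+        # matched tags alternate between a bin's current/last and next
+        # collection, so two consecutive matches describe one bin and
+        # bin_index // 2 maps a match back to its bin title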
+ for tag in collection_tag:
+ for item in tag.next_elements:
+                # match the collection-date <div> by its class prefix (exact class string assumed)
+                if str(item).startswith('<div class="date'):
+                    collection_date = datetime.strptime(
+                        item.text + " " + str(year), "%A %d %B %Y"
+                    ).date()
+                    if collection_date < today and bin_index % 2 == 1:
+                        collection_date = collection_date.replace(
+                            year=collection_date.year + 1
+                        )
+                    entries.append(
+                        Collection(
+                            date=collection_date,
+                            t=titles[bin_index // 2],
+                            icon=ICON_MAP.get(titles[bin_index // 2]),
+                        )
+                    )
+                    bin_index += 1
+                    break
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py
new file mode 100644
index 00000000..f8f39949
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py
@@ -0,0 +1,92 @@
+import re
+from datetime import datetime, timedelta
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "Dudley Metropolitan Borough Council"
+DESCRIPTION = "Source for Dudley Metropolitan Borough Council, UK."
+URL = "https://dudley.gov.uk"
+TEST_CASES = {
+ "Test_001": {"uprn": "90090715"},
+ "Test_002": {"uprn": 90104555},
+ "Test_003": {"uprn": "90164803"},
+ "Test_004": {"uprn": 90092621},
+}
+ICON_MAP = {"RECYCLING": "mdi:recycle", "GARDEN": "mdi:leaf", "REFUSE": "mdi:trash-can"}
+REGEX = r"(\d+ \w{3})"
+
+
+class Source:
+ def __init__(self, uprn: str | int):
+ self._uprn = str(uprn)
+
+ def check_date(self, d: str, t: datetime, y: int):
+ """
+ Get date, append year, and increment year if date is >1 month in the past.
+
+        This tries to deal with year-end dates when the year is missing.
+ """
+ d += " " + str(y)
+ date = datetime.strptime(d, "%d %b %Y")
+ if (date - t) < timedelta(days=-31):
+ date = date.replace(year=date.year + 1)
+ return date.date()
+
+ def append_entries(self, d: datetime, w: str, e: list) -> list:
+ """
+ Append provided entry and Refuse entry for the same day.
+
+ Refuse is collected on the same dates as alternating Recycling/Garden collections,
+        so create two entries for each date: Refuse & Recycling, or Refuse & Garden.
+ """
+ e.append(
+ Collection(
+ date=d,
+ t=w,
+ icon=ICON_MAP.get(w.upper()),
+ )
+ )
+ e.append(
+ Collection(
+ date=d,
+ t="Refuse",
+ icon=ICON_MAP.get("REFUSE"),
+ )
+ )
+ return e
+
+ def fetch(self):
+ today = datetime.now()
+ today = today.replace(hour=0, minute=0, second=0, microsecond=0)
+ yr = int(today.year)
+
+ s = requests.Session()
+ r = s.get(
+ f"https://maps.dudley.gov.uk/?action=SetAddress&UniqueId={self._uprn}"
+ )
+ soup = BeautifulSoup(r.text, "html.parser")
+
+ panel = soup.find("div", {"aria-label": "Refuse and Recycling Collection"})
+ panel_data = panel.find("div", {"class": "atPanelData"})
+        # drop the first element; it only contains general info
+        panel_data = panel_data.text.split("Next")[1:]
+
+ entries = []
+ for item in panel_data:
+ text = item.replace("\r\n", "").strip()
+ if "recycling" in text:
+ dates = re.findall(REGEX, text)
+ for dt in dates:
+ dt = self.check_date(dt, today, yr)
+ self.append_entries(dt, "Recycling", entries)
+ elif "garden" in text:
+ dates = re.findall(REGEX, text)
+ for dt in dates:
+ dt = self.check_date(dt, today, yr)
+ self.append_entries(dt, "Garden", entries)
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/dunedin_govt_nz.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dunedin_govt_nz.py
new file mode 100644
index 00000000..ab40f0e2
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dunedin_govt_nz.py
@@ -0,0 +1,127 @@
+import json
+from datetime import datetime, timedelta
+
+import requests
+from waste_collection_schedule import Collection
+
+TITLE = "Dunedin District Council"
+DESCRIPTION = "Source for Dunedin District Council Rubbish & Recycling collection."
+URL = "https://www.dunedin.govt.nz/"
+TEST_CASES = {
+ # "No Collection": {"address": "3 Farm Road West Berwick"}, # Useful for troubleshooting, elicits a "No Collection" response from website
+ "Calendar 1": {"address": "5 Bennett Road Ocean View"},
+ "Calendar 2": {"address": "2 Council Street Dunedin"},
+ "All Week": {"address": "118 High Street Dunedin"},
+ "Collection 'c'": {"address": "2 - 90 Harbour Terrace Dunedin"},
+}
+DAYS = {
+ "Monday" : 0,
+ "Tuesday": 1,
+ "Wednesday": 2,
+ "Thursday": 3,
+ "Friday": 4,
+ "Saturday": 5,
+ "Sunday": 6,
+}
+HEADERS = {
+ "user-agent": "Mozilla/5.0",
+}
+ICON_MAP = {
+ "BLACK BAG": "mdi:trash-can",
+ "BLUE BIN": "mdi:bottle-soda",
+ "YELLOW BIN": "mdi:recycle",
+}
+
+
+class Source:
+ def __init__(self, address):
+        self._address = str(address).strip().replace(" ", "+")
+
+ def fetch(self):
+ # Get collection json
+ s = requests.Session()
+ r = s.get(
+ f"https://www.dunedin.govt.nz/design/rubbish-and-collection-days-search/lookup/_nocache?query={self._address}",
+ headers=HEADERS
+ )
+ r_json = json.loads(r.text)[0]["attributes"]
+
+ # Work out the date of the next collection(s)
+ collection_dates = []
+ today = datetime.now().date()
+ if r_json["collectionDay"] == "No Collection":
+ raise Exception("No collection service for that address")
+ elif r_json["collectionDay"] == "Today":
+ collection_dates.append(today)
+ elif r_json["collectionDay"] == "Tomorrow":
+ collection_dates.append(today + timedelta(days=1))
+ elif r_json["collectionDay"] == "All Week": # assume this means weekdays only, not weekends
+ collection_date = today
+ counter = 0
+ while counter <= 7:
+                if collection_date.weekday() < 5:  # Monday to Friday
+ collection_dates.append(collection_date)
+ collection_date = collection_date + timedelta(days=1)
+                counter += 1
+ else: # find date of next matching weekday
+ collection_date = today
+ while collection_date.strftime("%A") != r_json["collectionDay"]:
+ collection_date = collection_date + timedelta(days=1)
+ collection_dates.append(collection_date)
+
+ # Adjust dates impacted by public holidays
+ '''
+        Note: A JSON list of the public holidays potentially impacting collections can be retrieved from:
+ https://www.dunedin.govt.nz/__data/assets/js_file/0005/875336/publicHolidayData.js
+ At the time of writing (2023), none of the listed public holidays impact collection days
+ so it's not known how to account for any impact on collection day/date.
+ '''
+
+ # Now work out which waste types need to be displayed
+ '''
+ Note: r_json["CurrentWeek"] contains the collection code for the current calendar week.
+ If the collection day hasn't passed, then the collection code should be correct.
+ If the collection occurred earlier in the week, the collection code needs
+ to be switched to next week's collection code.
+ The collection codes seem to translate to:
+ b -> Blue bin & Black bag
+ c -> Blue bin, Yellow bin & Black bag
+ y -> Yellow bin & Black bag
+ n -> Black bag
+ These are likely to change in 2024 when new waste types are introduced, see:
+ https://www.dunedin.govt.nz/council/council-projects/waste-futures/the-future-of-rubbish-and-recycling-in-dunedin
+ '''
+ if r_json["collectionDay"] != "All Week":
+ if today.weekday() > DAYS[r_json["collectionDay"]]: # collection should have happened
+ if r_json["CurrentWeek"] == "c": # not strictly needed, included for completeness
+ r_json["CurrentWeek"] = "c"
+ if r_json["CurrentWeek"] == "y":
+ r_json["CurrentWeek"] = "b"
+ elif r_json["CurrentWeek"] != "n" and r_json["CurrentWeek"] != "c":
+ r_json["CurrentWeek"] = "y"
+
+ waste_types = []
+ if r_json["CurrentWeek"] == "n":
+ waste_types.append("Black Bag")
+ elif r_json["CurrentWeek"] == "y":
+ waste_types.extend(("Black Bag", "Yellow Bin"))
+ elif r_json["CurrentWeek"] == "b":
+ waste_types.extend(("Black Bag", "Blue Bin"))
+ elif r_json["CurrentWeek"] == "c":
+ waste_types.extend(("Black Bag", "Yellow Bin", "Blue Bin"))
+
+ # Now build schedule
+ entries = []
+ for waste in waste_types:
+ for schedule in collection_dates:
+ entries.append(
+ Collection(
+                    date=schedule,
+ t=waste,
+ icon=ICON_MAP.get(waste.upper()),
+ )
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/durham_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/durham_gov_uk.py
new file mode 100644
index 00000000..e312c0d0
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/durham_gov_uk.py
@@ -0,0 +1,39 @@
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "Durham County Council"
+DESCRIPTION = "Source for Durham County Council, UK."
+URL = "https://durham.gov.uk"
+TEST_CASES = {
+ "Test_001": {"uprn": "100110414978"},
+ "Test_002": {"uprn": 100110427200},
+}
+ICON_MAP = {"RECYCLE": "mdi:recycle", "GARDEN": "mdi:leaf", "RUBBISH": "mdi:trash-can"}
+
+
+class Source:
+ def __init__(self, uprn: str | int):
+ self._uprn = str(uprn)
+
+ def fetch(self):
+ s = requests.Session()
+ r = s.get(f"https://www.durham.gov.uk/bincollections?uprn={self._uprn}")
+ soup = BeautifulSoup(r.text, "html.parser")
+
+ entries = []
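+        # collection rows are tagged with a CSS class named after the waste stream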
+ for waste in ICON_MAP:
+            w = soup.find_all("tr", {"class": waste.lower()})
+ for item in w:
+ x = item.find_all("td")
+ entries.append(
+ Collection(
+ date=datetime.strptime(x[-1].text, "%d %B %Y").date(),
+ t=x[0].text,
+ icon=ICON_MAP.get(waste),
+ )
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/east_renfrewshire_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/east_renfrewshire_gov_uk.py
new file mode 100644
index 00000000..dc612648
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/east_renfrewshire_gov_uk.py
@@ -0,0 +1,129 @@
+import base64
+import json
+import re
+from urllib.parse import parse_qs, urlparse
+
+import requests
+from bs4 import BeautifulSoup
+from dateutil import parser
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "East Renfrewshire Council"
+DESCRIPTION = "Source for eastrenfrewshire.gov.uk services for East Renfrewshire"
+URL = "https://www.eastrenfrewshire.gov.uk"
+
+TEST_CASES = {
+ "Test_001": {"postcode": "G78 2TJ", "uprn": "131016859"},
+ "Test_002": {"postcode": "g775ar", "uprn": 131019331},
+ "Test_003": {"postcode": "g78 3er", "uprn": "000131020112"},
+}
+
+ICON_MAP = {
+ "Grey": "mdi:trash-can",
+ "Brown": "mdi:leaf",
+ "Green": "mdi:glass-fragile",
+ "Blue": "mdi:note",
+}
+
+
+class Source:
+ def __init__(self, postcode, uprn):
+ self._postcode = postcode
+ self._uprn = str(uprn).zfill(12)
+
+ def fetch(self):
+ session = requests.Session()
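+        # two-step GOSS form flow: post the postcode, then the UPRN; the
+        # resulting page embeds the schedule as base64-encoded JSON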
+ address_page = self.__get_address_page(session, self._postcode)
+ bin_collection_info_page = self.__get_bin_collection_info_page(
+ session, address_page, self._uprn
+ )
+ bin_collection_info = self.__get_bin_collection_info(bin_collection_info_page)
+ return self.__generate_collection_entries(bin_collection_info)
+
+ def __generate_collection_entries(self, bin_collection_info):
+ collection_results = bin_collection_info["residualWasteResponse"]["value"][
+ "collectionResults"
+ ]
+ entries = []
+ for collection in collection_results["binsOrderingArray"]:
+ for collection_date in collection["collectionDates"]:
+ entries.append(
+ Collection(
+ date=parser.parse(collection_date).date(),
+ t=collection["color"],
+ icon=ICON_MAP.get(collection["color"]),
+ )
+ )
+ return entries
+
+ def __get_bin_collection_info(self, bin_collection_info_page):
+ serialized_collection_info_pattern = re.compile(
+ r'var RESIDUALWASTEV2SerializedVariables = "(.*?)";$',
+ re.MULTILINE | re.DOTALL,
+ )
+ soup = BeautifulSoup(bin_collection_info_page, "html.parser")
+ script = soup.find("script", text=serialized_collection_info_pattern)
+ if not script:
+ raise Exception(
+ "no script tag cannot find RESIDUALWASTEV2SerializedVariables"
+ )
+ match = serialized_collection_info_pattern.search(script.text)
+ if not match:
+ raise Exception("no match cannot find RESIDUALWASTEV2SerializedVariables")
+ serialized_collection_info = match.group(1)
+ collection_info = json.loads(base64.b64decode(serialized_collection_info))
+ return collection_info
+
+ def __get_bin_collection_info_page(self, session, address_page, uprn):
+ soup = BeautifulSoup(address_page, "html.parser")
+ form = soup.find(id="RESIDUALWASTEV2_FORM")
+ goss_ids = self.__get_goss_form_ids(form["action"])
+ r = session.post(
+ form["action"],
+ data={
+ "RESIDUALWASTEV2_PAGESESSIONID": goss_ids["page_session_id"],
+ "RESIDUALWASTEV2_SESSIONID": goss_ids["session_id"],
+ "RESIDUALWASTEV2_NONCE": goss_ids["nonce"],
+ "RESIDUALWASTEV2_VARIABLES": "e30=",
+ "RESIDUALWASTEV2_PAGENAME": "PAGE2",
+ "RESIDUALWASTEV2_PAGEINSTANCE": "1",
+ "RESIDUALWASTEV2_PAGE2_FIELD201": "true",
+ "RESIDUALWASTEV2_PAGE2_UPRN": uprn,
+ "RESIDUALWASTEV2_FORMACTION_NEXT": "RESIDUALWASTEV2_PAGE2_FIELD206",
+ "RESIDUALWASTEV2_PAGE2_FIELD202": "false",
+ "RESIDUALWASTEV2_PAGE2_FIELD203": "false",
+ },
+ )
+ r.raise_for_status()
+ return r.text
+
+ def __get_address_page(self, s, postcode):
+ r = s.get("https://www.eastrenfrewshire.gov.uk/bin-days")
+ r.raise_for_status()
+ soup = BeautifulSoup(r.text, "html.parser")
+ form = soup.find(id="RESIDUALWASTEV2_FORM")
+ goss_ids = self.__get_goss_form_ids(form["action"])
+ r = s.post(
+ form["action"],
+ data={
+ "RESIDUALWASTEV2_PAGESESSIONID": goss_ids["page_session_id"],
+ "RESIDUALWASTEV2_SESSIONID": goss_ids["session_id"],
+ "RESIDUALWASTEV2_NONCE": goss_ids["nonce"],
+ "RESIDUALWASTEV2_VARIABLES": "e30=",
+ "RESIDUALWASTEV2_PAGENAME": "PAGE1",
+ "RESIDUALWASTEV2_PAGEINSTANCE": "0",
+ "RESIDUALWASTEV2_PAGE1_POSTCODE": postcode,
+ "RESIDUALWASTEV2_FORMACTION_NEXT": "RESIDUALWASTEV2_PAGE1_FIELD199",
+ },
+ )
+ r.raise_for_status()
+ return r.text
+
+ def __get_goss_form_ids(self, url):
+ parsed_form_url = urlparse(url)
+ form_url_values = parse_qs(parsed_form_url.query)
+ return {
+ "page_session_id": form_url_values["pageSessionId"][0],
+ "session_id": form_url_values["fsid"][0],
+ "nonce": form_url_values["fsn"][0],
+ }
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/eastriding_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/eastriding_gov_uk.py
index 6fd830a9..6edb299e 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/eastriding_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/eastriding_gov_uk.py
@@ -33,7 +33,7 @@ def fetch(self):
s = requests.Session()
# get api_key and licensee
- r1 = s.get("https://www.eastriding.gov.uk/environment/bins-rubbish-recycling/bins-and-collections/bin-collection-dates/")
+ r1 = s.get("https://www.eastriding.gov.uk/templates/eryc_corptranet/js/eryc-bin-checker.js")
api_key = re.findall(REGEX["API_KEY"], r1.text)[0]
licensee = re.findall(REGEX["LICENSEE"], r1.text)[0]
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/exeter_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/exeter_gov_uk.py
index b24744d2..185f1bf5 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/exeter_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/exeter_gov_uk.py
@@ -1,16 +1,16 @@
-import requests
import json
import re
+from datetime import datetime
+import requests
from bs4 import BeautifulSoup
-from datetime import datetime
from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "Exeter City Council"
DESCRIPTION = "Source for Exeter City services for Exeter City Council, UK."
URL = "https://exeter.gov.uk/"
TEST_CASES = {
- "Test_001": {"uprn": "10013049539"},
+ "Test_001": {"uprn": "100040227486"},
"Test_002": {"uprn": "10013043921"},
"Test_003": {"uprn": 10023120282},
"Test_004": {"uprn": 100040241022},
@@ -19,9 +19,10 @@
"REFUSE": "mdi:trash-can",
"RECYCLING": "mdi:recycle",
"GARDEN WASTE": "mdi:leaf",
- "FOOD WASTE": "mdi:food"
+ "FOOD WASTE": "mdi:food",
}
-REGEX_ORDINALS = r"(st|nd|rd|th) "
+REGEX_ORDINALS = r"(?<=[0-9])(?:st|nd|rd|th)"
+
class Source:
def __init__(self, uprn):
@@ -30,7 +31,9 @@ def __init__(self, uprn):
def fetch(self):
s = requests.Session()
- r = s.get(f"https://exeter.gov.uk/repositories/hidden-pages/address-finder/?qsource=UPRN&qtype=bins&term={self._uprn}")
+ r = s.get(
+ f"https://exeter.gov.uk/repositories/hidden-pages/address-finder/?qsource=UPRN&qtype=bins&term={self._uprn}"
+ )
json_data = json.loads(r.text)[0]["Results"]
soup = BeautifulSoup(json_data, "html.parser")
@@ -41,11 +44,12 @@ def fetch(self):
for (b, d) in zip(bins, dates):
entries.append(
Collection(
- date=datetime.strptime(re.compile(REGEX_ORDINALS).sub("",d.text),"%A, %d%B %Y").date(),
+ date=datetime.strptime(
+ re.compile(REGEX_ORDINALS).sub("", d.text), "%A, %d %B %Y"
+ ).date(),
t=b.text.replace(" collection", ""),
icon=ICON_MAP.get(b.text.replace(" collection", "").upper()),
)
)
-
return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/guildford_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/guildford_gov_uk.py
index b5130849..bebc9ec6 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/guildford_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/guildford_gov_uk.py
@@ -10,7 +10,7 @@
TEST_CASES = {
"GU12": {"uprn": "10007060305"},
"GU1": {"uprn": "100061398158"},
- "GU2": {"uprn": "100061391831"},
+ "GU2": {"uprn": 100061391831},
}
ICON_MAP = {
@@ -25,7 +25,7 @@
class Source:
def __init__(self, uprn):
- self._uprn = uprn
+ self._uprn = str(uprn)
def fetch(self):
# The API uses this framework cookie, which seems to last 2 weeks.
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/gwynedd_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/gwynedd_gov_uk.py
new file mode 100644
index 00000000..cee22eca
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/gwynedd_gov_uk.py
@@ -0,0 +1,56 @@
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup, Tag
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "Gwynedd"
+DESCRIPTION = "Source for Gwynedd."
+URL = "https://www.gwynedd.gov.uk/"
+TEST_CASES = {
+ "200003177805": {"uprn": 200003177805},
+ "200003175227": {"uprn": "200003175227"},
+ "10070340900": {"uprn": 10070340900},
+}
+
+
+ICON_MAP = {
+ "brown": "mdi:leaf",
+ "green": "mdi:trash-can",
+ "blue": "mdi:recycle",
+}
+
+
+API_URL = "https://diogel.gwynedd.llyw.cymru/Daearyddol/en/LleDwinByw/Index/{uprn}"
+
+
+class Source:
+ def __init__(self, uprn: str | int):
+ self._uprn: str | int = uprn
+
+ def fetch(self):
+ r = requests.get(API_URL.format(uprn=self._uprn))
+ r.raise_for_status()
+
+ soup = BeautifulSoup(r.text, "html.parser")
+ collections_headline = soup.find("h6", text="Next collection dates:")
+ if not isinstance(collections_headline, Tag):
+ raise Exception("Could not find collections")
+ collections = collections_headline.find_next("ul").find_all("li")
+
+ entries = []
+
+ for collection in collections:
+ if not isinstance(collection, Tag):
+ continue
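+ # Drop embedded <p> elements so the remaining <li> text is just "<bin type>: <date>"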
+ for p in collection.find_all("p"):
+ p.extract()
+
+ bin_type, date_str = collection.text.strip().split(":")[:2]
+ bin_type, date_str = bin_type.strip(), date_str.strip()
+
+ date = datetime.strptime(date_str, "%A %d/%m/%Y").date()
+ icon = ICON_MAP.get(bin_type.split(" ")[0].lower()) # Collection icon
+ entries.append(Collection(date=date, t=bin_type, icon=icon))
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/hausmuell_info.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/hausmuell_info.py
index 81593e08..7ad08483 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/hausmuell_info.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/hausmuell_info.py
@@ -1,8 +1,9 @@
+from datetime import datetime
+
import requests
+from bs4 import BeautifulSoup
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS
-from bs4 import BeautifulSoup
-from datetime import datetime
TITLE = "hausmüll.info"
DESCRIPTION = "Source for hausmüll.info."
@@ -13,7 +14,7 @@
"ort": "Dietzhausen",
"strasse": "Am Rain",
"hausnummer": 10,
- "subdomain": "ebkds"
+ "subdomain": "ebkds",
},
"Adam-Ries-Straße 5, Erfurt": {
"subdomain": "erfurt",
@@ -28,6 +29,17 @@
"subdomain": "schmalkalden-meiningen",
"ort": "Dillstädt",
},
+ "schmalkalden-meiningen Zella-Mehils Benshausen Albrechter Straße": {
+ "subdomain": "schmalkalden-meiningen",
+ "ort": "Zella-Mehlis",
+ "ortsteil": "Benshausen",
+ "strasse": "Albrechtser Straße",
+ },
+ "schmalkalden-meiningen Breitungen, Bußhof": {
+ "subdomain": "schmalkalden-meiningen",
+ "ort": "Breitungen",
+ "ortsteil": "Bußhof",
+ },
"ew, Döringsdorf, Wanfrieder Str.": {
"subdomain": "ew",
"ort": "Döringsdorf",
@@ -78,82 +90,110 @@
"papier": "mdi:package-variant",
"papier, pappe, karton": "mdi:package-variant",
"papier, pappe & kart.": "mdi:package-variant",
+ "pappe, papier & kart.": "mdi:package-variant",
"altpapier": "mdi:package-variant",
"gelbe tonne": "mdi:recycle",
"gelber sack": "mdi:recycle",
"gelber sack / gelbe tonne": "mdi:recycle",
"leichtverpackungen": "mdi:recycle",
"leichtstoffverpackungen": "mdi:recycle",
- "Grünschnitt": "mdi:tree",
+ "grünschnitt": "mdi:tree",
+ "schadstoffe": "mdi:biohazard",
+ "schadstoffmobil": "mdi:biohazard",
+ "problemmüll": "mdi:biohazard",
}
SUPPORTED_PROVIDERS = [
{
"subdomain": "ebkds",
"title": "Eigenbetrieb Kommunalwirtschaftliche Dienstleistungen Suhl",
- "url": "https://www.ebkds.de/"
+ "url": "https://www.ebkds.de/",
},
{
"subdomain": "erfurt",
"title": "Stadtwerke Erfurt, SWE",
- "url": "https://www.stadtwerke-erfurt.de/"
+ "url": "https://www.stadtwerke-erfurt.de/",
},
{
"subdomain": "schmalkalden-meiningen",
"title": "Kreiswerke Schmalkalden-Meiningen GmbH",
- "url": "https://www.kwsm.de/"
+ "url": "https://www.kwsm.de/",
},
{
"subdomain": "ew",
"title": "Eichsfeldwerke GmbH",
- "url": "https://www.eichsfeldwerke.de/"
+ "url": "https://www.eichsfeldwerke.de/",
},
{
"subdomain": "azv",
"title": "Abfallwirtschaftszweckverband Wartburgkreis (AZV)",
- "url": "https://www.azv-wak-ea.de/"
+ "url": "https://www.azv-wak-ea.de/",
},
{
"subdomain": "boerde",
"title": "Landkreis Börde AöR (KsB)",
- "url": "https://www.ks-boerde.de"
+ "url": "https://www.ks-boerde.de",
},
{
"subdomain": "asc",
"title": "Chemnitz (ASR)",
- "url": "https://www.asr-chemnitz.de/"
- },
- {
- "subdomain": "wesel",
- "title": "ASG Wesel",
- "url": "https://www.asg-wesel.de/"
+ "url": "https://www.asr-chemnitz.de/",
},
+ {"subdomain": "wesel", "title": "ASG Wesel", "url": "https://www.asg-wesel.de/"},
]
-EXTRA_INFO = [{"title": p["title"], "url": p["url"], "country": "de"}
- for p in SUPPORTED_PROVIDERS]
+EXTRA_INFO = [
+ {"title": p["title"], "url": p["url"], "country": "de"} for p in SUPPORTED_PROVIDERS
+]
API_URL = "https://{}.hausmuell.info/"
def replace_special_chars(s: str) -> str:
- return s.replace("ß", "s").replace("ä", "a").replace("ö", "o").replace("ü", "u").replace("Ä", "A").replace("Ö", "O").replace("Ü", "U")
-
-
-def replace_special_chars_args(d: dict) -> dict:
+ return (
+ s.replace("ß", "s")
+ .replace("ä", "a")
+ .replace("ö", "o")
+ .replace("ü", "u")
+ .replace("Ä", "A")
+ .replace("Ö", "O")
+ .replace("Ü", "U")
+ )
+
+
+def replace_special_chars_with_underscore(s: str) -> str:
+ return (
+ s.replace("ß", "_")
+ .replace("ä", "_")
+ .replace("ö", "_")
+ .replace("ü", "_")
+ .replace("Ä", "_")
+ .replace("Ö", "_")
+ .replace("Ü", "_")
+ )
+
+
+def replace_special_chars_args(d: dict, replace_func=replace_special_chars) -> dict:
to_return = {}
for k, v in d.items():
if isinstance(v, list):
- to_return[k] = [replace_special_chars(i) for i in v]
+ to_return[k] = [replace_func(i) for i in v]
else:
- to_return[k] = replace_special_chars(v)
+ to_return[k] = replace_func(v)
return to_return
class Source:
- def __init__(self, subdomain: str, ort: str | None = None, ortsteil: str | None = None, strasse: str | None = None, hausnummer: str | int | None = None):
+ def __init__(
+ self,
+ subdomain: str,
+ ort: str | None = None,
+ ortsteil: str | None = None,
+ strasse: str | None = None,
+ hausnummer: str | int | None = None,
+ ):
self._ort: str = ort if ort else ""
self._strasse: str = strasse if strasse else ""
self._hausnummer: str = str(hausnummer) if hausnummer else ""
@@ -180,6 +220,44 @@ def _get_elemts(self, response_text: str) -> list[str]:
to_return = [i.strip() for i in ids if i.strip().isdigit()]
return to_return
+ def request_all(
+ self, url: str, data: dict, params: dict, error_message: str
+ ) -> requests.Response:
+ """Request url with data if not successful retry with different kinds of replaced special chars.
+
+ Args:
+ url (str): url to request
+ data (dict): data to send
+ params (dict): params to send
+ error_message (str): error message to raise if all requests fail
+
+ Raises:
+ Exception: if all requests fail
+
+ Returns:
+ requests.Response: the successful response
+ """
+ r = requests.post(url, data=data, params=params)
+ if "kein Eintrag gefunden" not in r.text and not "
" == r.text.strip():
+ return r
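+ # Retry with umlauts and ß transliterated to plain ASCII (ä -> a, ß -> s, ...)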
+ r = requests.post(
+ url,
+ data=replace_special_chars_args(data),
+ params=replace_special_chars_args(params),
+ )
+ if "kein Eintrag gefunden" not in r.text and not "
" == r.text.strip():
+ return r
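+ # Last resort: retry with the special characters in the form data replaced by underscores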
+ r = requests.post(
+ url,
+ data=replace_special_chars_args(
+ data, replace_special_chars_with_underscore
+ ),
+ params=replace_special_chars_args(params),
+ )
+ if "kein Eintrag gefunden" not in r.text and not "
" == r.text.strip():
+ return r
+ raise Exception(error_message)
+
def fetch(self):
args = {
"hidden_kalenderart": "privat",
@@ -207,16 +285,12 @@ def fetch(self):
if self._ort:
args["input"] = self._ort
- r = requests.post(self._search_url+"search_orte.php", data=args,
- params={"input": self._ort, "ort_id": "0"})
-
- if "kein Eintrag gefunden" in r.text or "
" == r.text.strip() or "
" == r.text.strip():
- r = requests.post(self._search_url+"search_orte.php", data=replace_special_chars_args(args),
- params={"input": replace_special_chars(self._ort), "ort_id": "0"})
-
- if "kein Eintrag gefunden" in r.text or "
" == r.text.strip():
- raise ValueError("Ort provided but not found in search results.")
-
+ r = self.request_all(
+ self._search_url + "search_orte.php",
+ args,
+ {"input": self._ort, "ort_id": "0"},
+ "Ort provided but not found in search results.",
+ )
ids = self._get_elemts(r.text)
args["hidden_id_ort"] = args["ort_id"] = ids[0]
@@ -224,29 +298,28 @@ def fetch(self):
args["hidden_id_egebiet"] = ids[1]
if self._ortsteil:
- r = requests.post(self._search_url+"search_ortsteile.php", data=args,
- params={"input": self._ortsteil, "ort_id": args["ort_id"]})
- if "kein Eintrag gefunden" in r.text or "
" == r.text.strip():
- r = requests.post(self._search_url+"search_ortsteile.php", data=replace_special_chars_args(args),
- params={"input": replace_special_chars(self._ortsteil), "ort_id": args["ort_id"]})
- if "kein Eintrag gefunden" in r.text or "
" == r.text.strip():
- raise ValueError("Ortsteil provided but not found in search results.")
-
+ r = self.request_all(
+ self._search_url + "search_ortsteile.php",
+ args,
+ {"input": self._ortsteil, "ort_id": args["ort_id"]},
+ "Ortsteil provided but not found in search results.",
+ )
+
ids = self._get_elemts(r.text)
- args["hidden_id_ortsteil"] = args["hidden_id_ort"] if ids[0] == "0" else ids[0]
+
+ args["ort_id"] = args["hidden_id_ortsteil"] = (
+ args["hidden_id_ort"] if ids[0] == "0" else ids[0]
+ )
if len(ids) > 1:
args["hidden_id_egebiet"] = ids[1]
if self._strasse:
- r = requests.post(self._search_url + "search_strassen.php", data=args, params={
- "input": self._strasse, "str_id": "0", "ort_id": args["ort_id"]})
- if "kein Eintrag gefunden" in r.text or "
" == r.text.strip():
- r = requests.post(self._search_url + "search_strassen.php", data=replace_special_chars_args(args), params={
- "input": replace_special_chars(self._strasse), "str_id": "0", "ort_id": args["hidden_id_ort"]})
-
- if "kein Eintrag gefunden" in r.text or "
" == r.text.strip():
- raise ValueError("strasse provided but not found in search results.")
-
+ r = self.request_all(
+ self._search_url + "search_strassen.php",
+ args,
+ {"input": self._strasse, "str_id": "0", "ort_id": args["ort_id"]},
+ "Strasse provided but not found in search results.",
+ )
ids = self._get_elemts(r.text)
args["hidden_id_str"] = args["str_id"] = ids[0]
if len(ids) > 1:
@@ -254,38 +327,38 @@ def fetch(self):
if self._hausnummer:
args["input"] = self._hausnummer
- r = requests.post(self._search_url+"search_hnr.php", data=args, params={
- "input": self._hausnummer, "hnr_id": "0", "str_id": args["str_id"]})
-
- if "kein Eintrag gefunden" in r.text or "
" == r.text.strip():
- r = requests.post(self._search_url+"search_hnr.php",
- data=replace_special_chars_args(args))
-
- if "kein Eintrag gefunden" in r.text or "
" == r.text.strip():
- raise ValueError("hausnummer provided but not found in search results.")
+ r = self.request_all(
+ self._search_url + "search_hnr.php",
+ args,
+ {"input": self._hausnummer, "hnr_id": "0", "str_id": args["str_id"]},
+ "hausnummer provided but not found in search results.",
+ )
ids = self._get_elemts(r.text)
args["hidden_id_hnr"] = args["hnr_id"] = ids[0]
if len(ids) > 1:
args["hidden_id_egebiet"] = ids[1]
-
- r = requests.post(self._search_url+"check_zusatz.php", data=args)
+ r = requests.post(self._search_url + "check_zusatz.php", data=args)
id_string = BeautifulSoup(r.text, "html.parser").find("span")
- args["hidden_id_zusatz"] = args["hidden_id_hnr"] if id_string == None else id_string.text.strip()
+ args["hidden_id_zusatz"] = (
+ args["hidden_id_hnr"] if id_string is None else id_string.text.strip()
+ )
r = requests.post(self._ics_url, data=args)
r.raise_for_status()
if "Bitte geben Sie Ihre Daten korrekt an." in r.text:
- raise ValueError(
- "No Valid response, please check your configuration.")
+ raise ValueError("No Valid response, please check your configuration.")
r.encoding = "utf-8"
dates = self._ics.convert(r.text)
entries = []
for d in dates:
- bin_type = d[1].replace("Entsorgung:","").strip()
- entries.append(Collection(d[0], bin_type, ICON_MAP.get(bin_type.lower())))
+ bin_type = d[1].replace("ü", "ü").replace("Entsorgung:", "").strip()
+ icon = ICON_MAP.get(
+ bin_type.lower().replace("verschobene abholung:", "").strip()
+ )
+ entries.append(Collection(d[0], bin_type, icon))
return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/hcc_govt_nz.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/hcc_govt_nz.py
new file mode 100644
index 00000000..e80652c3
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/hcc_govt_nz.py
@@ -0,0 +1,47 @@
+import datetime
+
+import requests
+from waste_collection_schedule import Collection
+
+TITLE = "Hamilton City Council"
+DESCRIPTION = "Source script for Hamilton City Council"
+URL = "https://www.fightthelandfill.co.nz/"
+TEST_CASES = {
+ "1 Hamilton Parade": {"address": "1 Hamilton Parade"},
+ "221b fox Street": {"address": "221b fox Street"},
+}
+
+API_URL = "https://api.hcc.govt.nz/FightTheLandFill/get_Collection_Dates"
+ICON_MAP = {"Rubbish": "mdi:trash-can", "Recycling": "mdi:recycle"}
+
+
+class Source:
+ def __init__(self, address):
+ self.address = address
+
+ def fetch(self):
+ r = requests.get(
+ API_URL,
+ params={"address_string": self.address},
+ )
+ r.raise_for_status()
+ data = r.json()
+
+ # Extract entries from RedBin/YellowBin fields
+ entries = [
+ Collection(
+ date=datetime.datetime.strptime(
+ json[0]["RedBin"], "%Y-%m-%dT%H:%M:%S"
+ ).date(),
+ t="Rubbish",
+ icon=ICON_MAP.get("Rubbish"),
+ ),
+ Collection(
+ date=datetime.datetime.strptime(
+ json[0]["YellowBin"], "%Y-%m-%dT%H:%M:%S"
+ ).date(),
+ t="Recycling",
+ icon=ICON_MAP.get("Recycling"),
+ ),
+ ]
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/innerwest_nsw_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/innerwest_nsw_gov_au.py
index cf3888ad..feccce39 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/innerwest_nsw_gov_au.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/innerwest_nsw_gov_au.py
@@ -8,14 +8,31 @@
DESCRIPTION = "Source for Inner West Council (NSW) rubbish collection."
URL = "https://www.innerwest.nsw.gov.au"
TEST_CASES = {
- "Random address": {
+ "Random Marrickville address": {
"suburb": "Tempe",
"street_name": "Princes Highway",
"street_number": "810",
- }
+ },
+ "Random Leichhardt address": {
+ "suburb": "Rozelle",
+ "street_name": "Darling Street",
+ "street_number": "599",
+ },
+ "Random Ashfield address": {
+ "suburb": "Summer Hill",
+ "street_name": "Lackey Street",
+ "street_number": "29",
+ },
}
HEADERS = {"user-agent": "Mozilla/5.0"}
+# Inner West council merged 3 existing councils, but still hasn't merged their
+# data so details need to be found from one of three different databases.
+APIS = [
+ "https://marrickville.waste-info.com.au/api/v1",
+ "https://leichhardt.waste-info.com.au/api/v1",
+ "https://ashfield.waste-info.com.au/api/v1",
+]
class Source:
@@ -31,17 +48,18 @@ def fetch(self):
property_id = 0
today = date.today()
nextmonth = today + timedelta(30)
-
- # Retrieve suburbs
- r = requests.get(
- "https://marrickville.waste-info.com.au/api/v1/localities.json", headers=HEADERS
- )
- data = json.loads(r.text)
-
- # Find the ID for our suburb
- for item in data["localities"]:
- if item["name"] == self.suburb:
- suburb_id = item["id"]
+ council_api = ""
+
+ # Retrieve suburbs and council API
+ for api in APIS:
+ r = requests.get(f"{api}/localities.json", headers=HEADERS)
+ data = json.loads(r.text)
+ for item in data["localities"]:
+ if item["name"] == self.suburb:
+ council_api = api
+ suburb_id = item["id"]
+ break
+ if council_api:
break
if suburb_id == 0:
@@ -49,7 +67,7 @@ def fetch(self):
# Retrieve the streets in our suburb
r = requests.get(
- f"https://marrickville.waste-info.com.au/api/v1/streets.json?locality={suburb_id}",
+ f"{council_api}/streets.json?locality={suburb_id}",
headers=HEADERS,
)
data = json.loads(r.text)
@@ -65,7 +83,7 @@ def fetch(self):
# Retrieve the properties in our street
r = requests.get(
- f"https://marrickville.waste-info.com.au/api/v1/properties.json?street={street_id}",
+ f"{council_api}/properties.json?street={street_id}",
headers=HEADERS,
)
data = json.loads(r.text)
@@ -81,7 +99,7 @@ def fetch(self):
# Retrieve the upcoming collections for our property
r = requests.get(
- f"https://marrickville.waste-info.com.au/api/v1/properties/{property_id}.json?start={today}&end={nextmonth}",
+ f"{council_api}/properties/{property_id}.json?start={today}&end={nextmonth}",
headers=HEADERS,
)
@@ -94,7 +112,7 @@ def fetch(self):
collection_date = date.fromisoformat(item["start"])
if (collection_date - today).days >= 0:
# Only consider recycle and organic events
- if item["event_type"] in ["recycle","organic"]:
+ if item["event_type"] in ["recycle", "organic"]:
# Every collection day includes rubbish
entries.append(
Collection(
@@ -104,7 +122,9 @@ def fetch(self):
if item["event_type"] == "recycle":
entries.append(
Collection(
- date=collection_date, t="Recycling", icon="mdi:recycle"
+ date=collection_date,
+ t="Recycling",
+ icon="mdi:recycle",
)
)
if item["event_type"] == "organic":
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/iris_salten_no.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/iris_salten_no.py
new file mode 100644
index 00000000..6fe7415b
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/iris_salten_no.py
@@ -0,0 +1,77 @@
+import datetime
+
+import requests
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "IRiS"
+DESCRIPTION = "Source for IRiS."
+URL = "https://www.iris-salten.no"
+TEST_CASES = {
+ "Alsosgården 11, bodø": {"address": "Alsosgården 11", "kommune": "bodø"},
+ "Kalnesveien 3, Fauske kommune": {
+ "address": "kAlnesveien 3",
+ "kommune": "fAuske kommune",
+ },
+}
+
+
+ICON_MAP = {
+ "generalwaste": "mdi:trash-can",
+ "glass_metal": "mdi:bottle-soda",
+ "food": "mdi:food-apple",
+ "paper": "mdi:package-variant",
+ "plastic": "mdi:recycle",
+}
+
+
+API_URL = "https://www.iris-salten.no/tommekalender/"
+
+
+class Source:
+ def __init__(self, address: str, kommune: str):
+ self._address: str = address.lower().strip()
+ self._kommune: str = kommune.lower().strip()
+
+ def fetch(self):
+ s = requests.Session()
+ r = s.get(
+ "https://www.iris-salten.no/xmlhttprequest.php",
+ params={"service": "irisapi.realestates", "address": self._address},
+ )
+ r.raise_for_status()
+ responses = r.json()
+ if len(responses) == 0:
+ raise ValueError("No address found")
+ # Default to the first hit, but prefer an entry whose municipality matches (case-insensitive)
+ response = responses[0]
+ for candidate in responses:
+ if self._kommune in candidate["kommune"].lower() or candidate["kommune"].lower() in self._kommune:
+ response = candidate
+ break
+
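+ # Bind the chosen estate to the session, then fetch its collection days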
+ r = s.get(
+ "https://www.iris-salten.no/xmlhttprequest.php",
+ params={
+ "service": "irisapi.setestate",
+ "estateid": response["id"],
+ "estatename": response["adresse"],
+ "estatemunicipality": response["kommune"],
+ },
+ )
+ r.raise_for_status()
+ r = s.get(
+ "https://www.iris-salten.no/xmlhttprequest.php?service=irisapi.estateempty"
+ )
+ r.raise_for_status()
+
+ entries = []
+ for d in r.json()["days"]:
+ date = datetime.datetime.strptime(d["date"], "%Y-%m-%d").date()
+ for event in d["events"]:
+ icon = ICON_MAP.get(event["fractionIcon"]) # Collection icon
+ waste_type = event["fractionName"] # avoid shadowing the builtin "type"
+ entries.append(Collection(date=date, t=waste_type, icon=icon))
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/iweb_itouchvision_com.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/iweb_itouchvision_com.py
new file mode 100644
index 00000000..4c44f6b0
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/iweb_itouchvision_com.py
@@ -0,0 +1,243 @@
+# Credit where it's due:
+# This is predominantly a refactoring of the Somerset Council script from the UKBinCollectionData repo
+# https://github.com/robbrad/UKBinCollectionData
+
+import json
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection
+
+TITLE = "iTouchVision"
+URL = "https://iweb.itouchvision.com/"
+COUNTRY = "uk"
+EXTRA_INFO = [
+ {
+ "title": "Somerset Council",
+ "url": "https://www.somerset.gov.uk/",
+ "country": "uk",
+ },
+ {
+ "title": "South Somerset District Council",
+ "url": "https://www.somerset.gov.uk/",
+ "country": "uk",
+ },
+ {
+ "title": "Mendip District Council",
+ "url": "https://www.somerset.gov.uk/",
+ "country": "uk",
+ },
+ {
+ "title": "Sedgemoor District Council",
+ "url": "https://www.somerset.gov.uk/",
+ "country": "uk",
+ },
+ {
+ "title": "Somerset West & Taunton District Council",
+ "url": "https://www.somerset.gov.uk/",
+ "country": "uk",
+ },
+ {
+ "title": "Somerset County Council",
+ "url": "https://www.somerset.gov.uk/",
+ "country": "uk",
+ },
+ {
+ "title": "Test Valley Borough Council",
+ "url": "https://www.testvalley.gov.uk/",
+ "country": "uk",
+ },
+]
+DESCRIPTION = """Consolidated source for waste collection services from:
+ Somerset Council, comprising four former District Councils (Mendip, Sedgemoor, Somerset West & Taunton, South Somerset) and Somerset County Council
+ Test Valley Borough Council
+ """
+HEADERS = {
+ "user-agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36",
+}
+URLS = {
+ "TEST_VALLEY": "https://iweb.itouchvision.com/portal/f?p=customer:BIN_DAYS:::NO:RP:UID:13353F039C4B1454827EE05536414091A8C058F4",
+ "SOMERSET": "https://iweb.itouchvision.com/portal/f?p=customer:BIN_DAYS:::NO:RP:UID:625C791B4D9301137723E9095361401AE8C03934",
+ "FLOW.ACCEPT": "https://iweb.itouchvision.com/portal/wwv_flow.accept",
+ "BIN_DAYS": "https://iweb.itouchvision.com/portal/itouchvision/r/customer/bin_days",
+}
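+# Page items in the "*_1" lists are submitted with their values, items in the "*_2" lists become
+# top-level form fields; everything else is sent as a checksum-only item.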
+KEYLISTS = {
+ "POSTCODE_1": [
+ "P153_UPRN",
+ "P153_TEMP",
+ "P153_SYSDATE",
+ "P0_LANGUAGE",
+ "P153_POST_CODE",
+ ],
+ "POSTCODE_2": [
+ "p_flow_id",
+ "p_flow_step_id",
+ "p_instance",
+ "p_page_submission_id",
+ "p_request",
+ "p_reload_on_submit",
+ ],
+ "ADDRESS_1": ["P153_UPRN", "P153_TEMP", "P153_SYSDATE", "P0_LANGUAGE"],
+ "ADDRESS_2": [
+ "p_flow_id",
+ "p_flow_step_id",
+ "p_instance",
+ "p_page_submission_id",
+ "p_request",
+ "p_reload_on_submit",
+ ],
+}
+TEST_CASES = {
+ "Somerset #1": {"postcode": "TA20 2JG", "uprn": "30071283", "council": "SOMERSET"},
+ "Somerset #2": {"postcode": "BA9 9NF", "uprn": "30002380", "council": "SOMERSET"},
+ "Somerset #3": {"postcode": "TA24 7JE", "uprn": 10023837109, "council": "SOMERSET"},
+ "Test Valley #1": {
+ "postcode": "SP10 3JB",
+ "uprn": "100060559598",
+ "council": "TEST_VALLEY",
+ },
+ "Test Valley #2": {
+ "postcode": "SO20 6EJ",
+ "uprn": "100060583697",
+ "council": "TEST_VALLEY",
+ },
+ "Test Valley #3": {
+ "postcode": "SO51 5BE",
+ "uprn": 100060571645,
+ "council": "TEST_VALLEY",
+ },
+}
+ICON_MAP = {
+ "GARDEN": "mdi:leaf",
+ "RECYCLING": "mdi:recycle",
+ "REFUSE": "mdi:trash-can",
+ "HOUSEHOLD WASTE": "mdi:trash-can",
+ "GARDEN WASTE": "mdi:leaf",
+}
+
+
+class Source:
+ def __init__(self, council, postcode, uprn):
+ self._postcode = postcode.upper().strip()
+ self._uprn = str(uprn)
+ self._council = council.upper()
+
+ def get_payloads(self, s):
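+ # Scrape the Oracle APEX form state: named inputs, per-item checksums, the page salt and the protected-items token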
+ p1 = {i["name"]: i.get("value", "") for i in s.select("input[name]")}
+ p2 = {i["data-for"]: i.get("value", "") for i in s.select("input[data-for]")}
+ ps = s.select_one('input[id="pSalt"]').get("value")
+ pp = s.select_one('input[id="pPageItemsProtected"]').get("value")
+ return p1, p2, ps, pp
+
+ def fetch(self):
+ s = requests.Session()
+ s.headers.update(HEADERS)
+
+ # Get postcode search page
+ r0 = s.get(URLS[self._council])
+ # Extract values needed for the postcode search
+ soup = BeautifulSoup(r0.text, "html.parser")
+ payload1, payload2, payload_salt, payload_protected = self.get_payloads(soup)
+ payload1["p_request"] = "SEARCH"
+ payload1["P153_POST_CODE"] = self._postcode
+
+ # Build JSON for postcode search
+ merged_list = {**payload1, **payload2}
+ new_list = []
+ other_list = {}
+ for key in merged_list.keys():
+ temp_list = {}
+ val = merged_list[key]
+ if key in KEYLISTS["POSTCODE_1"]:
+ temp_list = {"n": key, "v": val}
+ new_list.append(temp_list)
+ elif key in KEYLISTS["POSTCODE_2"]:
+ other_list[key] = val
+ else:
+ temp_list = {"n": key, "v": "", "ck": val}
+ new_list.append(temp_list)
+ json_builder = {
+ "pageItems": {
+ "itemsToSubmit": new_list,
+ "protected": payload_protected,
+ "rowVersion": "",
+ "formRegionChecksums": [],
+ },
+ "salt": payload_salt,
+ }
+ json_object = json.dumps(json_builder, separators=(",", ":"))
+ other_list["p_json"] = json_object
+
+ # Update header and submit postcode search
+ s.headers.update(
+ {
+ "referer": URLS[self._council],
+ }
+ )
+ s.post(URLS["FLOW.ACCEPT"], data=other_list)
+
+ # Get address selection page
+ r2 = s.get(URLS["BIN_DAYS"])
+ # Extract values needed for address selection
+ soup = BeautifulSoup(r2.text, "html.parser")
+ payload1, payload2, payload_salt, payload_protected = self.get_payloads(soup)
+ payload1["p_request"] = "SUBMIT"
+ payload1["P153_UPRN"] = self._uprn
+
+ # Build JSON for address selection
+ merged_list = {**payload1, **payload2}
+ new_list = []
+ other_list = {}
+ for key in merged_list.keys():
+ temp_list = {}
+ val = merged_list[key]
+ if key in KEYLISTS["ADDRESS_1"]:
+ temp_list = {"n": key, "v": val}
+ new_list.append(temp_list)
+ elif key in ["P153_ZABY"]:
+ temp_list = {"n": key, "v": "1", "ck": val}
+ new_list.append(temp_list)
+ elif key in ["P153_POST_CODE"]:
+ temp_list = {"n": key, "v": self._postcode, "ck": val}
+ new_list.append(temp_list)
+ elif key in KEYLISTS["ADDRESS_2"]:
+ other_list[key] = val
+ else:
+ temp_list = {"n": key, "v": "", "ck": val}
+ new_list.append(temp_list)
+ json_builder = {
+ "pageItems": {
+ "itemsToSubmit": new_list,
+ "protected": payload_protected,
+ "rowVersion": "",
+ "formRegionChecksums": [],
+ },
+ "salt": payload_salt,
+ }
+ json_object = json.dumps(json_builder, separators=(",", ":"))
+ other_list["p_json"] = json_object
+
+ # Submit address selection
+ s.post(URLS["FLOW.ACCEPT"], data=other_list)
+
+ # Finally, get the collection schedule page
+ r4 = s.get(URLS["BIN_DAYS"])
+ soup = BeautifulSoup(r4.text, "html.parser")
+ entries = []
+ for item in soup.select(".t-MediaList-item"):
+ for value in item.select(".t-MediaList-body"):
+ waste_type = value.select("span")[1].get_text(strip=True).title()
+ waste_date = datetime.strptime(
+ value.select(".t-MediaList-desc")[0].get_text(strip=True),
+ "%A, %d %B, %Y",
+ ).date()
+ entries.append(
+ Collection(
+ date=waste_date,
+ t=waste_type,
+ icon=ICON_MAP.get(waste_type.upper()),
+ )
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/jointwastesolutions_org.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/jointwastesolutions_org.py
new file mode 100644
index 00000000..bf5193eb
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/jointwastesolutions_org.py
@@ -0,0 +1,141 @@
+# Credit where it's due:
+# This is predominantly a refactoring of the Woking Borough Council script from the UKBinCollectionData repo
+# https://github.com/robbrad/UKBinCollectionData
+
+import re
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection
+
+TITLE = "Joint Waste Solutions"
+URL = "https://www.jointwastesolutions.org/"
+COUNTRY = "uk"
+EXTRA_INFO = [
+ {
+ "title": "Woking Borough Council",
+ "url": "https://www.woking.gov.uk",
+ "country": "uk",
+ },
+ {
+ "title": "Surrey Heath Borough Council",
+ "url": "https://www.surreyheath.gov.uk",
+ "country": "uk",
+ },
+]
+DESCRIPTION = "Manages Waste and Recycling services for Elmbridge, Mole Valley, Surrey Heath & Woking"
+TEST_CASES = {
+ "Test Woking #1": {
+ "house": "4",
+ "postcode": "GU21 4PQ",
+ },
+ "Test Woking #2": {
+ "house": 9,
+ "postcode": "GU22 8RW",
+ },
+ "Test Woking #3": {
+ "house": "49",
+ "postcode": "GU22 0AY",
+ },
+ "Test Woking #4": {
+ "house": 5,
+ "postcode": "GU21 4HW",
+ "borough": "woking",
+ },
+ "surrey heath #1": {
+ "house": "1",
+ "postcode": "GU15 1JT",
+ "borough": "surreyheath",
+ },
+}
+
+ICON_MAP = {
+ "RUBBISH": "mdi:trash-can",
+ "RECYCLING": "mdi:recycle",
+ "GARDEN": "mdi:leaf",
+ "BATTERIES-SMALL ELECTRICALS-TEXTILES": "mdi:battery",
+ "FOOD WASTE": "mdi:food",
+}
+REGEX = r"(\d+\/\d+\/\d+\/[\d\w]+)"
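+# Tracking IDs appear to have the form "digits/digits/digits/alphanumeric"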
+
+
+class Source:
+ def __init__(self, house, postcode, borough="woking"):
+ self._house = str(house).upper().strip()
+ self._postcode = postcode.upper().replace(" ", "+").strip()
+ self._borough = borough.lower().strip()
+
+ def fetch(self):
+ s = requests.Session()
+
+ # Load landing page and extract tracking ID needed for subsequent requests
+ r0 = s.get(
+ f"https://asjwsw-wrp{self._borough}municipal-live.whitespacews.com/#!",
+ )
+ trackingID = re.findall(REGEX, r0.text)[0]
+
+ # Load search form
+ s.get(
+ f"https://asjwsw-wrp{self._borough}municipal-live.whitespacews.com/?Track={trackingID}&serviceID=A&seq=1#!",
+ )
+
+ # These headers seem to be required for subsequent queries
+ headers = {
+ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+ "Accept-Language": "en-GB,en;q=0.9",
+ "Cache-Control": "no-cache",
+ "Connection": "keep-alive",
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Origin": f"https://asjwsw-wrp{self._borough}municipal-live.whitespacews.com",
+ "Pragma": "no-cache",
+ "Referer": f"https://asjwsw-wrp{self._borough}municipal-live.whitespacews.com/",
+ "Sec-Fetch-Dest": "document",
+ "Sec-Fetch-Mode": "navigate",
+ "Sec-Fetch-Site": "same-origin",
+ "Sec-Fetch-User": "?1",
+ "Upgrade-Insecure-Requests": "1",
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 OPR/98.0.0.0",
+ "sec-ch-ua": '"Chromium";v="112", "Not_A Brand";v="24", "Opera GX";v="98"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Windows"',
+ }
+ # Supply search parameters
+ payload = {
+ "address_name_number": self._house,
+ "address_street": "",
+ "street_town": "",
+ "address_postcode": self._postcode,
+ }
+
+ # Post address search
+ s.post(
+ f"https://asjwsw-wrp{self._borough}municipal-live.whitespacews.com/mop.php?serviceID=A&Track={trackingID}&seq=2",
+ headers=headers,
+ params=payload,
+ )
+ # Now retrieve schedule
+ r3 = s.get(
+ f"https://asjwsw-wrp{self._borough}municipal-live.whitespacews.com/mop.php?Track={trackingID}&serviceID=A&seq=3&pIndex=1",
+ headers=headers,
+ )
+
+ # Extract dates and waste types
+ soup = BeautifulSoup(r3.text, "html.parser")
+ schedule = soup.findAll(
+ "p", {"class": "colorblack fontfamilyTahoma fontsize12rem"}
+ )
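+ # The matched <p> elements alternate date, type, date, type, ...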
+ waste_types = schedule[1::2]
+ waste_dates = schedule[::2]
+
+ entries = []
+ for waste_date, waste_type in zip(waste_dates, waste_types):
+ entries.append(
+ Collection(
+ date=datetime.strptime(waste_date.text, "%d/%m/%Y").date(),
+ t=waste_type.text,
+ icon=ICON_MAP.get(waste_type.text.upper()),
+ )
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/jumomind_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/jumomind_de.py
index e344908f..4e9f6665 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/jumomind_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/jumomind_de.py
@@ -8,7 +8,7 @@
DESCRIPTION = "Source for Jumomind.de waste collection."
URL = "https://www.jumomind.de"
TEST_CASES = {
- # DEPRICATED
+ # DEPRECATED
"ZAW": {"service_id": "zaw", "city_id": 106, "area_id": 94},
"Bad Homburg, Bahnhofstrasse": {"service_id": "hom", "city_id": 1, "area_id": 411},
"Bad Buchau via MyMuell": {
@@ -16,16 +16,12 @@
"city_id": 3031,
"area_id": 3031,
},
- # END DEPRICATED
- "Darmstaadt ": {
- "service_id": "mymuell",
- "city": "Darmstadt",
- "street": "Achatweg"
- },
+ # END DEPRECATED
+ "Darmstaadt ": {"service_id": "mymuell", "city": "Darmstadt", "street": "Achatweg"},
"zaw Alsbach-Hähnlein Hähnleiner Str.": {
"service_id": "zaw",
"city": "Alsbach-Hähnlein",
- "street": "Hähnleiner Str."
+ "street": "Hähnleiner Str.",
},
"ingolstadt": {
"service_id": "ingol",
@@ -37,6 +33,11 @@
"service_id": "mymuell",
"city": "Kipfenberg OT Arnsberg, Biberg, Dunsdorf, Schelldorf, Schambach, Mühlen im Schambachtal und Schambacher Leite, Järgerweg, Böllermühlstraße, Attenzell, Krut, Böhming, Regelmannsbrunn, Hirnstetten und Pfahldorf",
},
+ "neustadt": {
+ "service_id": "esn",
+ "city": "Neustadt",
+ "street": "Hauberallee (Kernstadt)",
+ },
}
@@ -54,16 +55,10 @@
"url": "https://www.zaw-online.de",
"list": ["Darmstadt-Dieburg (ZAW)"],
},
- "ingol": {
- "list": ["Ingolstadt"],
- "url": "https://www.in-kb.de",
- },
-
"aoe": {
"url": "https://www.lra-aoe.de",
"list": ["Altötting (LK)"],
},
-
"lka": {
"url": "https://mkw-grossefehn.de",
"list": ["Aurich (MKW)"],
@@ -85,7 +80,7 @@
"list": ["Ingolstadt"],
},
"lue": {
- "comment": "Jumomind", #has its own service
+ "comment": "Jumomind", # has its own service
"url": "https://www.luebbecke.de",
"list": ["Lübbecke"],
},
@@ -98,7 +93,7 @@
"list": ["Recklinghausen"],
},
"rhe": {
- "comment": "Jumomind", #has its own service
+ "comment": "Jumomind", # has its own service
"url": "https://www.rh-entsorgung.de/",
"list": ["Rhein-Hunsrück"],
},
@@ -107,11 +102,47 @@
"list": ["Uckermark"],
},
"mymuell": {
- "comment": "MyMuell App",
- "url": "https://www.mymuell.de/",
- "list": ['Aschaffenburg', 'Bad Arolsen', 'Beverungen', 'Darmstadt', 'Esens', 'Flensburg', 'Großkrotzenburg', 'Hainburg', 'Holtgast', 'Kamp-Lintfort', 'Kirchdorf', 'Landkreis Aschaffenburg', 'Landkreis Biberach', 'Landkreis Eichstätt', 'Landkreis Friesland', 'Landkreis Leer', 'Landkreis Mettmann', 'Landkreis Paderborn', 'Landkreis Wittmund', 'Landkreis Wittmund', 'Main-Kinzig-Kreis', 'Mühlheim am Main', 'Nenndorf', 'Neumünster', 'Salzgitter', 'Schmitten im Taunus', 'Schöneck', 'Seligenstadt', 'Ulm', 'Usingen', 'Volkmarsen', 'Vöhringen', 'Wegberg', 'Westerholt', 'Wilhelmshaven']
+ "comment": "MyMuell App",
+ "url": "https://www.mymuell.de/",
+ "list": [
+ "Aschaffenburg",
+ "Bad Arolsen",
+ "Beverungen",
+ "Darmstadt",
+ "Esens",
+ "Flensburg",
+ "Großkrotzenburg",
+ "Hainburg",
+ "Holtgast",
+ "Kamp-Lintfort",
+ "Kirchdorf",
+ "Landkreis Aschaffenburg",
+ "Landkreis Biberach",
+ "Landkreis Eichstätt",
+ "Landkreis Friesland",
+ "Landkreis Leer",
+ "Landkreis Mettmann",
+ "Landkreis Paderborn",
+ "Landkreis Wittmund",
+ "Landkreis Wittmund",
+ "Main-Kinzig-Kreis",
+ "Mühlheim am Main",
+ "Nenndorf",
+ "Neumünster",
+ "Salzgitter",
+ "Schmitten im Taunus",
+ "Schöneck",
+ "Seligenstadt",
+ "Ulm",
+ "Usingen",
+ "Volkmarsen",
+ "Vöhringen",
+ "Wegberg",
+ "Westerholt",
+ "Wilhelmshaven",
+ ],
},
-
+ "esn": {"list": ["Neustadt an der Weinstraße"], "url": "https://www.neustadt.eu/"},
}
@@ -137,13 +168,22 @@ def EXTRA_INFO():
class Source:
- def __init__(self, service_id: str, city: str = None, street: str = None, city_id=None, area_id=None, house_number=None):
+ def __init__(
+ self,
+ service_id: str,
+ city: str = None,
+ street: str = None,
+ city_id=None,
+ area_id=None,
+ house_number=None,
+ ):
self._search_url: str = API_SEARCH_URL.format(provider=service_id)
self._dates_url: str = API_DATES_URL.format(provider=service_id)
- self._city: str = city.lower().strip() if city else None
- self._street: str = street.lower().strip() if street else None
- self._house_number: str = str(
- house_number).lower().strip() if house_number else None
+ self._city: str | None = city.lower().strip() if city else None
+ self._street: str | None = street.lower().strip() if street else None
+ self._house_number: str | None = (
+ str(house_number).lower().strip() if house_number else None
+ )
self._service_id = service_id
self._city_id = city_id if city_id else None
@@ -165,14 +205,18 @@ def fetch(self):
cities = r.json()
- if not city_id is None:
+ if city_id is not None:
if area_id is None:
raise Exception(
- "no area id but needed when city id is given. Remove city id when using city (and street) name")
+ "no area id but needed when city id is given. Remove city id when using city (and street) name"
+ )
else:
has_streets = True
for city in cities:
- if city["name"].lower().strip() == self._city or city["_name"].lower().strip() == self._city:
+ if (
+ city["name"].lower().strip() == self._city
+ or city["_name"].lower().strip() == self._city
+ ):
city_id = city["id"]
area_id = city["area_id"]
has_streets = city["has_streets"]
@@ -182,19 +226,26 @@ def fetch(self):
raise Exception("City not found")
if has_streets:
- r = session.get(self._search_url, params={
- "r": "streets", "city_id": city_id})
+ r = session.get(
+ self._search_url, params={"r": "streets", "city_id": city_id}
+ )
r.raise_for_status()
streets = r.json()
street_found = False
for street in streets:
- if street["name"].lower().strip() == self._street or street["_name"].lower().strip() == self._street:
+ if (
+ street["name"].lower().strip() == self._street
+ or street["_name"].lower().strip() == self._street
+ ):
street_found = True
area_id = street["area_id"]
if "houseNumbers" in street:
for house_number in street["houseNumbers"]:
- if house_number[0].lower().strip() == self._house_number:
+ if (
+ house_number[0].lower().strip()
+ == self._house_number
+ ):
area_id = house_number[1]
break
break
@@ -203,12 +254,15 @@ def fetch(self):
else:
if self._street is not None:
LOGGER.warning(
- "City does not need street name please remove it, continuing anyway")
+ "City does not need street name please remove it, continuing anyway"
+ )
# get names for bins
bin_name_map = {}
- r = session.get(self._search_url, params={
- "r": "trash", "city_id": city_id, "area_id": area_id})
+ r = session.get(
+ self._search_url,
+ params={"r": "trash", "city_id": city_id, "area_id": area_id},
+ )
r.raise_for_status()
for bin_type in r.json():
@@ -216,38 +270,35 @@ def fetch(self):
if not bin_type["_name"] in bin_name_map:
bin_name_map[bin_type["_name"]] = bin_type["title"]
- r = session.get(self._dates_url, params={
- "idx": "termins", "city_id": city_id, "area_id": area_id, "ws": 3})
+ r = session.get(
+ self._dates_url,
+ params={"idx": "termins", "city_id": city_id, "area_id": area_id, "ws": 3},
+ )
r.raise_for_status()
entries = []
for event in r.json()[0]["_data"]:
bin_type = bin_name_map[event["cal_garbage_type"]]
- date = datetime.datetime.strptime(
- event["cal_date"], "%Y-%m-%d").date()
+ date = datetime.datetime.strptime(event["cal_date"], "%Y-%m-%d").date()
icon = ICON_MAP.get(bin_type.split(" ")[0]) # Collection icon
entries.append(Collection(date=date, t=bin_type, icon=icon))
return entries
-
-
def print_md_table():
table = "|service_id|cities|\n|---|---|\n"
-
+
for service, data in SERVICE_MAP.items():
-
+
args = {"r": "cities"}
- r = requests.get(
- f"https://{service}.jumomind.com/mmapp/api.php", params=args
- )
+ r = requests.get(f"https://{service}.jumomind.com/mmapp/api.php", params=args)
r.raise_for_status()
table += f"|{service}|"
-
+
for city in r.json():
table += f"`{city['name']}`,"
-
+
table += "|\n"
print(table)
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/kwu_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/kwu_de.py
index 4e99c4c4..ddb8da32 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/kwu_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/kwu_de.py
@@ -43,7 +43,7 @@ def fetch(self):
r = requests.get(
"https://kalender.kwu-entsorgung.de",
headers=HEADERS,
- verify=False
+ verify=True
)
parsed_html = BeautifulSoup(r.text, "html.parser")
@@ -58,7 +58,7 @@ def fetch(self):
"https://kalender.kwu-entsorgung.de/kal_str2ort.php",
params={"ort": OrtValue},
headers=HEADERS,
- verify=False
+ verify=True
)
parsed_html = BeautifulSoup(r.text, "html.parser")
@@ -73,7 +73,7 @@ def fetch(self):
"https://kalender.kwu-entsorgung.de/kal_str2ort.php",
params={"ort": OrtValue, "strasse": StrasseValue},
headers=HEADERS,
- verify=False
+ verify=True
)
parsed_html = BeautifulSoup(r.text, "html.parser")
@@ -94,7 +94,7 @@ def fetch(self):
"jahr": date.today().year
},
headers=HEADERS,
- verify=False
+ verify=True
)
parsed_html = BeautifulSoup(r.text, "html.parser")
@@ -111,7 +111,7 @@ def fetch(self):
ics_url = ics_url.replace("http://kalender.kwu.lokal", "https://kalender.kwu-entsorgung.de")
# get ics file
- r = session.get(ics_url, headers=HEADERS, verify=False)
+ r = session.get(ics_url, headers=HEADERS, verify=True)
r.raise_for_status()
# parse ics file
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/lakemac_nsw_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/lakemac_nsw_gov_au.py
index 45e441cd..b7ee05de 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/lakemac_nsw_gov_au.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/lakemac_nsw_gov_au.py
@@ -58,7 +58,10 @@ def fetch(self):
waste_date = []
for tag in soup.find_all("div", {"class": "next-service"}):
- date_object = datetime.strptime(tag.text.strip(), "%a %d/%m/%Y").date()
+ try:
+ date_object = datetime.strptime(tag.text.strip(), "%a %d/%m/%Y").date()
+ except ValueError:
+ # skip tiles whose text is not a parsable date
+ continue
waste_date.append(date_object)
waste = list(zip(waste_type, waste_date))
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/landkreis_kusel_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/landkreis_kusel_de.py
new file mode 100644
index 00000000..2f50fa62
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/landkreis_kusel_de.py
@@ -0,0 +1,86 @@
+import requests
+from bs4 import BeautifulSoup, NavigableString
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
+
+TITLE = "Landkreis Kusel"
+DESCRIPTION = "Source for Landkreis Kusel."
+URL = "https://www.landkreis-kusel.de/"
+TEST_CASES = {
+ "Adenbach": {"ortsgemeinde": "Adenbach"},
+ "St. Julian - Eschenau": {"ortsgemeinde": "St. Julian - Eschenau"},
+ "rutsweiler glan (wrong spelling)": {"ortsgemeinde": "rutsweiler glan"},
+}
+
+
+ICON_MAP = {
+ "restmüll": "mdi:trash-can",
+ "glasabfuhr": "mdi:bottle-soda",
+ "bioabfall": "mdi:leaf",
+ "Paper": "mdi:package-variant",
+ "wertstoffsäcke": "mdi:recycle",
+ "umweltmobil": "mdi:dump-truck",
+}
+
+
+API_URL = "https://abfallwirtschaft.landkreis-kusel.de"
+
+
+def make_comparable(ortsgemeinde: str) -> str:
+ return (
+ ortsgemeinde.lower()
+ .replace("-", "")
+ .replace(".", "")
+ .replace("/", "")
+ .replace(" ", "")
+ )
+
+
+class Source:
+ def __init__(self, ortsgemeinde: str):
+ self._ortsgemeinde: str = make_comparable(ortsgemeinde)
+ self._ics = ICS()
+
+ def fetch(self):
+ s = requests.Session()
+ # load the search page that lists the pickup areas
+ r = s.get(API_URL)
+ r.raise_for_status()
+
+ soup = BeautifulSoup(r.text, "html.parser")
+ select = soup.find("select", {"id": "search_ak_pickup_akPickup"})
+
+ if not select or isinstance(select, NavigableString):
+ raise Exception("Invalid response from API")
+
+ pickup_id = None
+ for option in select.find_all("option"):
+ if make_comparable(option.text) == self._ortsgemeinde:
+ pickup_id = option["value"]
+ break
+
+ if not pickup_id:
+ raise Exception(
+ f"could not find matching 'Ortsgemeinde' please check your spelling at {API_URL}"
+ )
+
+ args = {
+ "search_ak_pickup[akPickup]": pickup_id,
+ "search_ak_pickup[wasteType]": "0",
+ "search_ak_pickup[startDate]": "",
+ "search_ak_pickup[endDate]": "",
+ "search_ak_pickup[search]": "",
+ }
+
+ r = s.post(API_URL, data=args)
+ r.raise_for_status()
+
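+ # The POST stored the selection in the session, so the ICS export now matches it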
+ r = s.get(f"{API_URL}/ical")
+ r.raise_for_status()
+
+ dates = self._ics.convert(r.text)
+ entries = []
+ for d in dates:
+ entries.append(Collection(d[0], d[1], ICON_MAP.get(d[1].lower())))
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/maroondah_vic_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/maroondah_vic_gov_au.py
index fdf21294..edebfc7a 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/maroondah_vic_gov_au.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/maroondah_vic_gov_au.py
@@ -1,23 +1,34 @@
-import datetime
-from waste_collection_schedule import Collection
-
-import requests
import json
from datetime import datetime
+import requests
+from waste_collection_schedule import Collection
+
TITLE = "Maroondah City Council"
DESCRIPTION = "Source for Maroondah City Council. Finds both green waste and general recycling dates."
URL = "https://www.maroondah.vic.gov.au/"
TEST_CASES = {
- "Monday - Area A": {"address": "1 Abbey Court, RINGWOOD 3134"}, # Monday - Area A
- "Monday - Area B": {"address": "1 Angelica Crescent, CROYDON HILLS 3136"}, # Monday - Area B
- "Tuesday - Area B": {"address": "6 Como Close, CROYDON 3136"}, # Tuesday - Area B
- "Wednesday - Area A": {"address": "113 Dublin Road, RINGWOOD EAST 3135"}, # Wednesday - Area A
- "Wednesday - Area B": {"address": "282 Maroondah Highway, RINGWOOD 3134"}, # Wednesday - Area B
- "Thursday - Area A": {"address": "4 Albury Court, CROYDON NORTH 3136"}, # Thursday - Area A
- "Thursday - Area B": {"address": "54 Lincoln Road, CROYDON 3136"}, # Thursday - Area B
- "Friday - Area A": {"address": "6 Lionel Crescent, CROYDON 3136"}, # Friday - Area A
- "Friday - Area B": {"address": "61 Timms Avenue, KILSYTH 3137"}, # Friday - Area B
+ "Monday - Area A": {"address": "1 Abbey Court, RINGWOOD 3134"}, # Monday - Area A
+ "Monday - Area B": {
+ "address": "1 Angelica Crescent, CROYDON HILLS 3136"
+ }, # Monday - Area B
+ "Tuesday - Area B": {"address": "6 Como Close, CROYDON 3136"}, # Tuesday - Area B
+ "Wednesday - Area A": {
+ "address": "113 Dublin Road, RINGWOOD EAST 3135"
+ }, # Wednesday - Area A
+ "Wednesday - Area B": {
+ "address": "282 Maroondah Highway, RINGWOOD 3134"
+ }, # Wednesday - Area B
+ "Thursday - Area A": {
+ "address": "4 Albury Court, CROYDON NORTH 3136"
+ }, # Thursday - Area A
+ "Thursday - Area B": {
+ "address": "54 Lincoln Road, CROYDON 3136"
+ }, # Thursday - Area B
+ "Friday - Area A": {
+ "address": "6 Lionel Crescent, CROYDON 3136"
+ }, # Friday - Area A
+ "Friday - Area B": {"address": "61 Timms Avenue, KILSYTH 3137"}, # Friday - Area B
}
@@ -28,112 +39,97 @@ def __init__(self, address):
def fetch(self):
entries = []
- #initiate a session
- url = "https://enterprise.mapimage.net/IntraMaps99/ApplicationEngine/Projects/"
+ # initiate a session
+ url = "https://maroondah.spatial.t1cloud.com/spatial/IntraMaps/ApplicationEngine/Projects/"
- payload={}
+ payload = {}
params = {
"configId": "5bb5b19d-9071-475e-8139-c1402a12a785",
"appType": "MapBuilder",
"project": "e904c13a-b8da-41eb-b08f-20abc430a72a",
- "datasetCode": ""
+ "datasetCode": "",
}
headers = {
- 'Content-Type': 'application/json',
- 'X-Requested-With': 'XMLHttpRequest'
+ "Content-Type": "application/json",
+ "X-Requested-With": "XMLHttpRequest",
}
- response = requests.request("POST", url, headers=headers, data=payload, params=params)
- sessionid = response.headers['X-IntraMaps-Session']
-
-
-
- #Load the Map Project (further requests don't appear to work if this request is not made)
- url = "https://enterprise.mapimage.net/IntraMaps99/ApplicationEngine/Modules/"
+ response = requests.request(
+ "POST", url, headers=headers, data=payload, params=params
+ )
+ sessionid = response.headers["X-IntraMaps-Session"]
- payload = json.dumps({
- "module": "d41bec46-67ad-4f32-bcde-cebb62dce275"
- })
+ # Load the Map Project (further requests don't appear to work if this request is not made)
+ url = "https://maroondah.spatial.t1cloud.com/spatial/IntraMaps/ApplicationEngine/Modules/"
- params = {
- "IntraMapsSession": sessionid
- }
-
- response = requests.request("POST", url, headers=headers, data=payload, params=params)
+ payload = json.dumps({"module": "d41bec46-67ad-4f32-bcde-cebb62dce275"})
+ params = {"IntraMapsSession": sessionid}
+ response = requests.request(
+ "POST", url, headers=headers, data=payload, params=params
+ )
- #search for the address
- url = "https://enterprise.mapimage.net/IntraMaps99/ApplicationEngine/Search/"
+ # search for the address
+ url = "https://maroondah.spatial.t1cloud.com/spatial/IntraMaps/ApplicationEngine/Search/"
- payload = json.dumps({
- "fields": [
- self._address
- ]
- })
+ payload = json.dumps({"fields": [self._address]})
params = {
"infoPanelWidth": "0",
"mode": "Refresh",
"form": "1a33b2ba-5075-4224-9784-47a1f1478c0a",
"resubmit": "false",
- "IntraMapsSession": sessionid
+ "IntraMapsSession": sessionid,
}
- response = requests.request("POST", url, headers=headers, data=payload, params=params)
- #this request may return multiple addresses. Use the first one.
+ response = requests.request(
+ "POST", url, headers=headers, data=payload, params=params
+ )
+ # this request may return multiple addresses. Use the first one.
address_map_key = response.json()
- address_map_key = address_map_key['fullText'][0]['mapKey']
-
-
-
- #Lookup the specific property data
- url = "https://enterprise.mapimage.net/IntraMaps99/ApplicationEngine/Search/Refine/Set"
-
- payload = json.dumps({
- "selectionLayer": "4c3fc44c-4cd2-40ca-8e4d-da2b8765ed68",
- "mapKey": address_map_key,
- "mode": "Refresh",
- "dbKey": address_map_key,
- "zoomType": "current"
- })
+ address_map_key = address_map_key["fullText"][0]["mapKey"]
+
+ # Lookup the specific property data
+ url = "https://maroondah.spatial.t1cloud.com/spatial/IntraMaps/ApplicationEngine/Search/Refine/Set"
+
+ payload = json.dumps(
+ {
+ "selectionLayer": "4c3fc44c-4cd2-40ca-8e4d-da2b8765ed68",
+ "mapKey": address_map_key,
+ "mode": "Refresh",
+ "dbKey": address_map_key,
+ "zoomType": "current",
+ }
+ )
- params = {
- "IntraMapsSession": sessionid
- }
+ params = {"IntraMapsSession": sessionid}
- response = requests.request("POST", url, headers=headers, data=payload, params=params)
+ response = requests.request(
+ "POST", url, headers=headers, data=payload, params=params
+ )
response = response.json()
# Rubbish (green lid) - Happens on each recyclables and garden organics
# Recyclables (blue lid)
- recyclables_date_text = response['infoPanels']['info1']['feature']['fields'][2]['value']['value']
- recyclables_date = datetime.strptime(recyclables_date_text,"%A, %d %b %Y").date()
- entries.append(
- Collection(
- recyclables_date,"Recyclables","mdi:recycle"
- )
- )
- entries.append(
- Collection(
- recyclables_date,"Garbage"
- )
- )
+ recyclables_date_text = response["infoPanels"]["info1"]["feature"]["fields"][2][
+ "value"
+ ]["value"]
+ recyclables_date = datetime.strptime(
+ recyclables_date_text, "%A, %d %b %Y"
+ ).date()
+ entries.append(Collection(recyclables_date, "Recyclables", "mdi:recycle"))
+ entries.append(Collection(recyclables_date, "Garbage"))
# Garden Organics (maroon lid)
- garden_organics_date_text = response['infoPanels']['info1']['feature']['fields'][3]['value']['value']
- garden_organics_date = datetime.strptime(garden_organics_date_text,"%A, %d %b %Y").date()
- entries.append(
- Collection(
- garden_organics_date,"Garden Organics","mdi:leaf"
- )
- )
- entries.append(
- Collection(
- garden_organics_date,"Garbage"
- )
- )
-
-
- return entries
\ No newline at end of file
+ garden_organics_date_text = response["infoPanels"]["info1"]["feature"][
+ "fields"
+ ][3]["value"]["value"]
+ garden_organics_date = datetime.strptime(
+ garden_organics_date_text, "%A, %d %b %Y"
+ ).date()
+ entries.append(Collection(garden_organics_date, "Garden Organics", "mdi:leaf"))
+ entries.append(Collection(garden_organics_date, "Garbage"))
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/midsussex_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/midsussex_gov_uk.py
index 3835ee8e..7c3ba364 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/midsussex_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/midsussex_gov_uk.py
@@ -1,20 +1,25 @@
import re
-import requests
-
+from datetime import datetime
+import requests
from bs4 import BeautifulSoup
-from datetime import datetime
from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "Mid-Sussex District Council"
-DESCRIPTION = "Source for midsussex.gov.uk services for Mid-Sussex District Council, UK."
+DESCRIPTION = (
+ "Source for midsussex.gov.uk services for Mid-Sussex District Council, UK."
+)
URL = "https://midsussex.gov.uk"
TEST_CASES = {
"Test_001": {"house_number": "6", "street": "Withypitts", "postcode": "RH10 4PJ"},
- "Test_002": {"house_name": "Oaklands", "street": "Oaklands Road", "postcode": "RH16 1SS"},
+ "Test_002": {
+ "house_name": "Oaklands",
+ "street": "Oaklands Road",
+ "postcode": "RH16 1SS",
+ },
"Test_003": {"house_number": 9, "street": "Bolnore Road", "postcode": "RH16 4AB"},
- "Test_004": {"address": "HAZELMERE REST HOME, 21 BOLNORE ROAD RH16 4AB"}
+ "Test_004": {"address": "HAZELMERE REST HOME, 21 BOLNORE ROAD RH16 4AB"},
}
ICON_MAP = {
@@ -24,10 +29,13 @@
}
API_URL = "https://www.midsussex.gov.uk/waste-recycling/bin-collection/"
-REGEX = "([A-Z]{1,2}\d[A-Z\d]?\s*\d[A-Z]{2})" # regex for UK postcode format
+REGEX = r"([A-Z]{1,2}\d[A-Z\d]?\s*\d[A-Z]{2})" # regex for UK postcode format
+
class Source:
- def __init__(self, house_name="", house_number="", street="", postcode="", address=""):
+ def __init__(
+ self, house_name="", house_number="", street="", postcode="", address=""
+ ):
self._house_name = str(house_name).upper()
self._house_number = str(house_number)
self._street = str(street).upper()
@@ -41,40 +49,57 @@ def fetch(self):
# extract postcode
self._postcode = re.findall(REGEX, self._address)
elif self._house_name == "":
- self._address = self._house_number + " " + self._street + " " + self._postcode
+ self._address = (
+ self._house_number + " " + self._street + " " + self._postcode
+ )
else:
- self._address = self._house_name + "," + self._house_number + " " + self._street + " " + self._postcode
+ self._address = (
+ self._house_name
+ + ","
+ + self._house_number
+ + " "
+ + self._street
+ + " "
+ + self._postcode
+ )
+
+ r0 = s.get(API_URL)
+ soup = BeautifulSoup(r0.text, features="html.parser")
payload = {
- "PostCodeStep.strAddressSearch": self._postcode,
- "AddressStep.strAddressSelect": self._address,
+ "__RequestVerificationToken": soup.find(
+ "input", {"name": "__RequestVerificationToken"}
+ ).get("value"),
+ "ufprt": soup.find("input", {"name": "ufprt"}).get("value"),
+ "StrPostcodeSearch": self._postcode,
+ "StrAddressSelect": self._address,
"Next": "true",
"StepIndex": "1",
}
# Seems to need a ufprt, so get that and then repeat query
- r0 = s.post(API_URL, data = payload)
- soup = BeautifulSoup(r0.text, features="html.parser")
- token = soup.find("input", {"name": "ufprt"}).get("value")
- payload.update({"ufprt": token})
-
- # Retrieve collection details
- r1 = s.post(API_URL, data = payload)
+ r1 = s.post(API_URL, data=payload)
+
soup = BeautifulSoup(r1.text, features="html.parser")
- tr = soup.findAll("tr")
- tr = tr[1:] # remove header row
+ ufprt = soup.find("input", {"name": "ufprt"}).get("value")
+ token = soup.find("input", {"name": "__RequestVerificationToken"}).get("value")
+ payload.update({"ufprt": ufprt, "__RequestVerificationToken": token})
+
+ # Retrieve collection details
+ r2 = s.post(API_URL, data=payload)
+ soup = BeautifulSoup(r2.text, features="html.parser")
+ trs = soup.findAll("tr")[1:] # remove header row
entries = []
- for td in tr:
- item = td.findAll("td")[1:]
+ for tr in trs:
+ td = tr.findAll("td")[1:]
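+            # td[0] is the waste type, td[1] the date, e.g. "Friday 01 September 2023"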
entries.append(
Collection(
- date=datetime.strptime(
- item[1].text, "%A %d %B %Y").date(),
- t=item[0].text,
- icon=ICON_MAP.get(item[0].text),
+ date=datetime.strptime(td[1].text, "%A %d %B %Y").date(),
+ t=td[0].text,
+ icon=ICON_MAP.get(td[0].text),
)
)
- return entries
\ No newline at end of file
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/milton_keynes_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/milton_keynes_gov_uk.py
new file mode 100644
index 00000000..b6bfea3f
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/milton_keynes_gov_uk.py
@@ -0,0 +1,110 @@
+# modified version of bexley_gov_uk.py
+
+from datetime import datetime
+from time import time_ns
+
+import requests
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "Milton Keynes council"
+DESCRIPTION = "Source for Milton Keynes council."
+URL = "milton-keynes.gov.uk"
+TEST_CASES = {
+ "North Row, Central": {"uprn": 25032037},
+ "Adelphi Street, Campbell Park": {"uprn": 25044504},
+}
+
+
+ICON_MAP = {
+ "REFUSE": "mdi:trash-can",
+ "RECYCLE": "mdi:recycle",
+ "RECYCLING": "mdi:recycle",
+ "FOOD": "mdi:leaf",
+ "GARDEN": "mdi:leaf",
+}
+
+
+SITE_URL = (
+ "https://mycouncil.milton-keynes.gov.uk/service/Waste_Collection_Round_Checker"
+)
+HEADERS = {
+ "user-agent": "Mozilla/5.0",
+}
+
+
+class Source:
+ def __init__(self, uprn: str | int):
+ self._uprn: str = str(uprn)
+
+ def fetch(self):
+ s = requests.Session()
+
+ # Set up session
+ timestamp = time_ns() // 1_000_000 # epoch time in milliseconds
+ s.get(
+ "https://mycouncil.milton-keynes.gov.uk/apibroker/domain/mycouncil.milton-keynes.gov.uk",
+ params={
+ "_": timestamp,
+ },
+ headers=HEADERS,
+ )
+
+ # This request gets the session ID
+ sid_request = s.get(
+ "https://mycouncil.milton-keynes.gov.uk/authapi/isauthenticated",
+ params={
+ "uri": "https://mycouncil.milton-keynes.gov.uk/service/Waste_Collection_Round_Checker",
+ "hostname": "mycouncil.milton-keynes.gov.uk",
+ "withCredentials": "true",
+ },
+ )
+ sid_data = sid_request.json()
+ sid = sid_data["auth-session"]
+
+ # This request retrieves the schedule
+ timestamp = time_ns() // 1_000_000 # epoch time in milliseconds
+ payload = {"formValues": {"Section 1": {"uprnCore": {"value": self._uprn}}}}
+ schedule_request = s.post(
+ "https://mycouncil.milton-keynes.gov.uk/apibroker/runLookup",
+ headers=HEADERS,
+ params={
+ # "id": "61320b2acf8a3",
+ "id": "64d9feda3a507",
+ "repeat_against": "",
+ "noRetry": "false",
+ "getOnlyTokens": "undefined",
+ "log_id": "",
+ "app_name": "AF-Renderer::Self",
+ "_": str(timestamp),
+ "sid": str(sid),
+ },
+ json=payload,
+ )
+
+ rowdata = schedule_request.json()["integration"]["transformed"]["rows_data"]
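+        # rows_data maps an index to one dict per bin, roughly (illustrative):
+        #   {"0": {"AssetTypeName": "Recycling", "TaskTypeName": "...",
+        #          "ServiceName": "...", "NextInstance": "2023-09-01",
+        #          "LastInstance": "2023-08-18"}}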
+
+ # Extract bin types and next collection dates
+ entries = []
+        for item in rowdata.values():
+ bin_type = item["AssetTypeName"]
+ icon = None
+
+ for key, icon_name in ICON_MAP.items():
+ if (
+ key in item["AssetTypeName"].upper()
+ or key in item["TaskTypeName"].upper()
+ or key in item["ServiceName"].upper()
+ ):
+ icon = icon_name
+ break
+
+ dates = [
+ datetime.strptime(item["NextInstance"], "%Y-%m-%d").date(),
+ datetime.strptime(item["LastInstance"], "%Y-%m-%d").date(),
+ ]
+ for date in dates:
+ entries.append(
+ Collection(t=bin_type, date=date, icon=icon),
+ )
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/nottingham_city_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/nottingham_city_gov_uk.py
index 7f60304b..94f42ffe 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/nottingham_city_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/nottingham_city_gov_uk.py
@@ -1,12 +1,13 @@
-import json
import datetime
-import time
+import json
import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "Nottingham City Council"
-DESCRIPTION = "Source for nottinghamcity.gov.uk services for the city of Nottingham, UK."
+DESCRIPTION = (
+ "Source for nottinghamcity.gov.uk services for the city of Nottingham, UK."
+)
URL = "https://nottinghamcity.gov.uk"
TEST_CASES = {
"Douglas Rd, Nottingham NG7 1NW": {"uprn": "100031540175"},
@@ -14,24 +15,14 @@
}
BINS = {
- "DryRecyclingDay": {
- "icon": "mdi:recycle",
- "name": "Recycling"
- },
- "DomesticDay": {
- "icon": "mdi:trash-can",
- "name": "General"
- },
- "GardenDay": {
- "icon": "mdi:leaf",
- "name": "Garden"
- },
- "FoodWaste": {
- "icon": "mdi:food-apple",
- "name": "Food"
- }
+ "Recycling": {"icon": "mdi:recycle", "name": "Recycling"},
+ "Waste": {"icon": "mdi:trash-can", "name": "General"},
+ "Garden": {"icon": "mdi:leaf", "name": "Garden"},
+ "Food23L": {"icon": "mdi:food-apple", "name": "Food"},
+ "Food23L_bags": {"icon": "mdi:food-apple", "name": "Food"},
}
+
class Source:
def __init__(self, uprn):
self._uprn = uprn
@@ -39,7 +30,7 @@ def __init__(self, uprn):
def fetch(self):
# get json file
r = requests.get(
- f"https://geoserver.nottinghamcity.gov.uk/myproperty/handler/proxy.ashx?http://geoserver.nottinghamcity.gov.uk/wcf/BinCollection.svc/livebin/{self._uprn}"
+ f"https://geoserver.nottinghamcity.gov.uk/myproperty/handler/proxy.ashx?https://geoserver.nottinghamcity.gov.uk/bincollections2/api/collection/{self._uprn}"
)
# extract data from json
@@ -47,34 +38,23 @@ def fetch(self):
entries = []
- today = datetime.date.today() - datetime.timedelta(days=datetime.date.today().isoweekday() - 1)
+ next_collections = data["nextCollections"]
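+        # each entry is roughly {"collectionType": "Recycling",
+        # "collectionDate": "2023-09-01T00:00:00"} (illustrative)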
- for bin in BINS.keys():
- props = BINS[bin]
- day = data["CollectionDetails"][bin]
- if day == "Not Applicable":
- continue
+ for collection in next_collections:
+ bin_type = collection["collectionType"]
- day = time.strptime(day, "%A").tm_wday
+ props = BINS[bin_type]
- # RecyclingWeek being B means recycling is on even numbered weeks
- week_offset = 0
- recycling_shift = data["CollectionDetails"]["RecyclingWeek"] == "A"
- domestic_shift = data["CollectionDetails"]["RecyclingWeek"] == "B"
-
- if bin == "DryRecyclingDay" or bin == "GardenDay":
- week_offset = (datetime.date.today().isocalendar().week + recycling_shift) % 2
- elif bin == "DomesticDay":
- week_offset = (datetime.date.today().isocalendar().week + domestic_shift) % 2
-
- next_date = today + datetime.timedelta(days=day, weeks=week_offset)
+ next_collection_date = datetime.datetime.fromisoformat(
+ collection["collectionDate"]
+ )
entries.append(
- Collection(
- date = next_date,
- t = props["name"],
- icon = props["icon"]
- )
+ Collection(
+ date=next_collection_date.date(),
+ t=props["name"],
+ icon=props["icon"],
+ )
)
return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/rbwm_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/rbwm_gov_uk.py
new file mode 100644
index 00000000..cd9c6320
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/rbwm_gov_uk.py
@@ -0,0 +1,67 @@
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup, NavigableString
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "Windsor and Maidenhead"
+DESCRIPTION = "Source for Windsor and Maidenhead."
+URL = "https://my.rbwm.gov.uk/"
+TEST_CASES = {
+ "Windsor 1": {"uprn": 100080381393},
+ "Windsor 2": {"uprn": "100080384194"},
+ "Maidenhead 1": {"uprn": "100080359672"},
+ "Maidenhead 2": {"uprn": 100080355442},
+}
+
+
+ICON_MAP = {
+ "refuse": "mdi:trash-can",
+ "garden waste": "mdi:leaf",
+ "recycling": "mdi:recycle",
+}
+
+
+API_URL = "https://my.rbwm.gov.uk/special/your-collection-dates"
+
+
+class Source:
+ def __init__(self, uprn: str | int):
+ self._uprn: str = str(uprn).zfill(12)
+
+ def fetch(self):
+ s = requests.Session()
+ args = {
+ "uprn": self._uprn,
+ "subdate": datetime.now().strftime("%Y-%m-%d"),
+ }
+
+ # request needs to be made twice to get the correct response
+ r = s.get(API_URL, params=args)
+ r.raise_for_status()
+
+ r = s.get(API_URL, params=args)
+ r.raise_for_status()
+
+ soup = BeautifulSoup(r.text, "html.parser")
+
+ table = soup.find("table")
+
+ if not table or isinstance(table, NavigableString):
+ raise Exception("Invalid response from API")
+
+ entries = []
+ for tr in table.find_all("tr"):
+ tds = tr.find_all("td")
+ if len(tds) != 2:
+ continue
+
+ bi_type = tds[0].text.split("Collection Service")[0].strip()
+
+ date_string = tds[1].text.strip()
+
+ date = datetime.strptime(date_string, "%d/%m/%Y").date()
+ icon = ICON_MAP.get(bi_type.lower()) # Collection icon
+ entries.append(Collection(date=date, t=bi_type, icon=icon))
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py
new file mode 100644
index 00000000..24c281e9
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py
@@ -0,0 +1,57 @@
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection
+
+TITLE = "Rhondda Cynon Taf County Borough Council"
+DESCRIPTION = "Source for rctcbc.gov.uk services for Rhondda Cynon Taf County Borough Council, Wales, UK"
+URL = "rctcbc.gov.uk"
+TEST_CASES = {
+ "Test_001": {"uprn": "10024274791"},
+ "Test_002": {"uprn": "100100718352"},
+ "Test_003": {"uprn": 100100733093},
+}
+ICON_MAP = {
+ "BLACK BAGS": "mdi:trash-can",
+ "RECYCLING": "mdi:recycle",
+ "FOOD WASTE": "mdi:food",
+ "GARDEN WASTE": "mdi:leaf",
+}
+
+
+class Source:
+ def __init__(self, uprn):
+ self._uprn = str(uprn)
+
+ def fetch(self):
+ s = requests.Session()
+ # website appears to display ~4 months worth of collections, so iterate through those pages
+ entries = []
+ for month in range(0, 4):
+ r = s.get(
+ f"https://www.rctcbc.gov.uk/EN/Resident/RecyclingandWaste/RecyclingandWasteCollectionDays.aspx?uprn={self._uprn}&month={month}"
+ )
+ soup = BeautifulSoup(r.text, "html.parser")
+ calendar_month = soup.find("div", {"class": "calendar-month"})
+ calendar_day = soup.find_all(
+ "div", {"class": "card-body card-body-padding"}
+ )
+ for day in calendar_day:
+ pickups = day.find_all("a")
+ if len(pickups) != 0:
+ d = day.find("div", {"class": "card-title"})
+ dt = d.text.strip() + " " + calendar_month.text.strip()
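+                    # e.g. "14" + "September 2023" -> "14 September 2023"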
+ for pickup in pickups:
+ entries.append(
+ Collection(
+ date=datetime.strptime(
+ dt,
+ "%d %B %Y",
+ ).date(),
+ t=pickup.text,
+ icon=ICON_MAP.get(pickup.text.upper()),
+ )
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py
index 97035894..9f5f1aa5 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py
@@ -1,8 +1,7 @@
-
-import requests
import json
-
from datetime import datetime
+
+import requests
from waste_collection_schedule import Collection
TITLE = "Recycle Coach"
@@ -19,142 +18,142 @@
EXTRA_INFO = [
{
"title": "Albuquerque, New Mexico, USA",
- "url": "https://recyclecoach.com/cities/usa-nm-city-of-albuquerque/"
+ "url": "https://recyclecoach.com/cities/usa-nm-city-of-albuquerque/",
},
{
"title": "Tucson, Arizona, USA",
- "url": "https://recyclecoach.com/cities/usa-az-city-of-tucson/"
+ "url": "https://recyclecoach.com/cities/usa-az-city-of-tucson/",
},
{
"title": "Olympia, Washington, USA",
- "url": "https://recyclecoach.com/cities/usa-wa-city-of-olympia/"
+ "url": "https://recyclecoach.com/cities/usa-wa-city-of-olympia/",
},
{
"title": "Newark, Delaware, USA",
- "url": "https://recyclecoach.com/cities/usa-de-city-of-newark/"
+ "url": "https://recyclecoach.com/cities/usa-de-city-of-newark/",
},
{
"title": "Louisville, Kentucky, USA",
- "url": "https://recyclecoach.com/cities/usa-ky-city-of-louisville/"
+ "url": "https://recyclecoach.com/cities/usa-ky-city-of-louisville/",
},
- {
- "title": "London, Ontario, Canada",
- "url": "https://london.ca/",
- "country": "ca"
- }
+ {"title": "London (ON)", "url": "https://london.ca/", "country": "ca"},
]
TEST_CASES = {
- "Default": {
- "street": "2242 grinstead drive",
- "city": "louisville",
- "state": "KY"
- },
- "Problematic City Lookup": {
- "street": "2202 E Florence Dr",
- "city": "Tucson",
- "state": "AZ",
- "district_id": "TUC",
- "project_id": "532"
- },
- "olympia": {
- "street": "1003 Lybarger St NE",
- "city": "Olympia",
- "state": "Washington"
- },
- "newark": {
- "street": "24 Townsend Rd",
- "city": "Newark",
- "state": "Delaware"
- },
- "albuquerque": {
- "street": "1505 Silver Ave SE",
- "city": "Albuquerque",
- "state": "New Mexico"
- },
- "london ontario": {
- "street": "1065 Sunningdale Rd E",
- "city": "London",
- "state": "Ontario"
- }
+ "Default": {"street": "2242 grinstead drive", "city": "louisville", "state": "KY"},
+ "Problematic City Lookup": {
+ "street": "2202 E Florence Dr",
+ "city": "Tucson",
+ "state": "AZ",
+ "district_id": "TUC",
+ "project_id": "532",
+ },
+ "olympia": {
+ "street": "1003 Lybarger St NE",
+ "city": "Olympia",
+ "state": "Washington",
+ },
+ "newark": {"street": "24 Townsend Rd", "city": "Newark", "state": "Delaware"},
+ "albuquerque": {
+ "street": "1505 Silver Ave SE",
+ "city": "Albuquerque",
+ "state": "New Mexico",
+ },
+ "london ontario": {
+ "street": "1065 Sunningdale Rd E",
+ "city": "London",
+ "state": "Ontario",
+ },
+ "london ontario with districtID": {
+ "street": "1065 Sunningdale Rd E",
+ "city": "London",
+ "state": "Ontario",
+ "project_id": "528",
+ "district_id": "CityofLondon",
+ "zone_id": "zone-z547",
+ },
}
class Source:
- def __init__(self, street, city, state, project_id=None, district_id=None, zone_id=None): # argX correspond to the args dict in the source configuration
+ def __init__(
+ self, street, city, state, project_id=None, district_id=None, zone_id=None
+ ): # argX correspond to the args dict in the source configuration
self.street = self._format_key(street)
self.city = self._format_key(city)
self.state = self._format_key(state)
self.project_id = self._format_key(project_id) if project_id else None
- self.district_id = self._format_key(district_id) if district_id else None
+ self.district_id = district_id.strip() if district_id else None
- self.zone_id = zone_id # uses lowercase z's, not sure if matters
+        self.zone_id = zone_id  # zone ids use a lowercase "z" prefix; unclear if case matters
self.stage = 0
def _format_key(self, param):
- """ Get rid of ambiguity in caps/spacing """
+ """Get rid of ambiguity in caps/spacing."""
return param.upper().strip()
def _lookup_city(self):
- city_finder = 'https://recyclecoach.com/wp-json/rec/v1/cities?find={}, {}'.format(self.city, self.state)
+ city_finder = f"https://recyclecoach.com/wp-json/rec/v1/cities?find={self.city}, {self.state}"
res = requests.get(city_finder)
city_data = res.json()
- if len(city_data['cities']) == 1:
- self.project_id = city_data['cities'][0]['project_id']
- self.district_id = city_data['cities'][0]['district_id']
- self.stage = float(city_data['cities'][0]['stage'])
+ if len(city_data["cities"]) == 1:
+ self.project_id = city_data["cities"][0]["project_id"]
+ self.district_id = city_data["cities"][0]["district_id"]
+ self.stage = float(city_data["cities"][0]["stage"])
if self.stage < 3:
- raise Exception("Found your city, but it is not yet supported fully by recycle coach.")
+ raise Exception(
+ "Found your city, but it is not yet supported fully by recycle coach."
+ )
- elif len(city_data['cities']) > 1:
+ elif len(city_data["cities"]) > 1:
- for city in city_data['cities']:
- if city['city_nm'].upper() == self.city.upper():
- self.project_id = city['project_id']
- self.district_id = city['district_id']
- self.stage = float(city['stage'])
+ for city in city_data["cities"]:
+ if city["city_nm"].upper() == self.city.upper():
+ self.project_id = city["project_id"]
+ self.district_id = city["district_id"]
+ self.stage = float(city["stage"])
return True
# not sure what to do with ambiguity here
# print(json.dumps(city_data['cities'], indent=4))
- raise Exception("Could not determine district or project, Debug here to find your discrict and project_id")
+        raise Exception(
+            "Could not determine district or project. Debug here to find your district and project_id."
+        )
def _lookup_zones(self):
- zone_finder = 'https://api-city.recyclecoach.com/zone-setup/address?sku={}&district={}&prompt=undefined&term={}'.format(self.project_id, self.district_id, self.street)
+ zone_finder = f"https://api-city.recyclecoach.com/zone-setup/address?sku={self.project_id}&district={self.district_id}&prompt=undefined&term={self.street}"
res = requests.get(zone_finder)
zone_data = res.json()
- for zone_res in zone_data['results']:
- streetpart = self._format_key(zone_res['address']).split(",")[0]
+ for zone_res in zone_data["results"]:
+ streetpart = self._format_key(zone_res["address"]).split(",")[0]
if streetpart in self.street:
- self.zone_id = self._build_zone_string(zone_res['zones'])
+ self.zone_id = self._build_zone_string(zone_res["zones"])
return self.zone_id
raise Exception("Unable to find zone")
def _build_zone_string(self, z_match):
- """ takes matching json and builds a format zone-z12312-z1894323-z8461 """
+ """Take matching json and build a format zone-z12312-z1894323-z8461."""
zone_str = "zone"
for zonekey in z_match:
- zone_str += "-{}".format(z_match[zonekey])
+ zone_str += f"-{z_match[zonekey]}"
return zone_str
def fetch(self):
- """Builds the date fetching request through looking up address on separate endpoints, will skip these requests if you can provide the district_id, project_id and/or zone_id
- """
-
+        """Build the date-fetching request by looking up the address on separate endpoints; these lookups are skipped if district_id, project_id and/or zone_id are provided."""
if not self.project_id or not self.district_id:
self._lookup_city()
if not self.zone_id:
self._lookup_zones()
- collection_def_url = 'https://reg.my-waste.mobi/collections?project_id={}&district_id={}&zone_id={}&lang_cd=en_US'.format(self.project_id, self.district_id, self.zone_id)
- schedule_url = 'https://pkg.my-waste.mobi/app_data_zone_schedules?project_id={}&district_id={}&zone_id={}'.format(self.project_id, self.district_id, self.zone_id)
+ collection_def_url = f"https://reg.my-waste.mobi/collections?project_id={self.project_id}&district_id={self.district_id}&zone_id={self.zone_id}&lang_cd=en_US"
+ schedule_url = f"https://pkg.my-waste.mobi/app_data_zone_schedules?project_id={self.project_id}&district_id={self.district_id}&zone_id={self.zone_id}"
collection_def = None
schedule_def = None
@@ -183,5 +182,4 @@ def fetch(self):
)
entries.append(c)
-
return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/renosyd_dk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/renosyd_dk.py
new file mode 100644
index 00000000..1a1570e6
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/renosyd_dk.py
@@ -0,0 +1,134 @@
+import datetime
+import re
+import time
+from typing import List
+
+import requests
+from bs4 import BeautifulSoup, NavigableString
+from waste_collection_schedule import Collection  # type: ignore[attr-defined]
+
+TITLE = "Renosyd"
+DESCRIPTION = "Renosyd collections for Skanderborg and Odder kommunes"
+URL = "https://renosyd.dk"
+TEST_CASES = {
+ "TestCase1": {
+ "kommune": "skanderborg",
+ "husnummer": 123000,
+ },
+ "TestCase2": {
+ "kommune": "skanderborg",
+ "husnummer": 186305,
+ },
+ "TestCase3": {
+ "kommune": "odder",
+ "husnummer": 89042,
+ },
+}
+
+ICON_MAP = {
+ "RESTAFFALD": "mdi:trash-can",
+ "PAPIR/PAP": "mdi:note-multiple",
+ "EMBALLAGE": "mdi:recycle",
+ "STORSKRALD": "mdi:dump-truck",
+ "HAVEAFFALD": "mdi:leaf", # Uncertain about this name, can't find an example
+}
+
+DANISH_MONTHS = [
+ None,
+ "jan",
+ "feb",
+ "mar",
+ "apr",
+    "maj",
+ "jun",
+ "jul",
+ "aug",
+ "sep",
+ "okt",
+ "nov",
+ "dec",
+]
+
+
+class Source:
+ def __init__(self, kommune: str, husnummer: int):
+ self._kommune = kommune
+ self._husnummer = husnummer
+ self._api_url = (
+ "https://"
+ + self._kommune.lower()
+ + ".netdialog.renosyd.dk/citizen/default.aspx"
+ )
+
+ def fetch(self) -> List[Collection]:
+ session = requests.Session()
+
+ session.headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
+ "Accept-Encoding": "gzip, deflate",
+ "Accept": "*/*",
+ "Connection": "keep-alive",
+ }
+
+ address_select = session.get(
+ self._api_url,
+ cookies={"StoredAddress": str(self._husnummer)},
+ )
+ address_select.raise_for_status()
+
+ address_select_soup = BeautifulSoup(address_select.text, "html.parser")
+ data = {
+ i["name"]: i.get("value", "")
+ for i in address_select_soup.select("input[name]")
+ }
+
+ binfo = session.post(self._api_url, data=data)
+ binfo.raise_for_status()
+
+ binfo_soup = BeautifulSoup(binfo.text, "html.parser")
+
+ calendar = binfo_soup.find_all(attrs={"class": "tableContainersAtProperty"})
+
+ months = []
+ this_year = time.localtime().tm_year
+
+ for month in calendar[1].find_all("th"):
+ value = month.contents[0].strip()
+ if value == "Beholder":
+ continue
+
+ months.append(datetime.date(this_year, DANISH_MONTHS.index(value), 1))
+
+ if value == "dec":
+                this_year += 1
+
+ entries = []
+
+ rows = calendar[1].find_all("tr")
+
+ for row in rows[1:]:
+ elements = row.find_all("td")
+
+ result = re.search(
+ r"^(\d{1,2}\s?x\s?)([A-Za-z\/]*)(\s*\d{1,4}L)?$",
+ elements[0].contents[0].strip(),
+ )
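+            # e.g. "2 x Papir/pap 240L" -> ("2 x ", "Papir/pap", " 240L") (illustrative)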
+ if result is None:
+ continue
+
+ container_type = result.groups()[1]
+
+ for idx, element in enumerate(elements[1:]):
+ for subelement in element.contents:
+ if not isinstance(subelement, NavigableString):
+ continue
+
+ if subelement.strip() == "":
+ continue
+
+ entries.append(
+ Collection(
+ date=months[idx]
+ + datetime.timedelta(days=int(subelement.strip()) - 1),
+ t=container_type,
+ icon=ICON_MAP.get(container_type.upper()),
+ )
+ )
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/samiljo_se.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/samiljo_se.py
index 10638140..a93886b7 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/samiljo_se.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/samiljo_se.py
@@ -115,7 +115,7 @@ def fetch(self):
adresslistalines = adresslist.text.lower().splitlines()
for line in adresslistalines:
if streetcityjoined in line:
- A = line.split("|")[-1]
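+                # the nrA value is the fourth |-separated field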
+ A = line.split("|")[3]
payload = {"hsG": self.street, "hsO": self.city, "nrA": A}
payload_str = urllib.parse.urlencode(payload, encoding="cp1252")
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/sandnes_no.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/sandnes_no.py
new file mode 100644
index 00000000..2578759d
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/sandnes_no.py
@@ -0,0 +1,81 @@
+# nearly identical to stavanger_no
+
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "Stavanger Kommune"
+DESCRIPTION = "Source for Stavanger Kommune, Norway"
+URL = "https://www.sandnes.kommune.no/"
+TEST_CASES = {
+ "TestcaseI": {
+ "id": "181e5aac-3c88-4b0b-ad46-3bd246c2be2c",
+ "municipality": "Sandnes kommune 2020",
+ "gnumber": "62",
+ "bnumber": "281",
+ "snumber": "0",
+ },
+ "TestcaseII": {
+ "id": "cb263140-1743-4459-ab3a-a9677884904f",
+ "municipality": "Sandnes kommune 2020",
+ "gnumber": 33,
+ "bnumber": 844,
+ "snumber": 0,
+ },
+}
+
+ICON_MAP = {
+ "Restavfall": "mdi:trash-can",
+ "Papp/papir": "mdi:recycle",
+ "Papir": "mdi:recycle",
+ "Bio": "mdi:leaf",
+ "Våtorganisk avfall": "mdi:leaf",
+ "Juletre": "mdi:pine-tree",
+}
+
+
+class Source:
+ def __init__(self, id, municipality, gnumber, bnumber, snumber):
+ self._id = id
+ self._municipality = municipality
+ self._gnumber = gnumber
+ self._bnumber = bnumber
+ self._snumber = snumber
+
+ def fetch(self):
+ url = "https://www.hentavfall.no/rogaland/sandnes/tommekalender/show"
+
+ params = {
+ "id": self._id,
+ "municipality": self._municipality,
+ "gnumber": self._gnumber,
+ "bnumber": self._bnumber,
+ "snumber": self._snumber,
+ }
+
+ r = requests.get(url, params=params)
+ r.raise_for_status()
+
+ soup = BeautifulSoup(r.text, "html.parser")
+
+ tag = soup.find_all("option")
+        year = tag[0].get("value").split("-")[1]
+
+ entries = []
+ for tag in soup.find_all("tr", {"class": "waste-calendar__item"}):
+ if tag.text.strip() == "Dato og dag\nAvfallstype":
+ continue
+
+ date = tag.text.strip().split(" - ")
+ date = datetime.strptime(date[0] + "." + year, "%d.%m.%Y").date()
+
+ for img in tag.find_all("img"):
+ waste_type = img.get("title")
+ entries.append(
+ Collection(date, waste_type, icon=ICON_MAP.get(waste_type))
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/scheibbs_umweltverbaende_at.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/scheibbs_umweltverbaende_at.py
new file mode 100644
index 00000000..3d84b1e8
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/scheibbs_umweltverbaende_at.py
@@ -0,0 +1,55 @@
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection
+
+TITLE = "GVU Scheibbs"
+DESCRIPTION = "Source for waste collection services Association of Municipalities in the District of Scheibbs"
+URL = "https://scheibbs.umweltverbaende.at/"
+TEST_CASES = {
+ "Test_001": {"region": "Gaming"},
+ "Test_002": {"region": "Sankt Anton an der Jeßnitz"},
+ "Test_003": {"region": "Göstling an der Ybbs"},
+ "Test_004": {"region": "Wieselburg"},
+}
+ICON_MAP = {
+ "Restmüll": "mdi:trash-can",
+ "Gelber Sack": "mdi:sack",
+ "Altpapier": "mdi:package-variant",
+ "Biotonne": "mdi:leaf",
+}
+
+
+class Source:
+ def __init__(self, region):
+ self._region = region
+
+ def fetch(self):
+ s = requests.Session()
+ # get list of regions and weblinks
+ r0 = s.get("https://scheibbs.umweltverbaende.at/?kat=32")
+ soup = BeautifulSoup(r0.text, "html.parser")
+ table = soup.find_all("div", {"class": "col-sm-9"})
+ entries = []
+ for item in table:
+ weblinks = item.find_all("a", {"class": "weblink"})
+            for weblink in weblinks:
+                # match weblink with region to get collection schedule
+                if self._region in weblink.text:
+                    r1 = s.get(f"https://scheibbs.umweltverbaende.at/{weblink['href']}")
+ soup = BeautifulSoup(r1.text, "html.parser")
+ schedule = soup.find_all("div", {"class": "tunterlegt"})
+ for day in schedule:
+ txt = day.text.strip().split(
+ " "
+ ) # this is not 3 space characters, the middle one is U+00a0
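+                        # txt is e.g. ["MO", "04.09.2023", "Restmüll"] (illustrative)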
+ entries.append(
+ Collection(
+ date=datetime.strptime(txt[1], "%d.%m.%Y").date(),
+ t=txt[2],
+ icon=ICON_MAP.get(txt[2]),
+ )
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/south_norfolk_and_broadland_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/south_norfolk_and_broadland_gov_uk.py
index 6f5ffb08..7c7d3d2a 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/south_norfolk_and_broadland_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/south_norfolk_and_broadland_gov_uk.py
@@ -11,11 +11,11 @@
TITLE = "Broadland District Council"
DESCRIPTION = "Source for southnorfolkandbroadland.gov.uk services for South Norfolk and Broadland, UK"
-URL = "https://southnorfolkandbroadland.gov.uk/"
+URL = "https://area.southnorfolkandbroadland.gov.uk/"
EXTRA_INFO = [
{
"title": "South Norfolk Council",
- "url": "https://southnorfolkandbroadland.gov.uk/"
+ "url": "https://southnorfolkandbroadland.gov.uk/",
},
]
TEST_CASES = {
@@ -29,7 +29,7 @@
"Parish": "Sprowston",
"Village": "Sprowston",
"Street": "Mallard Way",
- "Authority": "2610"
+ "Authority": "2610",
}
},
"Random address new Method": {
@@ -46,35 +46,45 @@
"Parish": "Sprowston",
"Village": "Sprowston",
"Street": "Blue Boar Lane",
- "Authority": "2610"
+ "Authority": "2610",
}
- }
+ },
}
ICON_MAP = {
"Rubbish": "mdi:trash-can",
"Recycling": "mdi:recycle",
- "Garden (if applicable)": "mdi:leaf"
+ "Garden (if applicable)": "mdi:leaf",
}
matcher = re.compile(r"^([A-Z][a-z]+) (\d{1,2}) ([A-Z][a-z]+) (\d{4})$")
+
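+# e.g. parse_date("Friday 1 September 2023") -> date(2023, 9, 1)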
def parse_date(date_str: str) -> date:
match = matcher.match(date_str)
+ if match is None:
+ raise ValueError(f"Unable to parse date {date_str}")
+
return date(
int(match.group(4)),
strptime(match.group(3)[:3], "%b").tm_mon,
- int(match.group(2))
+ int(match.group(2)),
)
-def comparable(data:str) -> str:
+def comparable(data: str) -> str:
return data.replace(",", "").replace(" ", "").lower()
-class Source:
- _address_payload: dict
- def __init__(self, address_payload: dict = None, postcode: str = None, address: str = None):
+class Source:
+ _address_payload: dict | None
+
+ def __init__(
+ self,
+ address_payload: dict | None = None,
+ postcode: str | None = None,
+ address: str | None = None,
+ ):
self._address_payload = address_payload
self._postcode = comparable(postcode) if postcode else None
self._address = address if address else None
@@ -82,13 +92,14 @@ def __init__(self, address_payload: dict = None, postcode: str = None, address:
def fetch(self) -> List[Collection]:
if self._address_payload:
return self.__fetch_by_payload()
- if not self._postcode or not self._address:
- raise ValueError(
- "Either (address_payload) or (postcode and address) must be provided")
-
return self.__fetch_by_postcode_and_address()
def __fetch_by_postcode_and_address(self) -> List[Collection]:
+ if not self._postcode or not self._address:
+ raise ValueError(
+ "Either (address_payload) or (postcode and address) must be provided"
+ )
+
session = requests.Session()
r = session.get(URL + "FindAddress")
r.raise_for_status()
@@ -96,26 +107,32 @@ def __fetch_by_postcode_and_address(self) -> List[Collection]:
args = {
"Postcode": self._postcode,
- "__RequestVerificationToken": page.find("input", {"name": "__RequestVerificationToken"})["value"]
+ "__RequestVerificationToken": page.find(
+ "input", {"name": "__RequestVerificationToken"}
+ )["value"],
}
r = session.post(URL + "FindAddress", data=args)
r.raise_for_status()
page = soup(r.text, "html.parser")
- addresses = page.find(
- "select", {"id": "UprnAddress"}).find_all("option")
+ addresses = page.find("select", {"id": "UprnAddress"}).find_all("option")
if not addresses:
raise ValueError(f"no addresses found for postcode {self._postcode}")
- args["__RequestVerificationToken"] = page.find("input", {"name": "__RequestVerificationToken"})["value"]
+ args["__RequestVerificationToken"] = page.find(
+ "input", {"name": "__RequestVerificationToken"}
+ )["value"]
found = False
compare_address = self._address.replace(",", "").replace(" ", "").lower()
for address in addresses:
address_text = comparable(address.text)
-
- if address_text == compare_address or address_text == compare_address.replace(self._postcode, ""):
+
+ if (
+ address_text == compare_address
+ or address_text == compare_address.replace(self._postcode, "")
+ ):
args["UprnAddress"] = address["value"]
found = True
break
@@ -123,12 +140,17 @@ def __fetch_by_postcode_and_address(self) -> List[Collection]:
if not found:
raise ValueError(f"Address {self._address} not found")
- r = session.post(URL+"FindAddress/Submit", data=args)
+ r = session.post(URL + "FindAddress/Submit", data=args)
r.raise_for_status()
return self.__get_data(r)
def __fetch_by_payload(self) -> List[Collection]:
- r = requests.get(URL, headers={"Cookie": f"MyArea.Data={quote(json.dumps(self._address_payload))}"})
+ r = requests.get(
+ URL,
+ headers={
+ "Cookie": f"MyArea.Data={quote(json.dumps(self._address_payload))}"
+ },
+ )
r.raise_for_status()
return self.__get_data(r)
@@ -141,8 +163,7 @@ def __get_data(self, r: requests.Response) -> List[Collection]:
Collection(
parse_date(tuple(bin_category.children)[3].strip()),
tuple(bin_category.children)[1].text.strip(),
- icon=ICON_MAP.get(tuple(bin_category.children)[1].text.strip())
+ icon=ICON_MAP.get(tuple(bin_category.children)[1].text.strip()),
)
- for bin_category
- in bin_categories
+ for bin_category in bin_categories
]
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stockton_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stockton_gov_uk.py
index 4076bd23..caf7978e 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stockton_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stockton_gov_uk.py
@@ -1,9 +1,9 @@
+import base64
+import json
+import re
from datetime import datetime
import requests
-import re
-import json
-import base64
from bs4 import BeautifulSoup
from waste_collection_schedule import Collection
@@ -44,10 +44,19 @@ def fetch(self):
soup = BeautifulSoup(r.text, features="html.parser")
# Extract form submission url and form data
- form_url = soup.find("form", attrs={"id": "LOOKUPBINDATESBYADDRESSSKIPOUTOFREGION_FORM"})["action"]
- pageSessionId = soup.find("input", attrs={"name": "LOOKUPBINDATESBYADDRESSSKIPOUTOFREGION_PAGESESSIONID"})["value"]
- sessionId = soup.find("input", attrs={"name": "LOOKUPBINDATESBYADDRESSSKIPOUTOFREGION_SESSIONID"})["value"]
- nonce = soup.find("input", attrs={"name": "LOOKUPBINDATESBYADDRESSSKIPOUTOFREGION_NONCE"})["value"]
+ form_url = soup.find(
+ "form", attrs={"id": "LOOKUPBINDATESBYADDRESSSKIPOUTOFREGION_FORM"}
+ )["action"]
+ pageSessionId = soup.find(
+ "input",
+ attrs={"name": "LOOKUPBINDATESBYADDRESSSKIPOUTOFREGION_PAGESESSIONID"},
+ )["value"]
+ sessionId = soup.find(
+ "input", attrs={"name": "LOOKUPBINDATESBYADDRESSSKIPOUTOFREGION_SESSIONID"}
+ )["value"]
+ nonce = soup.find(
+ "input", attrs={"name": "LOOKUPBINDATESBYADDRESSSKIPOUTOFREGION_NONCE"}
+ )["value"]
form_data = {
"LOOKUPBINDATESBYADDRESSSKIPOUTOFREGION_PAGESESSIONID": pageSessionId,
@@ -65,7 +74,9 @@ def fetch(self):
# Extract encoded response data
soup = BeautifulSoup(r.text, features="html.parser")
pattern = re.compile(
- r"var LOOKUPBINDATESBYADDRESSSKIPOUTOFREGIONFormData = \"(.*?)\";$", re.MULTILINE | re.DOTALL)
+ r"var LOOKUPBINDATESBYADDRESSSKIPOUTOFREGIONFormData = \"(.*?)\";$",
+ re.MULTILINE | re.DOTALL,
+ )
script = soup.find("script", text=pattern)
response_data = pattern.search(script.text).group(1)
@@ -77,21 +88,30 @@ def fetch(self):
entries = []
for key in data["_PAGEORDER_"]:
soup = BeautifulSoup(
- data[key]["COLLECTIONDETAILS2"], features="html.parser")
+ data[key]["COLLECTIONDETAILS2"], features="html.parser"
+ )
for waste_type_div in soup.find_all("div", attrs={"class": "grid__cell"}):
- waste_type = waste_type_div.find("p", attrs={"class": "myaccount-block__title--bin"}).text.strip()
-
+ waste_type = waste_type_div.find(
+ "p", attrs={"class": "myaccount-block__title--bin"}
+ ).text.strip()
+
# Get date nodes from not garden waste
- date_nodes = waste_type_div.find_all("p", attrs={"class": "myaccount-block__date--bin"})
-
- #Get Both dates from Garden Waste
+ date_nodes = waste_type_div.find_all(
+ "p", attrs={"class": "myaccount-block__date--bin"}
+ )
+
+ # Get Both dates from Garden Waste
if date_nodes is None or len(date_nodes) == 0:
- date_nodes = [waste_type_div.find_all("p")[1].find_all("strong")[i] for i in range(2)]
+ date_nodes = [
+ waste_type_div.find_all("p")[1].find_all("strong")[i]
+ for i in range(2)
+ ]
for date_node in date_nodes:
# Remove ordinal suffixes from date string
date_string = re.sub(
- r"(st|nd|rd|th)", "", date_node.text.strip())
+ r"(?<=[0-9])(?:st|nd|rd|th)", "", date_node.text.strip()
+ )
date = datetime.strptime(date_string, "%a %d %B %Y").date()
entries.append(
Collection(
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stoke_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stoke_gov_uk.py
new file mode 100644
index 00000000..134d91c6
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stoke_gov_uk.py
@@ -0,0 +1,52 @@
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "Stoke-on-Trent"
+DESCRIPTION = "Source for Stoke-on-Trent"
+URL = "https://www.stoke.gov.uk/"
+
+TEST_CASES = {
+ "Test1": {"uprn": "3455011383"},
+ "Test2": {"uprn": 3455011391},
+}
+
+ICON_MAP = {"ORG": "mdi:leaf", "RES": "mdi:trash-can", "REC": "mdi:recycle"}
+
+API_URL = "https://www.stoke.gov.uk/jadu/custom/webserviceLookUps/BarTecWebServices_missed_bin_calendar.php?UPRN="
+
+DATE_FORMAT = "%d/%m/%Y %H:%M:%S" # format of the date string in the collection table
+
+
+class Source:
+ def __init__(self, uprn):
+ self._uprn = str(uprn).zfill(12)
+
+ def fetch(self):
+ r = requests.get(API_URL + self._uprn)
+ soup = BeautifulSoup(r.text, features="xml")
+
+ # find all BinRound elements
+ bin_rounds = soup.find_all("BinRound")
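+        # a BinRound element looks roughly like (illustrative):
+        #   <BinRound><Bin>RES Household</Bin><DateTime>01/09/2023 00:00:00</DateTime></BinRound>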
+
+ entries = []
+
+ for bin_round in bin_rounds:
+            bin_name = bin_round.find("Bin").text
+            bintype = "RES"  # default when no known code is found
+            if "REC" in bin_name.upper():
+                bintype = "REC"
+            if "ORG" in bin_name.upper():
+                bintype = "ORG"
+            if "RES" in bin_name.upper():
+                bintype = "RES"
+
+ # round_name = bin_round.find('RoundName').text
+ date_time = bin_round.find("DateTime").text
+
+ date = datetime.strptime(date_time, DATE_FORMAT).date()
+            entries.append(Collection(date=date, t=bin_name, icon=ICON_MAP.get(bintype)))
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stratford_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stratford_gov_uk.py
new file mode 100644
index 00000000..feed0db7
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stratford_gov_uk.py
@@ -0,0 +1,76 @@
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+
+TITLE = "Stratford District Council"
+DESCRIPTION = (
+ "Source for Stratford District Council and their 123+ bin collection system"
+)
+URL = "https://stratford.gov.uk"
+
+# if you want your address removed, please submit a request and this will be done
+TEST_CASES = {
+    "Stratford DC": {"uprn": "100071513500"},  # doesn't have food waste
+    "Alscot Estate": {"uprn": 10024633309},
+}
+
+ICON_MAP = {
+ "Garden waste": "mdi:leaf",
+ "Refuse": "mdi:trash-can",
+ "Recycling": "mdi:recycle",
+ "Food waste": "mdi:food-apple",
+}
+# order of BINS is important, it's the order they appear left-to-right in the table.
+# these names have been chosen to accurately reflect naming convention on Stratford.gov
+BINS = ["Food waste", "Recycling", "Refuse", "Garden waste"]
+
+API_URL = (
+ "https://www.stratford.gov.uk/waste-recycling/when-we-collect.cfm/part/calendar"
+)
+HEADERS = {"Content-Type": "application/x-www-form-urlencoded"}
+
+DATE_FORMAT = "%A, %d/%m/%Y" # format of the date string in the collection table
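+# e.g. "Friday, 01/09/2023"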
+
+
+class Source:
+ def __init__(self, uprn):
+        # only the UPRN is needed, but the form still expects the other
+        # address keys, so fill them in with blanks
+ self._payload = {
+ "frmAddress1": "",
+ "frmAddress2": "",
+ "frmAddress3": "",
+ "frmAddress4": "",
+ "frmPostcode": "",
+ "frmUPRN": uprn,
+ }
+
+ def fetch(self):
+ r = requests.post(API_URL, data=self._payload, headers=HEADERS)
+ soup = BeautifulSoup(r.text, features="html.parser")
+
+ # Retrieve collection details
+ entries = []
+ table = soup.find("table", class_="table") # yes really
+
+ # each row is a date, and its given collections
+ for row in table.tbody.find_all("tr"):
+ # first td is the date of the collection
+ # format is day / month / year
+ date = datetime.strptime(row.find("td").text, DATE_FORMAT).date()
+
+ # there are 4 bins per row, this gets them
+ all_bins = row.find_all("td", class_="text-center")
+
+ # each bin may be "checked" to show it can be collected on that date
+ for idx, cell in enumerate(all_bins):
+ if cell.find("img", class_="check-img"):
+ entries.append(
+ Collection(date=date, t=BINS[idx], icon=ICON_MAP.get(BINS[idx]))
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/toronto_ca.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/toronto_ca.py
index b4685c25..1192d3a5 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/toronto_ca.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/toronto_ca.py
@@ -6,7 +6,7 @@
from ..collection import Collection
-TITLE = "City of Toronto"
+TITLE = "Toronto (ON)"
DESCRIPTION = "Source for Toronto waste collection"
URL = "https://www.toronto.ca"
TEST_CASES = {
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/warszawa19115_pl.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/warszawa19115_pl.py
index 593150a5..aeed0889 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/warszawa19115_pl.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/warszawa19115_pl.py
@@ -9,7 +9,7 @@
URL = "https://warszawa19115.pl"
TEST_CASES = {
"Street Name": {"street_address": "MARSZAŁKOWSKA 84/92, 00-514 Śródmieście"},
- "Geolocation ID": {"geolocation_id": "76802934"},
+ "Geolocation ID": {"geolocation_id": "3830963"},
}
_LOGGER = logging.getLogger(__name__)
@@ -62,9 +62,13 @@ def get_geolocation_id(self, street_address) -> str:
OC_PARAMS["p_p_resource_id"] = "autocompleteResource"
# Search for geolocation ID
- payload = f"_{OC_PARAMS['p_p_id']}_name={street_address}"
- geolocation_response = geolocation_session.post(
- OC_URL, headers=OC_HEADERS, params=OC_PARAMS, data=payload.encode("utf-8"),
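+    # the endpoint now takes the portlet-prefixed name as a GET query
+    # parameter instead of a POSTed form body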
+ OC_PARAMS[
+ "_portalCKMjunkschedules_WAR_portalCKMjunkschedulesportlet_INSTANCE_o5AIb2mimbRJ_name"
+ ] = street_address
+ geolocation_response = geolocation_session.get(
+ OC_URL,
+ headers=OC_HEADERS,
+ params=OC_PARAMS,
)
geolocation_response.raise_for_status()
@@ -104,9 +108,13 @@ def fetch(self):
# Calendar call requires 'ajaxResourceURL' param to work
OC_PARAMS["p_p_resource_id"] = "ajaxResource"
- payload = f"_{OC_PARAMS['p_p_id']}_addressPointId={str(self._geolocation_id)}"
- calendar_request = calendar_session.post(
- OC_URL, data=payload, headers=OC_HEADERS, params=OC_PARAMS,
+ OC_PARAMS[
+ "_portalCKMjunkschedules_WAR_portalCKMjunkschedulesportlet_INSTANCE_o5AIb2mimbRJ_addressPointId"
+ ] = self._geolocation_id
+ calendar_request = calendar_session.get(
+ OC_URL,
+ headers=OC_HEADERS,
+ params=OC_PARAMS,
)
calendar_request.raise_for_status()
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/west_norfolk_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/west_norfolk_gov_uk.py
new file mode 100644
index 00000000..90c1a941
--- /dev/null
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/west_norfolk_gov_uk.py
@@ -0,0 +1,79 @@
+import re
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection
+
+
+TITLE = "Borough Council of King's Lynn & West Norfolk"
+DESCRIPTION = "Source for www.west-norfolk.gov.uk services for Borough Council of King's Lynn & West Norfolk, UK."
+URL = "https://www.west-norfolk.gov.uk"
+HEADERS = {
+ "user-agent": "Mozilla/5.0",
+}
+TEST_CASES = {
+ "Test_001": {"uprn": "100090969937"},
+ "Test_002": {"uprn": "100090989776"},
+ "Test_003": {"uprn": "10000021270"},
+ "Test_004": {"uprn": 100090969937},
+}
+ICON_MAP = {
+ "REFUSE": "mdi:trash-can",
+ "RECYCLING": "mdi:recycle",
+    "GARDEN": "mdi:leaf",
+}
+
+
+class Source:
+ def __init__(self, uprn):
+ self._uprn = str(uprn).zfill(12)
+
+ def fetch(self):
+
+ # Get session and amend cookies
+ s = requests.Session()
+ r0 = s.get(
+ "https://www.west-norfolk.gov.uk/info/20174/bins_and_recycling_collection_dates",
+ headers=HEADERS
+ )
+ s.cookies.update(
+ {
+ "bcklwn_store": s.cookies.get("PHPSESSID"),
+ "bcklwn_uprn": self._uprn,
+ }
+ )
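+        # subsequent pages appear to read the selected address back from
+        # these cookies when rendering collection dates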
+
+ # Get initial collection dates using updated cookies
+        r1 = s.get(
+ "https://www.west-norfolk.gov.uk/info/20174/bins_and_recycling_collection_dates",
+ headers=HEADERS,
+ cookies=s.cookies
+ )
+
+ # Get extended collection schedule from calendar end point
+ r2 = s.get(
+ "https://www.west-norfolk.gov.uk/bincollectionscalendar",
+ headers=HEADERS,
+ cookies=s.cookies
+ )
+
+ # Extract dates and waste types: Extracts ~6 months worth of collections from the optional website calendar page
+ entries = []
+ soup = BeautifulSoup(r2.text, "html.parser")
+ pickups = soup.findAll("div", {"class": "cldr_month"})
+ for item in pickups:
+ month = item.find("h2")
+ dates = item.findAll("td", {"class": re.compile(" (recycling|refuse|garden)")})
+ for d in dates:
+ attr = d.attrs.get("class")
+ for a in attr[2:]:
+ dt = d.text + " " + month.text
+ entries.append(
+ Collection(
+                            date=datetime.strptime(dt, "%d %B %Y").date(),
+                            t=a,
+                            icon=ICON_MAP.get(a.upper()),
+ )
+ )
+
+ return entries
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/wizard/cmcitymedia_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/wizard/cmcitymedia_de.py
index 6d34919a..275c3c15 100644
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/wizard/cmcitymedia_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/wizard/cmcitymedia_de.py
@@ -1,20 +1,8 @@
import requests
-import ssl
-class TLSAdapter(requests.adapters.HTTPAdapter):
-
- def init_poolmanager(self, *args, **kwargs):
- ctx = ssl.create_default_context()
- ctx.set_ciphers('DEFAULT@SECLEVEL=1')
- kwargs['ssl_context'] = ctx
- return super(TLSAdapter, self).init_poolmanager(*args, **kwargs)
-
-
-def get_waste_types(hpid, realmid):
- session = requests.session()
- session.mount('https://', TLSAdapter())
-
- r = session.get(f"https://sslslim.cmcitymedia.de/v1/{hpid}/waste/{realmid}/types")
+def get_waste_types(hpid, realmid):
+ r = requests.get(
+ f"http://slim.cmcitymedia.de/v1/{hpid}/waste/{realmid}/types")
r.raise_for_status()
r = r.json()
@@ -23,10 +11,8 @@ def get_waste_types(hpid, realmid):
return items
def get_waste_districts(hpid, realmid):
- session = requests.session()
- session.mount('https://', TLSAdapter())
-
- r = session.get(f"https://sslslim.cmcitymedia.de/v1/{hpid}/waste/{realmid}/districts")
+ r = requests.get(
+ f"http://slim.cmcitymedia.de/v1/{hpid}/waste/{realmid}/districts")
r.raise_for_status()
r = r.json()
@@ -35,10 +21,7 @@ def get_waste_districts(hpid, realmid):
return items
def get_waste_realms(hpid):
- session = requests.session()
- session.mount('https://', TLSAdapter())
-
- r = session.get(f"https://sslslim.cmcitymedia.de/v1/{hpid}/waste")
+ r = requests.get(f"http://slim.cmcitymedia.de/v1/{hpid}/waste")
r.raise_for_status()
r = r.json()
@@ -48,15 +31,11 @@ def get_waste_realms(hpid):
def get_all_hpid():
i_from = 0
- i_to = 1000 # currently max i found is 447
+ i_to = 1000 # currently max hpid found is 447
founds = []
-
- session = requests.session()
- session.mount('https://', TLSAdapter())
-
for i in range(i_from, i_to):
- r = session.get(f"https://sslslim.cmcitymedia.de/v1/{i}/waste")
+ r = requests.get(f"http://slim.cmcitymedia.de/v1/{i}/waste")
if r.status_code == 200:
r = r.json()
items = r["result"][1]["items"]