Skip to content

Commit

Permalink
feat: update Waste Collection Schedule via HACS
Browse files Browse the repository at this point in the history
  • Loading branch information
aronnebrivio committed Sep 1, 2023
1 parent a3bad3f commit cbd543e
Show file tree
Hide file tree
Showing 55 changed files with 3,264 additions and 544 deletions.
2 changes: 1 addition & 1 deletion custom_components/waste_collection_schedule/manifest.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,5 +7,5 @@
"integration_type": "hub",
"iot_class": "cloud_polling",
"requirements": ["icalendar", "recurring_ical_events", "icalevents", "bs4"],
"version": "1.41.0"
"version": "1.42.0"
}
12 changes: 11 additions & 1 deletion custom_components/waste_collection_schedule/sensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
CONF_DATE_TEMPLATE = "date_template"
CONF_COLLECTION_TYPES = "types"
CONF_ADD_DAYS_TO = "add_days_to"
CONF_EVENT_INDEX = "event_index"


class DetailsFormat(Enum):
Expand All @@ -52,6 +53,7 @@ class DetailsFormat(Enum):
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_DATE_TEMPLATE): cv.template,
vol.Optional(CONF_ADD_DAYS_TO, default=False): cv.boolean,
vol.Optional(CONF_EVENT_INDEX, default=0): cv.positive_int,
}
)

Expand Down Expand Up @@ -88,6 +90,7 @@ async def async_setup_platform(hass, config, async_add_entities, discovery_info=
value_template=value_template,
date_template=date_template,
add_days_to=config.get(CONF_ADD_DAYS_TO),
event_index=config.get(CONF_EVENT_INDEX),
)
)

Expand All @@ -110,6 +113,7 @@ def __init__(
value_template,
date_template,
add_days_to,
event_index,
):
"""Initialize the entity."""
self._api = api
Expand All @@ -121,6 +125,7 @@ def __init__(
self._value_template = value_template
self._date_template = date_template
self._add_days_to = add_days_to
self._event_index = event_index

self._value = None

Expand Down Expand Up @@ -201,6 +206,7 @@ def _update_sensor(self):
count=1,
include_types=self._collection_types,
include_today=self._include_today,
start_index=self._event_index,
)

self._set_state(upcoming1)
Expand All @@ -220,6 +226,7 @@ def _update_sensor(self):
leadtime=self._leadtime,
include_types=self._collection_types,
include_today=self._include_today,
start_index=self._event_index,
)
for collection in upcoming:
attributes[self._render_date(collection)] = self._separator.join(
Expand All @@ -229,7 +236,10 @@ def _update_sensor(self):
# show list of collections in details
for t in collection_types:
collections = self._aggregator.get_upcoming(
count=1, include_types=[t], include_today=self._include_today
count=1,
include_types=[t],
include_today=self._include_today,
start_index=self._event_index,
)
date = (
"" if len(collections) == 0 else self._render_date(collections[0])
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ def get_upcoming(
include_types=None,
exclude_types=None,
include_today=False,
start_index=None,
):
"""Return list of all entries, limited by count and/or leadtime.
Expand All @@ -47,6 +48,7 @@ def get_upcoming(
include_types=include_types,
exclude_types=exclude_types,
include_today=include_today,
start_index=start_index,
)

def get_upcoming_group_by_day(
Expand All @@ -56,6 +58,7 @@ def get_upcoming_group_by_day(
include_types=None,
exclude_types=None,
include_today=False,
start_index=None,
):
"""Return list of all entries, grouped by day, limited by count and/or leadtime."""
entries = []
Expand All @@ -73,6 +76,8 @@ def get_upcoming_group_by_day(

for key, group in iterator:
entries.append(CollectionGroup.create(list(group)))
if start_index is not None:
entries = entries[start_index:]
if count is not None:
entries = entries[:count]

Expand All @@ -86,6 +91,7 @@ def _filter(
include_types=None,
exclude_types=None,
include_today=False,
start_index=None,
):
# remove unwanted waste types from include list
if include_types is not None:
Expand Down Expand Up @@ -115,6 +121,8 @@ def _filter(
entries.sort(key=lambda e: e.date)

# remove surplus entries
if start_index is not None:
entries = entries[start_index:]
if count is not None:
entries = entries[:count]

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,15 +12,25 @@
"service_id": "aachen",
},
{
"title": "AWA Entsorgungs GmbH",
"url": "https://www.awa-gmbh.de/",
"service_id": "zew2",
"title": "Abfallwirtschaft Stadt Nürnberg",
"url": "https://www.nuernberg.de/",
"service_id": "nuernberg",
},
{
"title": "Abfallwirtschaftsbetrieb Bergisch Gladbach",
"url": "https://www.bergischgladbach.de/",
"service_id": "aw-bgl2",
},
{
"title": "AWA Entsorgungs GmbH",
"url": "https://www.awa-gmbh.de/",
"service_id": "zew2",
},
{
"title": "AWG Kreis Warendorf",
"url": "https://www.awg-waf.de/",
"service_id": "krwaf",
},
{
"title": "Bergischer Abfallwirtschaftverbund",
"url": "https://www.bavweb.de/",
Expand All @@ -46,6 +56,11 @@
"url": "https://www.ebd-dorsten.de/",
"service_id": "dorsten",
},
{
"title": "EGW Westmünsterland",
"url": "https://www.egw.de/",
"service_id": "wml2",
},
{
"title": "Gütersloh",
"url": "https://www.guetersloh.de/",
Expand All @@ -62,9 +77,9 @@
"service_id": "krhs",
},
{
"title": "AWG Kreis Warendorf",
"url": "https://www.awg-waf.de/",
"service_id": "krwaf",
"title": "Kronberg im Taunus",
"url": "https://www.kronberg.de/",
"service_id": "kronberg",
},
{
"title": "Gemeinde Lindlar",
Expand All @@ -76,16 +91,6 @@
"url": "https://www.betriebsamt-norderstedt.de/",
"service_id": "nds",
},
{
"title": "Abfallwirtschaft Stadt Nürnberg",
"url": "https://www.nuernberg.de/",
"service_id": "nuernberg",
},
{
"title": "WBO Wirtschaftsbetriebe Oberhausen",
"url": "https://www.wbo-online.de/",
"service_id": "oberhausen",
},
{
"title": "Kreis Pinneberg",
"url": "https://www.kreis-pinneberg.de/",
Expand All @@ -106,27 +111,26 @@
"url": "https://www.stl-luedenscheid.de/",
"service_id": "stl",
},
# {
# "title": "'Stadt Straelen",
# "url": "https://www.straelen.de/",
# "service_id": "straelen",
# },
{
"title": "Kreis Viersen",
"url": "https://www.kreis-viersen.de/",
"service_id": "viersen",
},
{
"title": "EGW Westmünsterland",
"url": "https://www.egw.de/",
"service_id": "wml2",
"title": "WBO Wirtschaftsbetriebe Oberhausen",
"url": "https://www.wbo-online.de/",
"service_id": "oberhausen",
},
{
"title": "Kronberg im Taunus",
"url": "https://www.kronberg.de/",
"service_id": "kronberg",
"title": "ZEW Zweckverband Entsorgungsregion West",
"url": "https://zew-entsorgung.de/",
"service_id": "zew2",
},

# {
# "title": "'Stadt Straelen",
# "url": "https://www.straelen.de/",
# "service_id": "straelen",
# },
]


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,17 +38,6 @@
"43": "mdi:package-variant", # Papiertonne
},
},
{
"hpid": 107,
"realm": 10701,
"name": "www.kressbronn.de - Müllkalender",
"region": "Gemeinde Kressbronn am Bodensee",
"icons": {
"47": "mdi:trash-can", # Bio- und Restmüllabfuhr
"46": "mdi:recycle", # Gelbe Säcke
"48": "mdi:package-variant", # Papiertonne
},
},
{
"hpid": 168,
"realm": 16801,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
import datetime

import requests
from bs4 import BeautifulSoup, Tag
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS

# Display name of the waste-management association this source covers.
# NOTE(review): "Abfallwirtschaftsverbandes" is the German genitive form —
# confirm the nominative ("Abfallwirtschaftsverband Lippe") was not intended.
TITLE = "Abfallwirtschaftsverbandes Lippe"
DESCRIPTION = "Source for Abfallwirtschaftsverbandes Lippe."
URL = "https://abfall-lippe.de"
# Named argument sets exercised by the project's test harness; "bezirk" is
# optional for municipalities that are not split into districts.
TEST_CASES = {
    "Bad Salzuflen BB": {"gemeinde": "Bad Salzuflen", "bezirk": "BB"},
    "Augustdorf": {"gemeinde": "Augustdorf"},
    "Barntrup 3B": {"gemeinde": "Barntrup", "bezirk": "3-B"},
}


# Maps a collection-type name (or its first word, e.g. "Graue" from
# "Graue Tonne") to an MDI icon; see the icon lookup in Source.fetch().
ICON_MAP = {
    "Graue": "mdi:trash-can",
    "Glass": "mdi:bottle-soda",
    "Grüne": "mdi:leaf",
    "Laubannahme": "mdi:leaf-maple",
    "Blaue": "mdi:package-variant",
    "Gelbe": "mdi:recycle",
    "Schadstoffsammlung": "mdi:biohazard",
    "Groß-Container Altpapier|Pappe": "mdi:package-variant-closed",
}


# Base URL of the collection-calendar page; the live page may carry a
# "-<year>" suffix, which Source.fetch() probes for.
API_URL = "https://abfall-lippe.de/service/abfuhrkalender"


class Source:
    """Scrape waste-collection ICS calendars from abfall-lippe.de.

    The calendar page lists, per municipality (Gemeinde), one or more ICS
    download links, optionally labelled by district (Bezirk).
    """

    def __init__(self, gemeinde: str, bezirk: str | None = None):
        self._gemeinde: str = gemeinde
        # An empty string matches the single unlabelled ICS link of
        # municipalities that are not split into districts.
        self._bezirk: str = bezirk if bezirk is not None else ""
        self._ics = ICS()

    def fetch(self):
        """Return the list of Collection entries for the configured area.

        Raises:
            Exception: if the calendar page, the Gemeinde heading, or a
                matching ICS link cannot be found.
        """
        # The calendar page URL changes over time ("-<year>" suffix);
        # probe the bare URL plus the current, previous and next year.
        year = datetime.datetime.now().year
        urls = [
            API_URL,
            f"{API_URL}-{year}",
            f"{API_URL}-{year-1}",
            f"{API_URL}-{year+1}",
        ]
        for url in urls:
            r = requests.get(url)
            # A missing calendar page may redirect to the homepage
            # instead of returning 404, so check the final URL as well.
            if r.status_code == 200 and r.request.url != "https://abfall-lippe.de":
                break
        if r.status_code != 200 or r.request.url == "https://abfall-lippe.de":
            raise Exception(
                "Failed to fetch data from Abfallwirtschaftsverbandes Lippe. The URL may have changed."
            )

        soup = BeautifulSoup(r.text, "html.parser")
        headlines = soup.find_all("div", class_="elementor-widget-heading")

        # Locate the heading widget whose <h3> text matches the Gemeinde.
        wanted_gemeinde = self._gemeinde.lower().strip()
        gemeinde_headline: Tag | None = None
        for headline in headlines:
            if not isinstance(headline, Tag):
                continue
            h3 = headline.find("h3")
            if not isinstance(h3, Tag):
                continue
            if h3.text.lower().strip() == wanted_gemeinde:
                gemeinde_headline = headline
                break

        if gemeinde_headline is None:
            raise Exception("Gemeinde not found, please check spelling")

        # The ICS download links live in the parent container of the
        # matched heading widget.
        links_container = gemeinde_headline.parent
        if links_container is None:
            raise Exception(f"No links found for {self._gemeinde}")

        # Link labels look like "<bezirk> ICS"; compare with the "ICS"
        # marker stripped on both sides so a missing bezirk ("") matches
        # a bare "ICS" link.
        wanted_bezirk = self._bezirk.lower().replace("ics", "").strip()
        link: str | None = None  # href attribute value, not a Tag
        for a in links_container.find_all("a"):
            if not isinstance(a, Tag):
                continue
            if a.text.lower().replace("ics", "").strip() == wanted_bezirk:
                link = a.get("href")
                break

        if link is None:
            raise Exception("Did not find a matching ICS link for gemeinde (and bezirk)")

        # Download and parse the ICS file; force UTF-8 in case the server
        # omits the charset.
        r = requests.get(link)
        r.raise_for_status()
        r.encoding = "utf-8"
        dates = self._ics.convert(r.text)

        entries = []
        for d in dates:
            # Icons are keyed by the first word of the type name
            # (e.g. "Graue Tonne" -> "Graue"); fall back to the full name.
            icon = ICON_MAP.get(d[1].split(" ")[0])
            if icon is None:
                icon = ICON_MAP.get(d[1])
            entries.append(Collection(d[0], d[1], icon=icon))

        return entries
Loading

0 comments on commit cbd543e

Please sign in to comment.