From 9baaa8b7683a74a2a6954be5698319eb8a234fb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Bj=C3=A4reholt?= Date: Sun, 14 Aug 2022 15:31:14 +0200 Subject: [PATCH] fix: lots of fixes and improvements, added TimelineFigure --- .gitattributes | 1 - Makefile | 3 + QSlang | 2 +- chatalysis | 2 +- notebooks/Dashboard.ipynb | 234 ++++++++++++++++++++------ notebooks/Makefile | 100 ++++++++--- notebooks/keep-updated.sh | 5 +- src/quantifiedme/habitbull.py | 6 +- src/quantifiedme/qslang.py | 8 +- src/quantifiedme/timelineplot/plot.py | 92 ++++++++++ src/quantifiedme/timelineplot/util.py | 30 ++++ 11 files changed, 403 insertions(+), 80 deletions(-) create mode 100644 src/quantifiedme/timelineplot/plot.py create mode 100644 src/quantifiedme/timelineplot/util.py diff --git a/.gitattributes b/.gitattributes index bcb63aa..0f98801 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1 @@ *.ipynb filter=notebook - diff --git a/Makefile b/Makefile index 98e4e5e..355ea76 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,6 @@ +# today +date := $(shell date +%Y-%m-%d) + all: notebooks notebooks: notebooks/output/Dashboard.html diff --git a/QSlang b/QSlang index 7bb7fc3..1272d30 160000 --- a/QSlang +++ b/QSlang @@ -1 +1 @@ -Subproject commit 7bb7fc3ff01eb6e5f7267af3a009f1ed30060e8b +Subproject commit 1272d3072d83702abb603529f4249844a97afc5e diff --git a/chatalysis b/chatalysis index 014ca39..1b33b39 160000 --- a/chatalysis +++ b/chatalysis @@ -1 +1 @@ -Subproject commit 014ca396c62c0e39d92bf7eab4e8f6512477955a +Subproject commit 1b33b39f5cf82f8782b562b1f36cd6eb04c209aa diff --git a/notebooks/Dashboard.ipynb b/notebooks/Dashboard.ipynb index 95a1ef6..9c534b5 100644 --- a/notebooks/Dashboard.ipynb +++ b/notebooks/Dashboard.ipynb @@ -73,7 +73,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "import random\n", @@ -116,7 +120,11 @@ { "cell_type": "code", 
"execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "# Set this to your timezone\n", @@ -124,7 +132,7 @@ "tz_offset = your_timezone.utcoffset(datetime.now())\n", "\n", "# Use personal data, not fake data\n", - "personal = False\n", + "personal = True\n", "\n", "# Set to True to limit amount of data loaded (useful when developing/debugging)\n", "fast = True\n", @@ -148,7 +156,7 @@ " ]\n", " \n", "# Days of history to use\n", - "days_back = 30 if fast else 1*365\n", + "days_back = 30 if fast else 3*365\n", "\n", "#logging.basicConfig(level=logging.DEBUG)\n", "#logging.getLogger(\"aw_research.classify\").setLevel(logging.DEBUG)" @@ -164,7 +172,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "%%javascript\n", @@ -181,7 +193,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "# Now let's just set the current time and our query interval and we're ready to load data!\n", @@ -210,7 +226,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "events = load_complete_timeline(since, datasources=datasources, hostnames=hostnames, personal=personal, testing=not personal, cache=True)\n", @@ -222,14 +242,19 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Verify data\n", - "Just to make sure there are no bugs in underlying code." + "# Inspect data\n", + "\n", + "Now lets take a look at the data. Could help us discover potential bugs in loading code or issues with the data." 
] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "# Inspect the distribution of event duration\n", @@ -251,7 +276,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "total_events = len(events)\n", @@ -273,7 +302,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "# TODO: Include sleep for improved coverage\n", @@ -291,7 +324,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "time_by_source = defaultdict(float)\n", @@ -346,7 +383,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "df = pd.DataFrame({\"source\": time_by_source.keys(), \"duration\": time_by_source.values()})\n", @@ -380,7 +421,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "from aw_research import split_event_on_hour, categorytime_per_day, categorytime_during_day, start_of_day, end_of_day\n", @@ -416,7 +461,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "plot_categorytime_during_day(events, \"\")\n", @@ -435,38 +484,44 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ - "def plot_category(cat, big=False):\n", + "def plot_category(cat, big=False, barcolor=(0.2, 0.4, 0.8, 0.5)):\n", " 
#aw_research.classify._plot_category_daily_trend(events, [cat])\n", " try:\n", " ts = categorytime_per_day(events, cat)\n", " except Exception as e:\n", " print(f\"Error for category '{cat}': {e}\")\n", " return\n", - " #fig = plt.figure(figsize=(18, 5 if big else 3))\n", " fig, ax = plt.subplots(figsize=(24, 5 if big else 3))\n", - " ax.bar(ts.index, ts, label=f\"{cat}: daily\", color=(0.2, 0.4, 0.8, 0.5))\n", + " ax.bar(ts.index, ts, label=f\"{cat}: daily\", color=barcolor)\n", " ax.plot(ts.index, ts.rolling(7, min_periods=4).mean(), label=f\"7d SMA\")\n", " ax.plot(ts.index, ts.rolling(30, min_periods=14).mean(), label=f\"30d SMA\")\n", " ax.plot(ts.index, ts.rolling(60, min_periods=30).mean(), label=f\"60d SMA\")\n", - " #ts.plot.bar(label=f\": daily\", legend=True, ax=ax)\n", - " #ts.rolling(7, min_periods=4).mean().plot(label=f\"7d SMA\", legend=True, ax=ax)\n", - " #ts.rolling(30, min_periods=14).mean().plot(label=f\"30d SMA\", legend=True, ax=ax)\n", - " #ts.rolling(60, min_periods=30).mean().plot(label=f\"60d SMA\", legend=True, ax=ax)\n", " plt.legend(loc='upper left')\n", " plt.title(cat)\n", " plt.xlim(pd.Timestamp(since), pd.Timestamp(now))\n", " plt.ylim(0)\n", " plt.grid(linestyle='--')\n", - " plt.tight_layout()" + " plt.tight_layout()\n", + " \n", + "color_prod = (0.1, 0.8, 0.1, 0.8)\n", + "color_unprod = (0.8, 0.1, 0.1, 0.8)" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "# All logged activity\n", @@ -476,21 +531,31 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "# Work-related\n", - "plot_category('Work', big=True)\n", + "plot_category('Work', big=True, barcolor=color_prod)\n", "plot_category('Programming')\n", "plot_category('ActivityWatch')\n", "plot_category('QuantifiedMe')\n", - "plot_category('Thankful')" + 
"plot_category('Thankful')\n", + "plot_category('Algobit')\n", + "plot_category('uniswap-python')" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "# School-related\n", @@ -502,11 +567,15 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "# Entertainment\n", - "plot_category('Media', big=True)\n", + "plot_category('Media', big=True, barcolor=color_unprod)\n", "plot_category('Social Media')\n", "plot_category('Video')\n", "plot_category('Music')\n", @@ -516,7 +585,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "# All uncategorized time\n", @@ -535,7 +608,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "events_today = [e for e in events if today < e.timestamp]" @@ -544,7 +621,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "def plot_sunburst(events):\n", @@ -556,7 +637,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "plot_sunburst(events_today)" @@ -565,7 +650,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "plot_sunburst([e for e in events if today - timedelta(days=30) < e.timestamp])" @@ -574,7 +663,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ 
"plot_sunburst(events)" @@ -594,7 +687,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "# NOTE: Setting a rate for a subcategory will add to the rate for the parent category, if any\n", @@ -655,6 +752,9 @@ "cell_type": "code", "execution_count": null, "metadata": { + "jupyter": { + "source_hidden": true + }, "slideshow": { "slide_type": "-" } @@ -687,7 +787,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "events_uncategorized_today = [e for e in events_uncategorized if e.timestamp > today]\n", @@ -706,7 +810,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "events_programming = [e for e in events if 'Work -> Programming' == e.data['$category_hierarchy']]\n", @@ -716,7 +824,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "#print_time_per_keyval(events, \"$category_hierarchy\", limit=100, sortby='key')\n", @@ -726,7 +838,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "print_time_per_keyval(events, '$source')" @@ -744,7 +860,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "from aw_research.tree import Node\n", @@ -822,21 +942,33 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + 
"source_hidden": true + } + }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [] } @@ -857,8 +989,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.9" - } + "version": "3.10.4" + }, + "toc-autonumbering": false, + "toc-showcode": false, + "toc-showmarkdowntxt": false, + "toc-showtags": false }, "nbformat": 4, "nbformat_minor": 4 diff --git a/notebooks/Makefile b/notebooks/Makefile index 38cd3fc..534b624 100644 --- a/notebooks/Makefile +++ b/notebooks/Makefile @@ -1,23 +1,69 @@ -DASHBOARD_SRC=Dashboard.ipynb -DASHBOARD=output/Dashboard.html +SHELL=/bin/bash SRC_FILES := $(wildcard *.ipynb personal/*.ipynb) OUT_FILES := $(patsubst %.ipynb,output/%.html,$(SRC_FILES)) -build: $(OUT_FILES) +# today +TODAY := $(shell date +%Y-%m-%d) +MONTH := $(shell date +%Y-%m) +YEAR := $(shell date +%Y) -output/%.html: %.ipynb -ifeq ($(PERSONAL),true) - make set-personal FILE=$^ -else - make set-testing FILE=$^ -endif -ifeq ($(FAST),true) - make set-fast FILE=$^ -else - make set-slow FILE=$^ -endif - poetry run jupyter nbconvert $^ --output-dir output --to=html --execute --ExecutePreprocessor.kernel_name=python3 --ExecutePreprocessor.timeout=600 +build: $(OUT_FILES) output/index.html + +.PHONY: output/index.html +output/index.html: + @echo -e "QuantifiedMe Index" > $@ + @echo -e "

Index of notebooks

" >> $@ + @echo -e "" >> $@ + @# if built then include modifed time + @for i in $(patsubst output/%,%,$(patsubst output/personal/%,%,$(OUT_FILES))); do \ + echo -e '' >> $@; \ + echo -e "" >> $@; \ + if [ -f output/$$i ]; then \ + AGE=$$(($$(date +%s) - $$(date +%s -r output/$$i))); \ + echo -e "" >> $@; \ + ERRORS=$$(grep 'error' --ignore-case output/$$i | wc -l); \ + ERRORS=$$(($$ERRORS - 20)); \ + if [ $$ERRORS -gt 0 ]; then \ + echo -e "" >> $@; \ + else \ + echo -e "" >> $@; \ + fi; \ + WARNINGS=$$(grep 'warning' --ignore-case output/$$i | wc -l); \ + WARNINGS=$$(($$WARNINGS - 160)); \ + if [ $$WARNINGS -gt 0 ]; then \ + echo -e "" >> $@; \ + else \ + echo -e "" >> $@; \ + fi; \ + else \ + echo -e "" >> $@; \ + fi; \ + echo -e '' >> $@; \ + done + @echo -e "" >> $@ + @echo "Built index.html" + +output/%.html: %.ipynb set-env + poetry run jupyter nbconvert $< \ + --output-dir output --to=html \ + --execute --ExecutePreprocessor.kernel_name=python3 --ExecutePreprocessor.timeout=1200 \ + --allow-errors #--no-input + @make output/index.html # update index + +output/%/index.md: %.ipynb + poetry run jupyter nbconvert $< \ + --output-dir output --to md \ + --execute --ExecutePreprocessor.kernel_name=python3 --ExecutePreprocessor.timeout=600 + +# WIP +output/%/index.html: %.ipynb + make output/$(notdir $<) + +# WIP +output/%/$TODAY/index.html: %.ipynb + # TODO: Set query range via env variable + make output/$(notdir $<)/$TODAY precommit: poetry run jupyter nbconvert --ClearOutputPreprocessor.enabled=True --inplace *.ipynb @@ -27,21 +73,37 @@ clean: # Notebook settings +.PHONY: set-env +set-env: +ifeq ($(PERSONAL),true) + make set-personal +else + make set-testing +endif +# fall back to fast mode +ifeq ($(FAST),true) + make set-fast +else ifeq ($(FAST),false) + make set-slow +else + make set-fast +endif + .PHONY: set-fast set-fast: - sed -i 's/fast = False/fast = True/' $(FILE) + sed -i 's/fast = False/fast = True/' $(SRC_FILES) .PHONY: set-slow set-slow: - sed -i 
's/fast = True/fast = False/' $(FILE) + sed -i 's/fast = True/fast = False/' $(SRC_FILES) .PHONY: set-testing set-testing: - sed -i 's/personal = True/personal = False/' $(FILE) # Set personal = False, in case it was accidentally comitted with `personal = True` + sed -i 's/personal = True/personal = False/' $(SRC_FILES) # Set personal = False, in case it was accidentally comitted with `personal = True` .PHONY: set-personal set-personal: - sed -i 's/personal = False/personal = True/' $(FILE) + sed -i 's/personal = False/personal = True/' $(SRC_FILES) debug: echo $(SRC_FILES) diff --git a/notebooks/keep-updated.sh b/notebooks/keep-updated.sh index cade50b..ceb4fba 100755 --- a/notebooks/keep-updated.sh +++ b/notebooks/keep-updated.sh @@ -1,11 +1,12 @@ #!/bin/bash -DELAY=60m -DELAY_INT=$(( 60 * 60 )) +DELAY=15m +DELAY_INT=$(( $(echo $DELAY | grep -oP '[0-9]+') * 60 )) while true; do echo 'Running...'; time make -B output/Dashboard.html PERSONAL=true FAST=true; + notify-send -a 'Job' 'Build successful' echo "Done at $(date --iso-8601=seconds)!"; read -t $DELAY_INT -p "Waiting for $DELAY, or enter..."; done diff --git a/src/quantifiedme/habitbull.py b/src/quantifiedme/habitbull.py index b02ae0b..caa6995 100755 --- a/src/quantifiedme/habitbull.py +++ b/src/quantifiedme/habitbull.py @@ -1,7 +1,7 @@ import click import pandas as pd import matplotlib.pyplot as plt -import calmap +import calplot from .config import load_config @@ -19,9 +19,9 @@ def plot_calendar(df, habitname, show=True, year=None): df = df[df.index.get_level_values("HabitName").isin([habitname])].reset_index() df = df.set_index(pd.DatetimeIndex(df["CalendarDate"])) if year: - calmap.yearplot(df["Value"], year=year) + calplot.yearplot(df["Value"], year=year) else: - calmap.calendarplot(df["Value"]) + calplot.calendarplot(df["Value"]) if show: plt.show() diff --git a/src/quantifiedme/qslang.py b/src/quantifiedme/qslang.py index 949761e..c7c2f99 100644 --- a/src/quantifiedme/qslang.py +++ 
b/src/quantifiedme/qslang.py @@ -7,8 +7,8 @@ from joblib import Memory import pint -from aw_core import Event -from qslang.main import qslang as qslang_entry, load_events +from qslang import Event +from qslang.main import main as qslang_main, load_events from .config import load_config logger = logging.getLogger(__name__) @@ -154,9 +154,9 @@ def _missing_dates(): # Entrypoint -qslang = qslang_entry +main = qslang_main if __name__ == "__main__": _missing_dates() - # qslang() + # main() diff --git a/src/quantifiedme/timelineplot/plot.py b/src/quantifiedme/timelineplot/plot.py new file mode 100644 index 0000000..cc2143c --- /dev/null +++ b/src/quantifiedme/timelineplot/plot.py @@ -0,0 +1,92 @@ +import sys +from typing import List, Callable, Iterable, Union, TypeVar, Tuple +from dataclasses import dataclass +from datetime import datetime, timezone + +import matplotlib.pyplot as plt + +from .util import take_until_next + + +T = TypeVar("T") +Color = Union[str, Tuple[float, float, float]] +Index = TypeVar("Index", int, datetime) +Limits = Tuple[Index, Index] +Event = Tuple[Limits, Color, str] + + +@dataclass +class Bar: + title: str + events: List[Event] + show_label: bool + + +class TimelineFigure: + def __init__(self, title=None, **kwargs): + self.fig = plt.figure(**kwargs) + self.ax = plt.gca() + if title: + self.ax.set_title(title) + self.bars: List[Bar] = [] + + def plot(self): + # We're assuming all bars share the same index type + index_example = self.bars[0].events[0][0][0] + if isinstance(index_example, int): + limits: Limits = (sys.maxsize, 0) + elif isinstance(index_example, datetime): + limits = ( + datetime(2100, 1, 1, tzinfo=timezone.utc), + datetime(1900, 1, 1, tzinfo=timezone.utc), + ) + else: + raise ValueError(f"Unknown index type: {type(index_example)}") + + # Check that type assumption is true + assert all( + [ + isinstance(event[0][0], type(index_example)) + and isinstance(event[0][1], type(index_example)) + for bar in self.bars + for event in 
from typing import Generator, Iterable, Tuple, TypeVar

T = TypeVar("T")


def take_until_next(
    ls: Iterable[T],
) -> Generator[Tuple[Tuple[int, int], T], None, None]:
    """
    Chunk runs of consecutive equal values in *ls* together.

    Yields one ``((start_index, stop_index), value)`` tuple per run, with both
    indices inclusive. An empty iterable yields nothing.

    Fixes over the original implementation:
      - a trailing run of length 1 is no longer dropped (``[1, 1, 2]`` now
        yields the final ``((2, 2), 2)`` chunk, and ``[1]`` yields
        ``((0, 0), 1)``)
      - falsy values (``0``, ``""``, ``None``) are chunked like any other
        value instead of being silently discarded by the ``if v`` guard
    """
    # `started` tracks whether we have consumed any element; using it as the
    # sentinel (instead of `last_v is not None`) keeps runs of None/falsy
    # values from being lost.
    started = False
    last_v: T  # value of the run currently being accumulated
    chunk_start = 0  # start index of the current run
    i = -1  # last index seen; stays -1 for an empty iterable
    for i, v in enumerate(ls):
        if started and v == last_v:
            continue  # still inside the current run
        if started:
            # Value changed: the previous run ended at i - 1.
            yield (chunk_start, i - 1), last_v
        last_v = v
        chunk_start = i
        started = True
    if started:
        # Flush the final run (previously lost when it had length 1 or when
        # its value was falsy).
        yield (chunk_start, i), last_v


def test_take_until_next():
    ls = [1, 1, 1, 2, 3, 3]
    assert [((0, 2), 1), ((3, 3), 2), ((4, 5), 3)] == list(take_until_next(ls))
    # regression: final run of length 1 must not be dropped
    assert [((0, 0), 1)] == list(take_until_next([1]))
    assert [((0, 1), 1), ((2, 2), 2)] == list(take_until_next([1, 1, 2]))
    # regression: falsy values must survive
    assert [((0, 1), 0)] == list(take_until_next([0, 0]))
    assert [] == list(take_until_next([]))
NotebookAgeErrorsWarnings
$(basename $$i)$$AGE s ago$$ERRORS$$ERRORS$$WARNINGS$$WARNINGSNot built