diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000..bc0225179 --- /dev/null +++ b/.flake8 @@ -0,0 +1,4 @@ +[flake8] +ignore = E203,W503 +max-line-length = 159 +exclude = .git,__pycache__ diff --git a/.isort.cfg b/.isort.cfg new file mode 100644 index 000000000..f3baeb563 --- /dev/null +++ b/.isort.cfg @@ -0,0 +1,6 @@ +[settings] +known_first_party = code_coverage_backend,code_coverage_bot,code_coverage_tools,conftest +known_third_party = connexion,datadog,dateutil,fakeredis,flask,flask_cors,flask_talisman,google,hglib,jsone,jsonschema,libmozdata,logbook,pytest,pytz,redis,requests,responses,setuptools,structlog,taskcluster,werkzeug,zstandard +force_single_line = True +default_section=FIRSTPARTY +line_length=159 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..5fe7e0295 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,46 @@ +repos: + - repo: https://github.com/asottile/seed-isort-config + rev: v1.9.2 + hooks: + - id: seed-isort-config + - repo: https://github.com/pre-commit/mirrors-isort + rev: v4.3.21 + hooks: + - id: isort + - repo: https://github.com/ambv/black + rev: stable + hooks: + - id: black + - repo: https://gitlab.com/pycqa/flake8 + rev: 3.7.8 + hooks: + - id: flake8 + additional_dependencies: + - 'flake8-coding==1.3.1' + - 'flake8-copyright==0.2.2' + - 'flake8-debugger==3.1.0' + - 'flake8-mypy==17.8.0' + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v2.2.3 + hooks: + - id: trailing-whitespace + - id: check-yaml + - id: mixed-line-ending + - id: name-tests-test + args: ['--django'] + - id: check-json + - repo: https://github.com/codespell-project/codespell + rev: v1.15.0 + hooks: + - id: codespell + args: ['--exclude-file=bot/tests/fixtures/activedata_chunk_to_tests.json'] + - repo: https://github.com/marco-c/taskcluster_yml_validator + rev: v0.0.2 + hooks: + - id: taskcluster_yml + - repo: meta + hooks: + - id: check-useless-excludes + +default_language_version: + python: python3.7 diff --git a/.taskcluster.yml b/.taskcluster.yml index 8439108be..efc7161a3 100644 --- a/.taskcluster.yml +++ b/.taskcluster.yml @@ -43,7 +43,7 @@ tasks: taskboot_image: "mozilla/taskboot:0.1.9" in: - - taskId: {$eval: as_slugid("bot_check_lint")} + - taskId: {$eval: as_slugid("check_lint")} provisionerId: aws-provisioner-v1 workerType: github-worker created: {$fromNow: ''} @@ -57,31 +57,11 @@ tasks: - "git clone --quiet ${repository} /src && cd /src && git checkout ${head_rev} -b checks && cd /src/tools && python setup.py install && cd /src/bot && pip install --quiet . && pip install --quiet -r requirements-dev.txt && - flake8" - metadata: - name: "Code Coverage Bot checks: linting" - description: Check python code style with flake8 - owner: bastien@mozilla.com - source: https://github.com/mozilla/code-coverage - - - taskId: {$eval: as_slugid("backend_check_lint")} - provisionerId: aws-provisioner-v1 - workerType: github-worker - created: {$fromNow: ''} - deadline: {$fromNow: '1 hour'} - payload: - maxRunTime: 3600 - image: python:3 - command: - - sh - - -lxce - - "git clone --quiet ${repository} /src && cd /src && git checkout ${head_rev} -b checks && - cd /src/tools && python setup.py install && cd /src/backend && pip install --quiet . 
&& pip install --quiet -r requirements-dev.txt && - flake8" + cd /src && pre-commit run -a" metadata: - name: "Code Coverage Backend checks: linting" - description: Check python code style with flake8 + name: "Code Coverage checks: linting" + description: Check code style with pre-commit hooks owner: bastien@mozilla.com source: https://github.com/mozilla/code-coverage @@ -134,7 +114,7 @@ tasks: provisionerId: aws-provisioner-v1 workerType: releng-svc dependencies: - - {$eval: as_slugid("backend_check_lint")} + - {$eval: as_slugid("check_lint")} - {$eval: as_slugid("backend_check_tests")} payload: capabilities: @@ -175,7 +155,7 @@ tasks: provisionerId: aws-provisioner-v1 workerType: releng-svc dependencies: - - {$eval: as_slugid("bot_check_lint")} + - {$eval: as_slugid("check_lint")} - {$eval: as_slugid("bot_check_tests")} payload: capabilities: diff --git a/backend/README.md b/backend/README.md index 700b0791e..1a09f9186 100644 --- a/backend/README.md +++ b/backend/README.md @@ -12,20 +12,19 @@ We currently have several endpoints implemented: * `/v2/path` provides the code coverage information for a directory or file in a repository, at a given revision. -## Setup instructions for developpers +## Setup instructions for developers ```shell mkvirtualenv -p /usr/bin/python3 ccov-backend cd backend/ -pip install -r requirements.txt -r requirements-dev.txt +pip install -r requirements.txt -r requirements-dev.txt pip install -e . ``` You should now be able to run tests and linting: ```shell -pytest -flake8 +pre-commit run -a ``` ## Run a redis instance through docker diff --git a/backend/code_coverage_backend/api.py b/backend/code_coverage_backend/api.py index 2f88c9ed8..e407e8664 100644 --- a/backend/code_coverage_backend/api.py +++ b/backend/code_coverage_backend/api.py @@ -9,49 +9,46 @@ from code_coverage_backend.config import COVERAGE_EXTENSIONS from code_coverage_backend.gcp import load_cache -DEFAULT_REPOSITORY = 'mozilla-central' +DEFAULT_REPOSITORY = "mozilla-central" logger = structlog.get_logger(__name__) def coverage_supported_extensions(): - ''' + """ List all the file extensions we currently support - ''' + """ return COVERAGE_EXTENSIONS def coverage_latest(repository=DEFAULT_REPOSITORY): - ''' + """ List the last 10 reports available on the server - ''' + """ gcp = load_cache() if gcp is None: - logger.error('No GCP cache available') + logger.error("No GCP cache available") abort(500) try: return [ - { - 'revision': revision, - 'push': push_id, - } + {"revision": revision, "push": push_id} for revision, push_id in gcp.list_reports(repository, 10) ] except Exception as e: - logger.warn('Failed to retrieve latest reports: {}'.format(e)) + logger.warn("Failed to retrieve latest reports: {}".format(e)) abort(404) -def coverage_for_path(path='', changeset=None, repository=DEFAULT_REPOSITORY): - ''' +def coverage_for_path(path="", changeset=None, repository=DEFAULT_REPOSITORY): + """ Aggregate coverage for a path, regardless of its type: * file, gives its coverage percent * directory, gives coverage percent for its direct sub elements files and folders (recursive average) - ''' + """ gcp = load_cache() if gcp is None: - logger.error('No GCP cache available') + logger.error("No GCP cache available") abort(500) try: @@ -62,28 +59,41 @@ def coverage_for_path(path='', changeset=None, repository=DEFAULT_REPOSITORY): # Fallback to latest report changeset, _ = gcp.find_report(repository) except Exception as e: - logger.warn('Failed to retrieve report: {}'.format(e)) + logger.warn("Failed to 
retrieve report: {}".format(e)) abort(404) # Load tests data from GCP try: return gcp.get_coverage(repository, changeset, path) except Exception as e: - logger.warn('Failed to load coverage', repo=repository, changeset=changeset, path=path, error=str(e)) + logger.warn( + "Failed to load coverage", + repo=repository, + changeset=changeset, + path=path, + error=str(e), + ) abort(400) -def coverage_history(repository=DEFAULT_REPOSITORY, path='', start=None, end=None): - ''' +def coverage_history(repository=DEFAULT_REPOSITORY, path="", start=None, end=None): + """ List overall coverage from ingested reports over a period of time - ''' + """ gcp = load_cache() if gcp is None: - logger.error('No GCP cache available') + logger.error("No GCP cache available") abort(500) try: return gcp.get_history(repository, path=path, start=start, end=end) except Exception as e: - logger.warn('Failed to load history', repo=repository, path=path, start=start, end=end, error=str(e)) + logger.warn( + "Failed to load history", + repo=repository, + path=path, + start=start, + end=end, + error=str(e), + ) abort(400) diff --git a/backend/code_coverage_backend/backend/__init__.py b/backend/code_coverage_backend/backend/__init__.py index d94afa364..63f52f132 100644 --- a/backend/code_coverage_backend/backend/__init__.py +++ b/backend/code_coverage_backend/backend/__init__.py @@ -19,27 +19,25 @@ def create_app(): # Load secrets from Taskcluster taskcluster.auth() taskcluster.load_secrets( - os.environ.get('TASKCLUSTER_SECRET'), + os.environ.get("TASKCLUSTER_SECRET"), code_coverage_backend.config.PROJECT_NAME, - required=['GOOGLE_CLOUD_STORAGE', 'APP_CHANNEL'], - existing={ - 'REDIS_URL': os.environ.get('REDIS_URL', 'redis://localhost:6379') - } + required=["GOOGLE_CLOUD_STORAGE", "APP_CHANNEL"], + existing={"REDIS_URL": os.environ.get("REDIS_URL", "redis://localhost:6379")}, ) # Configure logger init_logger( code_coverage_backend.config.PROJECT_NAME, - PAPERTRAIL_HOST=taskcluster.secrets.get('PAPERTRAIL_HOST'), - PAPERTRAIL_PORT=taskcluster.secrets.get('PAPERTRAIL_PORT'), - SENTRY_DSN=taskcluster.secrets.get('SENTRY_DSN'), + PAPERTRAIL_HOST=taskcluster.secrets.get("PAPERTRAIL_HOST"), + PAPERTRAIL_PORT=taskcluster.secrets.get("PAPERTRAIL_PORT"), + SENTRY_DSN=taskcluster.secrets.get("SENTRY_DSN"), ) logger = structlog.get_logger(__name__) app = build_flask_app( project_name=code_coverage_backend.config.PROJECT_NAME, app_name=code_coverage_backend.config.APP_NAME, - openapi=os.path.join(os.path.dirname(__file__), '../api.yml') + openapi=os.path.join(os.path.dirname(__file__), "../api.yml"), ) # Setup datadog stats @@ -49,6 +47,6 @@ def create_app(): try: code_coverage_backend.gcp.load_cache() except Exception as e: - logger.warn('GCP cache warmup failed: {}'.format(e)) + logger.warn("GCP cache warmup failed: {}".format(e)) return app diff --git a/backend/code_coverage_backend/backend/build.py b/backend/code_coverage_backend/backend/build.py index bf5d2ef00..898aeb057 100644 --- a/backend/code_coverage_backend/backend/build.py +++ b/backend/code_coverage_backend/backend/build.py @@ -24,7 +24,7 @@ TALISMAN_CONFIG = dict( # on heroku force https redirect - force_https='DYNO' in os.environ, + force_https="DYNO" in os.environ, force_https_permanent=False, force_file_save=False, frame_options=flask_talisman.talisman.SAMEORIGIN, @@ -34,12 +34,12 @@ strict_transport_security_max_age=flask_talisman.talisman.ONE_YEAR_IN_SECS, strict_transport_security_include_subdomains=True, content_security_policy={ - 'default-src': '\'none\'', + 
"default-src": "'none'", # unsafe-inline is needed for the Swagger UI - 'script-src': '\'self\' \'unsafe-inline\'', - 'style-src': '\'self\' \'unsafe-inline\'', - 'img-src': '\'self\'', - 'connect-src': '\'self\'', + "script-src": "'self' 'unsafe-inline'", + "style-src": "'self' 'unsafe-inline'", + "img-src": "'self'", + "connect-src": "'self'", }, content_security_policy_report_uri=None, content_security_policy_report_only=False, @@ -50,23 +50,23 @@ def handle_default_exceptions(e): error = { - 'type': 'about:blank', - 'title': str(e), - 'status': getattr(e, 'code', 500), - 'detail': getattr(e, 'description', str(e)), - 'instance': 'about:blank', + "type": "about:blank", + "title": str(e), + "status": getattr(e, "code", 500), + "detail": getattr(e, "description", str(e)), + "instance": "about:blank", } - return flask.jsonify(error), error['status'] + return flask.jsonify(error), error["status"] def build_flask_app(project_name, app_name, openapi): - ''' + """ Create a new Flask backend application app_name is the Python application name, used as Flask import_name project_name is a "nice" name, used to identify the application - ''' - assert os.path.exists(openapi), 'Missing openapi file {}'.format(openapi) - logger.debug('Initializing', app=app_name, openapi=openapi) + """ + assert os.path.exists(openapi), "Missing openapi file {}".format(openapi) + logger.debug("Initializing", app=app_name, openapi=openapi) # Start OpenAPI app app = connexion.App(import_name=app_name) @@ -79,19 +79,21 @@ def build_flask_app(project_name, app_name, openapi): # Enable wildcard CORS cors = flask_cors.CORS() - cors.init_app(app.app, origins=['*']) + cors.init_app(app.app, origins=["*"]) # Add exception Json renderer for code, exception in werkzeug.exceptions.default_exceptions.items(): app.app.register_error_handler(exception, handle_default_exceptions) # Redirect root to API - app.add_url_rule('/', 'root', lambda: flask.redirect(app.options.openapi_console_ui_path)) + app.add_url_rule( + "/", "root", lambda: flask.redirect(app.options.openapi_console_ui_path) + ) # Dockerflow checks - app.add_url_rule('/__heartbeat__', view_func=heartbeat_response) - app.add_url_rule('/__lbheartbeat__', view_func=lbheartbeat_response) - app.add_url_rule('/__version__', view_func=get_version) + app.add_url_rule("/__heartbeat__", view_func=heartbeat_response) + app.add_url_rule("/__lbheartbeat__", view_func=lbheartbeat_response) + app.add_url_rule("/__version__", view_func=get_version) - logger.debug('Initialized', app=app.name) + logger.debug("Initialized", app=app.name) return app diff --git a/backend/code_coverage_backend/backend/dockerflow.py b/backend/code_coverage_backend/backend/dockerflow.py index 5c2ecee05..68402224d 100644 --- a/backend/code_coverage_backend/backend/dockerflow.py +++ b/backend/code_coverage_backend/backend/dockerflow.py @@ -12,36 +12,38 @@ def get_version(): version_json = { - 'source': 'https://github.com/mozilla-releng/services', - 'version': 'unknown', - 'commit': 'unknown', - 'build': 'unknown' + "source": "https://github.com/mozilla-releng/services", + "version": "unknown", + "commit": "unknown", + "build": "unknown", } return flask.jsonify(version_json) def lbheartbeat_response(): - '''Per the Dockerflow spec: + """Per the Dockerflow spec: Respond to /__lbheartbeat__ with an HTTP 200. 
This is for load balancer - checks and should not check any dependent services.''' - return flask.Response('OK!', headers={'Cache-Control': 'no-cache'}) + checks and should not check any dependent services.""" + return flask.Response("OK!", headers={"Cache-Control": "no-cache"}) def heartbeat_response(): - '''Per the Dockerflow spec: + """Per the Dockerflow spec: Respond to /__heartbeat__ with a HTTP 200 or 5xx on error. This should - depend on services like the database to also ensure they are healthy.''' + depend on services like the database to also ensure they are healthy.""" response = dict() # TODO: check redis is alive check = True if check is True: - return flask.Response('OK', headers={'Cache-Control': 'public, max-age=60'}) + return flask.Response("OK", headers={"Cache-Control": "public, max-age=60"}) else: - return flask.Response(status=502, - response=json.dumps(response), - headers={ - 'Content-Type': 'application/json', - 'Cache-Control': 'public, max-age=60', - }) + return flask.Response( + status=502, + response=json.dumps(response), + headers={ + "Content-Type": "application/json", + "Cache-Control": "public, max-age=60", + }, + ) diff --git a/backend/code_coverage_backend/config.py b/backend/code_coverage_backend/config.py index c1f2fb3aa..9a084a9bc 100644 --- a/backend/code_coverage_backend/config.py +++ b/backend/code_coverage_backend/config.py @@ -5,13 +5,24 @@ from __future__ import absolute_import -PROJECT_NAME = 'code-coverage-backend' -APP_NAME = 'code_coverage_backend' +PROJECT_NAME = "code-coverage-backend" +APP_NAME = "code_coverage_backend" COVERAGE_EXTENSIONS = [ # C - 'c', 'h', + "c", + "h", # C++ - 'cpp', 'cc', 'cxx', 'hh', 'hpp', 'hxx', + "cpp", + "cc", + "cxx", + "hh", + "hpp", + "hxx", # JavaScript - 'js', 'jsm', 'xul', 'xml', 'html', 'xhtml', + "js", + "jsm", + "xul", + "xml", + "html", + "xhtml", ] diff --git a/backend/code_coverage_backend/covdir.py b/backend/code_coverage_backend/covdir.py index c04732331..0308240f3 100644 --- a/backend/code_coverage_backend/covdir.py +++ b/backend/code_coverage_backend/covdir.py @@ -8,17 +8,17 @@ def open_report(report_path): - ''' + """ Helper to load and validate a report - ''' + """ try: - assert os.path.exists(report_path), 'Missing file' + assert os.path.exists(report_path), "Missing file" # TODO: move to ijson to reduce loading time report = json.load(open(report_path)) - assert isinstance(report, dict), 'Invalid data structure' + assert isinstance(report, dict), "Invalid data structure" except Exception as e: - logger.warn('Failed to load report', path=report_path, error=str(e)) + logger.warn("Failed to load report", path=report_path, error=str(e)) if os.path.exists(report_path): # Remove invalid files os.unlink(report_path) @@ -28,39 +28,39 @@ def open_report(report_path): def get_path_coverage(report, object_path, max_depth=1): - ''' - Recursively format the paths encountered, adding informations relative + """ + Recursively format the paths encountered, adding information relative to file type (file|directory) - ''' + """ assert isinstance(report, dict) # Find the section from the path - parts = object_path.split('/') + parts = object_path.split("/") for part in filter(None, parts): - if part not in report['children']: - raise Exception('Path {} not found in report'.format(object_path)) - report = report['children'][part] + if part not in report["children"]: + raise Exception("Path {} not found in report".format(object_path)) + report = report["children"][part] def _clean_object(obj, base_path, depth=0): 
assert isinstance(obj, dict) - if 'children' in obj: + if "children" in obj: # Directory - obj['type'] = 'directory' - obj['path'] = base_path + obj["type"] = "directory" + obj["path"] = base_path if depth >= max_depth: - obj['children'] = len(obj['children']) + obj["children"] = len(obj["children"]) else: - obj['children'] = [ - _clean_object(child, os.path.join(base_path, child_name), depth+1) - for child_name, child in obj['children'].items() + obj["children"] = [ + _clean_object(child, os.path.join(base_path, child_name), depth + 1) + for child_name, child in obj["children"].items() ] else: # File - obj['type'] = 'file' - obj['path'] = base_path - obj['children'] = None + obj["type"] = "file" + obj["path"] = base_path + obj["children"] = None if depth >= max_depth: - del obj['coverage'] + del obj["coverage"] return obj @@ -68,20 +68,18 @@ def _clean_object(obj, base_path, depth=0): def get_overall_coverage(report, max_depth=2): - ''' + """ Load a covdir report and recursively extract the overall coverage of folders until the max depth is reached - ''' + """ assert isinstance(report, dict) - def _extract(obj, base_path='', depth=0): - if 'children' not in obj or depth > max_depth: + def _extract(obj, base_path="", depth=0): + if "children" not in obj or depth > max_depth: return {} - out = { - base_path: obj['coveragePercent'], - } - for child_name, child in obj['children'].items(): - out.update(_extract(child, os.path.join(base_path, child_name), depth+1)) + out = {base_path: obj["coveragePercent"]} + for child_name, child in obj["children"].items(): + out.update(_extract(child, os.path.join(base_path, child_name), depth + 1)) return out return _extract(report) diff --git a/backend/code_coverage_backend/datadog.py b/backend/code_coverage_backend/datadog.py index 6f6d7d4ac..efdada95b 100644 --- a/backend/code_coverage_backend/datadog.py +++ b/backend/code_coverage_backend/datadog.py @@ -15,31 +15,28 @@ def get_stats(): - ''' + """ Configure a shared ThreadStats instance for datadog - ''' + """ global __stats if __stats is not None: return __stats - app_channel = taskcluster.secrets['APP_CHANNEL'] + app_channel = taskcluster.secrets["APP_CHANNEL"] - if taskcluster.secrets['DATADOG_API_KEY']: + if taskcluster.secrets["DATADOG_API_KEY"]: datadog.initialize( - api_key=taskcluster.secrets['DATADOG_API_KEY'], - host_name=f'coverage.{app_channel}.moz.tools', + api_key=taskcluster.secrets["DATADOG_API_KEY"], + host_name=f"coverage.{app_channel}.moz.tools", ) else: - logger.info('No datadog credentials') + logger.info("No datadog credentials") # Must be instantiated after initialize # https://datadogpy.readthedocs.io/en/latest/#datadog-threadstats-module __stats = datadog.ThreadStats( - constant_tags=[ - config.PROJECT_NAME, - f'channel:{app_channel}', - ], + constant_tags=[config.PROJECT_NAME, f"channel:{app_channel}"] ) __stats.start(flush_in_thread=True) return __stats diff --git a/backend/code_coverage_backend/gcp.py b/backend/code_coverage_backend/gcp.py index fb8f4bad3..a1660374e 100644 --- a/backend/code_coverage_backend/gcp.py +++ b/backend/code_coverage_backend/gcp.py @@ -19,27 +19,27 @@ logger = structlog.get_logger(__name__) __cache = None -KEY_REPORTS = 'reports:{repository}' -KEY_CHANGESET = 'changeset:{repository}:{changeset}' -KEY_HISTORY = 'history:{repository}' -KEY_OVERALL_COVERAGE = 'overall:{repository}:{changeset}' +KEY_REPORTS = "reports:{repository}" +KEY_CHANGESET = "changeset:{repository}:{changeset}" +KEY_HISTORY = "history:{repository}" +KEY_OVERALL_COVERAGE = 
"overall:{repository}:{changeset}" -HGMO_REVISION_URL = 'https://hg.mozilla.org/{repository}/json-rev/{revision}' -HGMO_PUSHES_URL = 'https://hg.mozilla.org/{repository}/json-pushes' +HGMO_REVISION_URL = "https://hg.mozilla.org/{repository}/json-rev/{revision}" +HGMO_PUSHES_URL = "https://hg.mozilla.org/{repository}/json-pushes" -REPOSITORIES = ('mozilla-central', ) +REPOSITORIES = ("mozilla-central",) MIN_PUSH = 0 MAX_PUSH = math.inf def load_cache(): - ''' + """ Manage singleton instance of GCPCache when configuration is available - ''' + """ global __cache - if taskcluster.secrets['GOOGLE_CLOUD_STORAGE'] is None: + if taskcluster.secrets["GOOGLE_CLOUD_STORAGE"] is None: return if __cache is None: @@ -49,38 +49,39 @@ def load_cache(): def hgmo_revision_details(repository, changeset): - ''' + """ HGMO helper to retrieve details for a changeset - ''' - url = HGMO_REVISION_URL.format( - repository=repository, - revision=changeset, - ) + """ + url = HGMO_REVISION_URL.format(repository=repository, revision=changeset) resp = requests.get(url) resp.raise_for_status() data = resp.json() - assert 'pushid' in data, 'Missing pushid' - return data['pushid'], data['date'][0] + assert "pushid" in data, "Missing pushid" + return data["pushid"], data["date"][0] class GCPCache(object): - ''' + """ Cache on Redis GCP results - ''' + """ + def __init__(self, reports_dir=None): # Open redis connection - self.redis = redis.from_url(taskcluster.secrets['REDIS_URL']) - assert self.redis.ping(), 'Redis server does not ping back' + self.redis = redis.from_url(taskcluster.secrets["REDIS_URL"]) + assert self.redis.ping(), "Redis server does not ping back" # Open gcp connection to bucket - assert taskcluster.secrets['GOOGLE_CLOUD_STORAGE'] is not None, \ - 'Missing GOOGLE_CLOUD_STORAGE secret' - self.bucket = get_bucket(taskcluster.secrets['GOOGLE_CLOUD_STORAGE']) + assert ( + taskcluster.secrets["GOOGLE_CLOUD_STORAGE"] is not None + ), "Missing GOOGLE_CLOUD_STORAGE secret" + self.bucket = get_bucket(taskcluster.secrets["GOOGLE_CLOUD_STORAGE"]) # Local storage for reports - self.reports_dir = reports_dir or os.path.join(tempfile.gettempdir(), 'ccov-reports') + self.reports_dir = reports_dir or os.path.join( + tempfile.gettempdir(), "ccov-reports" + ) os.makedirs(self.reports_dir, exist_ok=True) - logger.info('Reports will be stored in {}'.format(self.reports_dir)) + logger.info("Reports will be stored in {}".format(self.reports_dir)) # Load most recent reports in cache for repo in REPOSITORIES: @@ -88,36 +89,36 @@ def __init__(self, reports_dir=None): self.download_report(repo, rev) def ingest_pushes(self, repository, min_push_id=None, nb_pages=3): - ''' + """ Ingest HGMO changesets and pushes into our Redis Cache The pagination goes from oldest to newest, starting from the optional min_push_id - ''' + """ chunk_size = 8 - params = { - 'version': 2, - } + params = {"version": 2} if min_push_id is not None: assert isinstance(min_push_id, int) - params['startID'] = min_push_id - params['endID'] = min_push_id + chunk_size + params["startID"] = min_push_id + params["endID"] = min_push_id + chunk_size for page in range(nb_pages): - r = requests.get(HGMO_PUSHES_URL.format(repository=repository), params=params) + r = requests.get( + HGMO_PUSHES_URL.format(repository=repository), params=params + ) data = r.json() # Sort pushes to go from oldest to newest - pushes = sorted([ - (int(push_id), push) - for push_id, push in data['pushes'].items() - ], key=lambda p: p[0]) + pushes = sorted( + [(int(push_id), push) for push_id, 
push in data["pushes"].items()], + key=lambda p: p[0], + ) if not pushes: return for push_id, push in pushes: - changesets = push['changesets'] - date = push['date'] + changesets = push["changesets"] + date = push["date"] self.store_push(repository, push_id, changesets, date) reports = [ @@ -126,16 +127,16 @@ def ingest_pushes(self, repository, min_push_id=None, nb_pages=3): if self.ingest_report(repository, push_id, changeset, date) ] if reports: - logger.info('Found reports in that push', push_id=push_id) + logger.info("Found reports in that push", push_id=push_id) newest = pushes[-1][0] - params['startID'] = newest - params['endID'] = newest + chunk_size + params["startID"] = newest + params["endID"] = newest + chunk_size def ingest_report(self, repository, push_id, changeset, date): - ''' + """ When a report exist for a changeset, download it and update redis data - ''' + """ assert isinstance(push_id, int) assert isinstance(date, int) @@ -145,14 +146,11 @@ def ingest_report(self, repository, push_id, changeset, date): return False # Read overall coverage for history - key = KEY_OVERALL_COVERAGE.format( - repository=repository, - changeset=changeset, - ) + key = KEY_OVERALL_COVERAGE.format(repository=repository, changeset=changeset) report = covdir.open_report(report_path) - assert report is not None, 'No report to ingest' + assert report is not None, "No report to ingest" overall_coverage = covdir.get_overall_coverage(report) - assert len(overall_coverage) > 0, 'No overall coverage' + assert len(overall_coverage) > 0, "No overall coverage" self.redis.hmset(key, overall_coverage) # Add the changeset to the sorted sets of known reports @@ -163,32 +161,32 @@ def ingest_report(self, repository, push_id, changeset, date): # Add the changeset to the sorted sets of known reports by date self.redis.zadd(KEY_HISTORY.format(repository=repository), {changeset: date}) - logger.info('Ingested report', changeset=changeset) + logger.info("Ingested report", changeset=changeset) return True def download_report(self, repository, changeset): - ''' + """ Download and extract a json+zstd covdir report - ''' - # Chek the report is available on remote storage - path = '{}/{}.json.zstd'.format(repository, changeset) + """ + # Check the report is available on remote storage + path = "{}/{}.json.zstd".format(repository, changeset) blob = self.bucket.blob(path) if not blob.exists(): - logger.debug('No report found on GCP', path=path) + logger.debug("No report found on GCP", path=path) return False archive_path = os.path.join(self.reports_dir, blob.name) - json_path = os.path.join(self.reports_dir, blob.name.rstrip('.zstd')) + json_path = os.path.join(self.reports_dir, blob.name.rstrip(".zstd")) if os.path.exists(json_path): - logger.info('Report already available', path=json_path) + logger.info("Report already available", path=json_path) return json_path os.makedirs(os.path.dirname(archive_path), exist_ok=True) blob.download_to_filename(archive_path) - logger.info('Downloaded report archive', path=archive_path) + logger.info("Downloaded report archive", path=archive_path) - with open(json_path, 'wb') as output: - with open(archive_path, 'rb') as archive: + with open(json_path, "wb") as output: + with open(archive_path, "rb") as archive: dctx = zstd.ZstdDecompressor() reader = dctx.stream_reader(archive) while True: @@ -198,76 +196,63 @@ def download_report(self, repository, changeset): output.write(chunk) os.unlink(archive_path) - logger.info('Decompressed report', path=json_path) + logger.info("Decompressed 
report", path=json_path) return json_path def store_push(self, repository, push_id, changesets, date): - ''' + """ Store a push on redis cache, with its changesets - ''' + """ assert isinstance(push_id, int) assert isinstance(changesets, list) # Store changesets initial data for changeset in changesets: - key = KEY_CHANGESET.format( - repository=repository, - changeset=changeset, - ) - self.redis.hmset(key, { - 'push': push_id, - 'date': date, - }) + key = KEY_CHANGESET.format(repository=repository, changeset=changeset) + self.redis.hmset(key, {"push": push_id, "date": date}) - logger.info('Stored new push data', push_id=push_id) + logger.info("Stored new push data", push_id=push_id) def find_report(self, repository, push_range=(MAX_PUSH, MIN_PUSH)): - ''' + """ Find the first report available before that push - ''' - results = self.list_reports( - repository, - nb=1, - push_range=push_range, - ) + """ + results = self.list_reports(repository, nb=1, push_range=push_range) if not results: - raise Exception('No report found') + raise Exception("No report found") return results[0] def find_closest_report(self, repository, changeset): - ''' + """ Find the closest report from specified changeset: 1. Lookup the changeset push in cache 2. Lookup the changeset push in HGMO 3. Find the first report after that push - ''' + """ # Lookup push from cache (fast) - key = KEY_CHANGESET.format( - repository=repository, - changeset=changeset, - ) - push_id = self.redis.hget(key, 'push') + key = KEY_CHANGESET.format(repository=repository, changeset=changeset) + push_id = self.redis.hget(key, "push") if push_id: # Redis lib uses bytes for all output - push_id = int(push_id.decode('utf-8')) + push_id = int(push_id.decode("utf-8")) else: # Lookup push from HGMO (slow) push_id, _ = hgmo_revision_details(repository, changeset) # Ingest pushes as we clearly don't have it in cache - self.ingest_pushes(repository, min_push_id=push_id-1, nb_pages=1) + self.ingest_pushes(repository, min_push_id=push_id - 1, nb_pages=1) # Load report from that push return self.find_report(repository, push_range=(push_id, MAX_PUSH)) def list_reports(self, repository, nb=5, push_range=(MAX_PUSH, MIN_PUSH)): - ''' + """ List the last reports available on the server, ordered by push by default from newer to older The order is detected from the push range - ''' + """ assert isinstance(nb, int) assert nb > 0 assert isinstance(push_range, tuple) and len(push_range) == 2 @@ -278,43 +263,44 @@ def list_reports(self, repository, nb=5, push_range=(MAX_PUSH, MIN_PUSH)): reports = op( KEY_REPORTS.format(repository=repository), - start, end, + start, + end, start=0, num=nb, withscores=True, ) - return [ - (changeset.decode('utf-8'), int(push)) - for changeset, push in reports - ] + return [(changeset.decode("utf-8"), int(push)) for changeset, push in reports] def get_coverage(self, repository, changeset, path): - ''' + """ Load a report and its coverage for a specific path and build a serializable representation - ''' - report_path = os.path.join(self.reports_dir, '{}/{}.json'.format(repository, changeset)) + """ + report_path = os.path.join( + self.reports_dir, "{}/{}.json".format(repository, changeset) + ) report = covdir.open_report(report_path) if report is None: # Try to download the report if it's missing locally report_path = self.download_report(repository, changeset) - assert report_path is not False, \ - 'Missing report for {} at {}'.format(repository, changeset) + assert report_path is not False, "Missing report for {} at {}".format( + 
repository, changeset + ) report = covdir.open_report(report_path) assert report out = covdir.get_path_coverage(report, path) - out['changeset'] = changeset + out["changeset"] = changeset return out - def get_history(self, repository, path='', start=None, end=None): - ''' + def get_history(self, repository, path="", start=None, end=None): + """ Load the history overall coverage from the redis cache Default to date range from now back to a year - ''' + """ if end is None: end = calendar.timegm(datetime.utcnow().timetuple()) if start is None: @@ -326,50 +312,40 @@ def get_history(self, repository, path='', start=None, end=None): # Load changesets ordered by date, in that range history = self.redis.zrevrangebyscore( - KEY_HISTORY.format(repository=repository), - end, start, - withscores=True, + KEY_HISTORY.format(repository=repository), end, start, withscores=True ) def _coverage(changeset, date): # Load overall coverage for specified path - changeset = changeset.decode('utf-8') + changeset = changeset.decode("utf-8") key = KEY_OVERALL_COVERAGE.format( - repository=repository, - changeset=changeset, + repository=repository, changeset=changeset ) coverage = self.redis.hget(key, path) if coverage is not None: coverage = float(coverage) - return { - 'changeset': changeset, - 'date': int(date), - 'coverage': coverage, - } + return {"changeset": changeset, "date": int(date), "coverage": coverage} - return [ - _coverage(changeset, date) - for changeset, date in history - ] + return [_coverage(changeset, date) for changeset, date in history] def ingest_available_reports(self, repository): - ''' + """ Ingest all the available reports for a repository - ''' + """ assert isinstance(repository, str) - REGEX_BLOB = re.compile(r'^{}/(\w+).json.zstd$'.format(repository)) + REGEX_BLOB = re.compile(r"^{}/(\w+).json.zstd$".format(repository)) for blob in self.bucket.list_blobs(prefix=repository): # Get changeset from blob name match = REGEX_BLOB.match(blob.name) if match is None: - logger.warn('Invalid blob found {}'.format(blob.name)) + logger.warn("Invalid blob found {}".format(blob.name)) continue changeset = match.group(1) - # Get extra informations from HGMO + # Get extra information from HGMO push_id, date = hgmo_revision_details(repository, changeset) - logger.info('Found report', changeset=changeset, push=push_id) + logger.info("Found report", changeset=changeset, push=push_id) # Ingest report self.ingest_report(repository, push_id, changeset, int(date)) diff --git a/backend/requirements-dev.txt b/backend/requirements-dev.txt index 545fe67bb..f885331bd 100644 --- a/backend/requirements-dev.txt +++ b/backend/requirements-dev.txt @@ -1,5 +1,4 @@ fakeredis==1.0.3 -flake8==3.7.8 -flake8-isort==2.7.0 +pre-commit==1.18.0 pytest==5.0.1 responses==0.10.6 diff --git a/backend/settings.py b/backend/settings.py index fa60131d8..52f894031 100644 --- a/backend/settings.py +++ b/backend/settings.py @@ -5,4 +5,4 @@ import os -DEBUG = bool(os.environ.get('DEBUG', False)) +DEBUG = bool(os.environ.get("DEBUG", False)) diff --git a/backend/setup.cfg b/backend/setup.cfg deleted file mode 100644 index 618244776..000000000 --- a/backend/setup.cfg +++ /dev/null @@ -1,66 +0,0 @@ -[flake8] -max-line-length = 159 -exclude=nix_run_setup.py,migrations/,build/,dist/ - -# https://pypi.python.org/pypi/flake8-coding -accept-encodings = utf-8 - -# https://pypi.python.org/pypi/flake8-quotes -inline-quotes = single -multiline-quotes = ''' -docstring-quotes = ''' - -# https://pypi.python.org/pypi/isort -[isort] -line_length = 159 
-force_single_line = True -default_section=FIRSTPARTY -sections=FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER -known_first_party = - code_coverage_tools - code_coverage_bot - code_coverage_tools - codecoverage_backend - -[mypy] -# Specify the target platform details in config, so your developers are -# free to run mypy on Windows, Linux, or macOS and get consistent -# results. -# python version will be autodetected -#python_version=3.6 -platform=linux - -# flake8-mypy expects the two following for sensible formatting -show_column_numbers=True -show_error_context=False - -# do not follow imports (except for ones found in typeshed) -follow_imports=skip - -# since we're ignoring imports, writing .mypy_cache doesn't make any sense -cache_dir=/dev/null - -# suppress errors about unsatisfied imports -ignore_missing_imports=True - -# allow untyped calls as a consequence of the options above -disallow_untyped_calls=False - -# allow returning Any as a consequence of the options above -warn_return_any=False - -# treat Optional per PEP 484 -strict_optional=True - -# ensure all execution paths are returning -warn_no_return=True - -# lint-style cleanliness for typing needs to be disabled; returns more errors -# than the full run. -warn_redundant_casts=False -warn_unused_ignores=False - -# The following are off by default since they're too noisy. -# Flip them on if you feel adventurous. -disallow_untyped_defs=False -check_untyped_defs=False diff --git a/backend/setup.py b/backend/setup.py index 3f045aeec..5fc57b44a 100644 --- a/backend/setup.py +++ b/backend/setup.py @@ -8,7 +8,7 @@ from setuptools import find_packages from setuptools import setup -with open('VERSION') as f: +with open("VERSION") as f: version = f.read().strip() @@ -17,29 +17,33 @@ def read_requirements(file_): with open(file_) as f: for line in f.readlines(): line = line.strip() - if line.startswith('-e ') or line.startswith('http://') or line.startswith('https://'): - extras = '' - if '[' in line: - extras = '[' + line.split('[')[1].split(']')[0] + ']' - line = line.split('#')[1].split('egg=')[1] + extras - elif line == '' or line.startswith('#') or line.startswith('-'): + if ( + line.startswith("-e ") + or line.startswith("http://") + or line.startswith("https://") + ): + extras = "" + if "[" in line: + extras = "[" + line.split("[")[1].split("]")[0] + "]" + line = line.split("#")[1].split("egg=")[1] + extras + elif line == "" or line.startswith("#") or line.startswith("-"): continue - line = line.split('#')[0].strip() + line = line.split("#")[0].strip() lines.append(line) return sorted(list(set(lines))) setup( - name='code_coverage_backend', + name="code_coverage_backend", version=version, - description='The code behind https://coverage.moz.tools', - author='Mozilla Release Management', - author_email='release-mgmt-analysis@mozilla.com', - url='https://api.coverage.moz.tools', - tests_require=read_requirements('requirements-dev.txt'), - install_requires=read_requirements('requirements.txt'), + description="The code behind https://coverage.moz.tools", + author="Mozilla Release Management", + author_email="release-mgmt-analysis@mozilla.com", + url="https://api.coverage.moz.tools", + tests_require=read_requirements("requirements-dev.txt"), + install_requires=read_requirements("requirements.txt"), packages=find_packages(), include_package_data=True, zip_safe=False, - license='MPL2', + license="MPL2", ) diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py index 195458491..5d3eed30f 100644 --- a/backend/tests/conftest.py 
+++ b/backend/tests/conftest.py @@ -19,41 +19,41 @@ import code_coverage_backend.backend -FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures') +FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "fixtures") -@pytest.fixture(autouse=True, scope='function') +@pytest.fixture(autouse=True, scope="function") def mock_secrets(): - ''' + """ Provide configuration through mock Taskcluster secrets - ''' + """ from code_coverage_backend import taskcluster - taskcluster.options = { - 'rootUrl': 'http://taskcluster.test', - } + taskcluster.options = {"rootUrl": "http://taskcluster.test"} taskcluster.secrets = { - 'REDIS_URL': 'redis://unitest:1234', - 'APP_CHANNEL': 'test', - 'GOOGLE_CLOUD_STORAGE': { - 'token_uri': 'secret', - 'client_email': 'xxx@mozilla.test', - 'private_key': 'somethingHere', - 'bucket': 'unittest', - } + "REDIS_URL": "redis://unitest:1234", + "APP_CHANNEL": "test", + "GOOGLE_CLOUD_STORAGE": { + "token_uri": "secret", + "client_email": "xxx@mozilla.test", + "private_key": "somethingHere", + "bucket": "unittest", + }, } @pytest.fixture def app(mock_secrets): - ''' + """ Load code_coverage_backend app in test mode - ''' + """ app = code_coverage_backend.backend.build_flask_app( - project_name='Test', - app_name='test', - openapi=os.path.join(os.path.dirname(__file__), '../code_coverage_backend/api.yml') + project_name="Test", + app_name="test", + openapi=os.path.join( + os.path.dirname(__file__), "../code_coverage_backend/api.yml" + ), ) with app.app.app_context(): @@ -67,9 +67,10 @@ def client(app): @pytest.fixture def mock_bucket(mock_secrets): - ''' + """ Mock a GCP bucket & blobs - ''' + """ + class MockBlob(object): def __init__(self, name, content=None, exists=False): self.name = name @@ -77,7 +78,7 @@ def __init__(self, name, content=None, exists=False): assert isinstance(content, bytes) # Auto zstandard compression - if self.name.endswith('.zstd'): + if self.name.endswith(".zstd"): compressor = zstd.ZstdCompressor() self._content = compressor.compress(content) else: @@ -91,17 +92,16 @@ def exists(self): def download_to_filename(self, path): assert self._exists and self._content - with open(path, 'wb') as f: + with open(path, "wb") as f: f.write(self._content) class MockBucket(object): _blobs = {} def add_mock_blob(self, name, coverage=0.0): - content = json.dumps({ - 'coveragePercent': coverage, - 'children': {} - }).encode('utf-8') + content = json.dumps({"coveragePercent": coverage, "children": {}}).encode( + "utf-8" + ) self._blobs[name] = MockBlob(name, content, exists=True) def blob(self, name): @@ -114,13 +114,12 @@ def blob(self, name): @pytest.fixture def mock_cache(mock_secrets, mock_bucket, tmpdir): - ''' + """ Mock a GCPCache instance, using fakeredis and a mocked GCP bucket - ''' + """ from code_coverage_backend.gcp import GCPCache class MockCache(GCPCache): - def __init__(self): self.redis = fakeredis.FakeStrictRedis() self.reports_dir = tmpdir.mkdtemp() @@ -131,69 +130,61 @@ def __init__(self): @pytest.fixture def mock_hgmo(): - ''' + """ Mock HGMO responses for pushes - ''' - headers = { - 'content-type': 'application/json' - } + """ + headers = {"content-type": "application/json"} max_push = 1000 def _test_rev(request): # The push id is in the first 3 characters of the revision requested revision = request.path_url[17:] assert len(revision) == 32 - resp = { - 'pushid': int(revision[:3]), - 'date': [time.time(), 0], - } + resp = {"pushid": int(revision[:3]), "date": [time.time(), 0]} return (200, headers, json.dumps(resp)) def 
_changesets(push_id): # random changesets - changesets = [ - uuid.uuid4().hex - for _ in range(random.randint(2, 20)) - ] + changesets = [uuid.uuid4().hex for _ in range(random.randint(2, 20))] # Add the MD5 hash of the push id to test specific cases - changesets.append(hashlib.md5(str(push_id).encode('utf-8')).hexdigest()) + changesets.append(hashlib.md5(str(push_id).encode("utf-8")).hexdigest()) return changesets def _test_pushes(request): - ''' + """ Build pushes list, limited to a maximum push id - ''' + """ query = urllib.parse.parse_qs(urllib.parse.urlparse(request.path_url).query) - assert int(query['version'][0]) == 2 - start = 'startID' in query and int(query['startID'][0]) or (max_push - 8) - end = 'endID' in query and int(query['endID'][0]) or max_push + assert int(query["version"][0]) == 2 + start = "startID" in query and int(query["startID"][0]) or (max_push - 8) + end = "endID" in query and int(query["endID"][0]) or max_push assert end > start now = time.time() resp = { - 'lastpushid': max_push, - 'pushes': { + "lastpushid": max_push, + "pushes": { push: { - 'changesets': _changesets(push), - 'date': int((now % 1000000) + push * 10), # fake timestamp + "changesets": _changesets(push), + "date": int((now % 1000000) + push * 10), # fake timestamp } for push in range(start, end + 1) if push <= max_push - } + }, } return (200, headers, json.dumps(resp)) with responses.RequestsMock(assert_all_requests_are_fired=False) as resps: resps.add_callback( responses.GET, - re.compile('https://hg.mozilla.org/(.+)/json-rev/(.+)'), + re.compile("https://hg.mozilla.org/(.+)/json-rev/(.+)"), callback=_test_rev, ) resps.add_callback( responses.GET, - re.compile('https://hg.mozilla.org/(.+)/json-pushes'), + re.compile("https://hg.mozilla.org/(.+)/json-pushes"), callback=_test_pushes, ) yield resps @@ -201,7 +192,7 @@ def _test_pushes(request): @pytest.fixture def mock_covdir_report(): - ''' + """ Path to the covdir mock in repository - ''' - return os.path.join(FIXTURES_DIR, 'covdir.json') + """ + return os.path.join(FIXTURES_DIR, "covdir.json") diff --git a/backend/tests/test_covdir.py b/backend/tests/test_covdir.py index 10d32e5bd..d2a6f77af 100644 --- a/backend/tests/test_covdir.py +++ b/backend/tests/test_covdir.py @@ -3,19 +3,19 @@ def test_open_report(tmpdir, mock_covdir_report): - ''' + """ Test opening reports - ''' + """ from code_coverage_backend import covdir - empty = tmpdir.join('empty.json') + empty = tmpdir.join("empty.json") assert covdir.open_report(empty.realpath()) is None - bad = tmpdir.join('bad.json') - bad.write('not json') + bad = tmpdir.join("bad.json") + bad.write("not json") assert covdir.open_report(bad.realpath()) is None - invalid = tmpdir.join('invalid.json') + invalid = tmpdir.join("invalid.json") invalid.write('"string"') assert covdir.open_report(invalid.realpath()) is None @@ -23,118 +23,122 @@ def test_open_report(tmpdir, mock_covdir_report): assert report is not None assert isinstance(report, dict) - assert list(report.keys()) == ['children', 'coveragePercent', 'linesCovered', 'linesMissed', 'linesTotal', 'name'] + assert list(report.keys()) == [ + "children", + "coveragePercent", + "linesCovered", + "linesMissed", + "linesTotal", + "name", + ] def test_get_path_coverage(mock_covdir_report): - ''' + """ Test covdir report parsing to obtain coverage for a specific path - ''' + """ from code_coverage_backend import covdir # Full coverage report = covdir.open_report(mock_covdir_report) assert report is not None - out = covdir.get_path_coverage(report, '') + out = 
covdir.get_path_coverage(report, "") assert isinstance(out, dict) - assert out['coveragePercent'] == 85.11 - assert out['linesCovered'] == 267432 - assert out['linesMissed'] == 46779 - assert out['linesTotal'] == 314211 - assert out['name'] == 'src' - assert out['path'] == '' - assert out['type'] == 'directory' - assert len(out['children']) == 12 - assert [c['name'] for c in out['children']] == [ - 'builtin', - 'ctypes', - 'frontend', - 'jsapi.cpp', - 'jsdate.cpp', - 'jsexn.cpp', - 'jsexn.h', - 'jsmath.cpp', - 'perf', - 'shell', - 'threading', - 'util', + assert out["coveragePercent"] == 85.11 + assert out["linesCovered"] == 267432 + assert out["linesMissed"] == 46779 + assert out["linesTotal"] == 314211 + assert out["name"] == "src" + assert out["path"] == "" + assert out["type"] == "directory" + assert len(out["children"]) == 12 + assert [c["name"] for c in out["children"]] == [ + "builtin", + "ctypes", + "frontend", + "jsapi.cpp", + "jsdate.cpp", + "jsexn.cpp", + "jsexn.h", + "jsmath.cpp", + "perf", + "shell", + "threading", + "util", ] # Subfolder report = covdir.open_report(mock_covdir_report) assert report is not None - out = covdir.get_path_coverage(report, 'perf') + out = covdir.get_path_coverage(report, "perf") assert isinstance(out, dict) - assert out['coveragePercent'] == 65.45 - assert out['linesCovered'] == 125 - assert out['linesMissed'] == 66 - assert out['linesTotal'] == 191 - assert out['name'] == 'perf' - assert out['path'] == 'perf' - assert out['type'] == 'directory' - assert len(out['children']) == 2 - assert [c['name'] for c in out['children']] == [ - 'pm_linux.cpp', - 'pm_stub.cpp', - ] + assert out["coveragePercent"] == 65.45 + assert out["linesCovered"] == 125 + assert out["linesMissed"] == 66 + assert out["linesTotal"] == 191 + assert out["name"] == "perf" + assert out["path"] == "perf" + assert out["type"] == "directory" + assert len(out["children"]) == 2 + assert [c["name"] for c in out["children"]] == ["pm_linux.cpp", "pm_stub.cpp"] # File report = covdir.open_report(mock_covdir_report) assert report is not None - out = covdir.get_path_coverage(report, 'perf/pm_linux.cpp') + out = covdir.get_path_coverage(report, "perf/pm_linux.cpp") assert isinstance(out, dict) assert out == { - 'children': None, - 'coverage': [66, 138, 6, -1, -1], - 'coveragePercent': 81.69, - 'linesCovered': 58, - 'linesMissed': 13, - 'linesTotal': 71, - 'name': 'pm_linux.cpp', - 'path': 'perf/pm_linux.cpp', - 'type': 'file' + "children": None, + "coverage": [66, 138, 6, -1, -1], + "coveragePercent": 81.69, + "linesCovered": 58, + "linesMissed": 13, + "linesTotal": 71, + "name": "pm_linux.cpp", + "path": "perf/pm_linux.cpp", + "type": "file", } # Missing file with pytest.raises(Exception) as e: report = covdir.open_report(mock_covdir_report) assert report is not None - covdir.get_path_coverage(report, 'nope.py') - assert str(e.value) == 'Path nope.py not found in report' + covdir.get_path_coverage(report, "nope.py") + assert str(e.value) == "Path nope.py not found in report" def test_get_overall_coverage(mock_covdir_report): - ''' + """ Test covdir report overall coverage extraction - ''' + """ from code_coverage_backend import covdir report = covdir.open_report(mock_covdir_report) assert report is not None out = covdir.get_overall_coverage(report, max_depth=1) assert out == { - '': 85.11, - 'builtin': 84.4, - 'ctypes': 80.83, - 'frontend': 78.51, - 'perf': 65.45, - 'shell': 69.95, - 'threading': 90.54, - 'util': 73.29, + "": 85.11, + "builtin": 84.4, + "ctypes": 80.83, + "frontend": 78.51, 
+ "perf": 65.45, + "shell": 69.95, + "threading": 90.54, + "util": 73.29, } report = covdir.open_report(mock_covdir_report) assert report is not None out = covdir.get_overall_coverage(report, max_depth=2) assert out == { - '': 85.11, - 'builtin': 84.4, - 'builtin/intl': 78.62, - 'ctypes': 80.83, - 'ctypes/libffi': 49.59, - 'frontend': 78.51, - 'perf': 65.45, - 'shell': 69.95, - 'threading': 90.54, - 'util': 73.29 + "": 85.11, + "builtin": 84.4, + "builtin/intl": 78.62, + "ctypes": 80.83, + "ctypes/libffi": 49.59, + "frontend": 78.51, + "perf": 65.45, + "shell": 69.95, + "threading": 90.54, + "util": 73.29, } diff --git a/backend/tests/test_coverage.py b/backend/tests/test_coverage.py index fc06f0ecc..5e31c0e9f 100644 --- a/backend/tests/test_coverage.py +++ b/backend/tests/test_coverage.py @@ -8,10 +8,24 @@ def test_coverage_supported_extensions_api(client): # List supported extensions for coverage analysis through the API - resp = client.get('/v2/extensions') + resp = client.get("/v2/extensions") assert resp.status_code == 200 - data = json.loads(resp.data.decode('utf-8')) - assert set(data) == set([ - 'c', 'h', 'cpp', 'cc', 'cxx', 'hh', 'hpp', - 'hxx', 'js', 'jsm', 'xul', 'xml', 'html', 'xhtml', - ]) + data = json.loads(resp.data.decode("utf-8")) + assert set(data) == set( + [ + "c", + "h", + "cpp", + "cc", + "cxx", + "hh", + "hpp", + "hxx", + "js", + "jsm", + "xul", + "xml", + "html", + "xhtml", + ] + ) diff --git a/backend/tests/test_gcp.py b/backend/tests/test_gcp.py index 609161ba5..9ed9fac1b 100644 --- a/backend/tests/test_gcp.py +++ b/backend/tests/test_gcp.py @@ -8,237 +8,234 @@ def test_store_push(mock_cache): - ''' + """ Test base method to store a push & changesets on redis - ''' - assert mock_cache.redis.keys('*') == [] - mock_cache.store_push('myrepo', 1234, ['deadbeef', 'coffee'], 111222333) - - assert mock_cache.redis.keys('*') == [b'changeset:myrepo:deadbeef', b'changeset:myrepo:coffee'] - assert mock_cache.redis.hgetall('changeset:myrepo:deadbeef') == { - b'push': b'1234', - b'date': b'111222333', + """ + assert mock_cache.redis.keys("*") == [] + mock_cache.store_push("myrepo", 1234, ["deadbeef", "coffee"], 111222333) + + assert mock_cache.redis.keys("*") == [ + b"changeset:myrepo:deadbeef", + b"changeset:myrepo:coffee", + ] + assert mock_cache.redis.hgetall("changeset:myrepo:deadbeef") == { + b"push": b"1234", + b"date": b"111222333", } - assert mock_cache.redis.hgetall('changeset:myrepo:coffee') == { - b'push': b'1234', - b'date': b'111222333', + assert mock_cache.redis.hgetall("changeset:myrepo:coffee") == { + b"push": b"1234", + b"date": b"111222333", } def test_download_report(mock_cache): - ''' + """ Test base method to download a report & store it on local FS - ''' - mock_cache.bucket.add_mock_blob('myrepo/deadbeef123.json.zstd') + """ + mock_cache.bucket.add_mock_blob("myrepo/deadbeef123.json.zstd") # Does not exist - assert mock_cache.download_report('myrepo', 'missing') is False + assert mock_cache.download_report("myrepo", "missing") is False - archive = os.path.join(mock_cache.reports_dir, 'myrepo', 'deadbeef123.json.zstd') - payload = os.path.join(mock_cache.reports_dir, 'myrepo', 'deadbeef123.json') + archive = os.path.join(mock_cache.reports_dir, "myrepo", "deadbeef123.json.zstd") + payload = os.path.join(mock_cache.reports_dir, "myrepo", "deadbeef123.json") assert not os.path.exists(archive) assert not os.path.exists(payload) # Valid blob - assert mock_cache.download_report('myrepo', 'deadbeef123') == payload + assert mock_cache.download_report("myrepo", 
"deadbeef123") == payload # Only the payload remains after download assert not os.path.exists(archive) assert os.path.exists(payload) - assert json.load(open(payload)) == { - 'children': {}, - 'coveragePercent': 0.0, - } + assert json.load(open(payload)) == {"children": {}, "coveragePercent": 0.0} def test_ingestion(mock_cache): - ''' + """ Test ingestion of several reports and their retrieval through Redis index - ''' + """ # Setup blobs - mock_cache.bucket.add_mock_blob('myrepo/rev1.json.zstd', coverage=0.1) - mock_cache.bucket.add_mock_blob('myrepo/rev2.json.zstd', coverage=0.2) - mock_cache.bucket.add_mock_blob('myrepo/rev10.json.zstd', coverage=1.0) + mock_cache.bucket.add_mock_blob("myrepo/rev1.json.zstd", coverage=0.1) + mock_cache.bucket.add_mock_blob("myrepo/rev2.json.zstd", coverage=0.2) + mock_cache.bucket.add_mock_blob("myrepo/rev10.json.zstd", coverage=1.0) # No reports at first - assert mock_cache.redis.zcard(b'reports:myrepo') == 0 - assert mock_cache.redis.zcard(b'history:myrepo') == 0 - assert mock_cache.list_reports('myrepo') == [] + assert mock_cache.redis.zcard(b"reports:myrepo") == 0 + assert mock_cache.redis.zcard(b"history:myrepo") == 0 + assert mock_cache.list_reports("myrepo") == [] # Ingest those 3 reports - mock_cache.ingest_report('myrepo', 1, 'rev1', 1000) - mock_cache.ingest_report('myrepo', 2, 'rev2', 2000) - mock_cache.ingest_report('myrepo', 10, 'rev10', 9000) + mock_cache.ingest_report("myrepo", 1, "rev1", 1000) + mock_cache.ingest_report("myrepo", 2, "rev2", 2000) + mock_cache.ingest_report("myrepo", 10, "rev10", 9000) # They must be in redis and on the file system - assert mock_cache.redis.zcard(b'reports:myrepo') == 3 - assert mock_cache.redis.zcard(b'history:myrepo') == 3 - assert os.path.exists(os.path.join(mock_cache.reports_dir, 'myrepo', 'rev1.json')) - assert os.path.exists(os.path.join(mock_cache.reports_dir, 'myrepo', 'rev2.json')) - assert os.path.exists(os.path.join(mock_cache.reports_dir, 'myrepo', 'rev10.json')) + assert mock_cache.redis.zcard(b"reports:myrepo") == 3 + assert mock_cache.redis.zcard(b"history:myrepo") == 3 + assert os.path.exists(os.path.join(mock_cache.reports_dir, "myrepo", "rev1.json")) + assert os.path.exists(os.path.join(mock_cache.reports_dir, "myrepo", "rev2.json")) + assert os.path.exists(os.path.join(mock_cache.reports_dir, "myrepo", "rev10.json")) # Reports are exposed, and sorted by push - assert mock_cache.list_reports('another') == [] - assert mock_cache.list_reports('myrepo') == [ - ('rev10', 10), - ('rev2', 2), - ('rev1', 1), + assert mock_cache.list_reports("another") == [] + assert mock_cache.list_reports("myrepo") == [ + ("rev10", 10), + ("rev2", 2), + ("rev1", 1), ] - assert mock_cache.find_report('myrepo') == ('rev10', 10) - assert mock_cache.get_history('myrepo', start=200, end=20000) == [ - {'changeset': 'rev10', 'coverage': 1.0, 'date': 9000}, - {'changeset': 'rev2', 'coverage': 0.2, 'date': 2000}, - {'changeset': 'rev1', 'coverage': 0.1, 'date': 1000}, + assert mock_cache.find_report("myrepo") == ("rev10", 10) + assert mock_cache.get_history("myrepo", start=200, end=20000) == [ + {"changeset": "rev10", "coverage": 1.0, "date": 9000}, + {"changeset": "rev2", "coverage": 0.2, "date": 2000}, + {"changeset": "rev1", "coverage": 0.1, "date": 1000}, ] # Even if we add a smaller one later on, reports are still sorted - mock_cache.bucket.add_mock_blob('myrepo/rev5.json.zstd', coverage=0.5) - mock_cache.ingest_report('myrepo', 5, 'rev5', 5000) - assert mock_cache.list_reports('myrepo') == [ - ('rev10', 10), - 
('rev5', 5), - ('rev2', 2), - ('rev1', 1), + mock_cache.bucket.add_mock_blob("myrepo/rev5.json.zstd", coverage=0.5) + mock_cache.ingest_report("myrepo", 5, "rev5", 5000) + assert mock_cache.list_reports("myrepo") == [ + ("rev10", 10), + ("rev5", 5), + ("rev2", 2), + ("rev1", 1), ] - assert mock_cache.find_report('myrepo') == ('rev10', 10) - assert mock_cache.find_report('myrepo', push_range=(7, 0)) == ('rev5', 5) - assert mock_cache.get_history('myrepo', start=200, end=20000) == [ - {'changeset': 'rev10', 'coverage': 1.0, 'date': 9000}, - {'changeset': 'rev5', 'coverage': 0.5, 'date': 5000}, - {'changeset': 'rev2', 'coverage': 0.2, 'date': 2000}, - {'changeset': 'rev1', 'coverage': 0.1, 'date': 1000}, + assert mock_cache.find_report("myrepo") == ("rev10", 10) + assert mock_cache.find_report("myrepo", push_range=(7, 0)) == ("rev5", 5) + assert mock_cache.get_history("myrepo", start=200, end=20000) == [ + {"changeset": "rev10", "coverage": 1.0, "date": 9000}, + {"changeset": "rev5", "coverage": 0.5, "date": 5000}, + {"changeset": "rev2", "coverage": 0.2, "date": 2000}, + {"changeset": "rev1", "coverage": 0.1, "date": 1000}, ] def test_ingest_hgmo(mock_cache, mock_hgmo): - ''' + """ Test ingestion using a mock HGMO - ''' + """ # Add a report on push 995 - rev = hashlib.md5(b'995').hexdigest() - mock_cache.bucket.add_mock_blob('myrepo/{}.json.zstd'.format(rev), coverage=0.5) + rev = hashlib.md5(b"995").hexdigest() + mock_cache.bucket.add_mock_blob("myrepo/{}.json.zstd".format(rev), coverage=0.5) # Ingest last pushes - assert mock_cache.list_reports('myrepo') == [] - assert len(mock_cache.redis.keys('changeset:myrepo:*')) == 0 - mock_cache.ingest_pushes('myrepo') - assert len(mock_cache.redis.keys('changeset:myrepo:*')) > 0 - assert mock_cache.list_reports('myrepo') == [ - (rev, 995) - ] + assert mock_cache.list_reports("myrepo") == [] + assert len(mock_cache.redis.keys("changeset:myrepo:*")) == 0 + mock_cache.ingest_pushes("myrepo") + assert len(mock_cache.redis.keys("changeset:myrepo:*")) > 0 + assert mock_cache.list_reports("myrepo") == [(rev, 995)] def test_closest_report(mock_cache, mock_hgmo): - ''' + """ Test algo to find the closest report for any changeset - ''' + """ # Build revision for push 992 - revision = '992{}'.format(uuid.uuid4().hex[3:]) + revision = "992{}".format(uuid.uuid4().hex[3:]) # No data at first - assert mock_cache.redis.zcard('reports') == 0 - assert len(mock_cache.redis.keys('changeset:myrepo:*')) == 0 + assert mock_cache.redis.zcard("reports") == 0 + assert len(mock_cache.redis.keys("changeset:myrepo:*")) == 0 # Try to find a report, but none is available with pytest.raises(Exception) as e: - mock_cache.find_closest_report('myrepo', revision) - assert str(e.value) == 'No report found' + mock_cache.find_closest_report("myrepo", revision) + assert str(e.value) == "No report found" # Some pushes were ingested though - assert len(mock_cache.redis.keys('changeset:myrepo:*')) > 0 + assert len(mock_cache.redis.keys("changeset:myrepo:*")) > 0 # Add a report on 994, 2 pushes after our revision - report_rev = hashlib.md5(b'994').hexdigest() - mock_cache.bucket.add_mock_blob('myrepo/{}.json.zstd'.format(report_rev), coverage=0.5) + report_rev = hashlib.md5(b"994").hexdigest() + mock_cache.bucket.add_mock_blob( + "myrepo/{}.json.zstd".format(report_rev), coverage=0.5 + ) # Add a report on 990, 2 pushes before our revision - base_rev = hashlib.md5(b'990').hexdigest() - mock_cache.bucket.add_mock_blob('myrepo/{}.json.zstd'.format(base_rev), coverage=0.4) + base_rev = 
hashlib.md5(b"990").hexdigest() + mock_cache.bucket.add_mock_blob( + "myrepo/{}.json.zstd".format(base_rev), coverage=0.4 + ) # Now we have a report ! - assert mock_cache.list_reports('myrepo') == [] - assert mock_cache.find_closest_report('myrepo', revision) == (report_rev, 994) - assert mock_cache.list_reports('myrepo') == [ - (report_rev, 994) - ] + assert mock_cache.list_reports("myrepo") == [] + assert mock_cache.find_closest_report("myrepo", revision) == (report_rev, 994) + assert mock_cache.list_reports("myrepo") == [(report_rev, 994)] # This should also work for revisions before - revision = '991{}'.format(uuid.uuid4().hex[3:]) - assert mock_cache.find_closest_report('myrepo', revision) == (report_rev, 994) + revision = "991{}".format(uuid.uuid4().hex[3:]) + assert mock_cache.find_closest_report("myrepo", revision) == (report_rev, 994) # ... and the revision on the push itself - revision = '994{}'.format(uuid.uuid4().hex[3:]) - assert mock_cache.find_closest_report('myrepo', revision) == (report_rev, 994) + revision = "994{}".format(uuid.uuid4().hex[3:]) + assert mock_cache.find_closest_report("myrepo", revision) == (report_rev, 994) # We can also retrieve the base revision - revision = '990{}'.format(uuid.uuid4().hex[3:]) - assert mock_cache.find_closest_report('myrepo', revision) == (base_rev, 990) - revision = '989{}'.format(uuid.uuid4().hex[3:]) - assert mock_cache.find_closest_report('myrepo', revision) == (base_rev, 990) - assert mock_cache.list_reports('myrepo') == [ - (report_rev, 994), - (base_rev, 990), - ] + revision = "990{}".format(uuid.uuid4().hex[3:]) + assert mock_cache.find_closest_report("myrepo", revision) == (base_rev, 990) + revision = "989{}".format(uuid.uuid4().hex[3:]) + assert mock_cache.find_closest_report("myrepo", revision) == (base_rev, 990) + assert mock_cache.list_reports("myrepo") == [(report_rev, 994), (base_rev, 990)] # But not for revisions after the push - revision = '995{}'.format(uuid.uuid4().hex[3:]) + revision = "995{}".format(uuid.uuid4().hex[3:]) with pytest.raises(Exception) as e: - mock_cache.find_closest_report('myrepo', revision) - assert str(e.value) == 'No report found' + mock_cache.find_closest_report("myrepo", revision) + assert str(e.value) == "No report found" def test_get_coverage(mock_cache): - ''' + """ Test coverage access with re-download - ''' + """ # No report at first with pytest.raises(AssertionError) as e: - mock_cache.get_coverage('myrepo', 'myhash', '') - assert str(e.value) == 'Missing report for myrepo at myhash' + mock_cache.get_coverage("myrepo", "myhash", "") + assert str(e.value) == "Missing report for myrepo at myhash" # Report available online - mock_cache.bucket.add_mock_blob('myrepo/myhash.json.zstd') + mock_cache.bucket.add_mock_blob("myrepo/myhash.json.zstd") # Coverage available - coverage = mock_cache.get_coverage('myrepo', 'myhash', '') + coverage = mock_cache.get_coverage("myrepo", "myhash", "") assert coverage == { - 'children': [], - 'coveragePercent': 0.0, - 'path': '', - 'type': 'directory', - 'changeset': 'myhash', + "children": [], + "coveragePercent": 0.0, + "path": "", + "type": "directory", + "changeset": "myhash", } # Remove local file - path = os.path.join(mock_cache.reports_dir, 'myrepo', 'myhash.json') + path = os.path.join(mock_cache.reports_dir, "myrepo", "myhash.json") assert os.path.exists(path) os.unlink(path) # Coverage still available - coverage = mock_cache.get_coverage('myrepo', 'myhash', '') + coverage = mock_cache.get_coverage("myrepo", "myhash", "") assert coverage == { - 
'children': [], - 'coveragePercent': 0.0, - 'path': '', - 'type': 'directory', - 'changeset': 'myhash', + "children": [], + "coveragePercent": 0.0, + "path": "", + "type": "directory", + "changeset": "myhash", } # Make invalid json assert os.path.exists(path) - with open(path, 'a') as f: - f.write('break') + with open(path, "a") as f: + f.write("break") # Coverage still available - coverage = mock_cache.get_coverage('myrepo', 'myhash', '') + coverage = mock_cache.get_coverage("myrepo", "myhash", "") assert coverage == { - 'children': [], - 'coveragePercent': 0.0, - 'path': '', - 'type': 'directory', - 'changeset': 'myhash', + "children": [], + "coveragePercent": 0.0, + "path": "", + "type": "directory", + "changeset": "myhash", } assert os.path.exists(path) assert isinstance(json.load(open(path)), dict) diff --git a/bot/code_coverage_bot/artifacts.py b/bot/code_coverage_bot/artifacts.py index dec737d14..476b7ca04 100644 --- a/bot/code_coverage_bot/artifacts.py +++ b/bot/code_coverage_bot/artifacts.py @@ -11,46 +11,48 @@ logger = structlog.get_logger(__name__) -SUITES_TO_IGNORE = ['awsy', 'talos'] # Ignore awsy and talos as they aren't actually suites of tests. -FINISHED_STATUSES = ['completed', 'failed', 'exception'] -ALL_STATUSES = FINISHED_STATUSES + ['unscheduled', 'pending', 'running'] -STATUS_VALUE = { - 'exception': 1, - 'failed': 2, - 'completed': 3, -} +SUITES_TO_IGNORE = [ + "awsy", + "talos", +] # Ignore awsy and talos as they aren't actually suites of tests. +FINISHED_STATUSES = ["completed", "failed", "exception"] +ALL_STATUSES = FINISHED_STATUSES + ["unscheduled", "pending", "running"] +STATUS_VALUE = {"exception": 1, "failed": 2, "completed": 3} class ArtifactsHandler(object): - - def __init__(self, task_ids, parent_dir='ccov-artifacts', task_name_filter='*'): + def __init__(self, task_ids, parent_dir="ccov-artifacts", task_name_filter="*"): self.task_ids = task_ids self.parent_dir = parent_dir self.task_name_filter = task_name_filter def generate_path(self, platform, chunk, artifact): - file_name = '%s_%s_%s' % (platform, chunk, os.path.basename(artifact['name'])) + file_name = "%s_%s_%s" % (platform, chunk, os.path.basename(artifact["name"])) return os.path.join(self.parent_dir, file_name) def get_chunks(self, platform): - return set(f.split('_')[1] for f in os.listdir(self.parent_dir) if os.path.basename(f).startswith(f'{platform}_')) + return set( + f.split("_")[1] + for f in os.listdir(self.parent_dir) + if os.path.basename(f).startswith(f"{platform}_") + ) def get(self, platform=None, suite=None, chunk=None): files = os.listdir(self.parent_dir) if suite is not None and chunk is not None: - raise Exception('suite and chunk can\'t both have a value') + raise Exception("suite and chunk can't both have a value") # Filter artifacts according to platform, suite and chunk. 
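
As a side note for reviewers, a minimal sketch of the artifact naming scheme (`{platform}_{chunk}_{basename}`, from `generate_path`) that the filtering below relies on; the file names here are hypothetical, not taken from a real run:

```python
# Hypothetical artifact names produced by generate_path():
# "{platform}_{chunk}_{basename}"
files = [
    "linux_mochitest-1_code-coverage-grcov.zip",
    "linux_xpcshell-3_code-coverage-jsvm.zip",
    "windows_mochitest-1_code-coverage-grcov.zip",
]

# get_chunks("linux") keeps the middle component of linux_* files.
chunks = set(f.split("_")[1] for f in files if f.startswith("linux_"))
assert chunks == {"mochitest-1", "xpcshell-3"}

# get(platform="linux", chunk="mochitest-1") keeps files whose name
# contains "<chunk>_code-coverage".
selected = [
    f
    for f in files
    if f.startswith("linux_") and "mochitest-1_code-coverage" in f
]
assert selected == ["linux_mochitest-1_code-coverage-grcov.zip"]
```
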
filtered_files = [] for fname in files: - if platform is not None and not fname.startswith('%s_' % platform): + if platform is not None and not fname.startswith("%s_" % platform): continue if suite is not None and suite not in fname: continue - if chunk is not None and ('%s_code-coverage' % chunk) not in fname: + if chunk is not None and ("%s_code-coverage" % chunk) not in fname: continue filtered_files.append(os.path.join(self.parent_dir, fname)) @@ -58,27 +60,30 @@ def get(self, platform=None, suite=None, chunk=None): return filtered_files def download(self, test_task): - chunk_name = taskcluster.get_chunk(test_task['task']['metadata']['name']) - platform_name = taskcluster.get_platform(test_task['task']['metadata']['name']) - test_task_id = test_task['status']['taskId'] + chunk_name = taskcluster.get_chunk(test_task["task"]["metadata"]["name"]) + platform_name = taskcluster.get_platform(test_task["task"]["metadata"]["name"]) + test_task_id = test_task["status"]["taskId"] for artifact in taskcluster.get_task_artifacts(test_task_id): - if not any(n in artifact['name'] for n in ['code-coverage-grcov.zip', 'code-coverage-jsvm.zip']): + if not any( + n in artifact["name"] + for n in ["code-coverage-grcov.zip", "code-coverage-jsvm.zip"] + ): continue artifact_path = self.generate_path(platform_name, chunk_name, artifact) - taskcluster.download_artifact(artifact_path, test_task_id, artifact['name']) - logger.info('%s artifact downloaded' % artifact_path) + taskcluster.download_artifact(artifact_path, test_task_id, artifact["name"]) + logger.info("%s artifact downloaded" % artifact_path) def is_filtered_task(self, task): - ''' + """ Apply name filter from CLI args on task name - ''' + """ assert isinstance(task, dict) - name = task['task']['metadata']['name'] + name = task["task"]["metadata"]["name"] if not fnmatch.fnmatch(name, self.task_name_filter): - logger.debug('Filtered task', name=name) + logger.debug("Filtered task", name=name) return True return False @@ -89,31 +94,46 @@ def download_all(self): # The test tasks for the Linux and Windows builds are in the same group, # but the following code is generic and supports build tasks split in # separate groups. - groups = set([taskcluster.get_task_details(build_task_id)['taskGroupId'] for build_task_id in self.task_ids.values()]) + groups = set( + [ + taskcluster.get_task_details(build_task_id)["taskGroupId"] + for build_task_id in self.task_ids.values() + ] + ) test_tasks = [ task for group in groups for task in taskcluster.get_tasks_in_group(group) if taskcluster.is_coverage_task(task) and not self.is_filtered_task(task) ] - logger.info('Downloading artifacts from {} tasks'.format(len(test_tasks))) + logger.info("Downloading artifacts from {} tasks".format(len(test_tasks))) for test_task in test_tasks: - status = test_task['status']['state'] + status = test_task["status"]["state"] while status not in FINISHED_STATUSES: - assert status in ALL_STATUSES, "State '{}' not recognized".format(status) - logger.info('Waiting for task {} to finish...'.format(test_task['status']['taskId'])) + assert status in ALL_STATUSES, "State '{}' not recognized".format( + status + ) + logger.info( + "Waiting for task {} to finish...".format( + test_task["status"]["taskId"] + ) + ) time.sleep(60) - status = taskcluster.get_task_status(test_task['status']['taskId']) + status = taskcluster.get_task_status(test_task["status"]["taskId"]) # Choose best tasks to download (e.g. 
'completed' is better than 'failed') download_tasks = {} for test_task in test_tasks: - status = test_task['status']['state'] - assert status in FINISHED_STATUSES, "State '{}' not recognized".format(status) + status = test_task["status"]["state"] + assert status in FINISHED_STATUSES, "State '{}' not recognized".format( + status + ) - chunk_name = taskcluster.get_chunk(test_task['task']['metadata']['name']) - platform_name = taskcluster.get_platform(test_task['task']['metadata']['name']) + chunk_name = taskcluster.get_chunk(test_task["task"]["metadata"]["name"]) + platform_name = taskcluster.get_platform( + test_task["task"]["metadata"]["name"] + ) if any(to_ignore in chunk_name for to_ignore in SUITES_TO_IGNORE): continue @@ -126,11 +146,11 @@ def download_all(self): # Otherwise, compare the status of this task with the previously selected task. prev_task = download_tasks[(chunk_name, platform_name)] - if STATUS_VALUE[status] > STATUS_VALUE[prev_task['status']['state']]: + if STATUS_VALUE[status] > STATUS_VALUE[prev_task["status"]["state"]]: download_tasks[(chunk_name, platform_name)] = test_task with ThreadPoolExecutorResult() as executor: for test_task in download_tasks.values(): executor.submit(self.download, test_task) - logger.info('Code coverage artifacts downloaded') + logger.info("Code coverage artifacts downloaded") diff --git a/bot/code_coverage_bot/chunk_mapping.py b/bot/code_coverage_bot/chunk_mapping.py index 6b1f16197..2670c535b 100644 --- a/bot/code_coverage_bot/chunk_mapping.py +++ b/bot/code_coverage_bot/chunk_mapping.py @@ -14,54 +14,73 @@ logger = structlog.get_logger(__name__) -ACTIVEDATA_QUERY_URL = 'http://activedata.allizom.org/query' +ACTIVEDATA_QUERY_URL = "http://activedata.allizom.org/query" -PLATFORMS = ['linux', 'windows'] -IGNORED_SUITE_PREFIXES = ['awsy', 'talos', 'test-coverage', 'test-coverage-wpt'] -# TODO: Calculate this dinamically when https://github.com/klahnakoski/ActiveData-ETL/issues/40 is fixed. -TEST_COVERAGE_SUITES = ['reftest', 'web-platform', 'mochitest', 'xpcshell', 'jsreftest', 'crashtest'] +PLATFORMS = ["linux", "windows"] +IGNORED_SUITE_PREFIXES = ["awsy", "talos", "test-coverage", "test-coverage-wpt"] +# TODO: Calculate this dynamically when https://github.com/klahnakoski/ActiveData-ETL/issues/40 is fixed. +TEST_COVERAGE_SUITES = [ + "reftest", + "web-platform", + "mochitest", + "xpcshell", + "jsreftest", + "crashtest", +] def get_suites(revision): - r = requests.post(ACTIVEDATA_QUERY_URL, json={ - 'from': 'unittest', - 'where': {'and': [ - {'eq': {'repo.branch.name': 'mozilla-central'}}, - {'eq': {'repo.changeset.id12': revision[:12]}}, - {'or': [ - {'prefix': {'run.key': 'test-linux64-ccov'}}, - {'prefix': {'run.key': 'test-windows10-64-ccov'}} - ]} - ]}, - 'limit': 500000, - 'groupby': ['run.suite.fullname'] - }) - - suites_data = r.json()['data'] + r = requests.post( + ACTIVEDATA_QUERY_URL, + json={ + "from": "unittest", + "where": { + "and": [ + {"eq": {"repo.branch.name": "mozilla-central"}}, + {"eq": {"repo.changeset.id12": revision[:12]}}, + { + "or": [ + {"prefix": {"run.key": "test-linux64-ccov"}}, + {"prefix": {"run.key": "test-windows10-64-ccov"}}, + ] + }, + ] + }, + "limit": 500000, + "groupby": ["run.suite.fullname"], + }, + ) + + suites_data = r.json()["data"] return [e[0] for e in suites_data] # Retrieve chunk -> tests mapping from ActiveData. 
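
Since the shape of the ActiveData responses is only implied by how the results are consumed, here is a small hedged sketch of the assumed formats (all values are made up): `get_suites` treats `data` as a list of grouped rows, while `get_tests_chunks` treats it as a dict of parallel columns.

```python
# Assumed get_suites() payload: one row per "run.suite.fullname" group.
suites_data = [["mochitest-plain", 1200], ["xpcshell", 800]]
suites = [e[0] for e in suites_data]
assert suites == ["mochitest-plain", "xpcshell"]

# Assumed get_tests_chunks() payload: parallel columns for the selected fields.
tests_data = {
    "result.test": ["dom/test_a.html", "dom/test_b.html"],
    "run.key": [
        "test-linux64-ccov/debug-mochitest-1",
        "test-linux64-ccov/debug-mochitest-2",
    ],
}
# chunk_mapping.generate() later pairs these columns together by index.
pairs = list(zip(tests_data["run.key"], tests_data["result.test"]))
assert pairs[0] == ("test-linux64-ccov/debug-mochitest-1", "dom/test_a.html")
```
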
def get_tests_chunks(revision, platform, suite): - if platform == 'linux': - run_key_prefix = 'test-linux64-ccov' - elif platform == 'windows': - run_key_prefix = 'test-windows10-64-ccov' - - r = requests.post(ACTIVEDATA_QUERY_URL, json={ - 'from': 'unittest', - 'where': {'and': [ - {'eq': {'repo.branch.name': 'mozilla-central'}}, - {'eq': {'repo.changeset.id12': revision[:12]}}, - {'eq': {'run.suite.fullname': suite}}, - {'prefix': {'run.key': run_key_prefix}}, - ]}, - 'limit': 50000, - 'select': ['result.test', 'run.key'] - }) - - return r.json()['data'] + if platform == "linux": + run_key_prefix = "test-linux64-ccov" + elif platform == "windows": + run_key_prefix = "test-windows10-64-ccov" + + r = requests.post( + ACTIVEDATA_QUERY_URL, + json={ + "from": "unittest", + "where": { + "and": [ + {"eq": {"repo.branch.name": "mozilla-central"}}, + {"eq": {"repo.changeset.id12": revision[:12]}}, + {"eq": {"run.suite.fullname": suite}}, + {"prefix": {"run.key": run_key_prefix}}, + ] + }, + "limit": 50000, + "select": ["result.test", "run.key"], + }, + ) + + return r.json()["data"] def group_by_20k(data): @@ -76,55 +95,70 @@ def group_by_20k(data): def get_test_coverage_suites(): - r = requests.post(ACTIVEDATA_QUERY_URL, json={ - 'from': 'coverage', - 'where': {'and': [ - {'eq': {'repo.branch.name': 'mozilla-central'}}, - {'gte': {'repo.push.date': {'date': 'today-week'}}}, - {'gt': {'source.file.total_covered': 0}}, - {'exists': 'test.name'} - ]}, - 'limit': 50000, - 'select': {'aggregate': 'cardinality', 'value': 'test.name'}, - 'groupby': ['test.suite'] - }) - - return r.json()['data'] + r = requests.post( + ACTIVEDATA_QUERY_URL, + json={ + "from": "coverage", + "where": { + "and": [ + {"eq": {"repo.branch.name": "mozilla-central"}}, + {"gte": {"repo.push.date": {"date": "today-week"}}}, + {"gt": {"source.file.total_covered": 0}}, + {"exists": "test.name"}, + ] + }, + "limit": 50000, + "select": {"aggregate": "cardinality", "value": "test.name"}, + "groupby": ["test.suite"], + }, + ) + + return r.json()["data"] def get_test_coverage_tests(suites): - r = requests.post(ACTIVEDATA_QUERY_URL, json={ - 'from': 'coverage', - 'where': {'and': [ - {'eq': {'repo.branch.name': 'mozilla-central'}}, - {'gte': {'repo.push.date': {'date': 'today-week'}}}, - {'gt': {'source.file.total_covered': 0}}, - {'exists': 'test.name'}, - {'in': {'test.suite': suites}} - ]}, - 'limit': 50000, - 'select': {'aggregate': 'cardinality', 'value': 'source.file.name'}, - 'groupby': ['test.name'] - }) - - return r.json()['data'] + r = requests.post( + ACTIVEDATA_QUERY_URL, + json={ + "from": "coverage", + "where": { + "and": [ + {"eq": {"repo.branch.name": "mozilla-central"}}, + {"gte": {"repo.push.date": {"date": "today-week"}}}, + {"gt": {"source.file.total_covered": 0}}, + {"exists": "test.name"}, + {"in": {"test.suite": suites}}, + ] + }, + "limit": 50000, + "select": {"aggregate": "cardinality", "value": "source.file.name"}, + "groupby": ["test.name"], + }, + ) + + return r.json()["data"] def get_test_coverage_files(tests): - r = requests.post(ACTIVEDATA_QUERY_URL, json={ - 'from': 'coverage', - 'where': {'and': [ - {'eq': {'repo.branch.name': 'mozilla-central'}}, - {'gte': {'repo.push.date': {'date': 'today-week'}}}, - {'gt': {'source.file.total_covered': 0}}, - {'exists': 'test.name'}, - {'in': {'test.name': tests}} - ]}, - 'limit': 50000, - 'select': ['source.file.name', 'test.name'] - }) - - return r.json()['data'] + r = requests.post( + ACTIVEDATA_QUERY_URL, + json={ + "from": "coverage", + "where": { + "and": [ + 
{"eq": {"repo.branch.name": "mozilla-central"}}, + {"gte": {"repo.push.date": {"date": "today-week"}}}, + {"gt": {"source.file.total_covered": 0}}, + {"exists": "test.name"}, + {"in": {"test.name": tests}}, + ] + }, + "limit": 50000, + "select": ["source.file.name", "test.name"], + }, + ) + + return r.json()["data"] def is_chunk_only_suite(suite): @@ -132,77 +166,97 @@ def is_chunk_only_suite(suite): if any(suite.startswith(prefix) for prefix in IGNORED_SUITE_PREFIXES): return False # Ignore suites supported by test-coverage. - if any(test_coverage_suite in suite for test_coverage_suite in TEST_COVERAGE_SUITES): + if any( + test_coverage_suite in suite for test_coverage_suite in TEST_COVERAGE_SUITES + ): return False return True -def generate(repo_dir, revision, artifactsHandler, out_dir='.'): - logger.info('Generating chunk mapping...') - sqlite_file = os.path.join(out_dir, 'chunk_mapping.sqlite') - tarxz_file = os.path.join(out_dir, 'chunk_mapping.tar.xz') +def generate(repo_dir, revision, artifactsHandler, out_dir="."): + logger.info("Generating chunk mapping...") + sqlite_file = os.path.join(out_dir, "chunk_mapping.sqlite") + tarxz_file = os.path.join(out_dir, "chunk_mapping.tar.xz") with sqlite3.connect(sqlite_file) as conn: - logger.info('Creating tables.') + logger.info("Creating tables.") c = conn.cursor() - c.execute('CREATE TABLE file_to_chunk (path text, platform text, chunk text)') - c.execute('CREATE TABLE chunk_to_test (platform text, chunk text, path text)') - c.execute('CREATE TABLE file_to_test (source text, test text)') + c.execute("CREATE TABLE file_to_chunk (path text, platform text, chunk text)") + c.execute("CREATE TABLE chunk_to_test (platform text, chunk text, path text)") + c.execute("CREATE TABLE file_to_test (source text, test text)") - logger.info('Populating file_to_test table.') + logger.info("Populating file_to_test table.") test_coverage_suites = get_test_coverage_suites() - logger.info('Found {} test suites.'.format(len(test_coverage_suites))) + logger.info("Found {} test suites.".format(len(test_coverage_suites))) for suites in group_by_20k(test_coverage_suites): test_coverage_tests = get_test_coverage_tests(suites) for tests in group_by_20k(test_coverage_tests): tests_files_data = get_test_coverage_files(tests) - source_names = tests_files_data['source.file.name'] - test_iter = enumerate(tests_files_data['test.name']) + source_names = tests_files_data["source.file.name"] + test_iter = enumerate(tests_files_data["test.name"]) source_test_iter = ((source_names[i], test) for i, test in test_iter) - c.executemany('INSERT INTO file_to_test VALUES (?,?)', source_test_iter) + c.executemany("INSERT INTO file_to_test VALUES (?,?)", source_test_iter) with ThreadPoolExecutor(max_workers=4) as executor: futures = {} for platform in PLATFORMS: - logger.info('Reading chunk coverage artifacts for {}.'.format(platform)) + logger.info("Reading chunk coverage artifacts for {}.".format(platform)) for chunk in artifactsHandler.get_chunks(platform): suite = taskcluster.get_suite(chunk) if not is_chunk_only_suite(suite): continue - assert chunk.strip() != '', 'chunk can not be an empty string' + assert chunk.strip() != "", "chunk can not be an empty string" artifacts = artifactsHandler.get(platform=platform, chunk=chunk) - assert len(artifacts) > 0, 'There should be at least one artifact' + assert len(artifacts) > 0, "There should be at least one artifact" - future = executor.submit(grcov.files_list, artifacts, source_dir=repo_dir) + future = executor.submit( + 
grcov.files_list, artifacts, source_dir=repo_dir + ) futures[future] = (platform, chunk) - logger.info('Populating chunk_to_test table for {}.'.format(platform)) + logger.info("Populating chunk_to_test table for {}.".format(platform)) for suite in get_suites(revision): if not is_chunk_only_suite(suite): continue tests_data = get_tests_chunks(revision, platform, suite) if len(tests_data) == 0: - logger.warn('No tests found for platform {} and suite {}.'.format(platform, suite)) + logger.warn( + "No tests found for platform {} and suite {}.".format( + platform, suite + ) + ) continue - logger.info('Adding tests for platform {} and suite {}'.format(platform, suite)) - task_names = tests_data['run.key'] - test_iter = enumerate(tests_data['result.test']) - chunk_test_iter = ((platform, taskcluster.get_chunk(task_names[i]), test) for i, test in test_iter) - c.executemany('INSERT INTO chunk_to_test VALUES (?,?,?)', chunk_test_iter) - - logger.info('Populating file_to_chunk table.') + logger.info( + "Adding tests for platform {} and suite {}".format( + platform, suite + ) + ) + task_names = tests_data["run.key"] + test_iter = enumerate(tests_data["result.test"]) + chunk_test_iter = ( + (platform, taskcluster.get_chunk(task_names[i]), test) + for i, test in test_iter + ) + c.executemany( + "INSERT INTO chunk_to_test VALUES (?,?,?)", chunk_test_iter + ) + + logger.info("Populating file_to_chunk table.") for future in concurrent.futures.as_completed(futures): (platform, chunk) = futures[future] files = future.result() - c.executemany('INSERT INTO file_to_chunk VALUES (?,?,?)', ((f, platform, chunk) for f in files)) + c.executemany( + "INSERT INTO file_to_chunk VALUES (?,?,?)", + ((f, platform, chunk) for f in files), + ) - logger.info('Writing the chunk mapping archive at {}.'.format(tarxz_file)) - with tarfile.open(tarxz_file, 'w:xz') as tar: + logger.info("Writing the chunk mapping archive at {}.".format(tarxz_file)) + with tarfile.open(tarxz_file, "w:xz") as tar: tar.add(sqlite_file, os.path.basename(sqlite_file)) diff --git a/bot/code_coverage_bot/cli.py b/bot/code_coverage_bot/cli.py index 1191695f9..ffbc83e73 100644 --- a/bot/code_coverage_bot/cli.py +++ b/bot/code_coverage_bot/cli.py @@ -14,41 +14,27 @@ def parse_cli(): - ''' + """ Setup CLI options parser - ''' - parser = argparse.ArgumentParser(description='Mozilla Code Coverage Bot') + """ + parser = argparse.ArgumentParser(description="Mozilla Code Coverage Bot") + parser.add_argument("--repository", default=os.environ.get("REPOSITORY")) + parser.add_argument("--revision", default=os.environ.get("REVISION")) parser.add_argument( - '--repository', - default=os.environ.get('REPOSITORY'), + "--cache-root", required=True, help="Cache root, used to pull changesets" ) parser.add_argument( - '--revision', - default=os.environ.get('REVISION'), + "--task-name-filter", + default="*", + help="Filter Taskcluster tasks using a glob expression", ) parser.add_argument( - '--cache-root', - required=True, - help='Cache root, used to pull changesets' - ) - parser.add_argument( - '--task-name-filter', - default='*', - help='Filter Taskcluster tasks using a glob expression', - ) - parser.add_argument( - '--taskcluster-secret', - help='Taskcluster Secret path', - default=os.environ.get('TASKCLUSTER_SECRET') - ) - parser.add_argument( - '--taskcluster-client-id', - help='Taskcluster Client ID', - ) - parser.add_argument( - '--taskcluster-access-token', - help='Taskcluster Access token', + "--taskcluster-secret", + help="Taskcluster Secret path", + 
default=os.environ.get("TASKCLUSTER_SECRET"), ) + parser.add_argument("--taskcluster-client-id", help="Taskcluster Client ID") + parser.add_argument("--taskcluster-access-token", help="Taskcluster Access token") return parser.parse_args() @@ -61,15 +47,16 @@ def main(): # Then load secrets secrets.load(args.taskcluster_secret) - init_logger(config.PROJECT_NAME, - PAPERTRAIL_HOST=secrets.get('PAPERTRAIL_HOST'), - PAPERTRAIL_PORT=secrets.get('PAPERTRAIL_PORT'), - SENTRY_DSN=secrets.get('SENTRY_DSN'), - ) + init_logger( + config.PROJECT_NAME, + PAPERTRAIL_HOST=secrets.get("PAPERTRAIL_HOST"), + PAPERTRAIL_PORT=secrets.get("PAPERTRAIL_PORT"), + SENTRY_DSN=secrets.get("SENTRY_DSN"), + ) c = CodeCov(args.repository, args.revision, args.task_name_filter, args.cache_root) c.go() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/bot/code_coverage_bot/codecov.py b/bot/code_coverage_bot/codecov.py index bbdafc60f..a10ab2fc9 100644 --- a/bot/code_coverage_bot/codecov.py +++ b/bot/code_coverage_bot/codecov.py @@ -28,35 +28,34 @@ logger = structlog.get_logger(__name__) -HG_BASE = 'https://hg.mozilla.org/' -MOZILLA_CENTRAL_REPOSITORY = '{}mozilla-central'.format(HG_BASE) -TRY_REPOSITORY = '{}try'.format(HG_BASE) +HG_BASE = "https://hg.mozilla.org/" +MOZILLA_CENTRAL_REPOSITORY = "{}mozilla-central".format(HG_BASE) +TRY_REPOSITORY = "{}try".format(HG_BASE) class CodeCov(object): - def __init__(self, repository, revision, task_name_filter, cache_root): # List of test-suite, sorted alphabetically. # This way, the index of a suite in the array should be stable enough. - self.suites = [ - 'web-platform-tests', - ] + self.suites = ["web-platform-tests"] self.cache_root = cache_root temp_dir = tempfile.mkdtemp() - self.artifacts_dir = os.path.join(temp_dir, 'ccov-artifacts') - self.ccov_reports_dir = os.path.join(temp_dir, 'code-coverage-reports') + self.artifacts_dir = os.path.join(temp_dir, "ccov-artifacts") + self.ccov_reports_dir = os.path.join(temp_dir, "code-coverage-reports") - self.index_service = taskcluster_config.get_service('index') + self.index_service = taskcluster_config.get_service("index") if revision is None: # Retrieve latest ingested revision self.repository = MOZILLA_CENTRAL_REPOSITORY try: - self.revision = uploader.gcp_latest('mozilla-central')[0]['revision'] + self.revision = uploader.gcp_latest("mozilla-central")[0]["revision"] except Exception as e: - logger.warn('Failed to retrieve the latest reports ingested: {}'.format(e)) + logger.warn( + "Failed to retrieve the latest reports ingested: {}".format(e) + ) raise self.from_pulse = False else: @@ -64,15 +63,17 @@ def __init__(self, repository, revision, task_name_filter, cache_root): self.revision = revision self.from_pulse = True - self.branch = self.repository[len(HG_BASE):] + self.branch = self.repository[len(HG_BASE) :] - assert os.path.isdir(cache_root), 'Cache root {} is not a dir.'.format(cache_root) + assert os.path.isdir(cache_root), "Cache root {} is not a dir.".format( + cache_root + ) self.repo_dir = os.path.join(cache_root, self.branch) - logger.info('Mercurial revision', revision=self.revision) + logger.info("Mercurial revision", revision=self.revision) task_ids = {} - for platform in ['linux', 'windows', 'android-test', 'android-emulator']: + for platform in ["linux", "windows", "android-test", "android-emulator"]: task = taskcluster.get_task(self.branch, self.revision, platform) # On try, developers might have requested to run only one platform, and we trust them. 
@@ -80,20 +81,27 @@ def __init__(self, repository, revision, task_name_filter, cache_root): # as they are unstable). if task is not None: task_ids[platform] = task - elif self.repository == MOZILLA_CENTRAL_REPOSITORY and not platform.startswith('android'): - raise Exception('Code coverage build failed and was not indexed.') - - self.artifactsHandler = ArtifactsHandler(task_ids, self.artifacts_dir, task_name_filter) + elif ( + self.repository == MOZILLA_CENTRAL_REPOSITORY + and not platform.startswith("android") + ): + raise Exception("Code coverage build failed and was not indexed.") + + self.artifactsHandler = ArtifactsHandler( + task_ids, self.artifacts_dir, task_name_filter + ) def clone_repository(self, repository, revision): - cmd = hglib.util.cmdbuilder('robustcheckout', - repository, - self.repo_dir, - purge=True, - sharebase='hg-shared', - upstream='https://hg.mozilla.org/mozilla-unified', - revision=revision, - networkattempts=7) + cmd = hglib.util.cmdbuilder( + "robustcheckout", + repository, + self.repo_dir, + purge=True, + sharebase="hg-shared", + upstream="https://hg.mozilla.org/mozilla-unified", + revision=revision, + networkattempts=7, + ) cmd.insert(0, hglib.HGPATH) @@ -102,7 +110,7 @@ def clone_repository(self, repository, revision): if proc.returncode: raise hglib.error.CommandError(cmd, proc.returncode, out, err) - logger.info('{} cloned'.format(repository)) + logger.info("{} cloned".format(repository)) def retrieve_source_and_artifacts(self): with ThreadPoolExecutorResult(max_workers=2) as executor: @@ -113,22 +121,20 @@ def retrieve_source_and_artifacts(self): executor.submit(self.clone_repository, self.repository, self.revision) def generate_covdir(self): - ''' + """ Build the covdir report using current artifacts - ''' + """ output = grcov.report( - self.artifactsHandler.get(), - source_dir=self.repo_dir, - out_format='covdir', + self.artifactsHandler.get(), source_dir=self.repo_dir, out_format="covdir" ) - logger.info('Covdir report generated successfully') + logger.info("Covdir report generated successfully") return json.loads(output) # This function is executed when the bot is triggered at the end of a mozilla-central build. def go_from_trigger_mozilla_central(self): # Check the covdir report does not already exists if uploader.gcp_covdir_exists(self.branch, self.revision): - logger.warn('Covdir report already on GCP') + logger.warn("Covdir report already on GCP") return self.retrieve_source_and_artifacts() @@ -136,37 +142,48 @@ def go_from_trigger_mozilla_central(self): # Check that all JavaScript files present in the coverage artifacts actually exist. # If they don't, there might be a bug in the LCOV rewriter. 
for artifact in self.artifactsHandler.get(): - if 'jsvm' not in artifact: + if "jsvm" not in artifact: continue - with zipfile.ZipFile(artifact, 'r') as zf: + with zipfile.ZipFile(artifact, "r") as zf: for file_name in zf.namelist(): - with zf.open(file_name, 'r') as fl: - source_files = [line[3:].decode('utf-8').rstrip() for line in fl if line.startswith(b'SF:')] - missing_files = [f for f in source_files if not os.path.exists(os.path.join(self.repo_dir, f))] + with zf.open(file_name, "r") as fl: + source_files = [ + line[3:].decode("utf-8").rstrip() + for line in fl + if line.startswith(b"SF:") + ] + missing_files = [ + f + for f in source_files + if not os.path.exists(os.path.join(self.repo_dir, f)) + ] if len(missing_files) != 0: - logger.warn(f'{missing_files} are present in coverage reports, but missing from the repository') + logger.warn( + f"{missing_files} are present in coverage reports, but missing from the repository" + ) report = self.generate_covdir() paths = uploader.covdir_paths(report) - expected_extensions = ['.js', '.cpp'] + expected_extensions = [".js", ".cpp"] for extension in expected_extensions: - assert any(path.endswith(extension) for path in paths), \ - 'No {} file in the generated report'.format(extension) + assert any( + path.endswith(extension) for path in paths + ), "No {} file in the generated report".format(extension) # Get pushlog and ask the backend to generate the coverage by changeset # data, which will be cached. with hgmo.HGMO(self.repo_dir) as hgmo_server: changesets = hgmo_server.get_automation_relevance_changesets(self.revision) - logger.info('Upload changeset coverage data to Phabricator') + logger.info("Upload changeset coverage data to Phabricator") phabricatorUploader = PhabricatorUploader(self.repo_dir, self.revision) changesets_coverage = phabricatorUploader.upload(report, changesets) uploader.gcp(self.branch, self.revision, report) - logger.info('Build uploaded on GCP') + logger.info("Build uploaded on GCP") notify_email(self.revision, changesets, changesets_coverage) # This function is executed when the bot is triggered at the end of a try build. @@ -176,49 +193,61 @@ def go_from_trigger_try(self): with hgmo.HGMO(server_address=TRY_REPOSITORY) as hgmo_server: changesets = hgmo_server.get_automation_relevance_changesets(self.revision) - if not any(parse_revision_id(changeset['desc']) is not None for changeset in changesets): - logger.info('None of the commits in the try push are linked to a Phabricator revision') + if not any( + parse_revision_id(changeset["desc"]) is not None for changeset in changesets + ): + logger.info( + "None of the commits in the try push are linked to a Phabricator revision" + ) return self.retrieve_source_and_artifacts() report = self.generate_covdir() - logger.info('Upload changeset coverage data to Phabricator') + logger.info("Upload changeset coverage data to Phabricator") phabricatorUploader.upload(report, changesets) # This function is executed when the bot is triggered via cron. 
def go_from_cron(self): self.retrieve_source_and_artifacts() - logger.info('Generating suite reports') + logger.info("Generating suite reports") os.makedirs(self.ccov_reports_dir, exist_ok=True) - suite_reports.generate(self.suites, self.artifactsHandler, self.ccov_reports_dir, self.repo_dir) + suite_reports.generate( + self.suites, self.artifactsHandler, self.ccov_reports_dir, self.repo_dir + ) - logger.info('Generating zero coverage reports') + logger.info("Generating zero coverage reports") zc = ZeroCov(self.repo_dir) zc.generate(self.artifactsHandler.get(), self.revision) - logger.info('Generating chunk mapping') + logger.info("Generating chunk mapping") chunk_mapping.generate(self.repo_dir, self.revision, self.artifactsHandler) # Index the task in the TaskCluster index at the given revision and as "latest". # Given that all tasks have the same rank, the latest task that finishes will # overwrite the "latest" entry. namespaces = [ - 'project.releng.services.project.{}.code_coverage_bot.{}'.format(secrets[secrets.APP_CHANNEL], self.revision), - 'project.releng.services.project.{}.code_coverage_bot.latest'.format(secrets[secrets.APP_CHANNEL]), + "project.releng.services.project.{}.code_coverage_bot.{}".format( + secrets[secrets.APP_CHANNEL], self.revision + ), + "project.releng.services.project.{}.code_coverage_bot.latest".format( + secrets[secrets.APP_CHANNEL] + ), ] for namespace in namespaces: self.index_service.insertTask( namespace, { - 'taskId': os.environ['TASK_ID'], - 'rank': 0, - 'data': {}, - 'expires': (datetime.utcnow() + timedelta(180)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'), - } + "taskId": os.environ["TASK_ID"], + "rank": 0, + "data": {}, + "expires": (datetime.utcnow() + timedelta(180)).strftime( + "%Y-%m-%dT%H:%M:%S.%fZ" + ), + }, ) def go(self): @@ -229,4 +258,4 @@ def go(self): elif self.repository == MOZILLA_CENTRAL_REPOSITORY: self.go_from_trigger_mozilla_central() else: - assert False, 'We shouldn\'t be here!' + assert False, "We shouldn't be here!" diff --git a/bot/code_coverage_bot/config.py b/bot/code_coverage_bot/config.py index 834e03fa8..1b08a10b1 100644 --- a/bot/code_coverage_bot/config.py +++ b/bot/code_coverage_bot/config.py @@ -3,4 +3,4 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-PROJECT_NAME = 'code-coverage-bot' +PROJECT_NAME = "code-coverage-bot" diff --git a/bot/code_coverage_bot/grcov.py b/bot/code_coverage_bot/grcov.py index cf6e418c0..b6f84faba 100644 --- a/bot/code_coverage_bot/grcov.py +++ b/bot/code_coverage_bot/grcov.py @@ -6,25 +6,33 @@ logger = structlog.get_logger(__name__) -def report(artifacts, source_dir=None, out_format='covdir', options=[]): - assert out_format in ('covdir', 'files', 'lcov', 'coveralls+'), 'Unsupported output format' - cmd = [ - 'grcov', - '-t', out_format, - ] +def report(artifacts, source_dir=None, out_format="covdir", options=[]): + assert out_format in ( + "covdir", + "files", + "lcov", + "coveralls+", + ), "Unsupported output format" + cmd = ["grcov", "-t", out_format] # Coveralls+ is only needed for zero-coverage reports - if out_format == 'coveralls+': - cmd.extend([ - '--service-name', 'TaskCluster', - '--commit-sha', 'unused', - '--token', 'unused', - '--service-job-number', '1', - ]) + if out_format == "coveralls+": + cmd.extend( + [ + "--service-name", + "TaskCluster", + "--commit-sha", + "unused", + "--token", + "unused", + "--service-job-number", + "1", + ] + ) if source_dir is not None: - cmd.extend(['-s', source_dir]) - cmd.append('--ignore-not-existing') + cmd.extend(["-s", source_dir]) + cmd.append("--ignore-not-existing") cmd.extend(artifacts) cmd.extend(options) @@ -32,11 +40,13 @@ def report(artifacts, source_dir=None, out_format='covdir', options=[]): try: return run_check(cmd) except Exception: - logger.error('Error while running grcov') + logger.error("Error while running grcov") raise def files_list(artifacts, source_dir=None): - options = ['--filter', 'covered', '--threads', '2'] - files = report(artifacts, source_dir=source_dir, out_format='files', options=options) - return files.decode('utf-8').splitlines() + options = ["--filter", "covered", "--threads", "2"] + files = report( + artifacts, source_dir=source_dir, out_format="files", options=options + ) + return files.decode("utf-8").splitlines() diff --git a/bot/code_coverage_bot/hgmo.py b/bot/code_coverage_bot/hgmo.py index db3d8d60d..d08052051 100644 --- a/bot/code_coverage_bot/hgmo.py +++ b/bot/code_coverage_bot/hgmo.py @@ -11,8 +11,8 @@ class HGMO(object): - PID_FILE = 'hgmo.pid' - SERVER_ADDRESS = 'http://localhost:8000' + PID_FILE = "hgmo.pid" + SERVER_ADDRESS = "http://localhost:8000" def __init__(self, repo_dir=None, server_address=None): assert (repo_dir is not None) ^ (server_address is not None) @@ -22,11 +22,10 @@ def __init__(self, repo_dir=None, server_address=None): else: self.server_address = HGMO.SERVER_ADDRESS self.repo_dir = repo_dir - self.pid_file = os.path.join(os.getcwd(), - HGMO.PID_FILE) + self.pid_file = os.path.join(os.getcwd(), HGMO.PID_FILE) def __get_pid(self): - with open(self.pid_file, 'r') as In: + with open(self.pid_file, "r") as In: pid = In.read() return int(pid) @@ -34,15 +33,14 @@ def __enter__(self): if self.repo_dir is None: return self - proc = subprocess.Popen(['hg', 'serve', - '--hgmo', - '--daemon', - '--pid-file', self.pid_file], - cwd=self.repo_dir, - stderr=subprocess.STDOUT) + proc = subprocess.Popen( + ["hg", "serve", "--hgmo", "--daemon", "--pid-file", self.pid_file], + cwd=self.repo_dir, + stderr=subprocess.STDOUT, + ) proc.wait() - logger.info('hgmo is running', pid=self.__get_pid()) + logger.info("hgmo is running", pid=self.__get_pid()) return self @@ -53,29 +51,28 @@ def __exit__(self, type, value, traceback): pid = self.__get_pid() os.killpg(os.getpgid(pid), signal.SIGTERM) os.remove(self.pid_file) 
- logger.info('hgmo has been killed') + logger.info("hgmo has been killed") def get_pushes(self, startID=None, changeset=None): assert startID is not None or changeset is not None - params = { - 'version': 2, - 'full': 1 - } + params = {"version": 2, "full": 1} if startID is not None: - params['startID'] = startID + params["startID"] = startID if changeset is not None: - params['changeset'] = changeset + params["changeset"] = changeset - r = requests.get('{}/json-pushes'.format(self.server_address), params=params) + r = requests.get("{}/json-pushes".format(self.server_address), params=params) r.raise_for_status() return r.json() def get_annotate(self, revision, path): - r = requests.get('{}/json-annotate/{}/{}'.format(self.server_address, revision, path)) + r = requests.get( + "{}/json-annotate/{}/{}".format(self.server_address, revision, path) + ) # 200 means success. # 404 means a file that doesn't exist (never existed or was removed). @@ -84,16 +81,22 @@ def get_annotate(self, revision, path): annotate_data = r.json() - if 'error' in annotate_data: - if 'not found in manifest' in annotate_data['error']: + if "error" in annotate_data: + if "not found in manifest" in annotate_data["error"]: # The file was removed. return None else: - raise Exception('Error while retrieving annotate data: {}'.format(annotate_data['error'])) + raise Exception( + "Error while retrieving annotate data: {}".format( + annotate_data["error"] + ) + ) - return annotate_data['annotate'] + return annotate_data["annotate"] def get_automation_relevance_changesets(self, changeset): - r = requests.get('{}/json-automationrelevance/{}'.format(self.server_address, changeset)) + r = requests.get( + "{}/json-automationrelevance/{}".format(self.server_address, changeset) + ) r.raise_for_status() - return r.json()['changesets'] + return r.json()["changesets"] diff --git a/bot/code_coverage_bot/notifier.py b/bot/code_coverage_bot/notifier.py index 89075abc5..4eabc661c 100644 --- a/bot/code_coverage_bot/notifier.py +++ b/bot/code_coverage_bot/notifier.py @@ -9,48 +9,52 @@ def notify_email(revision, changesets, changesets_coverage): - ''' + """ Send an email to admins when low coverage for new commits is detected - ''' - notify_service = taskcluster_config.get_service('notify') + """ + notify_service = taskcluster_config.get_service("notify") - content = '' + content = "" for changeset in changesets: - desc = changeset['desc'].split('\n')[0] + desc = changeset["desc"].split("\n")[0] - if any(text in desc for text in ['r=merge', 'a=merge']): + if any(text in desc for text in ["r=merge", "a=merge"]): continue - rev = changeset['node'] + rev = changeset["node"] # Lookup changeset coverage from phabricator uploader - rev_id = parse_revision_id(changeset['desc']) + rev_id = parse_revision_id(changeset["desc"]) if rev_id is None: continue coverage = changesets_coverage.get(rev_id) if coverage is None: - logger.warn('No coverage found', changeset=changeset) + logger.warn("No coverage found", changeset=changeset) continue # Calc totals for all files - covered = sum(c['lines_covered'] for c in coverage.values()) - added = sum(c['lines_added'] for c in coverage.values()) + covered = sum(c["lines_covered"] for c in coverage.values()) + added = sum(c["lines_added"] for c in coverage.values()) if covered < 0.2 * added: - content += '* [{}](https://firefox-code-coverage.herokuapp.com/#/changeset/{}): {} covered out of {} added.\n'.format(desc, rev, covered, added) # noqa + content += "* 
[{}](https://firefox-code-coverage.herokuapp.com/#/changeset/{}): {} covered out of {} added.\n".format( + desc, rev, covered, added + ) # noqa - if content == '': + if content == "": return elif len(content) > 102400: # Content is 102400 chars max - content = content[:102000] + '\n\n... Content max limit reached!' + content = content[:102000] + "\n\n... Content max limit reached!" for email in secrets[secrets.EMAIL_ADDRESSES]: - notify_service.email({ - 'address': email, - 'subject': 'Coverage patches for {}'.format(revision), - 'content': content, - 'template': 'fullscreen', - }) + notify_service.email( + { + "address": email, + "subject": "Coverage patches for {}".format(revision), + "content": content, + "template": "fullscreen", + } + ) return content diff --git a/bot/code_coverage_bot/phabricator.py b/bot/code_coverage_bot/phabricator.py index 7e3346076..ee40f428d 100644 --- a/bot/code_coverage_bot/phabricator.py +++ b/bot/code_coverage_bot/phabricator.py @@ -12,7 +12,9 @@ logger = structlog.get_logger(__name__) -PHABRICATOR_REVISION_REGEX = re.compile('Differential Revision: https://phabricator.services.mozilla.com/D([0-9]+)') +PHABRICATOR_REVISION_REGEX = re.compile( + "Differential Revision: https://phabricator.services.mozilla.com/D([0-9]+)" +) def parse_revision_id(desc): @@ -28,19 +30,19 @@ def __init__(self, repo_dir, revision): self.revision = revision def _find_coverage(self, report, path): - ''' + """ Find coverage value in a covdir report - ''' + """ assert isinstance(report, dict) - parts = path.split('/') + parts = path.split("/") for part in filter(None, parts): - if part not in report['children']: - logger.warn('Path {} not found in report'.format(path)) + if part not in report["children"]: + logger.warn("Path {} not found in report".format(path)) return - report = report['children'][part] + report = report["children"][part] - return report['coverage'] + return report["coverage"] def _build_coverage_map(self, annotate, coverage_record): # We can't use plain line numbers to map coverage data from the build changeset to the @@ -55,41 +57,41 @@ def _build_coverage_map(self, annotate, coverage_record): for data in annotate: # The line number at the build changeset. # Line numbers start from 1 in the annotate data, from 0 in the coverage data. - lineno = data['lineno'] - 1 + lineno = data["lineno"] - 1 # The line number when it was introduced. - orig_line = data['targetline'] + orig_line = data["targetline"] # The changeset when it was introduced. - orig_changeset = data['node'] + orig_changeset = data["node"] if lineno < len(coverage_record): - key = '{}-{}'.format(orig_changeset, orig_line) + key = "{}-{}".format(orig_changeset, orig_line) coverage_map[key] = coverage_record[lineno] return coverage_map def _apply_coverage_map(self, annotate, coverage_map): - phab_coverage_data = '' + phab_coverage_data = "" for data in annotate: # The line number when it was introduced. - orig_line = data['targetline'] + orig_line = data["targetline"] # The changeset when it was introduced. - orig_changeset = data['node'] + orig_changeset = data["node"] - key = '{}-{}'.format(orig_changeset, orig_line) + key = "{}-{}".format(orig_changeset, orig_line) if key in coverage_map: count = coverage_map[key] if count is None: # A non-executable line. 
- phab_coverage_data += 'N' + phab_coverage_data += "N" elif count > 0: - phab_coverage_data += 'C' + phab_coverage_data += "C" else: - phab_coverage_data += 'U' + phab_coverage_data += "U" else: # We couldn't find the original changeset-original line in the annotate data for the build changeset, # this means that this line has been overwritten by another changeset. - phab_coverage_data += 'X' + phab_coverage_data += "X" return phab_coverage_data @@ -99,14 +101,14 @@ def generate(self, report, changesets): with hgmo.HGMO(self.repo_dir) as hgmo_server: for changeset in changesets: # Retrieve the revision ID for this changeset. - revision_id = parse_revision_id(changeset['desc']) + revision_id = parse_revision_id(changeset["desc"]) if revision_id is None: continue results[revision_id] = {} # For each file... - for path in changeset['files']: + for path in changeset["files"]: # Retrieve the coverage data. coverage_record = self._find_coverage(report, path) if coverage_record is None: @@ -118,34 +120,38 @@ def generate(self, report, changesets): # This means the file has been removed by another changeset, but if this is the # case, then we shouldn't have a coverage record and so we should have *continue*d # earlier. - assert False, 'Failure to retrieve annotate data for the build changeset' + assert ( + False + ), "Failure to retrieve annotate data for the build changeset" # Build the coverage map from the annotate data and the coverage data of the build changeset. - coverage_map = self._build_coverage_map(build_annotate, coverage_record) + coverage_map = self._build_coverage_map( + build_annotate, coverage_record + ) # Retrieve the annotate data for the changeset of interest. - annotate = hgmo_server.get_annotate(changeset['node'], path) + annotate = hgmo_server.get_annotate(changeset["node"], path) if annotate is None: # This means the file has been removed by this changeset, and maybe was brought back by a following changeset. continue # List lines added by this patch lines_added = [ - line['lineno'] + line["lineno"] for line in build_annotate - if line['node'] == changeset['node'] + if line["node"] == changeset["node"] ] # Apply the coverage map on the annotate data of the changeset of interest. 
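
To make the letter encoding concrete, a small worked example with made-up values, following the counting done just below in `generate()` (C = covered, U = uncovered, N = non-executable, X = overwritten by a later changeset):

```python
# One character per line of the file: lines 1..6 of a hypothetical patch.
coverage = "CCUNXC"
lines_added = [2, 3, 4, 6]  # line numbers introduced by the changeset of interest

# generate() counts C, X and N as "covered"; only U counts against the patch.
lines_covered = sum(
    coverage[line - 1] in ("C", "X", "N")
    for line in lines_added
    if line - 1 < len(coverage)
)
assert lines_covered == 3  # lines 2 (C), 4 (N) and 6 (C); line 3 (U) is uncovered
```
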
coverage = self._apply_coverage_map(annotate, coverage_map) results[revision_id][path] = { - 'lines_added': len(lines_added), - 'lines_covered': sum( - coverage[line-1] in ('C', 'X', 'N') + "lines_added": len(lines_added), + "lines_covered": sum( + coverage[line - 1] in ("C", "X", "N") for line in lines_added - if line-1 < len(coverage) + if line - 1 < len(coverage) ), - 'coverage': coverage, + "coverage": coverage, } return results @@ -154,27 +160,30 @@ def upload(self, report, changesets): results = self.generate(report, changesets) if secrets[secrets.PHABRICATOR_ENABLED]: - phabricator = PhabricatorAPI(secrets[secrets.PHABRICATOR_TOKEN], secrets[secrets.PHABRICATOR_URL]) + phabricator = PhabricatorAPI( + secrets[secrets.PHABRICATOR_TOKEN], secrets[secrets.PHABRICATOR_URL] + ) else: phabricator = None for rev_id, coverage in results.items(): # Only upload raw coverage data to Phabricator, not stats - coverage = { - path: cov['coverage'] - for path, cov in coverage.items() - } - logger.info('{} coverage: {}'.format(rev_id, coverage)) + coverage = {path: cov["coverage"] for path, cov in coverage.items()} + logger.info("{} coverage: {}".format(rev_id, coverage)) if not phabricator or not coverage: continue try: rev_data = phabricator.load_revision(rev_id=rev_id) - phabricator.upload_coverage_results(rev_data['fields']['diffPHID'], coverage) + phabricator.upload_coverage_results( + rev_data["fields"]["diffPHID"], coverage + ) # XXX: This is only necessary until https://bugzilla.mozilla.org/show_bug.cgi?id=1487843 is resolved. - phabricator.upload_lint_results(rev_data['fields']['diffPHID'], BuildState.Pass, []) + phabricator.upload_lint_results( + rev_data["fields"]["diffPHID"], BuildState.Pass, [] + ) except PhabricatorRevisionNotFoundException: - logger.warn('Phabricator revision not found', rev_id=rev_id) + logger.warn("Phabricator revision not found", rev_id=rev_id) return results diff --git a/bot/code_coverage_bot/secrets.py b/bot/code_coverage_bot/secrets.py index 542d3a856..5540661ca 100644 --- a/bot/code_coverage_bot/secrets.py +++ b/bot/code_coverage_bot/secrets.py @@ -8,13 +8,13 @@ class Secrets(dict): - EMAIL_ADDRESSES = 'EMAIL_ADDRESSES' - APP_CHANNEL = 'APP_CHANNEL' - BACKEND_HOST = 'BACKEND_HOST' - PHABRICATOR_ENABLED = 'PHABRICATOR_ENABLED' - PHABRICATOR_URL = 'PHABRICATOR_URL' - PHABRICATOR_TOKEN = 'PHABRICATOR_TOKEN' - GOOGLE_CLOUD_STORAGE = 'GOOGLE_CLOUD_STORAGE' + EMAIL_ADDRESSES = "EMAIL_ADDRESSES" + APP_CHANNEL = "APP_CHANNEL" + BACKEND_HOST = "BACKEND_HOST" + PHABRICATOR_ENABLED = "PHABRICATOR_ENABLED" + PHABRICATOR_URL = "PHABRICATOR_URL" + PHABRICATOR_TOKEN = "PHABRICATOR_TOKEN" + GOOGLE_CLOUD_STORAGE = "GOOGLE_CLOUD_STORAGE" def load(self, taskcluster_secret): taskcluster_config.load_secrets( diff --git a/bot/code_coverage_bot/suite_reports.py b/bot/code_coverage_bot/suite_reports.py index f00736b44..46e88c7da 100644 --- a/bot/code_coverage_bot/suite_reports.py +++ b/bot/code_coverage_bot/suite_reports.py @@ -13,26 +13,37 @@ def generate(suites, artifactsHandler, ccov_reports_dir, repo_dir): for suite in suites: - output = grcov.report(artifactsHandler.get(suite=suite), out_format='lcov') + output = grcov.report(artifactsHandler.get(suite=suite), out_format="lcov") - info_file = os.path.join(ccov_reports_dir, '%s.info' % suite) + info_file = os.path.join(ccov_reports_dir, "%s.info" % suite) - with open(info_file, 'wb') as f: + with open(info_file, "wb") as f: f.write(output) suite_dir = os.path.join(ccov_reports_dir, suite) - run_check([ - 'genhtml', - '-o', 
suite_dir, - '--show-details', '--highlight', '--ignore-errors', 'source', - '--legend', info_file, - '--prefix', repo_dir - ], cwd=repo_dir) + run_check( + [ + "genhtml", + "-o", + suite_dir, + "--show-details", + "--highlight", + "--ignore-errors", + "source", + "--legend", + info_file, + "--prefix", + repo_dir, + ], + cwd=repo_dir, + ) os.remove(info_file) - with tarfile.open(os.path.join(ccov_reports_dir, '%s.tar.xz' % suite), 'w:xz') as tar: + with tarfile.open( + os.path.join(ccov_reports_dir, "%s.tar.xz" % suite), "w:xz" + ) as tar: tar.add(suite_dir, arcname=suite) shutil.rmtree(suite_dir) - logger.info('Suite report generated', suite=suite) + logger.info("Suite report generated", suite=suite) diff --git a/bot/code_coverage_bot/taskcluster.py b/bot/code_coverage_bot/taskcluster.py index dac7a7286..7a57d6320 100644 --- a/bot/code_coverage_bot/taskcluster.py +++ b/bot/code_coverage_bot/taskcluster.py @@ -11,8 +11,8 @@ taskcluster_config = TaskclusterConfig() -index_base = 'https://index.taskcluster.net/v1/' -queue_base = 'https://queue.taskcluster.net/v1/' +index_base = "https://index.taskcluster.net/v1/" +queue_base = "https://queue.taskcluster.net/v1/" class TaskclusterException(Exception): @@ -20,67 +20,70 @@ class TaskclusterException(Exception): def get_task(branch, revision, platform): - if platform == 'linux': - platform_name = 'linux64-ccov-debug' - product = 'firefox' - elif platform == 'windows': - platform_name = 'win64-ccov-debug' - product = 'firefox' - elif platform == 'android-test': - platform_name = 'android-test-ccov' - product = 'mobile' - elif platform == 'android-emulator': - platform_name = 'android-api-16-ccov-debug' - product = 'mobile' + if platform == "linux": + platform_name = "linux64-ccov-debug" + product = "firefox" + elif platform == "windows": + platform_name = "win64-ccov-debug" + product = "firefox" + elif platform == "android-test": + platform_name = "android-test-ccov" + product = "mobile" + elif platform == "android-emulator": + platform_name = "android-api-16-ccov-debug" + product = "mobile" else: - raise TaskclusterException('Unsupported platform: %s' % platform) - - r = requests.get(index_base + 'task/gecko.v2.{}.revision.{}.{}.{}'.format(branch, revision, product, platform_name)) + raise TaskclusterException("Unsupported platform: %s" % platform) + + r = requests.get( + index_base + + "task/gecko.v2.{}.revision.{}.{}.{}".format( + branch, revision, product, platform_name + ) + ) task = r.json() if r.status_code == requests.codes.ok: - return task['taskId'] + return task["taskId"] else: - if task['code'] == 'ResourceNotFound': + if task["code"] == "ResourceNotFound": return None else: - raise TaskclusterException('Unknown TaskCluster index error.') + raise TaskclusterException("Unknown TaskCluster index error.") def get_task_details(task_id): - r = requests.get(queue_base + 'task/{}'.format(task_id)) + r = requests.get(queue_base + "task/{}".format(task_id)) r.raise_for_status() return r.json() def get_task_status(task_id): - r = requests.get(queue_base + 'task/{}/status'.format(task_id)) + r = requests.get(queue_base + "task/{}/status".format(task_id)) r.raise_for_status() return r.json() def get_task_artifacts(task_id): - r = requests.get(queue_base + 'task/{}/artifacts'.format(task_id)) + r = requests.get(queue_base + "task/{}/artifacts".format(task_id)) r.raise_for_status() - return r.json()['artifacts'] + return r.json()["artifacts"] def get_tasks_in_group(group_id): - list_url = queue_base + 'task-group/{}/list'.format(group_id) + 
list_url = queue_base + "task-group/{}/list".format(group_id) - r = requests.get(list_url, params={ - 'limit': 200 - }) + r = requests.get(list_url, params={"limit": 200}) r.raise_for_status() reply = r.json() - tasks = reply['tasks'] - while 'continuationToken' in reply: - r = requests.get(list_url, params={ - 'limit': 200, - 'continuationToken': reply['continuationToken'] - }) + tasks = reply["tasks"] + while "continuationToken" in reply: + r = requests.get( + list_url, + params={"limit": 200, "continuationToken": reply["continuationToken"]}, + ) r.raise_for_status() reply = r.json() - tasks += reply['tasks'] + tasks += reply["tasks"] return tasks @@ -89,62 +92,65 @@ def download_artifact(artifact_path, task_id, artifact_name): return artifact_path def perform_download(): - r = requests.get(queue_base + 'task/{}/artifacts/{}'.format(task_id, artifact_name), stream=True) + r = requests.get( + queue_base + "task/{}/artifacts/{}".format(task_id, artifact_name), + stream=True, + ) r.raise_for_status() - with open(artifact_path, 'wb') as f: + with open(artifact_path, "wb") as f: r.raw.decode_content = True shutil.copyfileobj(r.raw, f) - if artifact_path.endswith('.zip') and not is_zipfile(artifact_path): - raise BadZipFile('File is not a zip file') + if artifact_path.endswith(".zip") and not is_zipfile(artifact_path): + raise BadZipFile("File is not a zip file") retry(perform_download) BUILD_PLATFORMS = [ - 'build-linux64-ccov/debug', - 'build-win64-ccov/debug', - 'build-android-test-ccov/opt', + "build-linux64-ccov/debug", + "build-win64-ccov/debug", + "build-android-test-ccov/opt", ] TEST_PLATFORMS = [ - 'test-linux64-ccov/debug', - 'test-windows10-64-ccov/debug', - 'test-android-em-4.3-arm7-api-16-ccov/debug', + "test-linux64-ccov/debug", + "test-windows10-64-ccov/debug", + "test-android-em-4.3-arm7-api-16-ccov/debug", ] + BUILD_PLATFORMS def is_coverage_task(task): - return any(task['task']['metadata']['name'].startswith(t) for t in TEST_PLATFORMS) + return any(task["task"]["metadata"]["name"].startswith(t) for t in TEST_PLATFORMS) def get_chunk(name): # Some tests are run on build machines, we define a placeholder chunk for those. 
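
To illustrate the name parsing done in `get_chunk` and `get_suite` below, a short sketch with a hypothetical Taskcluster task label (the exact name is made up but follows the `test-<platform>-ccov/debug-<suite>-<chunk>` pattern the helpers expect):

```python
TEST_PLATFORM_PREFIX = "test-linux64-ccov/debug"

# Hypothetical task name in the assumed format.
name = "test-linux64-ccov/debug-mochitest-browser-chrome-e10s-3"

# get_chunk(): strip the platform prefix, then drop the "e10s" marker.
chunk = name[len(TEST_PLATFORM_PREFIX) + 1 :]
chunk = "-".join(p for p in chunk.split("-") if p != "e10s")
assert chunk == "mochitest-browser-chrome-3"

# get_suite(): drop the numeric chunk index from the chunk name.
suite = "-".join(p for p in chunk.split("-") if not p.isdigit())
assert suite == "mochitest-browser-chrome"
```
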
if name in BUILD_PLATFORMS: - return 'build' + return "build" for t in TEST_PLATFORMS: if name.startswith(t): - name = name[len(t) + 1:] + name = name[len(t) + 1 :] break - return '-'.join([p for p in name.split('-') if p != 'e10s']) + return "-".join([p for p in name.split("-") if p != "e10s"]) def get_suite(chunk_name): - return '-'.join([p for p in chunk_name.split('-') if not p.isdigit()]) + return "-".join([p for p in chunk_name.split("-") if not p.isdigit()]) def get_platform(name): - if 'linux' in name: - return 'linux' - elif 'win' in name: - return 'windows' - elif 'android-test' in name: - return 'android-test' - elif 'android-em' in name: - return 'android-emulator' + if "linux" in name: + return "linux" + elif "win" in name: + return "windows" + elif "android-test" in name: + return "android-test" + elif "android-em" in name: + return "android-emulator" else: - raise Exception('Unknown platform') + raise Exception("Unknown platform") diff --git a/bot/code_coverage_bot/uploader.py b/bot/code_coverage_bot/uploader.py index 28debda68..fe573453a 100644 --- a/bot/code_coverage_bot/uploader.py +++ b/bot/code_coverage_bot/uploader.py @@ -12,22 +12,22 @@ from code_coverage_tools.gcp import get_bucket logger = structlog.get_logger(__name__) -GCP_COVDIR_PATH = '{repository}/{revision}.json.zstd' +GCP_COVDIR_PATH = "{repository}/{revision}.json.zstd" def gcp(repository, revision, report): - ''' + """ Upload a grcov raw report on Google Cloud Storage * Compress with zstandard * Upload on bucket using revision in name * Trigger ingestion on channel's backend - ''' + """ assert isinstance(report, dict) bucket = get_bucket(secrets[secrets.GOOGLE_CLOUD_STORAGE]) # Compress report compressor = zstd.ZstdCompressor() - archive = compressor.compress(json.dumps(report).encode('utf-8')) + archive = compressor.compress(json.dumps(report).encode("utf-8")) # Upload archive path = GCP_COVDIR_PATH.format(repository=repository, revision=revision) @@ -35,11 +35,11 @@ def gcp(repository, revision, report): blob.upload_from_string(archive) # Update headers - blob.content_type = 'application/json' - blob.content_encoding = 'zstd' + blob.content_type = "application/json" + blob.content_encoding = "zstd" blob.patch() - logger.info('Uploaded {} on {}'.format(path, bucket)) + logger.info("Uploaded {} on {}".format(path, bucket)) # Trigger ingestion on backend retry(lambda: gcp_ingest(repository, revision), retries=10, wait_between_retries=60) @@ -48,9 +48,9 @@ def gcp(repository, revision, report): def gcp_covdir_exists(repository, revision): - ''' + """ Check if a covdir report exists on the Google Cloud Storage bucket - ''' + """ bucket = get_bucket(secrets[secrets.GOOGLE_CLOUD_STORAGE]) path = GCP_COVDIR_PATH.format(repository=repository, revision=revision) blob = bucket.blob(path) @@ -58,56 +58,58 @@ def gcp_covdir_exists(repository, revision): def gcp_ingest(repository, revision): - ''' + """ The GCP report ingestion is triggered remotely on a backend by making a simple HTTP request on the /v2/path endpoint By specifying the exact new revision processed, the backend will download automatically the new report. 
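    In practice a GET request to {backend_host}/v2/path with the repository and
    changeset passed as query parameters, as done below, is enough to trigger it.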
- ''' - params = { - 'repository': repository, - 'changeset': revision, - } + """ + params = {"repository": repository, "changeset": revision} backend_host = secrets[secrets.BACKEND_HOST] - logger.info('Ingesting report on backend', host=backend_host, repository=repository, revision=revision) - resp = requests.get('{}/v2/path'.format(backend_host), params=params) + logger.info( + "Ingesting report on backend", + host=backend_host, + repository=repository, + revision=revision, + ) + resp = requests.get("{}/v2/path".format(backend_host), params=params) resp.raise_for_status() - logger.info('Successfully ingested report on backend !') + logger.info("Successfully ingested report on backend !") return resp def gcp_latest(repository): - ''' + """ List the latest reports ingested on the backend - ''' - params = { - 'repository': repository, - } + """ + params = {"repository": repository} backend_host = secrets[secrets.BACKEND_HOST] - resp = requests.get('{}/v2/latest'.format(backend_host), params=params) + resp = requests.get("{}/v2/latest".format(backend_host), params=params) resp.raise_for_status() return resp.json() def covdir_paths(report): - ''' + """ Load a covdir report and recursively list all the paths - ''' + """ assert isinstance(report, dict) - def _extract(obj, base_path=''): + def _extract(obj, base_path=""): out = [] - children = obj.get('children', {}) + children = obj.get("children", {}) if children: # Recursive on folder files - out += itertools.chain(*[ - _extract(child, os.path.join(base_path, obj['name'])) - for child in children.values() - ]) + out += itertools.chain( + *[ + _extract(child, os.path.join(base_path, obj["name"])) + for child in children.values() + ] + ) else: # Add full filename - out.append(os.path.join(base_path, obj['name'])) + out.append(os.path.join(base_path, obj["name"])) return out diff --git a/bot/code_coverage_bot/utils.py b/bot/code_coverage_bot/utils.py index f38c22d7e..570075bb0 100644 --- a/bot/code_coverage_bot/utils.py +++ b/bot/code_coverage_bot/utils.py @@ -12,16 +12,14 @@ class RunException(Exception): - ''' + """ Exception used to stop retrying - ''' + """ -def retry(operation, - retries=5, - wait_between_retries=30, - exception_to_break=RunException, - ): +def retry( + operation, retries=5, wait_between_retries=30, exception_to_break=RunException +): while True: try: return operation() @@ -36,9 +34,9 @@ def retry(operation, def hide_secrets(text, secrets): if type(text) is bytes: - encode_secret, xxx = lambda x: bytes(x, encoding='utf-8'), b'XXX' + encode_secret, xxx = lambda x: bytes(x, encoding="utf-8"), b"XXX" elif type(text) is str: - encode_secret, xxx = lambda x: x, 'XXX' + encode_secret, xxx = lambda x: x, "XXX" else: return text @@ -50,13 +48,13 @@ def hide_secrets(text, secrets): def run_check(command, **kwargs): - ''' + """ Run a command through subprocess and check for output - ''' + """ assert isinstance(command, list) if len(command) == 0: - raise RunException('Can\'t run an empty command.') + raise RunException("Can't run an empty command.") _kwargs = dict( stdin=subprocess.DEVNULL, # no interactions @@ -65,19 +63,19 @@ def run_check(command, **kwargs): ) _kwargs.update(kwargs) - log.debug('Running command', command=' ' .join(command), kwargs=_kwargs) + log.debug("Running command", command=" ".join(command), kwargs=_kwargs) with subprocess.Popen(command, **_kwargs) as proc: output, error = proc.communicate() if proc.returncode != 0: log.info( - f'Command failed with code: {proc.returncode}', - command=' ' .join(command), + 
f"Command failed with code: {proc.returncode}", + command=" ".join(command), output=output, error=error, ) - raise RunException(f'`{command[0]}` failed with code: {proc.returncode}.') + raise RunException(f"`{command[0]}` failed with code: {proc.returncode}.") return output diff --git a/bot/code_coverage_bot/zero_coverage.py b/bot/code_coverage_bot/zero_coverage.py index 5a4128e26..3c1ecc805 100644 --- a/bot/code_coverage_bot/zero_coverage.py +++ b/bot/code_coverage_bot/zero_coverage.py @@ -14,10 +14,10 @@ class ZeroCov(object): - DATE_FORMAT = '%Y-%m-%d' + DATE_FORMAT = "%Y-%m-%d" def __init__(self, repo_dir): - assert os.path.isdir(repo_dir), '{} is not a directory'.format(repo_dir) + assert os.path.isdir(repo_dir), "{} is not a directory".format(repo_dir) self.repo_dir = repo_dir def get_file_size(self, filename): @@ -38,7 +38,7 @@ def get_pushlog(self): with hgmo.HGMO(self.repo_dir) as hgmo_server: pushlog = hgmo_server.get_pushes(startID=0) - logger.info('Pushlog retrieved') + logger.info("Pushlog retrieved") return pushlog @@ -49,88 +49,101 @@ def get_fileinfo(self, filenames): res = {} filenames = set(filenames) - for push in pushlog['pushes'].values(): - pushdate = self.get_utc_from_timestamp(push['date']) - for chgset in push['changesets']: - for f in chgset['files']: + for push in pushlog["pushes"].values(): + pushdate = self.get_utc_from_timestamp(push["date"]) + for chgset in push["changesets"]: + for f in chgset["files"]: if f not in filenames: continue if f not in res: - res[f] = {'size': self.get_file_size(f), - 'first_push_date': pushdate, - 'last_push_date': pushdate, - 'commits': 1} + res[f] = { + "size": self.get_file_size(f), + "first_push_date": pushdate, + "last_push_date": pushdate, + "commits": 1, + } else: r = res[f] - if pushdate < r['first_push_date']: - r['first_push_date'] = pushdate - elif pushdate > r['last_push_date']: - r['last_push_date'] = pushdate - r['commits'] += 1 + if pushdate < r["first_push_date"]: + r["first_push_date"] = pushdate + elif pushdate > r["last_push_date"]: + r["last_push_date"] = pushdate + r["commits"] += 1 # stringify the pushdates for v in res.values(): - v['first_push_date'] = self.get_date_str(v['first_push_date']) - v['last_push_date'] = self.get_date_str(v['last_push_date']) + v["first_push_date"] = self.get_date_str(v["first_push_date"]) + v["last_push_date"] = self.get_date_str(v["last_push_date"]) # add default data for files which are not in res for f in filenames: if f in res: continue - res[f] = {'size': 0, - 'first_push_date': '', - 'last_push_date': '', - 'commits': 0} + res[f] = { + "size": 0, + "first_push_date": "", + "last_push_date": "", + "commits": 0, + } return res - def generate(self, artifacts, hgrev, out_dir='.'): - report = grcov.report(artifacts, out_format='coveralls+', source_dir=self.repo_dir) - report = json.loads(report.decode('utf-8')) # Decoding is only necessary until Python 3.6. + def generate(self, artifacts, hgrev, out_dir="."): + report = grcov.report( + artifacts, out_format="coveralls+", source_dir=self.repo_dir + ) + report = json.loads( + report.decode("utf-8") + ) # Decoding is only necessary until Python 3.6. zero_coverage_files = set() zero_coverage_functions = {} - for sf in report['source_files']: - name = sf['name'] + for sf in report["source_files"]: + name = sf["name"] # For C/C++ source files, we can consider a file as being uncovered # when all its source lines are uncovered. 
- all_lines_uncovered = all(c is None or c == 0 for c in sf['coverage']) + all_lines_uncovered = all(c is None or c == 0 for c in sf["coverage"]) # For JavaScript files, we can't do the same, as the top-level is always # executed, even if it just contains declarations. So, we need to check if # all its functions, except the top-level, are uncovered. all_functions_uncovered = True - for f in sf['functions']: - f_name = f['name'] - if f_name == 'top-level': + for f in sf["functions"]: + f_name = f["name"] + if f_name == "top-level": continue - if not f['exec']: + if not f["exec"]: if name in zero_coverage_functions: - zero_coverage_functions[name].append(f['name']) + zero_coverage_functions[name].append(f["name"]) else: - zero_coverage_functions[name] = [f['name']] + zero_coverage_functions[name] = [f["name"]] else: all_functions_uncovered = False - if all_lines_uncovered or (len(sf['functions']) > 1 and all_functions_uncovered): + if all_lines_uncovered or ( + len(sf["functions"]) > 1 and all_functions_uncovered + ): zero_coverage_files.add(name) - os.makedirs(os.path.join(out_dir, 'zero_coverage_functions'), exist_ok=True) + os.makedirs(os.path.join(out_dir, "zero_coverage_functions"), exist_ok=True) filesinfo = self.get_fileinfo(zero_coverage_functions.keys()) zero_coverage_info = [] for fname, functions in zero_coverage_functions.items(): info = filesinfo[fname] - info.update({'name': fname, - 'funcs': len(functions), - 'uncovered': fname in zero_coverage_files}) + info.update( + { + "name": fname, + "funcs": len(functions), + "uncovered": fname in zero_coverage_files, + } + ) zero_coverage_info.append(info) - zero_coverage_report = {'hg_revision': hgrev, - 'files': zero_coverage_info} + zero_coverage_report = {"hg_revision": hgrev, "files": zero_coverage_info} - with open(os.path.join(out_dir, 'zero_coverage_report.json'), 'w') as f: + with open(os.path.join(out_dir, "zero_coverage_report.json"), "w") as f: json.dump(zero_coverage_report, f) diff --git a/bot/requirements-dev.txt b/bot/requirements-dev.txt index ba921e8a1..acdcd8a84 100644 --- a/bot/requirements-dev.txt +++ b/bot/requirements-dev.txt @@ -1,6 +1,5 @@ -flake8==3.7.8 -flake8-isort==2.7.0 jsonschema==3.0.2 json-e==3.0.0 +pre-commit==1.18.0 pytest==5.0.1 responses==0.10.6 diff --git a/bot/setup.cfg b/bot/setup.cfg deleted file mode 100644 index 618244776..000000000 --- a/bot/setup.cfg +++ /dev/null @@ -1,66 +0,0 @@ -[flake8] -max-line-length = 159 -exclude=nix_run_setup.py,migrations/,build/,dist/ - -# https://pypi.python.org/pypi/flake8-coding -accept-encodings = utf-8 - -# https://pypi.python.org/pypi/flake8-quotes -inline-quotes = single -multiline-quotes = ''' -docstring-quotes = ''' - -# https://pypi.python.org/pypi/isort -[isort] -line_length = 159 -force_single_line = True -default_section=FIRSTPARTY -sections=FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER -known_first_party = - code_coverage_tools - code_coverage_bot - code_coverage_tools - codecoverage_backend - -[mypy] -# Specify the target platform details in config, so your developers are -# free to run mypy on Windows, Linux, or macOS and get consistent -# results. 
-# python version will be autodetected -#python_version=3.6 -platform=linux - -# flake8-mypy expects the two following for sensible formatting -show_column_numbers=True -show_error_context=False - -# do not follow imports (except for ones found in typeshed) -follow_imports=skip - -# since we're ignoring imports, writing .mypy_cache doesn't make any sense -cache_dir=/dev/null - -# suppress errors about unsatisfied imports -ignore_missing_imports=True - -# allow untyped calls as a consequence of the options above -disallow_untyped_calls=False - -# allow returning Any as a consequence of the options above -warn_return_any=False - -# treat Optional per PEP 484 -strict_optional=True - -# ensure all execution paths are returning -warn_no_return=True - -# lint-style cleanliness for typing needs to be disabled; returns more errors -# than the full run. -warn_redundant_casts=False -warn_unused_ignores=False - -# The following are off by default since they're too noisy. -# Flip them on if you feel adventurous. -disallow_untyped_defs=False -check_untyped_defs=False diff --git a/bot/setup.py b/bot/setup.py index 94d16a788..da0ed4315 100644 --- a/bot/setup.py +++ b/bot/setup.py @@ -11,39 +11,41 @@ def read_requirements(file_): with open(file_) as f: for line in f.readlines(): line = line.strip() - if line.startswith('-e ') or line.startswith('http://') or line.startswith('https://'): - extras = '' - if '[' in line: - extras = '[' + line.split('[')[1].split(']')[0] + ']' - line = line.split('#')[1].split('egg=')[1] + extras - elif line == '' or line.startswith('#') or line.startswith('-'): + if ( + line.startswith("-e ") + or line.startswith("http://") + or line.startswith("https://") + ): + extras = "" + if "[" in line: + extras = "[" + line.split("[")[1].split("]")[0] + "]" + line = line.split("#")[1].split("egg=")[1] + extras + elif line == "" or line.startswith("#") or line.startswith("-"): continue - line = line.split('#')[0].strip() + line = line.split("#")[0].strip() lines.append(line) return sorted(list(set(lines))) -with open('VERSION') as f: +with open("VERSION") as f: VERSION = f.read().strip() setuptools.setup( - name='code_coverage_bot', + name="code_coverage_bot", version=VERSION, - description='Listens to bugzilla entries, executes' - 'some static analysis and reports results.', - author='Mozilla Release Management', - author_email='release-mgmt-analysis@mozilla.com', - url='https://shipit.mozilla-releng.net', - tests_require=read_requirements('requirements-dev.txt'), - install_requires=read_requirements('requirements.txt'), + description="Listens to bugzilla entries, executes" + "some static analysis and reports results.", + author="Mozilla Release Management", + author_email="release-mgmt-analysis@mozilla.com", + url="https://shipit.mozilla-releng.net", + tests_require=read_requirements("requirements-dev.txt"), + install_requires=read_requirements("requirements.txt"), packages=setuptools.find_packages(), include_package_data=True, zip_safe=False, - license='MPL2', + license="MPL2", entry_points={ - 'console_scripts': [ - 'code-coverage-bot = code_coverage_bot.cli:main', - ] + "console_scripts": ["code-coverage-bot = code_coverage_bot.cli:main"] }, ) diff --git a/bot/tests/conftest.py b/bot/tests/conftest.py index 901776e77..db2f0163e 100644 --- a/bot/tests/conftest.py +++ b/bot/tests/conftest.py @@ -14,7 +14,41 @@ import pytest import responses -FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures') +FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "fixtures") + + +def 
copy_pushlog_database(remote, local): + shutil.copyfile( + os.path.join(remote, ".hg/pushlog2.db"), os.path.join(local, ".hg/pushlog2.db") + ) + + +def add_file(hg, repo_dir, name, contents): + path = os.path.join(repo_dir, name) + + with open(path, "w") as f: + f.write(contents) + + hg.add(files=[bytes(path, "ascii")]) + + +def commit(hg, diff_rev=None): + commit_message = "Commit {}".format(hg.status()) + if diff_rev is not None: + commit_message += "Differential Revision: https://phabricator.services.mozilla.com/D{}".format( + diff_rev + ) + + i, revision = hg.commit(message=commit_message, user="Moz Illa ") + + return str(revision, "ascii") + + +def changesets(repo_dir, revision): + from code_coverage_bot import hgmo + + with hgmo.HGMO(repo_dir) as hgmo_server: + return hgmo_server.get_automation_relevance_changesets(revision) def load_file(path): @@ -27,88 +61,90 @@ def load_json(path): return json.load(f) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def TASK_NOT_FOUND(): - return load_json('task_not_found.json') + return load_json("task_not_found.json") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def LATEST_LINUX(): - return load_json('latest_linux.json') + return load_json("latest_linux.json") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def LINUX_TASK_ID(): - return 'MCIO1RWTRu2GhiE7_jILBw' + return "MCIO1RWTRu2GhiE7_jILBw" -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def LINUX_TASK(): - return load_json('linux_task.json') + return load_json("linux_task.json") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def LINUX_TASK_STATUS(): - return load_json('linux_task_status.json') + return load_json("linux_task_status.json") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def LINUX_TASK_ARTIFACTS(): - return load_json('linux_task_artifacts.json') + return load_json("linux_task_artifacts.json") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def LATEST_WIN(): - return load_json('latest_win.json') + return load_json("latest_win.json") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def WIN_TASK_ID(): - return 'PWnw3h-QQSiqxO83MDzKag' + return "PWnw3h-QQSiqxO83MDzKag" -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def GROUP_TASKS_1(): - return load_json('task-group_1.json') + return load_json("task-group_1.json") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def GROUP_TASKS_2(): - return load_json('task-group_2.json') + return load_json("task-group_2.json") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def LINUX_TEST_TASK_ARTIFACTS(): - return load_json('linux_test_task_artifacts.json') + return load_json("linux_test_task_artifacts.json") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def TEST_TASK_FROM_GROUP(): - return load_json('test_task_from_group.json') + return load_json("test_task_from_group.json") @pytest.fixture() def MERCURIAL_COMMIT(): - hg_commit = '0d1e55d87931fe70ec1d007e886bcd58015ff770' + hg_commit = "0d1e55d87931fe70ec1d007e886bcd58015ff770" responses.add( responses.GET, - f'https://mapper.mozilla-releng.net/gecko-dev/rev/hg/{hg_commit}', - body=f'40e8eb46609dcb8780764774ec550afff1eed3a5 {hg_commit}', - status=200) + f"https://mapper.mozilla-releng.net/gecko-dev/rev/hg/{hg_commit}", + body=f"40e8eb46609dcb8780764774ec550afff1eed3a5 {hg_commit}", + status=200, + ) return hg_commit @pytest.fixture() def GITHUB_COMMIT(): 
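    # Mirror of MERCURIAL_COMMIT above: mocks the mapper service response that
    # translates this git revision into its mercurial counterpart.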
- git_commit = '40e8eb46609dcb8780764774ec550afff1eed3a5' + git_commit = "40e8eb46609dcb8780764774ec550afff1eed3a5" responses.add( responses.GET, - f'https://mapper.mozilla-releng.net/gecko-dev/rev/git/{git_commit}', - body=f'{git_commit} 0d1e55d87931fe70ec1d007e886bcd58015ff770', - status=200) + f"https://mapper.mozilla-releng.net/gecko-dev/rev/git/{git_commit}", + body=f"{git_commit} 0d1e55d87931fe70ec1d007e886bcd58015ff770", + status=200, + ) return git_commit @@ -116,95 +152,98 @@ def GITHUB_COMMIT(): @contextmanager def generate_coverage_artifact(name): with tempfile.TemporaryDirectory() as tmp_dir: - zip_path = os.path.join(tmp_dir, name + '.zip') - with zipfile.ZipFile(zip_path, 'w') as z: + zip_path = os.path.join(tmp_dir, name + ".zip") + with zipfile.ZipFile(zip_path, "w") as z: z.write(os.path.join(FIXTURES_DIR, name)) yield zip_path -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def grcov_artifact(): - with generate_coverage_artifact('grcov.info') as f: + with generate_coverage_artifact("grcov.info") as f: yield f -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def jsvm_artifact(): - with generate_coverage_artifact('jsvm.info') as f: + with generate_coverage_artifact("jsvm.info") as f: yield f -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def grcov_existing_file_artifact(): - with generate_coverage_artifact('grcov_existing_file.info') as f: + with generate_coverage_artifact("grcov_existing_file.info") as f: yield f -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def grcov_uncovered_artifact(): - with generate_coverage_artifact('grcov_uncovered_file.info') as f: + with generate_coverage_artifact("grcov_uncovered_file.info") as f: yield f -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def jsvm_uncovered_artifact(): - with generate_coverage_artifact('jsvm_uncovered_file.info') as f: + with generate_coverage_artifact("jsvm_uncovered_file.info") as f: yield f -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def grcov_uncovered_function_artifact(): - with generate_coverage_artifact('grcov_uncovered_function.info') as f: + with generate_coverage_artifact("grcov_uncovered_function.info") as f: yield f -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def jsvm_uncovered_function_artifact(): - with generate_coverage_artifact('jsvm_uncovered_function.info') as f: + with generate_coverage_artifact("jsvm_uncovered_function.info") as f: yield f -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def mock_secrets(): from code_coverage_bot.secrets import secrets - secrets.update({ - 'PHABRICATOR_ENABLED': True, - 'PHABRICATOR_URL': 'http://phabricator.test/api/', - 'PHABRICATOR_TOKEN': 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', - 'EMAIL_ADDRESSES': ['admin@allizom.org'] - }) + + secrets.update( + { + "PHABRICATOR_ENABLED": True, + "PHABRICATOR_URL": "http://phabricator.test/api/", + "PHABRICATOR_TOKEN": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", + "EMAIL_ADDRESSES": ["admin@allizom.org"], + } + ) @pytest.fixture() def codecov_commits(): - dir_path = os.path.join(FIXTURES_DIR, 'codecov_commits') + dir_path = os.path.join(FIXTURES_DIR, "codecov_commits") for fname in os.listdir(dir_path): with open(os.path.join(dir_path, fname)) as f: data = json.load(f) - status = data['meta']['status'] + status = data["meta"]["status"] responses.add( responses.GET, - f'https://codecov.io/api/gh/marco-c/gecko-dev/commit/{fname[:-5]}', + 
f"https://codecov.io/api/gh/marco-c/gecko-dev/commit/{fname[:-5]}", json=data, - status=status + status=status, ) @pytest.fixture def fake_hg_repo(tmpdir): tmp_path = tmpdir.strpath - dest = os.path.join(tmp_path, 'repos') - local = os.path.join(dest, 'local') - remote = os.path.join(dest, 'remote') + dest = os.path.join(tmp_path, "repos") + local = os.path.join(dest, "local") + remote = os.path.join(dest, "remote") for d in [local, remote]: os.makedirs(d) hglib.init(d) - os.environ['USER'] = 'app' + os.environ["USER"] = "app" hg = hglib.open(local) - responses.add_passthru('http://localhost:8000') + responses.add_passthru("http://localhost:8000") yield hg, local, remote @@ -216,89 +255,87 @@ def fake_hg_repo_with_contents(fake_hg_repo): hg, local, remote = fake_hg_repo files = [ - {'name': 'mozglue/build/dummy.cpp', - 'size': 1}, - {'name': 'toolkit/components/osfile/osfile.jsm', - 'size': 2}, - {'name': 'js/src/jit/JIT.cpp', - 'size': 3}, - {'name': 'toolkit/components/osfile/osfile-win.jsm', - 'size': 4}, - {'name': 'js/src/jit/BitSet.cpp', - 'size': 5}, - {'name': 'code_coverage_bot/cli.py', - 'size': 6}, + {"name": "mozglue/build/dummy.cpp", "size": 1}, + {"name": "toolkit/components/osfile/osfile.jsm", "size": 2}, + {"name": "js/src/jit/JIT.cpp", "size": 3}, + {"name": "toolkit/components/osfile/osfile-win.jsm", "size": 4}, + {"name": "js/src/jit/BitSet.cpp", "size": 5}, + {"name": "code_coverage_bot/cli.py", "size": 6}, ] - for c in '?!': + for c in "?!": for f in files: - fname = os.path.join(local, f['name']) + fname = os.path.join(local, f["name"]) parent = os.path.dirname(fname) if not os.path.exists(parent): os.makedirs(parent) - with open(fname, 'w') as Out: - Out.write(c * f['size']) - hg.add(files=[bytes(fname, 'ascii')]) - hg.commit(message=f'Commit file {fname} with {c} inside', - user='Moz Illa ') - hg.push(dest=bytes(remote, 'ascii')) + with open(fname, "w") as Out: + Out.write(c * f["size"]) + hg.add(files=[bytes(fname, "ascii")]) + hg.commit( + message=f"Commit file {fname} with {c} inside", + user="Moz Illa ", + ) + hg.push(dest=bytes(remote, "ascii")) - shutil.copyfile(os.path.join(remote, '.hg/pushlog2.db'), - os.path.join(local, '.hg/pushlog2.db')) + shutil.copyfile( + os.path.join(remote, ".hg/pushlog2.db"), os.path.join(local, ".hg/pushlog2.db") + ) return local @pytest.fixture def mock_phabricator(): - ''' + """ Mock phabricator authentication process - ''' + """ + def _response(name): - path = os.path.join(FIXTURES_DIR, f'phabricator_{name}.json') + path = os.path.join(FIXTURES_DIR, f"phabricator_{name}.json") assert os.path.exists(path) return open(path).read() responses.add( responses.POST, - 'http://phabricator.test/api/user.whoami', - body=_response('auth'), - content_type='application/json', + "http://phabricator.test/api/user.whoami", + body=_response("auth"), + content_type="application/json", ) responses.add( responses.POST, - 'http://phabricator.test/api/differential.revision.search', - body=_response('revision_search'), - content_type='application/json', + "http://phabricator.test/api/differential.revision.search", + body=_response("revision_search"), + content_type="application/json", ) responses.add( responses.POST, - 'http://phabricator.test/api/harbormaster.queryautotargets', - body=_response('harbormaster_queryautotargets'), - content_type='application/json', + "http://phabricator.test/api/harbormaster.queryautotargets", + body=_response("harbormaster_queryautotargets"), + content_type="application/json", ) responses.add( responses.POST, - 
'http://phabricator.test/api/harbormaster.sendmessage', - body=_response('harbormaster_sendmessage'), - content_type='application/json', + "http://phabricator.test/api/harbormaster.sendmessage", + body=_response("harbormaster_sendmessage"), + content_type="application/json", ) responses.add( responses.POST, - 'http://phabricator.test/api/harbormaster.queryautotargets', - body=_response('harbormaster_queryautotargets_lint'), - content_type='application/json', + "http://phabricator.test/api/harbormaster.queryautotargets", + body=_response("harbormaster_queryautotargets_lint"), + content_type="application/json", ) responses.add( responses.POST, - 'http://phabricator.test/api/harbormaster.sendmessage', - body=_response('harbormaster_sendmessage_lint'), - content_type='application/json', + "http://phabricator.test/api/harbormaster.sendmessage", + body=_response("harbormaster_sendmessage_lint"), + content_type="application/json", ) @@ -306,65 +343,64 @@ def _response(name): def fake_source_dir(tmpdir): tmpdir_path = tmpdir.strpath - os.makedirs(os.path.join(tmpdir_path, 'code_coverage_bot')) + os.makedirs(os.path.join(tmpdir_path, "code_coverage_bot")) - with open(os.path.join(tmpdir_path, 'code_coverage_bot', 'cli.py'), 'w') as f: - f.write('1\n2\n') + with open(os.path.join(tmpdir_path, "code_coverage_bot", "cli.py"), "w") as f: + f.write("1\n2\n") return tmpdir_path @pytest.fixture def mock_taskcluster(): - ''' + """ Mock a taskcluster proxy usage - ''' + """ from code_coverage_bot.taskcluster import taskcluster_config responses.add( responses.POST, - 'http://taskcluster.test/api/notify/v1/email', - body='{}', - content_type='application/json', + "http://taskcluster.test/api/notify/v1/email", + body="{}", + content_type="application/json", ) - taskcluster_config.options = { - 'rootUrl': 'http://taskcluster.test', - } + taskcluster_config.options = {"rootUrl": "http://taskcluster.test"} def covdir_report(codecov): - ''' + """ Convert source files to covdir format - ''' + """ assert isinstance(codecov, dict) - assert 'source_files' in codecov + assert "source_files" in codecov out = {} - for cov in codecov['source_files']: - assert '/' not in cov['name'] - coverage = cov['coverage'] + for cov in codecov["source_files"]: + assert "/" not in cov["name"] + coverage = cov["coverage"] total = len(coverage) covered = sum(l is not None and l > 0 for l in coverage) - out[cov['name']] = { - 'children': {}, - 'name': cov['name'], - 'coverage': coverage, - 'coveragePercent': 100.0 * covered / total, - 'linesCovered': covered, - 'linesMissed': total - covered, - 'linesTotal': total, + out[cov["name"]] = { + "children": {}, + "name": cov["name"], + "coverage": coverage, + "coveragePercent": 100.0 * covered / total, + "linesCovered": covered, + "linesMissed": total - covered, + "linesTotal": total, } # Covdir has a root level def _sum(name): return sum(c[name] for c in out.values()) + return { - 'children': out, - 'name': 'src', - 'coverage': [], - 'coveragePercent': _sum('coveragePercent') / len(out) if out else 0, - 'linesCovered': _sum('linesCovered'), - 'linesMissed': _sum('linesMissed'), - 'linesTotal': _sum('linesTotal'), + "children": out, + "name": "src", + "coverage": [], + "coveragePercent": _sum("coveragePercent") / len(out) if out else 0, + "linesCovered": _sum("linesCovered"), + "linesMissed": _sum("linesMissed"), + "linesTotal": _sum("linesTotal"), } diff --git a/bot/tests/mercurial.py b/bot/tests/mercurial.py deleted file mode 100644 index 1749e1c3a..000000000 --- a/bot/tests/mercurial.py +++ 
/dev/null @@ -1,36 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import shutil - -from code_coverage_bot import hgmo - - -def copy_pushlog_database(remote, local): - shutil.copyfile(os.path.join(remote, '.hg/pushlog2.db'), - os.path.join(local, '.hg/pushlog2.db')) - - -def add_file(hg, repo_dir, name, contents): - path = os.path.join(repo_dir, name) - - with open(path, 'w') as f: - f.write(contents) - - hg.add(files=[bytes(path, 'ascii')]) - - -def commit(hg, diff_rev=None): - commit_message = 'Commit {}'.format(hg.status()) - if diff_rev is not None: - commit_message += 'Differential Revision: https://phabricator.services.mozilla.com/D{}'.format(diff_rev) - - i, revision = hg.commit(message=commit_message, - user='Moz Illa ') - - return str(revision, 'ascii') - - -def changesets(repo_dir, revision): - with hgmo.HGMO(repo_dir) as hgmo_server: - return hgmo_server.get_automation_relevance_changesets(revision) diff --git a/bot/tests/test__system.py b/bot/tests/test__system.py index 2244db00e..9c4a3a25b 100644 --- a/bot/tests/test__system.py +++ b/bot/tests/test__system.py @@ -6,32 +6,32 @@ def test_mercurial(): - ''' + """ Test mercurial versions & extensions - ''' - assert shutil.which('hg'), 'Missing mercurial' + """ + assert shutil.which("hg"), "Missing mercurial" # Check mercurial version - output = run_check(['hg', 'version', '-T', '{ver}']) - assert output.decode('utf-8').startswith('4.8') + output = run_check(["hg", "version", "-T", "{ver}"]) + assert output.decode("utf-8").startswith("4.8") # Check needed extensions - output = run_check(['hg', 'version', '-T', '{extensions}']) - extensions = output.decode('utf-8').split('\n') - assert 'hgmo' in extensions - assert 'pushlog' in extensions - assert 'robustcheckout' in extensions + output = run_check(["hg", "version", "-T", "{extensions}"]) + extensions = output.decode("utf-8").split("\n") + assert "hgmo" in extensions + assert "pushlog" in extensions + assert "robustcheckout" in extensions def test_grcov(): - ''' + """ Test grcov is available on the system - ''' - assert shutil.which('grcov'), 'Missing grcov' + """ + assert shutil.which("grcov"), "Missing grcov" def test_genhtml(): - ''' + """ Test genhtml is available on the system - ''' - assert shutil.which('genhtml'), 'Missing genhtml' + """ + assert shutil.which("genhtml"), "Missing genhtml" diff --git a/bot/tests/test_artifacts.py b/bot/tests/test_artifacts.py index 85cdc02cd..26e993fc8 100644 --- a/bot/tests/test_artifacts.py +++ b/bot/tests/test_artifacts.py @@ -10,39 +10,43 @@ from code_coverage_bot.artifacts import ArtifactsHandler FILES = [ - 'windows_mochitest-1_code-coverage-jsvm.info', - 'linux_mochitest-2_code-coverage-grcov.zip', - 'windows_xpcshell-7_code-coverage-jsvm.info', - 'linux_xpcshell-7_code-coverage-grcov.zip', - 'linux_xpcshell-3_code-coverage-grcov.zip', - 'windows_cppunit_code-coverage-grcov.zip', - 'linux_firefox-ui-functional-remote_code-coverage-jsvm.info', + "windows_mochitest-1_code-coverage-jsvm.info", + "linux_mochitest-2_code-coverage-grcov.zip", + "windows_xpcshell-7_code-coverage-jsvm.info", + "linux_xpcshell-7_code-coverage-grcov.zip", + "linux_xpcshell-3_code-coverage-grcov.zip", + "windows_cppunit_code-coverage-grcov.zip", + "linux_firefox-ui-functional-remote_code-coverage-jsvm.info", ] @pytest.fixture def FAKE_ARTIFACTS_DIR(tmpdir): for f in FILES: - open(os.path.join(tmpdir.strpath, f), 'w') + open(os.path.join(tmpdir.strpath, f), "w") return tmpdir.strpath def test_generate_path(FAKE_ARTIFACTS_DIR): a = ArtifactsHandler([], 
parent_dir=FAKE_ARTIFACTS_DIR) - artifact_jsvm = {'name': 'code-coverage-jsvm.info'} - artifact_grcov = {'name': 'code-coverage-grcov.zip'} - assert os.path.join(a.parent_dir, 'linux_xpcshell-3_code-coverage-jsvm.info') == a.generate_path('linux', 'xpcshell-3', artifact_jsvm) - assert os.path.join(a.parent_dir, 'windows_cppunit_code-coverage-grcov.zip') == a.generate_path('windows', 'cppunit', artifact_grcov) + artifact_jsvm = {"name": "code-coverage-jsvm.info"} + artifact_grcov = {"name": "code-coverage-grcov.zip"} + assert os.path.join( + a.parent_dir, "linux_xpcshell-3_code-coverage-jsvm.info" + ) == a.generate_path("linux", "xpcshell-3", artifact_jsvm) + assert os.path.join( + a.parent_dir, "windows_cppunit_code-coverage-grcov.zip" + ) == a.generate_path("windows", "cppunit", artifact_grcov) def test_get_chunks(FAKE_ARTIFACTS_DIR): a = ArtifactsHandler([], parent_dir=FAKE_ARTIFACTS_DIR) - assert a.get_chunks('windows') == { - 'mochitest-1', 'xpcshell-7', 'cppunit', - } - assert a.get_chunks('linux') == { - 'mochitest-2', 'xpcshell-3', 'xpcshell-7', - 'firefox-ui-functional-remote', + assert a.get_chunks("windows") == {"mochitest-1", "xpcshell-7", "cppunit"} + assert a.get_chunks("linux") == { + "mochitest-2", + "xpcshell-3", + "xpcshell-7", + "firefox-ui-functional-remote", } @@ -52,49 +56,60 @@ def add_dir(files): a = ArtifactsHandler([], parent_dir=FAKE_ARTIFACTS_DIR) assert set(a.get()) == add_dir(FILES) - assert set(a.get(suite='mochitest')) == add_dir([ - 'windows_mochitest-1_code-coverage-jsvm.info', - 'linux_mochitest-2_code-coverage-grcov.zip' - ]) - assert set(a.get(chunk='xpcshell-7')) == add_dir([ - 'windows_xpcshell-7_code-coverage-jsvm.info', - 'linux_xpcshell-7_code-coverage-grcov.zip' - ]) - assert set(a.get(chunk='cppunit')) == add_dir([ - 'windows_cppunit_code-coverage-grcov.zip' - ]) - assert set(a.get(platform='windows')) == add_dir([ - 'windows_mochitest-1_code-coverage-jsvm.info', - 'windows_xpcshell-7_code-coverage-jsvm.info', - 'windows_cppunit_code-coverage-grcov.zip', - ]) - assert set(a.get(platform='linux', chunk='xpcshell-7')) == add_dir([ - 'linux_xpcshell-7_code-coverage-grcov.zip' - ]) - - with pytest.raises(Exception, match='suite and chunk can\'t both have a value'): - a.get(chunk='xpcshell-7', suite='mochitest') - - -@mock.patch('code_coverage_bot.taskcluster.get_task_artifacts') -@mock.patch('code_coverage_bot.taskcluster.download_artifact') -def test_download(mocked_download_artifact, mocked_get_task_artifact, TEST_TASK_FROM_GROUP, LINUX_TEST_TASK_ARTIFACTS): + assert set(a.get(suite="mochitest")) == add_dir( + [ + "windows_mochitest-1_code-coverage-jsvm.info", + "linux_mochitest-2_code-coverage-grcov.zip", + ] + ) + assert set(a.get(chunk="xpcshell-7")) == add_dir( + [ + "windows_xpcshell-7_code-coverage-jsvm.info", + "linux_xpcshell-7_code-coverage-grcov.zip", + ] + ) + assert set(a.get(chunk="cppunit")) == add_dir( + ["windows_cppunit_code-coverage-grcov.zip"] + ) + assert set(a.get(platform="windows")) == add_dir( + [ + "windows_mochitest-1_code-coverage-jsvm.info", + "windows_xpcshell-7_code-coverage-jsvm.info", + "windows_cppunit_code-coverage-grcov.zip", + ] + ) + assert set(a.get(platform="linux", chunk="xpcshell-7")) == add_dir( + ["linux_xpcshell-7_code-coverage-grcov.zip"] + ) + + with pytest.raises(Exception, match="suite and chunk can't both have a value"): + a.get(chunk="xpcshell-7", suite="mochitest") + + +@mock.patch("code_coverage_bot.taskcluster.get_task_artifacts") +@mock.patch("code_coverage_bot.taskcluster.download_artifact") 
+def test_download( + mocked_download_artifact, + mocked_get_task_artifact, + TEST_TASK_FROM_GROUP, + LINUX_TEST_TASK_ARTIFACTS, +): a = ArtifactsHandler([]) - mocked_get_task_artifact.return_value = LINUX_TEST_TASK_ARTIFACTS['artifacts'] + mocked_get_task_artifact.return_value = LINUX_TEST_TASK_ARTIFACTS["artifacts"] a.download(TEST_TASK_FROM_GROUP) assert mocked_get_task_artifact.call_count == 1 assert mocked_download_artifact.call_count == 2 assert mocked_download_artifact.call_args_list[0] == mock.call( - 'ccov-artifacts/linux_mochitest-devtools-chrome-4_code-coverage-grcov.zip', - 'AN1M9SW0QY6DZT6suL3zlQ', - 'public/test_info/code-coverage-grcov.zip', + "ccov-artifacts/linux_mochitest-devtools-chrome-4_code-coverage-grcov.zip", + "AN1M9SW0QY6DZT6suL3zlQ", + "public/test_info/code-coverage-grcov.zip", ) assert mocked_download_artifact.call_args_list[1] == mock.call( - 'ccov-artifacts/linux_mochitest-devtools-chrome-4_code-coverage-jsvm.zip', - 'AN1M9SW0QY6DZT6suL3zlQ', - 'public/test_info/code-coverage-jsvm.zip', + "ccov-artifacts/linux_mochitest-devtools-chrome-4_code-coverage-jsvm.zip", + "AN1M9SW0QY6DZT6suL3zlQ", + "public/test_info/code-coverage-jsvm.zip", ) @@ -104,25 +119,23 @@ def test_download(mocked_download_artifact, mocked_get_task_artifact, TEST_TASK_ def _group_tasks(): task_state_groups = [ [ - ('test-linux64-ccov/debug-mochitest-devtools-chrome-e10s-4', 'exception'), - ('test-linux64-ccov/debug-mochitest-devtools-chrome-e10s-4', 'failed'), - ('test-linux64-ccov/debug-mochitest-devtools-chrome-e10s-4', 'completed'), + ("test-linux64-ccov/debug-mochitest-devtools-chrome-e10s-4", "exception"), + ("test-linux64-ccov/debug-mochitest-devtools-chrome-e10s-4", "failed"), + ("test-linux64-ccov/debug-mochitest-devtools-chrome-e10s-4", "completed"), ], [ - ('test-windows10-64-ccov/debug-xpcshell-4', 'exception'), - ('test-windows10-64-ccov/debug-xpcshell-4', 'failed'), + ("test-windows10-64-ccov/debug-xpcshell-4", "exception"), + ("test-windows10-64-ccov/debug-xpcshell-4", "failed"), ], [ - ('test-windows10-64-ccov/debug-talos-dromaeojs-e10s', 'failed'), - ('test-windows10-64-ccov/debug-talos-dromaeojs-e10s', 'completed'), + ("test-windows10-64-ccov/debug-talos-dromaeojs-e10s", "failed"), + ("test-windows10-64-ccov/debug-talos-dromaeojs-e10s", "completed"), ], [ - ('test-linux64-ccov/debug-cppunit', 'exception'), - ('test-linux64-ccov/debug-cppunit', 'completed'), + ("test-linux64-ccov/debug-cppunit", "exception"), + ("test-linux64-ccov/debug-cppunit", "completed"), ], - [ - ('test-linux64-stylo-disabled/debug-crashtest-e10s', 'completed'), - ] + [("test-linux64-stylo-disabled/debug-crashtest-e10s", "completed")], ] # Transform a task_name and state into an object like the ones returned by Taskcluster. @@ -130,46 +143,59 @@ def build_task(task_state): task_name = task_state[0] state = task_state[1] return { - 'status': { - 'taskId': task_name + '-' + state, - 'state': state, - }, - 'task': { - 'metadata': { - 'name': task_name - }, - } + "status": {"taskId": task_name + "-" + state, "state": state}, + "task": {"metadata": {"name": task_name}}, } # Generate all possible permutations of task_name - state. - task_state_groups_permutations = [list(itertools.permutations(task_state_group)) for task_state_group in task_state_groups] + task_state_groups_permutations = [ + list(itertools.permutations(task_state_group)) + for task_state_group in task_state_groups + ] # Generate the product of all possible permutations. 
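    # Iterating over every ordering lets the test below check that the tasks kept by
    # download_all() do not depend on the order in which Taskcluster returns them.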
for ordering in itertools.product(*task_state_groups_permutations): yield { - 'taskGroupId': 'aPt9FbIdQwmhwDIPDYLuaw', - 'tasks': [build_task(task_state) for sublist in ordering for task_state in sublist], + "taskGroupId": "aPt9FbIdQwmhwDIPDYLuaw", + "tasks": [ + build_task(task_state) for sublist in ordering for task_state in sublist + ], } @responses.activate -def test_download_all(LINUX_TASK_ID, LINUX_TASK, GROUP_TASKS_1, GROUP_TASKS_2, FAKE_ARTIFACTS_DIR): - responses.add(responses.GET, f'https://queue.taskcluster.net/v1/task/{LINUX_TASK_ID}', json=LINUX_TASK, status=200) +def test_download_all( + LINUX_TASK_ID, LINUX_TASK, GROUP_TASKS_1, GROUP_TASKS_2, FAKE_ARTIFACTS_DIR +): + responses.add( + responses.GET, + f"https://queue.taskcluster.net/v1/task/{LINUX_TASK_ID}", + json=LINUX_TASK, + status=200, + ) for group_tasks in _group_tasks(): - responses.add(responses.GET, 'https://queue.taskcluster.net/v1/task-group/aPt9FbIdQwmhwDIPDYLuaw/list', json=group_tasks, status=200) + responses.add( + responses.GET, + "https://queue.taskcluster.net/v1/task-group/aPt9FbIdQwmhwDIPDYLuaw/list", + json=group_tasks, + status=200, + ) - a = ArtifactsHandler({'linux': LINUX_TASK_ID}, parent_dir=FAKE_ARTIFACTS_DIR) + a = ArtifactsHandler({"linux": LINUX_TASK_ID}, parent_dir=FAKE_ARTIFACTS_DIR) downloaded = set() def mock_download(task): - downloaded.add(task['status']['taskId']) + downloaded.add(task["status"]["taskId"]) + a.download = mock_download a.download_all() - assert downloaded == set([ - 'test-linux64-ccov/debug-mochitest-devtools-chrome-e10s-4-completed', - 'test-windows10-64-ccov/debug-xpcshell-4-failed', - 'test-linux64-ccov/debug-cppunit-completed', - ]) + assert downloaded == set( + [ + "test-linux64-ccov/debug-mochitest-devtools-chrome-e10s-4-completed", + "test-windows10-64-ccov/debug-xpcshell-4-failed", + "test-linux64-ccov/debug-cppunit-completed", + ] + ) diff --git a/bot/tests/test_chunk_mapping.py b/bot/tests/test_chunk_mapping.py index be9bbea22..f1eddfca9 100644 --- a/bot/tests/test_chunk_mapping.py +++ b/bot/tests/test_chunk_mapping.py @@ -11,36 +11,41 @@ @pytest.fixture -def fake_artifacts_handler(grcov_artifact, jsvm_artifact, grcov_existing_file_artifact, grcov_uncovered_function_artifact): +def fake_artifacts_handler( + grcov_artifact, + jsvm_artifact, + grcov_existing_file_artifact, + grcov_uncovered_function_artifact, +): class FakeArtifactsHandler(object): def __init__(self): pass def get_chunks(self, platform): - return {'chunk1', 'chunk2'} + return {"chunk1", "chunk2"} def get(self, platform=None, suite=None, chunk=None): - if platform == 'linux' and chunk == 'chunk1': + if platform == "linux" and chunk == "chunk1": return [grcov_artifact] # js/src/jit/BitSet.cpp - elif platform == 'linux' and chunk == 'chunk2': + elif platform == "linux" and chunk == "chunk2": return [jsvm_artifact] # toolkit/components/osfile/osfile.jsm - elif platform == 'windows' and chunk == 'chunk1': + elif platform == "windows" and chunk == "chunk1": return [grcov_existing_file_artifact] # code_coverage_bot/cli.py - elif platform == 'windows' and chunk == 'chunk2': + elif platform == "windows" and chunk == "chunk2": return [grcov_uncovered_function_artifact] # js/src/jit/JIT.cpp return FakeArtifactsHandler() def assert_file_to_test(c, source_path, test_path): - c.execute('SELECT test FROM file_to_test WHERE source=?', (source_path,)) + c.execute("SELECT test FROM file_to_test WHERE source=?", (source_path,)) results = c.fetchall() assert len(results) == 1 assert results[0][0] == test_path def 
assert_file_to_chunk(c, path, platform, chunk): - c.execute('SELECT platform, chunk FROM file_to_chunk WHERE path=?', (path,)) + c.execute("SELECT platform, chunk FROM file_to_chunk WHERE path=?", (path,)) results = c.fetchall() assert len(results) == 1 assert results[0][0] == platform @@ -48,7 +53,9 @@ def assert_file_to_chunk(c, path, platform, chunk): def assert_chunk_to_test(c, platform, chunk, tests): - c.execute('SELECT path FROM chunk_to_test WHERE platform=? AND chunk=?', (platform, chunk)) + c.execute( + "SELECT path FROM chunk_to_test WHERE platform=? AND chunk=?", (platform, chunk) + ) results = c.fetchall() assert len(results) == len(tests) assert set([e[0] for e in results]) == set(tests) @@ -59,115 +66,118 @@ def test_zero_coverage(tmpdir, fake_artifacts_handler, fake_hg_repo_with_content tmp_path = tmpdir.strpath def request_callback(request): - payload = json.loads(request.body.decode('utf-8')) + payload = json.loads(request.body.decode("utf-8")) print(payload) - if payload['from'] == 'coverage': - if 'groupby' in payload: - if payload['groupby'] == ['test.suite']: - data = [ - ['chrome', 2], - ['jsreftest', 1], + if payload["from"] == "coverage": + if "groupby" in payload: + if payload["groupby"] == ["test.suite"]: + data = [["chrome", 2], ["jsreftest", 1]] + elif payload["groupby"] == ["test.name"]: + assert payload["where"]["and"][4]["in"]["test.suite"] == [ + "chrome", + "jsreftest", ] - elif payload['groupby'] == ['test.name']: - assert payload['where']['and'][4]['in']['test.suite'] == ['chrome', 'jsreftest'] data = [ - ['js/xpconnect/tests/unit/test_lazyproxy.js', 60], - ['netwerk/test/unit/test_substituting_protocol_handler.js', 55], + ["js/xpconnect/tests/unit/test_lazyproxy.js", 60], + ["netwerk/test/unit/test_substituting_protocol_handler.js", 55], ] else: - assert False, 'Unexpected groupby' - elif 'select' in payload: - if payload['select'] == ['source.file.name', 'test.name']: + assert False, "Unexpected groupby" + elif "select" in payload: + if payload["select"] == ["source.file.name", "test.name"]: data = { - 'source.file.name': [ - 'js/src/vm/TraceLogging.cpp', - 'gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp', + "source.file.name": [ + "js/src/vm/TraceLogging.cpp", + "gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp", ], - 'test.name': [ - 'js/xpconnect/tests/unit/test_lazyproxy.js', - 'netwerk/test/unit/test_substituting_protocol_handler.js', + "test.name": [ + "js/xpconnect/tests/unit/test_lazyproxy.js", + "netwerk/test/unit/test_substituting_protocol_handler.js", ], } else: - assert False, 'Unexpected select' + assert False, "Unexpected select" else: - assert False, 'Unexpected payload' - elif payload['from'] == 'unittest': - if 'groupby' in payload: - if payload['groupby'] == ['run.suite.fullname']: - data = [ - ['marionette', 3590], - ['gtest', 2078], - ['talos', 3000], - ] + assert False, "Unexpected payload" + elif payload["from"] == "unittest": + if "groupby" in payload: + if payload["groupby"] == ["run.suite.fullname"]: + data = [["marionette", 3590], ["gtest", 2078], ["talos", 3000]] else: - assert False, 'Unexpected groupby' - elif 'select' in payload: - if payload['select'] == ['result.test', 'run.key']: - requested_suite = payload['where']['and'][2]['eq']['run.suite.fullname'] - if requested_suite == 'gtest': + assert False, "Unexpected groupby" + elif "select" in payload: + if payload["select"] == ["result.test", "run.key"]: + requested_suite = payload["where"]["and"][2]["eq"][ + "run.suite.fullname" + ] + if requested_suite == "gtest": data = {} - 
elif requested_suite == 'marionette': - prefix = payload['where']['and'][3]['prefix']['run.key'] - if prefix == 'test-linux64-ccov': + elif requested_suite == "marionette": + prefix = payload["where"]["and"][3]["prefix"]["run.key"] + if prefix == "test-linux64-ccov": data = { - 'result.test': [ - 'marionette-test1', - ], - 'run.key': [ - 'test-linux64-ccov/debug-marionette-headless-e10s', + "result.test": ["marionette-test1"], + "run.key": [ + "test-linux64-ccov/debug-marionette-headless-e10s" ], } - elif prefix == 'test-windows10-64-ccov': + elif prefix == "test-windows10-64-ccov": data = { - 'result.test': [ - 'marionette-test2', - ], - 'run.key': [ - 'test-windows10-64-ccov/debug-marionette-e10s', + "result.test": ["marionette-test2"], + "run.key": [ + "test-windows10-64-ccov/debug-marionette-e10s" ], } else: - assert False, 'Unexpected prefix' + assert False, "Unexpected prefix" else: - assert False, 'Unexpected suite' + assert False, "Unexpected suite" else: - assert False, 'Unexpected select' + assert False, "Unexpected select" else: - assert False, 'Unexpected payload' + assert False, "Unexpected payload" else: - assert False, 'Unexpected from' + assert False, "Unexpected from" - return (200, {}, json.dumps({'data': data})) + return (200, {}, json.dumps({"data": data})) responses.add_callback( - responses.POST, chunk_mapping.ACTIVEDATA_QUERY_URL, + responses.POST, + chunk_mapping.ACTIVEDATA_QUERY_URL, callback=request_callback, - content_type='application/json', + content_type="application/json", ) chunk_mapping.generate( fake_hg_repo_with_contents, - '632bb768b1dd4b96a196412e8f7b669ca09d6d91', + "632bb768b1dd4b96a196412e8f7b669ca09d6d91", fake_artifacts_handler, out_dir=tmp_path, ) - with tarfile.open(os.path.join(tmp_path, 'chunk_mapping.tar.xz')) as t: - t.extract('chunk_mapping.sqlite', tmp_path) + with tarfile.open(os.path.join(tmp_path, "chunk_mapping.tar.xz")) as t: + t.extract("chunk_mapping.sqlite", tmp_path) - with sqlite3.connect(os.path.join(tmp_path, 'chunk_mapping.sqlite')) as conn: + with sqlite3.connect(os.path.join(tmp_path, "chunk_mapping.sqlite")) as conn: c = conn.cursor() - assert_file_to_test(c, 'js/src/vm/TraceLogging.cpp', 'js/xpconnect/tests/unit/test_lazyproxy.js') - assert_file_to_test(c, 'gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp', 'netwerk/test/unit/test_substituting_protocol_handler.js') - - assert_file_to_chunk(c, 'js/src/jit/BitSet.cpp', 'linux', 'chunk1') - assert_file_to_chunk(c, 'toolkit/components/osfile/osfile.jsm', 'linux', 'chunk2') - assert_file_to_chunk(c, 'code_coverage_bot/cli.py', 'windows', 'chunk1') - assert_file_to_chunk(c, 'js/src/jit/JIT.cpp', 'windows', 'chunk2') - - assert_chunk_to_test(c, 'linux', 'marionette-headless', ['marionette-test1']) - assert_chunk_to_test(c, 'windows', 'marionette', ['marionette-test2']) + assert_file_to_test( + c, "js/src/vm/TraceLogging.cpp", "js/xpconnect/tests/unit/test_lazyproxy.js" + ) + assert_file_to_test( + c, + "gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp", + "netwerk/test/unit/test_substituting_protocol_handler.js", + ) + + assert_file_to_chunk(c, "js/src/jit/BitSet.cpp", "linux", "chunk1") + assert_file_to_chunk( + c, "toolkit/components/osfile/osfile.jsm", "linux", "chunk2" + ) + assert_file_to_chunk(c, "code_coverage_bot/cli.py", "windows", "chunk1") + assert_file_to_chunk(c, "js/src/jit/JIT.cpp", "windows", "chunk2") + + assert_chunk_to_test(c, "linux", "marionette-headless", ["marionette-test1"]) + assert_chunk_to_test(c, "windows", "marionette", ["marionette-test2"]) diff --git 
a/bot/tests/test_codecov.py b/bot/tests/test_codecov.py index e817ea14e..bdd0662fb 100644 --- a/bot/tests/test_codecov.py +++ b/bot/tests/test_codecov.py @@ -4,4 +4,4 @@ def test_ok(): - assert(codecov) + assert codecov diff --git a/bot/tests/test_grcov.py b/bot/tests/test_grcov.py index a82377b0d..802e457ac 100644 --- a/bot/tests/test_grcov.py +++ b/bot/tests/test_grcov.py @@ -7,266 +7,263 @@ def covdir_get(report, path): - parts = path.split('/') + parts = path.split("/") for part in parts: - report = report['children'][part] + report = report["children"][part] return report def test_report_invalid_output_format(grcov_artifact): - with pytest.raises(AssertionError, match='Unsupported output format'): - grcov.report([grcov_artifact], out_format='UNSUPPORTED') - with pytest.raises(AssertionError, match='Unsupported output format'): - grcov.report([grcov_artifact], out_format='coveralls') + with pytest.raises(AssertionError, match="Unsupported output format"): + grcov.report([grcov_artifact], out_format="UNSUPPORTED") + with pytest.raises(AssertionError, match="Unsupported output format"): + grcov.report([grcov_artifact], out_format="coveralls") def test_report_grcov_artifact_coverallsplus(grcov_artifact): - output = grcov.report([grcov_artifact], out_format='coveralls+') - report = json.loads(output.decode('utf-8')) - assert report['repo_token'] == 'unused' - assert report['service_name'] == 'TaskCluster' - assert report['service_job_number'] == '1' - assert report['git']['branch'] == 'master' - assert report['git']['head']['id'] == 'unused' - assert report['service_number'] == '' - assert len(report['source_files']) == 1 - assert report['source_files'][0]['name'] == 'js/src/jit/BitSet.cpp' - assert report['source_files'][0]['coverage'] == [42, 42] - assert report['source_files'][0]['branches'] == [] - assert 'source_digest' in report['source_files'][0] - assert len(report['source_files'][0]['functions']) == 1 - assert report['source_files'][0]['functions'][0]['exec'] - assert report['source_files'][0]['functions'][0]['name'] == '_ZNK2js3jit6BitSet5emptyEv' - assert report['source_files'][0]['functions'][0]['start'] == 1 + output = grcov.report([grcov_artifact], out_format="coveralls+") + report = json.loads(output.decode("utf-8")) + assert report["repo_token"] == "unused" + assert report["service_name"] == "TaskCluster" + assert report["service_job_number"] == "1" + assert report["git"]["branch"] == "master" + assert report["git"]["head"]["id"] == "unused" + assert report["service_number"] == "" + assert len(report["source_files"]) == 1 + assert report["source_files"][0]["name"] == "js/src/jit/BitSet.cpp" + assert report["source_files"][0]["coverage"] == [42, 42] + assert report["source_files"][0]["branches"] == [] + assert "source_digest" in report["source_files"][0] + assert len(report["source_files"][0]["functions"]) == 1 + assert report["source_files"][0]["functions"][0]["exec"] + assert ( + report["source_files"][0]["functions"][0]["name"] + == "_ZNK2js3jit6BitSet5emptyEv" + ) + assert report["source_files"][0]["functions"][0]["start"] == 1 def test_report_grcov_artifact(grcov_artifact): - output = grcov.report([grcov_artifact], out_format='covdir') - report = json.loads(output.decode('utf-8')) + output = grcov.report([grcov_artifact], out_format="covdir") + report = json.loads(output.decode("utf-8")) assert report == { - 'children': { - 'js': { - 'children': { - 'src': { - 'children': { - 'jit': { - 'children': { - 'BitSet.cpp': { - 'coverage': [ - 42, - 42 - ], - 'coveragePercent': 
100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'BitSet.cpp' + "children": { + "js": { + "children": { + "src": { + "children": { + "jit": { + "children": { + "BitSet.cpp": { + "coverage": [42, 42], + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "BitSet.cpp", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'jit' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "jit", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'src' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "src", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'js' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "js", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': '' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "", } def test_report_jsvm_artifact(jsvm_artifact): - output = grcov.report([jsvm_artifact], out_format='covdir') - report = json.loads(output.decode('utf-8')) + output = grcov.report([jsvm_artifact], out_format="covdir") + report = json.loads(output.decode("utf-8")) assert report == { - 'children': { - 'toolkit': { - 'children': { - 'components': { - 'children': { - 'osfile': { - 'children': { - 'osfile.jsm': { - 'coverage': [ - 42, - 42 - ], - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'osfile.jsm' + "children": { + "toolkit": { + "children": { + "components": { + "children": { + "osfile": { + "children": { + "osfile.jsm": { + "coverage": [42, 42], + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "osfile.jsm", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'osfile' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "osfile", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'components' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "components", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'toolkit' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "toolkit", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': '' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "", } def test_report_multiple_artifacts(grcov_artifact, jsvm_artifact): - output = grcov.report([grcov_artifact, jsvm_artifact], out_format='covdir') - report = json.loads(output.decode('utf-8')) + output = grcov.report([grcov_artifact, jsvm_artifact], out_format="covdir") + report = json.loads(output.decode("utf-8")) - assert report['linesTotal'] == 4 - assert report['linesCovered'] == 4 - assert report['coveragePercent'] == 100.0 + assert report["linesTotal"] == 4 + assert report["linesCovered"] == 4 + assert report["coveragePercent"] == 100.0 - assert covdir_get(report, 'toolkit/components/osfile/osfile.jsm') == { - 'coverage': [ - 42, - 42 - ], - 
'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'osfile.jsm' + assert covdir_get(report, "toolkit/components/osfile/osfile.jsm") == { + "coverage": [42, 42], + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "osfile.jsm", } - assert covdir_get(report, 'js/src/jit/BitSet.cpp') == { - 'coverage': [ - 42, - 42 - ], - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'BitSet.cpp' + assert covdir_get(report, "js/src/jit/BitSet.cpp") == { + "coverage": [42, 42], + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "BitSet.cpp", } -def test_report_source_dir(fake_source_dir, grcov_artifact, grcov_existing_file_artifact): - output = grcov.report([grcov_existing_file_artifact], source_dir=fake_source_dir, out_format='covdir') - report = json.loads(output.decode('utf-8')) +def test_report_source_dir( + fake_source_dir, grcov_artifact, grcov_existing_file_artifact +): + output = grcov.report( + [grcov_existing_file_artifact], source_dir=fake_source_dir, out_format="covdir" + ) + report = json.loads(output.decode("utf-8")) assert report == { - 'children': { - 'code_coverage_bot': { - 'children': { - 'cli.py': { - 'coverage': [ - 42, - 42 - ], - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'cli.py' + "children": { + "code_coverage_bot": { + "children": { + "cli.py": { + "coverage": [42, 42], + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "cli.py", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'code_coverage_bot' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "code_coverage_bot", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': '' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "", } def test_report_options(grcov_artifact, jsvm_artifact): - output = grcov.report([grcov_artifact, jsvm_artifact], out_format='covdir', options=['--ignore-dir', 'toolkit/*']) - report = json.loads(output.decode('utf-8')) + output = grcov.report( + [grcov_artifact, jsvm_artifact], + out_format="covdir", + options=["--ignore-dir", "toolkit/*"], + ) + report = json.loads(output.decode("utf-8")) assert report == { - 'children': { - 'js': { - 'children': { - 'src': { - 'children': { - 'jit': { - 'children': { - 'BitSet.cpp': { - 'coverage': [ - 42, - 42 - ], - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'BitSet.cpp' + "children": { + "js": { + "children": { + "src": { + "children": { + "jit": { + "children": { + "BitSet.cpp": { + "coverage": [42, 42], + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "BitSet.cpp", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'jit' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "jit", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'src' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "src", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': 'js' + 
"coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "js", } }, - 'coveragePercent': 100.0, - 'linesCovered': 2, - 'linesMissed': 0, - 'linesTotal': 2, - 'name': '' + "coveragePercent": 100.0, + "linesCovered": 2, + "linesMissed": 0, + "linesTotal": 2, + "name": "", } def test_files_list(grcov_artifact, grcov_uncovered_artifact): files = grcov.files_list([grcov_artifact, grcov_uncovered_artifact]) - assert set(files) == set(['js/src/jit/BitSet.cpp']) + assert set(files) == set(["js/src/jit/BitSet.cpp"]) -def test_files_list_source_dir(fake_source_dir, grcov_artifact, grcov_existing_file_artifact): - files = grcov.files_list([grcov_artifact, grcov_existing_file_artifact], source_dir=fake_source_dir) - assert set(files) == set(['code_coverage_bot/cli.py']) +def test_files_list_source_dir( + fake_source_dir, grcov_artifact, grcov_existing_file_artifact +): + files = grcov.files_list( + [grcov_artifact, grcov_existing_file_artifact], source_dir=fake_source_dir + ) + assert set(files) == set(["code_coverage_bot/cli.py"]) diff --git a/bot/tests/test_hgmo.py b/bot/tests/test_hgmo.py index 75fa5d9df..4addca29e 100644 --- a/bot/tests/test_hgmo.py +++ b/bot/tests/test_hgmo.py @@ -4,4 +4,4 @@ def test_ok(): - assert(hgmo) + assert hgmo diff --git a/bot/tests/test_hook.py b/bot/tests/test_hook.py index 49a77f588..3094ebf90 100644 --- a/bot/tests/test_hook.py +++ b/bot/tests/test_hook.py @@ -6,44 +6,33 @@ import jsonschema import pytest -HOOK = os.path.join(os.path.dirname(__file__), '../taskcluster-hook.json') +HOOK = os.path.join(os.path.dirname(__file__), "../taskcluster-hook.json") payloads = [ # Trigger by interface or API + {"firedBy": "triggerHook", "taskId": "xxx", "payload": {}}, { - 'firedBy': 'triggerHook', - 'taskId': 'xxx', - 'payload': {}, + "firedBy": "triggerHook", + "taskId": "xxx", + "payload": {"taskName": "Custom task name", "taskGroupId": "yyyy"}, }, - { - 'firedBy': 'triggerHook', - 'taskId': 'xxx', - 'payload': { - 'taskName': 'Custom task name', - 'taskGroupId': 'yyyy', - }, - }, - # Cron trigger - { - 'firedBy': 'schedule', - 'taskId': 'xxx', - }, + {"firedBy": "schedule", "taskId": "xxx"}, ] @pytest.mark.parametrize("payload", payloads) def test_hook_syntax(payload): - ''' + """ Validate the Taskcluster hook syntax - ''' + """ assert os.path.exists(HOOK) with open(HOOK, "r") as f: # Patch the hook as in the taskboot deployment content = f.read() - content = content.replace('REVISION', 'deadbeef1234') - content = content.replace('CHANNEL', 'test') + content = content.replace("REVISION", "deadbeef1234") + content = content.replace("CHANNEL", "test") # Now parse it as json hook_content = json.loads(content) diff --git a/bot/tests/test_notifier.py b/bot/tests/test_notifier.py index 43fa06de1..2f9aa7e75 100644 --- a/bot/tests/test_notifier.py +++ b/bot/tests/test_notifier.py @@ -3,57 +3,57 @@ from code_coverage_bot.notifier import notify_email from code_coverage_bot.phabricator import PhabricatorUploader +from conftest import add_file +from conftest import changesets +from conftest import commit +from conftest import copy_pushlog_database from conftest import covdir_report -from mercurial import add_file -from mercurial import changesets -from mercurial import commit -from mercurial import copy_pushlog_database @responses.activate def test_notification(mock_secrets, mock_taskcluster, mock_phabricator, fake_hg_repo): hg, local, remote = fake_hg_repo - add_file(hg, local, 'file', '1\n2\n3\n4\n') + add_file(hg, local, "file", "1\n2\n3\n4\n") 
commit(hg, 1) - add_file(hg, local, 'file', '1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n') + add_file(hg, local, "file", "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n") revision = commit(hg, 2) - hg.push(dest=bytes(remote, 'ascii')) + hg.push(dest=bytes(remote, "ascii")) copy_pushlog_database(remote, local) stack = changesets(local, revision) assert len(stack) == 2 - assert stack[0]['desc'] == "Commit [(b'A', b'file')]Differential Revision: https://phabricator.services.mozilla.com/D1" - assert stack[1]['desc'] == "Commit [(b'M', b'file')]Differential Revision: https://phabricator.services.mozilla.com/D2" - - report = covdir_report({ - 'source_files': [{ - 'name': 'file', - 'coverage': [None, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], - }] - }) + assert ( + stack[0]["desc"] + == "Commit [(b'A', b'file')]Differential Revision: https://phabricator.services.mozilla.com/D1" + ) + assert ( + stack[1]["desc"] + == "Commit [(b'M', b'file')]Differential Revision: https://phabricator.services.mozilla.com/D2" + ) + + report = covdir_report( + { + "source_files": [ + {"name": "file", "coverage": [None, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]} + ] + } + ) phab = PhabricatorUploader(local, revision) changesets_coverage = phab.generate(report, stack) assert changesets_coverage == { - 1: { - 'file': { - 'lines_added': 4, - 'lines_covered': 2, - 'coverage': 'NUCU' - }, - }, - 2: { - 'file': { - 'lines_added': 6, - 'lines_covered': 0, - 'coverage': 'NUCUUUUUUU', - }, - }, + 1: {"file": {"lines_added": 4, "lines_covered": 2, "coverage": "NUCU"}}, + 2: {"file": {"lines_added": 6, "lines_covered": 0, "coverage": "NUCUUUUUUU"}}, } mail = notify_email(revision, stack, changesets_coverage) - assert mail == "* [Commit [(b'M', b'file')]Differential Revision: https://phabricator.services.mozilla.com/D2](https://firefox-code-coverage.herokuapp.com/#/changeset/{}): 0 covered out of 6 added.\n".format(revision) # noqa + assert ( + mail + == "* [Commit [(b'M', b'file')]Differential Revision: https://phabricator.services.mozilla.com/D2](https://firefox-code-coverage.herokuapp.com/#/changeset/{}): 0 covered out of 6 added.\n".format( ## noqa + revision + ) + ) diff --git a/bot/tests/test_phabricator.py b/bot/tests/test_phabricator.py index 710b8515b..6eda3361b 100644 --- a/bot/tests/test_phabricator.py +++ b/bot/tests/test_phabricator.py @@ -7,41 +7,32 @@ import responses from code_coverage_bot.phabricator import PhabricatorUploader +from conftest import add_file +from conftest import changesets +from conftest import commit +from conftest import copy_pushlog_database from conftest import covdir_report -from mercurial import add_file -from mercurial import changesets -from mercurial import commit -from mercurial import copy_pushlog_database @responses.activate def test_simple(mock_secrets, mock_phabricator, fake_hg_repo): hg, local, remote = fake_hg_repo - add_file(hg, local, 'file', '1\n2\n3\n4\n5\n6\n7\n') + add_file(hg, local, "file", "1\n2\n3\n4\n5\n6\n7\n") revision = commit(hg, 1) - hg.push(dest=bytes(remote, 'ascii')) + hg.push(dest=bytes(remote, "ascii")) copy_pushlog_database(remote, local) phabricator = PhabricatorUploader(local, revision) - report = covdir_report({ - 'source_files': [{ - 'name': 'file', - 'coverage': [None, 0, 1, 1, 1, 1, 0], - }] - }) + report = covdir_report( + {"source_files": [{"name": "file", "coverage": [None, 0, 1, 1, 1, 1, 0]}]} + ) results = phabricator.generate(report, changesets(local, revision)) assert results == { - 1: { - 'file': { - 'coverage': 'NUCCCCU', - 'lines_added': 7, - 'lines_covered': 5 - } - } + 1: {"file": 
{"coverage": "NUCCCCU", "lines_added": 7, "lines_covered": 5}} } phabricator.upload(report, changesets(local, revision)) @@ -49,79 +40,88 @@ def test_simple(mock_secrets, mock_phabricator, fake_hg_repo): assert len(responses.calls) >= 3 call = responses.calls[-5] - assert call.request.url == 'http://phabricator.test/api/differential.revision.search' - params = json.loads(urllib.parse.parse_qs(call.request.body)['params'][0]) - assert params['constraints']['ids'] == [1] + assert ( + call.request.url == "http://phabricator.test/api/differential.revision.search" + ) + params = json.loads(urllib.parse.parse_qs(call.request.body)["params"][0]) + assert params["constraints"]["ids"] == [1] call = responses.calls[-4] - assert call.request.url == 'http://phabricator.test/api/harbormaster.queryautotargets' - params = json.loads(urllib.parse.parse_qs(call.request.body)['params'][0]) - assert params['objectPHID'] == 'PHID-DIFF-test' - assert params['targetKeys'] == ['arcanist.unit'] + assert ( + call.request.url == "http://phabricator.test/api/harbormaster.queryautotargets" + ) + params = json.loads(urllib.parse.parse_qs(call.request.body)["params"][0]) + assert params["objectPHID"] == "PHID-DIFF-test" + assert params["targetKeys"] == ["arcanist.unit"] call = responses.calls[-3] - assert call.request.url == 'http://phabricator.test/api/harbormaster.sendmessage' - params = json.loads(urllib.parse.parse_qs(call.request.body)['params'][0]) - assert params['buildTargetPHID'] == 'PHID-HMBT-test' - assert params['type'] == 'pass' - assert params['unit'] == [{'name': 'Aggregate coverage information', 'result': 'pass', 'coverage': {'file': 'NUCCCCU'}}] - assert params['lint'] == [] + assert call.request.url == "http://phabricator.test/api/harbormaster.sendmessage" + params = json.loads(urllib.parse.parse_qs(call.request.body)["params"][0]) + assert params["buildTargetPHID"] == "PHID-HMBT-test" + assert params["type"] == "pass" + assert params["unit"] == [ + { + "name": "Aggregate coverage information", + "result": "pass", + "coverage": {"file": "NUCCCCU"}, + } + ] + assert params["lint"] == [] call = responses.calls[-2] - assert call.request.url == 'http://phabricator.test/api/harbormaster.queryautotargets' - params = json.loads(urllib.parse.parse_qs(call.request.body)['params'][0]) - assert params['objectPHID'] == 'PHID-DIFF-test' - assert params['targetKeys'] == ['arcanist.lint'] + assert ( + call.request.url == "http://phabricator.test/api/harbormaster.queryautotargets" + ) + params = json.loads(urllib.parse.parse_qs(call.request.body)["params"][0]) + assert params["objectPHID"] == "PHID-DIFF-test" + assert params["targetKeys"] == ["arcanist.lint"] call = responses.calls[-1] - assert call.request.url == 'http://phabricator.test/api/harbormaster.sendmessage' - params = json.loads(urllib.parse.parse_qs(call.request.body)['params'][0]) - assert params['buildTargetPHID'] == 'PHID-HMBT-test-lint' - assert params['type'] == 'pass' - assert params['unit'] == [] - assert params['lint'] == [] + assert call.request.url == "http://phabricator.test/api/harbormaster.sendmessage" + params = json.loads(urllib.parse.parse_qs(call.request.body)["params"][0]) + assert params["buildTargetPHID"] == "PHID-HMBT-test-lint" + assert params["type"] == "pass" + assert params["unit"] == [] + assert params["lint"] == [] @responses.activate def test_file_with_no_coverage(mock_secrets, fake_hg_repo): hg, local, remote = fake_hg_repo - add_file(hg, local, 'file', '1\n2\n3\n4\n5\n6\n7\n') + add_file(hg, local, "file", 
"1\n2\n3\n4\n5\n6\n7\n") revision = commit(hg, 1) - hg.push(dest=bytes(remote, 'ascii')) + hg.push(dest=bytes(remote, "ascii")) copy_pushlog_database(remote, local) phabricator = PhabricatorUploader(local, revision) - report = covdir_report({ - 'source_files': [] - }) + report = covdir_report({"source_files": []}) results = phabricator.generate(report, changesets(local, revision)) - assert results == { - 1: {} - } + assert results == {1: {}} @responses.activate def test_one_commit_without_differential(mock_secrets, fake_hg_repo): hg, local, remote = fake_hg_repo - add_file(hg, local, 'file', '1\n2\n3\n4\n5\n6\n7\n') + add_file(hg, local, "file", "1\n2\n3\n4\n5\n6\n7\n") revision = commit(hg) - hg.push(dest=bytes(remote, 'ascii')) + hg.push(dest=bytes(remote, "ascii")) copy_pushlog_database(remote, local) phabricator = PhabricatorUploader(local, revision) - report = covdir_report({ - 'source_files': [{ - 'name': 'file_one_commit', - 'coverage': [None, 0, 1, 1, 1, 1, 0], - }] - }) + report = covdir_report( + { + "source_files": [ + {"name": "file_one_commit", "coverage": [None, 0, 1, 1, 1, 1, 0]} + ] + } + ) results = phabricator.generate(report, changesets(local, revision)) assert results == {} @@ -131,53 +131,41 @@ def test_one_commit_without_differential(mock_secrets, fake_hg_repo): def test_two_commits_two_files(mock_secrets, fake_hg_repo): hg, local, remote = fake_hg_repo - add_file(hg, local, 'file1_commit1', '1\n2\n3\n4\n5\n6\n7\n') - add_file(hg, local, 'file2_commit1', '1\n2\n3\n') + add_file(hg, local, "file1_commit1", "1\n2\n3\n4\n5\n6\n7\n") + add_file(hg, local, "file2_commit1", "1\n2\n3\n") revision = commit(hg, 1) - add_file(hg, local, 'file3_commit2', '1\n2\n3\n4\n5\n') + add_file(hg, local, "file3_commit2", "1\n2\n3\n4\n5\n") revision = commit(hg, 2) - hg.push(dest=bytes(remote, 'ascii')) + hg.push(dest=bytes(remote, "ascii")) copy_pushlog_database(remote, local) phabricator = PhabricatorUploader(local, revision) - report = covdir_report({ - 'source_files': [{ - 'name': 'file1_commit1', - 'coverage': [None, 0, 1, 1, 1, 1, 0], - }, { - 'name': 'file2_commit1', - 'coverage': [1, 1, 0], - }, { - 'name': 'file3_commit2', - 'coverage': [1, 1, 0, 1, None], - }] - }) + report = covdir_report( + { + "source_files": [ + {"name": "file1_commit1", "coverage": [None, 0, 1, 1, 1, 1, 0]}, + {"name": "file2_commit1", "coverage": [1, 1, 0]}, + {"name": "file3_commit2", "coverage": [1, 1, 0, 1, None]}, + ] + } + ) results = phabricator.generate(report, changesets(local, revision)) assert results == { 1: { - 'file1_commit1': { - 'coverage': 'NUCCCCU', - 'lines_added': 7, - 'lines_covered': 5 + "file1_commit1": { + "coverage": "NUCCCCU", + "lines_added": 7, + "lines_covered": 5, }, - 'file2_commit1': { - 'coverage': 'CCU', - 'lines_added': 3, - 'lines_covered': 2 - } + "file2_commit1": {"coverage": "CCU", "lines_added": 3, "lines_covered": 2}, }, 2: { - 'file3_commit2': { - 'coverage': 'CCUCN', - 'lines_added': 5, - 'lines_covered': 4 - } - } - + "file3_commit2": {"coverage": "CCUCN", "lines_added": 5, "lines_covered": 4} + }, } @@ -185,40 +173,25 @@ def test_two_commits_two_files(mock_secrets, fake_hg_repo): def test_changesets_overwriting(mock_secrets, fake_hg_repo): hg, local, remote = fake_hg_repo - add_file(hg, local, 'file', '1\n2\n3\n4\n5\n6\n7\n') + add_file(hg, local, "file", "1\n2\n3\n4\n5\n6\n7\n") commit(hg, 1) - add_file(hg, local, 'file', '1\n2\n3\n42\n5\n6\n7\n') + add_file(hg, local, "file", "1\n2\n3\n42\n5\n6\n7\n") revision = commit(hg, 2) - hg.push(dest=bytes(remote, 
'ascii')) + hg.push(dest=bytes(remote, "ascii")) copy_pushlog_database(remote, local) phabricator = PhabricatorUploader(local, revision) - report = covdir_report({ - 'source_files': [{ - 'name': 'file', - 'coverage': [None, 0, 1, 1, 1, 1, 0], - }] - }) + report = covdir_report( + {"source_files": [{"name": "file", "coverage": [None, 0, 1, 1, 1, 1, 0]}]} + ) results = phabricator.generate(report, changesets(local, revision)) assert results == { - 1: { - 'file': { - 'coverage': 'NUCXCCU', - 'lines_added': 6, - 'lines_covered': 4 - } - }, - 2: { - 'file': { - 'coverage': 'NUCCCCU', - 'lines_added': 1, - 'lines_covered': 1 - } - } + 1: {"file": {"coverage": "NUCXCCU", "lines_added": 6, "lines_covered": 4}}, + 2: {"file": {"coverage": "NUCCCCU", "lines_added": 1, "lines_covered": 1}}, } @@ -226,40 +199,29 @@ def test_changesets_overwriting(mock_secrets, fake_hg_repo): def test_changesets_displacing(mock_secrets, fake_hg_repo): hg, local, remote = fake_hg_repo - add_file(hg, local, 'file', '1\n2\n3\n4\n5\n6\n7\n') + add_file(hg, local, "file", "1\n2\n3\n4\n5\n6\n7\n") commit(hg, 1) - add_file(hg, local, 'file', '-1\n-2\n1\n2\n3\n4\n5\n6\n7\n8\n9\n') + add_file(hg, local, "file", "-1\n-2\n1\n2\n3\n4\n5\n6\n7\n8\n9\n") revision = commit(hg, 2) - hg.push(dest=bytes(remote, 'ascii')) + hg.push(dest=bytes(remote, "ascii")) copy_pushlog_database(remote, local) phabricator = PhabricatorUploader(local, revision) - report = covdir_report({ - 'source_files': [{ - 'name': 'file', - 'coverage': [0, 1, None, 0, 1, 1, 1, 1, 0, 1, 0], - }] - }) + report = covdir_report( + { + "source_files": [ + {"name": "file", "coverage": [0, 1, None, 0, 1, 1, 1, 1, 0, 1, 0]} + ] + } + ) results = phabricator.generate(report, changesets(local, revision)) assert results == { - 1: { - 'file': { - 'coverage': 'NUCCCCU', - 'lines_added': 7, - 'lines_covered': 4 - } - }, - 2: { - 'file': { - 'coverage': 'UCNUCCCCUCU', - 'lines_added': 4, - 'lines_covered': 2, - } - } + 1: {"file": {"coverage": "NUCCCCU", "lines_added": 7, "lines_covered": 4}}, + 2: {"file": {"coverage": "UCNUCCCCUCU", "lines_added": 4, "lines_covered": 2}}, } @@ -267,75 +229,53 @@ def test_changesets_displacing(mock_secrets, fake_hg_repo): def test_changesets_reducing_size(mock_secrets, fake_hg_repo): hg, local, remote = fake_hg_repo - add_file(hg, local, 'file', '1\n2\n3\n4\n5\n6\n7\n') + add_file(hg, local, "file", "1\n2\n3\n4\n5\n6\n7\n") commit(hg, 1) - add_file(hg, local, 'file', '1\n2\n3\n4\n5\n') + add_file(hg, local, "file", "1\n2\n3\n4\n5\n") revision = commit(hg, 2) - hg.push(dest=bytes(remote, 'ascii')) + hg.push(dest=bytes(remote, "ascii")) copy_pushlog_database(remote, local) phabricator = PhabricatorUploader(local, revision) - report = covdir_report({ - 'source_files': [{ - 'name': 'file', - 'coverage': [None, 0, 1, 1, 1], - }] - }) + report = covdir_report( + {"source_files": [{"name": "file", "coverage": [None, 0, 1, 1, 1]}]} + ) results = phabricator.generate(report, changesets(local, revision)) assert results == { - 1: { - 'file': { - 'coverage': 'NUCCCXX', - 'lines_added': 5, - 'lines_covered': 4 - } - }, - 2: { - 'file': { - 'coverage': 'NUCCC', - 'lines_added': 0, - 'lines_covered': 0 - } - } + 1: {"file": {"coverage": "NUCCCXX", "lines_added": 5, "lines_covered": 4}}, + 2: {"file": {"coverage": "NUCCC", "lines_added": 0, "lines_covered": 0}}, } @responses.activate -def test_changesets_overwriting_one_commit_without_differential(mock_secrets, fake_hg_repo): +def test_changesets_overwriting_one_commit_without_differential( + mock_secrets, 
fake_hg_repo +): hg, local, remote = fake_hg_repo - add_file(hg, local, 'file', '1\n2\n3\n4\n5\n6\n7\n') + add_file(hg, local, "file", "1\n2\n3\n4\n5\n6\n7\n") commit(hg, 1) - add_file(hg, local, 'file', '1\n2\n3\n42\n5\n6\n7\n') + add_file(hg, local, "file", "1\n2\n3\n42\n5\n6\n7\n") revision = commit(hg) - hg.push(dest=bytes(remote, 'ascii')) + hg.push(dest=bytes(remote, "ascii")) copy_pushlog_database(remote, local) phabricator = PhabricatorUploader(local, revision) - report = covdir_report({ - 'source_files': [{ - 'name': 'file', - 'coverage': [None, 0, 1, 1, 1, 1, 0], - }] - }) + report = covdir_report( + {"source_files": [{"name": "file", "coverage": [None, 0, 1, 1, 1, 1, 0]}]} + ) results = phabricator.generate(report, changesets(local, revision)) assert results == { - 1: { - 'file': { - 'coverage': 'NUCXCCU', - 'lines_added': 6, - 'lines_covered': 4 - } - } + 1: {"file": {"coverage": "NUCXCCU", "lines_added": 6, "lines_covered": 4}} } @@ -343,60 +283,47 @@ def test_changesets_overwriting_one_commit_without_differential(mock_secrets, fa def test_removed_file(mock_secrets, fake_hg_repo): hg, local, remote = fake_hg_repo - add_file(hg, local, 'file', '1\n2\n3\n4\n5\n6\n7\n') + add_file(hg, local, "file", "1\n2\n3\n4\n5\n6\n7\n") commit(hg, 1) - hg.remove(files=[bytes(os.path.join(local, 'file'), 'ascii')]) + hg.remove(files=[bytes(os.path.join(local, "file"), "ascii")]) revision = commit(hg) - hg.push(dest=bytes(remote, 'ascii')) + hg.push(dest=bytes(remote, "ascii")) copy_pushlog_database(remote, local) phabricator = PhabricatorUploader(local, revision) - report = covdir_report({ - 'source_files': [] - }) + report = covdir_report({"source_files": []}) results = phabricator.generate(report, changesets(local, revision)) - assert results == { - 1: {} - } + assert results == {1: {}} @responses.activate def test_backout_removed_file(mock_secrets, fake_hg_repo): hg, local, remote = fake_hg_repo - add_file(hg, local, 'file', '1\n2\n3\n4\n5\n6\n7\n') + add_file(hg, local, "file", "1\n2\n3\n4\n5\n6\n7\n") commit(hg, 1) - hg.remove(files=[bytes(os.path.join(local, 'file'), 'ascii')]) + hg.remove(files=[bytes(os.path.join(local, "file"), "ascii")]) revision = commit(hg, 2) - hg.backout(rev=revision, message='backout', user='marco') - revision = hg.log(limit=1)[0][1].decode('ascii') + hg.backout(rev=revision, message="backout", user="marco") + revision = hg.log(limit=1)[0][1].decode("ascii") - hg.push(dest=bytes(remote, 'ascii')) + hg.push(dest=bytes(remote, "ascii")) copy_pushlog_database(remote, local) phabricator = PhabricatorUploader(local, revision) - report = covdir_report({ - 'source_files': [{ - 'name': 'file', - 'coverage': [None, 0, 1, 1, 1, 1, 0], - }] - }) + report = covdir_report( + {"source_files": [{"name": "file", "coverage": [None, 0, 1, 1, 1, 1, 0]}]} + ) results = phabricator.generate(report, changesets(local, revision)) assert results == { - 1: { - 'file': { - 'coverage': 'NUCCCCU', - 'lines_added': 7, - 'lines_covered': 5 - } - }, - 2: {} + 1: {"file": {"coverage": "NUCCCCU", "lines_added": 7, "lines_covered": 5}}, + 2: {}, } diff --git a/bot/tests/test_suite_reports.py b/bot/tests/test_suite_reports.py index a4b8a6419..8586968be 100644 --- a/bot/tests/test_suite_reports.py +++ b/bot/tests/test_suite_reports.py @@ -4,4 +4,4 @@ def test_ok(): - assert(suite_reports) + assert suite_reports diff --git a/bot/tests/test_taskcluster.py b/bot/tests/test_taskcluster.py index ec7cb5e19..43eee4986 100644 --- a/bot/tests/test_taskcluster.py +++ b/bot/tests/test_taskcluster.py @@ 
-13,134 +13,177 @@ @responses.activate def test_get_task_status(LINUX_TASK_ID, LINUX_TASK_STATUS): - responses.add(responses.GET, f'https://queue.taskcluster.net/v1/task/{LINUX_TASK_ID}/status', json=LINUX_TASK_STATUS, status=200) + responses.add( + responses.GET, + f"https://queue.taskcluster.net/v1/task/{LINUX_TASK_ID}/status", + json=LINUX_TASK_STATUS, + status=200, + ) assert taskcluster.get_task_status(LINUX_TASK_ID) == LINUX_TASK_STATUS @responses.activate def test_get_task_details(LINUX_TASK_ID, LINUX_TASK): - responses.add(responses.GET, f'https://queue.taskcluster.net/v1/task/{LINUX_TASK_ID}', json=LINUX_TASK, status=200) + responses.add( + responses.GET, + f"https://queue.taskcluster.net/v1/task/{LINUX_TASK_ID}", + json=LINUX_TASK, + status=200, + ) assert taskcluster.get_task_details(LINUX_TASK_ID) == LINUX_TASK @responses.activate def test_get_task(LINUX_TASK_ID, LATEST_LINUX, WIN_TASK_ID, LATEST_WIN): - responses.add(responses.GET, 'https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.revision.b2a9a4bb5c94de179ae7a3f52fde58c0e2897498.firefox.linux64-ccov-debug', json=LATEST_LINUX, status=200) # noqa - assert taskcluster.get_task('mozilla-central', 'b2a9a4bb5c94de179ae7a3f52fde58c0e2897498', 'linux') == LINUX_TASK_ID - - responses.add(responses.GET, 'https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.revision.916103b8675d9fdb28b891cac235d74f9f475942.firefox.win64-ccov-debug', json=LATEST_WIN, status=200) # noqa - assert taskcluster.get_task('mozilla-central', '916103b8675d9fdb28b891cac235d74f9f475942', 'windows') == WIN_TASK_ID + responses.add( + responses.GET, + "https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.revision.b2a9a4bb5c94de179ae7a3f52fde58c0e2897498.firefox.linux64-ccov-debug", + json=LATEST_LINUX, + status=200, + ) # noqa + assert ( + taskcluster.get_task( + "mozilla-central", "b2a9a4bb5c94de179ae7a3f52fde58c0e2897498", "linux" + ) + == LINUX_TASK_ID + ) + + responses.add( + responses.GET, + "https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.revision.916103b8675d9fdb28b891cac235d74f9f475942.firefox.win64-ccov-debug", + json=LATEST_WIN, + status=200, + ) # noqa + assert ( + taskcluster.get_task( + "mozilla-central", "916103b8675d9fdb28b891cac235d74f9f475942", "windows" + ) + == WIN_TASK_ID + ) @responses.activate def test_get_task_not_found(TASK_NOT_FOUND): - responses.add(responses.GET, 'https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.revision.b2a9a4bb5c94de179ae7a3f52fde58c0e2897498.firefox.linux64-ccov-debug', json=TASK_NOT_FOUND, status=404) # noqa - - assert taskcluster.get_task('mozilla-central', 'b2a9a4bb5c94de179ae7a3f52fde58c0e2897498', 'linux') is None + responses.add( + responses.GET, + "https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.revision.b2a9a4bb5c94de179ae7a3f52fde58c0e2897498.firefox.linux64-ccov-debug", + json=TASK_NOT_FOUND, + status=404, + ) # noqa + + assert ( + taskcluster.get_task( + "mozilla-central", "b2a9a4bb5c94de179ae7a3f52fde58c0e2897498", "linux" + ) + is None + ) @responses.activate def test_get_task_failure(TASK_NOT_FOUND): err = TASK_NOT_FOUND.copy() - err['code'] = 'RandomError' - responses.add(responses.GET, 'https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.revision.b2a9a4bb5c94de179ae7a3f52fde58c0e2897498.firefox.linux64-ccov-debug', json=err, status=500) # noqa - - with pytest.raises(taskcluster.TaskclusterException, match='Unknown TaskCluster index error.'): - taskcluster.get_task('mozilla-central', 
'b2a9a4bb5c94de179ae7a3f52fde58c0e2897498', 'linux') + err["code"] = "RandomError" + responses.add( + responses.GET, + "https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.revision.b2a9a4bb5c94de179ae7a3f52fde58c0e2897498.firefox.linux64-ccov-debug", + json=err, + status=500, + ) # noqa + + with pytest.raises( + taskcluster.TaskclusterException, match="Unknown TaskCluster index error." + ): + taskcluster.get_task( + "mozilla-central", "b2a9a4bb5c94de179ae7a3f52fde58c0e2897498", "linux" + ) @responses.activate def test_get_task_artifacts(LINUX_TASK_ID, LINUX_TASK_ARTIFACTS): - responses.add(responses.GET, f'https://queue.taskcluster.net/v1/task/{LINUX_TASK_ID}/artifacts', json=LINUX_TASK_ARTIFACTS, status=200) - assert taskcluster.get_task_artifacts(LINUX_TASK_ID) == LINUX_TASK_ARTIFACTS['artifacts'] + responses.add( + responses.GET, + f"https://queue.taskcluster.net/v1/task/{LINUX_TASK_ID}/artifacts", + json=LINUX_TASK_ARTIFACTS, + status=200, + ) + assert ( + taskcluster.get_task_artifacts(LINUX_TASK_ID) + == LINUX_TASK_ARTIFACTS["artifacts"] + ) @responses.activate def test_get_tasks_in_group(GROUP_TASKS_1, GROUP_TASKS_2): - responses.add(responses.GET, 'https://queue.taskcluster.net/v1/task-group/aPt9FbIdQwmhwDIPDYLuaw/list?limit=200', json=GROUP_TASKS_1, status=200, match_querystring=True) # noqa - responses.add(responses.GET, 'https://queue.taskcluster.net/v1/task-group/aPt9FbIdQwmhwDIPDYLuaw/list?continuationToken=1%2132%21YVB0OUZiSWRRd21od0RJUERZTHVhdw--~1%2132%21ZnJVcGRRT0VTalN0Nm9Ua1Ztcy04UQ--&limit=200', json=GROUP_TASKS_2, status=200, match_querystring=True) # noqa - - assert taskcluster.get_tasks_in_group('aPt9FbIdQwmhwDIPDYLuaw') == GROUP_TASKS_1['tasks'] + GROUP_TASKS_2['tasks'] + responses.add( + responses.GET, + "https://queue.taskcluster.net/v1/task-group/aPt9FbIdQwmhwDIPDYLuaw/list?limit=200", + json=GROUP_TASKS_1, + status=200, + match_querystring=True, + ) # noqa + responses.add( + responses.GET, + "https://queue.taskcluster.net/v1/task-group/aPt9FbIdQwmhwDIPDYLuaw/list?continuationToken=1%2132%21YVB0OUZiSWRRd21od0RJUERZTHVhdw--~1%2132%21ZnJVcGRRT0VTalN0Nm9Ua1Ztcy04UQ--&limit=200", # noqa + json=GROUP_TASKS_2, + status=200, + match_querystring=True, + ) # noqa + + assert ( + taskcluster.get_tasks_in_group("aPt9FbIdQwmhwDIPDYLuaw") + == GROUP_TASKS_1["tasks"] + GROUP_TASKS_2["tasks"] + ) def test_is_coverage_task(): - assert taskcluster.is_coverage_task({ - 'task': { - 'metadata': { - 'name': 'test-linux64-ccov/debug-mochitest-1' - } - } - }) - - assert not taskcluster.is_coverage_task({ - 'task': { - 'metadata': { - 'name': 'test-linux64/debug-mochitest-1' - } - } - }) - - assert taskcluster.is_coverage_task({ - 'task': { - 'metadata': { - 'name': 'test-windows10-64-ccov/debug-cppunit' - } - } - }) - - assert not taskcluster.is_coverage_task({ - 'task': { - 'metadata': { - 'name': 'test-windows10-64/debug-cppunit' - } - } - }) - - assert taskcluster.is_coverage_task({ - 'task': { - 'metadata': { - 'name': 'build-win64-ccov/debug' - } - } - }) - - assert not taskcluster.is_coverage_task({ - 'task': { - 'metadata': { - 'name': 'build-win64/debug' - } - } - }) - - assert taskcluster.is_coverage_task({ - 'task': { - 'metadata': { - 'name': 'build-linux64-ccov/debug' - } - } - }) - - assert not taskcluster.is_coverage_task({ - 'task': { - 'metadata': { - 'name': 'build-linux64/debug' - } - } - }) + assert taskcluster.is_coverage_task( + {"task": {"metadata": {"name": "test-linux64-ccov/debug-mochitest-1"}}} + ) + + assert not taskcluster.is_coverage_task( + 
{"task": {"metadata": {"name": "test-linux64/debug-mochitest-1"}}} + ) + + assert taskcluster.is_coverage_task( + {"task": {"metadata": {"name": "test-windows10-64-ccov/debug-cppunit"}}} + ) + + assert not taskcluster.is_coverage_task( + {"task": {"metadata": {"name": "test-windows10-64/debug-cppunit"}}} + ) + + assert taskcluster.is_coverage_task( + {"task": {"metadata": {"name": "build-win64-ccov/debug"}}} + ) + + assert not taskcluster.is_coverage_task( + {"task": {"metadata": {"name": "build-win64/debug"}}} + ) + + assert taskcluster.is_coverage_task( + {"task": {"metadata": {"name": "build-linux64-ccov/debug"}}} + ) + + assert not taskcluster.is_coverage_task( + {"task": {"metadata": {"name": "build-linux64/debug"}}} + ) def test_get_chunk(): tests = [ - ('test-linux64-ccov/debug-mochitest-1', 'mochitest-1'), - ('test-linux64-ccov/debug-mochitest-e10s-7', 'mochitest-7'), - ('test-linux64-ccov/debug-cppunit', 'cppunit'), - ('test-linux64-ccov/debug-firefox-ui-functional-remote-e10s', 'firefox-ui-functional-remote'), - ('test-windows10-64-ccov/debug-mochitest-1', 'mochitest-1'), - ('test-windows10-64-ccov/debug-mochitest-e10s-7', 'mochitest-7'), - ('test-windows10-64-ccov/debug-cppunit', 'cppunit'), - ('build-linux64-ccov/debug', 'build'), - ('build-android-test-ccov/opt', 'build'), - ('build-win64-ccov/debug', 'build'), + ("test-linux64-ccov/debug-mochitest-1", "mochitest-1"), + ("test-linux64-ccov/debug-mochitest-e10s-7", "mochitest-7"), + ("test-linux64-ccov/debug-cppunit", "cppunit"), + ( + "test-linux64-ccov/debug-firefox-ui-functional-remote-e10s", + "firefox-ui-functional-remote", + ), + ("test-windows10-64-ccov/debug-mochitest-1", "mochitest-1"), + ("test-windows10-64-ccov/debug-mochitest-e10s-7", "mochitest-7"), + ("test-windows10-64-ccov/debug-cppunit", "cppunit"), + ("build-linux64-ccov/debug", "build"), + ("build-android-test-ccov/opt", "build"), + ("build-win64-ccov/debug", "build"), ] for (name, chunk) in tests: @@ -149,11 +192,11 @@ def test_get_chunk(): def test_get_suite(): tests = [ - ('mochitest-1', 'mochitest'), - ('mochitest-7', 'mochitest'), - ('cppunit', 'cppunit'), - ('firefox-ui-functional-remote', 'firefox-ui-functional-remote'), - ('build', 'build'), + ("mochitest-1", "mochitest"), + ("mochitest-7", "mochitest"), + ("cppunit", "cppunit"), + ("firefox-ui-functional-remote", "firefox-ui-functional-remote"), + ("build", "build"), ] for (chunk, suite) in tests: @@ -162,43 +205,57 @@ def test_get_suite(): def test_get_platform(): tests = [ - ('test-linux64-ccov/debug-mochitest-1', 'linux'), - ('test-windows10-64-ccov/debug-mochitest-1', 'windows'), - ('build-linux64-ccov/debug', 'linux'), - ('build-win64-ccov/debug', 'windows'), - ('build-android-test-ccov/opt', 'android-test'), - ('test-android-em-4.3-arm7-api-16-ccov/debug-robocop-2', 'android-emulator'), + ("test-linux64-ccov/debug-mochitest-1", "linux"), + ("test-windows10-64-ccov/debug-mochitest-1", "windows"), + ("build-linux64-ccov/debug", "linux"), + ("build-win64-ccov/debug", "windows"), + ("build-android-test-ccov/opt", "android-test"), + ("test-android-em-4.3-arm7-api-16-ccov/debug-robocop-2", "android-emulator"), ] for (name, platform) in tests: assert taskcluster.get_platform(name) == platform -@mock.patch('time.sleep') +@mock.patch("time.sleep") @responses.activate def test_download_artifact_forbidden(mocked_sleep, tmpdir): - responses.add(responses.GET, 'https://queue.taskcluster.net/v1/task/FBdocjnAQOW_GJDOfmgjxw/artifacts/public/test_info/code-coverage-grcov.zip', body='xml error...', status=403) # 
noqa - - with pytest.raises(requests.exceptions.HTTPError, match='403 Client Error: Forbidden for url: https://queue.taskcluster.net/v1/task/FBdocjnAQOW_GJDOfmgjxw/artifacts/public/test_info/code-coverage-grcov.zip'): # noqa + responses.add( + responses.GET, + "https://queue.taskcluster.net/v1/task/FBdocjnAQOW_GJDOfmgjxw/artifacts/public/test_info/code-coverage-grcov.zip", # noqa + body="xml error...", + status=403, + ) + + with pytest.raises( + requests.exceptions.HTTPError, + match="403 Client Error: Forbidden for url: https://queue.taskcluster.net/v1/task/FBdocjnAQOW_GJDOfmgjxw/artifacts/public/test_info/code-coverage-grcov.zip", # noqa + ): taskcluster.download_artifact( - os.path.join(tmpdir.strpath, 'windows_reftest-6_code-coverage-grcov.zip'), - 'FBdocjnAQOW_GJDOfmgjxw', - 'public/test_info/code-coverage-grcov.zip' + os.path.join(tmpdir.strpath, "windows_reftest-6_code-coverage-grcov.zip"), + "FBdocjnAQOW_GJDOfmgjxw", + "public/test_info/code-coverage-grcov.zip", ) assert mocked_sleep.call_count == 4 -@mock.patch('time.sleep') +@mock.patch("time.sleep") @responses.activate def test_download_artifact_badzip(mocked_sleep, tmpdir): - responses.add(responses.GET, 'https://queue.taskcluster.net/v1/task/FBdocjnAQOW_GJDOfmgjxw/artifacts/public/test_info/code-coverage-grcov.zip', body='NOT A ZIP FILE', status=200, stream=True) # noqa - - with pytest.raises(BadZipFile, match='File is not a zip file'): + responses.add( + responses.GET, + "https://queue.taskcluster.net/v1/task/FBdocjnAQOW_GJDOfmgjxw/artifacts/public/test_info/code-coverage-grcov.zip", # noqa + body="NOT A ZIP FILE", + status=200, + stream=True, + ) + + with pytest.raises(BadZipFile, match="File is not a zip file"): taskcluster.download_artifact( - os.path.join(tmpdir.strpath, 'windows_reftest-6_code-coverage-grcov.zip'), - 'FBdocjnAQOW_GJDOfmgjxw', - 'public/test_info/code-coverage-grcov.zip' + os.path.join(tmpdir.strpath, "windows_reftest-6_code-coverage-grcov.zip"), + "FBdocjnAQOW_GJDOfmgjxw", + "public/test_info/code-coverage-grcov.zip", ) assert mocked_sleep.call_count == 4 diff --git a/bot/tests/test_zero_coverage.py b/bot/tests/test_zero_coverage.py index de405c1a1..d58126efe 100644 --- a/bot/tests/test_zero_coverage.py +++ b/bot/tests/test_zero_coverage.py @@ -8,57 +8,96 @@ from code_coverage_bot.zero_coverage import ZeroCov -def test_zero_coverage(tmpdir, - grcov_artifact, grcov_uncovered_artifact, - jsvm_artifact, jsvm_uncovered_artifact, - grcov_uncovered_function_artifact, jsvm_uncovered_function_artifact, - fake_hg_repo_with_contents): +def test_zero_coverage( + tmpdir, + grcov_artifact, + grcov_uncovered_artifact, + jsvm_artifact, + jsvm_uncovered_artifact, + grcov_uncovered_function_artifact, + jsvm_uncovered_function_artifact, + fake_hg_repo_with_contents, +): tmp_path = tmpdir.strpath - hgrev = '314159265358' - ZeroCov(fake_hg_repo_with_contents).generate([ - grcov_artifact, grcov_uncovered_artifact, - jsvm_artifact, jsvm_uncovered_artifact, - grcov_uncovered_function_artifact, jsvm_uncovered_function_artifact - ], hgrev, out_dir=tmp_path) + hgrev = "314159265358" + ZeroCov(fake_hg_repo_with_contents).generate( + [ + grcov_artifact, + grcov_uncovered_artifact, + jsvm_artifact, + jsvm_uncovered_artifact, + grcov_uncovered_function_artifact, + jsvm_uncovered_function_artifact, + ], + hgrev, + out_dir=tmp_path, + ) - with open(os.path.join(tmp_path, 'zero_coverage_report.json'), 'r') as f: + with open(os.path.join(tmp_path, "zero_coverage_report.json"), "r") as f: zero_coverage_report = json.load(f) - assert 
'hg_revision' in zero_coverage_report and zero_coverage_report['hg_revision'] == hgrev - assert 'files' in zero_coverage_report - zero_coverage_functions = zero_coverage_report['files'] + assert ( + "hg_revision" in zero_coverage_report + and zero_coverage_report["hg_revision"] == hgrev + ) + assert "files" in zero_coverage_report + zero_coverage_functions = zero_coverage_report["files"] today = datetime.utcnow() today = pytz.utc.localize(today) today = today.strftime(ZeroCov.DATE_FORMAT) expected_zero_coverage_functions = [ - {'funcs': 1, 'name': 'mozglue/build/dummy.cpp', 'uncovered': True, - 'size': 1, 'commits': 2, - 'first_push_date': today, 'last_push_date': today}, - {'funcs': 2, 'name': 'toolkit/components/osfile/osfile.jsm', 'uncovered': False, - 'size': 2, 'commits': 2, - 'first_push_date': today, 'last_push_date': today}, - {'funcs': 1, 'name': 'js/src/jit/JIT.cpp', 'uncovered': False, - 'size': 3, 'commits': 2, - 'first_push_date': today, 'last_push_date': today}, - {'funcs': 1, 'name': 'toolkit/components/osfile/osfile-win.jsm', 'uncovered': True, - 'size': 4, 'commits': 2, - 'first_push_date': today, 'last_push_date': today}, + { + "funcs": 1, + "name": "mozglue/build/dummy.cpp", + "uncovered": True, + "size": 1, + "commits": 2, + "first_push_date": today, + "last_push_date": today, + }, + { + "funcs": 2, + "name": "toolkit/components/osfile/osfile.jsm", + "uncovered": False, + "size": 2, + "commits": 2, + "first_push_date": today, + "last_push_date": today, + }, + { + "funcs": 1, + "name": "js/src/jit/JIT.cpp", + "uncovered": False, + "size": 3, + "commits": 2, + "first_push_date": today, + "last_push_date": today, + }, + { + "funcs": 1, + "name": "toolkit/components/osfile/osfile-win.jsm", + "uncovered": True, + "size": 4, + "commits": 2, + "first_push_date": today, + "last_push_date": today, + }, ] assert len(zero_coverage_functions) == len(expected_zero_coverage_functions) while len(expected_zero_coverage_functions): exp_item = expected_zero_coverage_functions.pop() found = False for found_item in zero_coverage_functions: - if found_item['name'] == exp_item['name']: + if found_item["name"] == exp_item["name"]: found = True break assert found - assert found_item['funcs'] == exp_item['funcs'] - assert found_item['first_push_date'] == exp_item['first_push_date'] - assert found_item['last_push_date'] == exp_item['last_push_date'] - assert found_item['size'] == exp_item['size'] - assert found_item['commits'] == exp_item['commits'] - assert found_item['uncovered'] == exp_item['uncovered'] + assert found_item["funcs"] == exp_item["funcs"] + assert found_item["first_push_date"] == exp_item["first_push_date"] + assert found_item["last_push_date"] == exp_item["last_push_date"] + assert found_item["size"] == exp_item["size"] + assert found_item["commits"] == exp_item["commits"] + assert found_item["uncovered"] == exp_item["uncovered"] diff --git a/bot/tools/covdir_gen.py b/bot/tools/covdir_gen.py index 38f704005..e2be44679 100644 --- a/bot/tools/covdir_gen.py +++ b/bot/tools/covdir_gen.py @@ -11,30 +11,24 @@ from code_coverage_bot.secrets import secrets from code_coverage_tools.taskcluter import TaskclusterConfig -CODECOV_URL = 'https://codecov.io/api/gh/marco-c/gecko-dev/commit' -MC_REPO = 'https://hg.mozilla.org/mozilla-central' -HOOK_GROUP = 'project-releng' -HOOK_ID = 'services-{app_channel}-codecoverage/bot-generation' +CODECOV_URL = "https://codecov.io/api/gh/marco-c/gecko-dev/commit" +MC_REPO = "https://hg.mozilla.org/mozilla-central" +HOOK_GROUP = "project-releng" +HOOK_ID = 
"services-{app_channel}-codecoverage/bot-generation" taskcluster = TaskclusterConfig() taskcluster.auth( - os.environ['TASKCLUSTER_CLIENT_ID'], - os.environ['TASKCLUSTER_ACCESS_TOKEN'], -) -secrets.load( - os.environ['TASKCLUSTER_SECRET'], + os.environ["TASKCLUSTER_CLIENT_ID"], os.environ["TASKCLUSTER_ACCESS_TOKEN"] ) +secrets.load(os.environ["TASKCLUSTER_SECRET"]) def list_commits(codecov_token, maximum=None, unique=None, skip_commits=[]): - ''' + """ List all the commits ingested on codecov - ''' - assert unique in (None, 'week', 'day') - params = { - 'access_token': codecov_token, - 'page': 1, - } + """ + assert unique in (None, "week", "day") + params = {"access_token": codecov_token, "page": 1} nb = 0 dates = set() while True: @@ -42,25 +36,27 @@ def list_commits(codecov_token, maximum=None, unique=None, skip_commits=[]): resp.raise_for_status() data = resp.json() - if not data['commits']: + if not data["commits"]: return - for commit in data['commits']: + for commit in data["commits"]: # Skip commit if that day or week has already been processed earlier - day = datetime.strptime(commit['timestamp'], '%Y-%m-%d %H:%M:%S').date() + day = datetime.strptime(commit["timestamp"], "%Y-%m-%d %H:%M:%S").date() week = day.isocalendar()[:2] - if unique == 'day' and day in dates: + if unique == "day" and day in dates: continue - if unique == 'week' and week in dates: + if unique == "week" and week in dates: continue dates.add(day) dates.add(week) # Convert git to mercurial revision - commit['mercurial'] = git_to_mercurial(commit['commitid']) - if commit['mercurial'] in skip_commits: - print('Skipping already processed commit {}'.format(commit['mercurial'])) + commit["mercurial"] = git_to_mercurial(commit["commitid"]) + if commit["mercurial"] in skip_commits: + print( + "Skipping already processed commit {}".format(commit["mercurial"]) + ) continue yield commit @@ -69,21 +65,23 @@ def list_commits(codecov_token, maximum=None, unique=None, skip_commits=[]): if maximum is not None and nb >= maximum: return - params['page'] += 1 + params["page"] += 1 def trigger_task(task_group_id, commit): - ''' + """ Trigger a code coverage task to build covdir at a specified revision - ''' - assert 'mercurial' in commit - name = 'covdir {} - {} - {}'.format(secrets[secrets.APP_CHANNEL], commit['timestamp'], commit['mercurial']) - hooks = taskcluster.get_service('hooks') + """ + assert "mercurial" in commit + name = "covdir {} - {} - {}".format( + secrets[secrets.APP_CHANNEL], commit["timestamp"], commit["mercurial"] + ) + hooks = taskcluster.get_service("hooks") payload = { - 'REPOSITORY': MC_REPO, - 'REVISION': commit['mercurial'], - 'taskGroupId': task_group_id, - 'taskName': name, + "REPOSITORY": MC_REPO, + "REVISION": commit["mercurial"], + "taskGroupId": task_group_id, + "taskName": name, } hook_id = HOOK_ID.format(app_channel=secrets[secrets.APP_CHANNEL]) return hooks.triggerHook(HOOK_GROUP, hook_id, payload) @@ -92,41 +90,61 @@ def trigger_task(task_group_id, commit): def main(): # CLI args parser = argparse.ArgumentParser() - parser.add_argument('--nb-tasks', type=int, default=5, help='NB of tasks to create') - parser.add_argument('--unique', choices=('day', 'week'), help='Trigger only one task per day or week') - parser.add_argument('--group', type=str, default=slugId(), help='Task group to create/update') - parser.add_argument('--dry-run', action='store_true', default=False, help='List actions without triggering any new task') - parser.add_argument('--codecov-token', type=str, 
default=os.environ.get('CODECOV_TOKEN'), help='Codecov access token') + parser.add_argument("--nb-tasks", type=int, default=5, help="NB of tasks to create") + parser.add_argument( + "--unique", + choices=("day", "week"), + help="Trigger only one task per day or week", + ) + parser.add_argument( + "--group", type=str, default=slugId(), help="Task group to create/update" + ) + parser.add_argument( + "--dry-run", + action="store_true", + default=False, + help="List actions without triggering any new task", + ) + parser.add_argument( + "--codecov-token", + type=str, + default=os.environ.get("CODECOV_TOKEN"), + help="Codecov access token", + ) args = parser.parse_args() # Download revision mapper database - print('Downloading revision database...') + print("Downloading revision database...") download_mapfile() # List existing tags & commits - print('Group', args.group) - queue = taskcluster.get_service('queue') + print("Group", args.group) + queue = taskcluster.get_service("queue") try: group = queue.listTaskGroup(args.group) commits = [ - task['task']['payload']['env']['REVISION'] - for task in group['tasks'] - if task['status']['state'] not in ('failed', 'exception') + task["task"]["payload"]["env"]["REVISION"] + for task in group["tasks"] + if task["status"]["state"] not in ("failed", "exception") ] - print('Found {} commits processed in task group {}'.format(len(commits), args.group)) + print( + "Found {} commits processed in task group {}".format( + len(commits), args.group + ) + ) except Exception as e: - print('Invalid task group : {}'.format(e)) + print("Invalid task group : {}".format(e)) commits = [] # Trigger a task for each commit for commit in list_commits(args.codecov_token, args.nb_tasks, args.unique, commits): - print('Triggering commit {mercurial} from {timestamp}'.format(**commit)) + print("Triggering commit {mercurial} from {timestamp}".format(**commit)) if args.dry_run: - print('>>> No trigger on dry run') + print(">>> No trigger on dry run") else: out = trigger_task(args.group, commit) - print('>>>', out['status']['taskId']) + print(">>>", out["status"]["taskId"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/bot/tools/restart_tasks.py b/bot/tools/restart_tasks.py index 54cca0669..58907eb7a 100644 --- a/bot/tools/restart_tasks.py +++ b/bot/tools/restart_tasks.py @@ -7,45 +7,44 @@ from code_coverage_bot.secrets import secrets from code_coverage_bot.taskcluster import taskcluster_config -CODECOV_URL = 'https://codecov.io/api/gh/marco-c/gecko-dev/commit' -HOOK_GROUP = 'project-relman' -HOOK_ID = 'code-coverage-{app_channel}' +CODECOV_URL = "https://codecov.io/api/gh/marco-c/gecko-dev/commit" +HOOK_GROUP = "project-relman" +HOOK_ID = "code-coverage-{app_channel}" taskcluster_config.auth( - os.environ.get('TASKCLUSTER_CLIENT_ID'), - os.environ.get('TASKCLUSTER_ACCESS_TOKEN'), + os.environ.get("TASKCLUSTER_CLIENT_ID"), os.environ.get("TASKCLUSTER_ACCESS_TOKEN") ) -secrets.load( - os.environ['TASKCLUSTER_SECRET'], -) -queue = taskcluster_config.get_service('queue') +secrets.load(os.environ["TASKCLUSTER_SECRET"]) +queue = taskcluster_config.get_service("queue") def list_commits(tasks): - ''' + """ Read the revision from an existing code coverage task - ''' + """ for task_id in tasks: try: task = queue.task(task_id) - env = task['payload']['env'] - yield env['REPOSITORY'], env['REVISION'] + env = task["payload"]["env"] + yield env["REPOSITORY"], env["REVISION"] except Exception as e: - print('Failed to load task {}: {}'.format(task_id, e)) + 
print("Failed to load task {}: {}".format(task_id, e)) def trigger_task(task_group_id, repository, commit): - ''' + """ Trigger a code coverage task to build covdir at a specified revision - ''' + """ assert isinstance(commit, str) - name = 'covdir {} - {} - {}'.format(secrets[secrets.APP_CHANNEL], repository, commit) - hooks = taskcluster_config.get_service('hooks') + name = "covdir {} - {} - {}".format( + secrets[secrets.APP_CHANNEL], repository, commit + ) + hooks = taskcluster_config.get_service("hooks") payload = { - 'REPOSITORY': repository, - 'REVISION': commit, - 'taskGroupId': task_group_id, - 'taskName': name, + "REPOSITORY": repository, + "REVISION": commit, + "taskGroupId": task_group_id, + "taskName": name, } hook_id = HOOK_ID.format(app_channel=secrets[secrets.APP_CHANNEL]) return hooks.triggerHook(HOOK_GROUP, hook_id, payload) @@ -54,46 +53,62 @@ def trigger_task(task_group_id, repository, commit): def main(): # CLI args parser = argparse.ArgumentParser() - parser.add_argument('--nb-tasks', type=int, default=5, help='NB of tasks to create') - parser.add_argument('--group', type=str, default=slugId(), help='Task group to create/update') - parser.add_argument('--dry-run', action='store_true', default=False, help='List actions without triggering any new task') - parser.add_argument('tasks', nargs='+', help='Existing tasks to retrigger') + parser.add_argument("--nb-tasks", type=int, default=5, help="NB of tasks to create") + parser.add_argument( + "--group", type=str, default=slugId(), help="Task group to create/update" + ) + parser.add_argument( + "--dry-run", + action="store_true", + default=False, + help="List actions without triggering any new task", + ) + parser.add_argument("tasks", nargs="+", help="Existing tasks to retrigger") args = parser.parse_args() # List existing tags & commits - print('Group', args.group) + print("Group", args.group) try: group = queue.listTaskGroup(args.group) - commits = set([ - (task['task']['payload']['env']['REPOSITORY'], task['task']['payload']['env']['REVISION']) - for task in group['tasks'] - if task['status']['state'] not in ('failed', 'exception') - ]) - print('Found {} commits processed in task group {}'.format(len(commits), args.group)) + commits = set( + [ + ( + task["task"]["payload"]["env"]["REPOSITORY"], + task["task"]["payload"]["env"]["REVISION"], + ) + for task in group["tasks"] + if task["status"]["state"] not in ("failed", "exception") + ] + ) + print( + "Found {} commits processed in task group {}".format( + len(commits), args.group + ) + ) except Exception as e: - print('Invalid task group : {}'.format(e)) + print("Invalid task group : {}".format(e)) commits = set() # Trigger a task for each commit triggered = 0 for repository, commit in list_commits(args.tasks): if (repository, commit) in commits: - print('Skipping existing commit {} {}'.format(repository, commit)) + print("Skipping existing commit {} {}".format(repository, commit)) continue - print('Triggering {} : {}'.format(repository, commit)) + print("Triggering {} : {}".format(repository, commit)) if args.dry_run: - print('>>> No trigger on dry run') + print(">>> No trigger on dry run") else: out = trigger_task(args.group, repository, commit) - print('>>>', out['status']['taskId']) + print(">>>", out["status"]["taskId"]) triggered += 1 commits.add((repository, commit)) if triggered >= args.nb_tasks: - print('Max nb tasks reached !') + print("Max nb tasks reached !") break -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/tools/code_coverage_tools/gcp.py b/tools/code_coverage_tools/gcp.py
index 16a21e2fa..6aff4fc97 100644
--- a/tools/code_coverage_tools/gcp.py
+++ b/tools/code_coverage_tools/gcp.py
@@ -4,16 +4,16 @@
 def get_bucket(service_account):
-    '''
+    """
     Build a Google Cloud Storage client & bucket from Taskcluster secret
-    '''
+    """
     assert isinstance(service_account, dict)
 
     # Load credentials from Taskcluster secret
-    if 'bucket' not in service_account:
-        raise KeyError('Missing bucket in GOOGLE_CLOUD_STORAGE')
-    bucket = service_account['bucket']
+    if "bucket" not in service_account:
+        raise KeyError("Missing bucket in GOOGLE_CLOUD_STORAGE")
+    bucket = service_account["bucket"]
 
     # Use those credentials to create a Storage client
     # The project is needed to avoid checking env variables and crashing
diff --git a/tools/code_coverage_tools/log.py b/tools/code_coverage_tools/log.py
index 7daed0f4c..8195b88f7 100644
--- a/tools/code_coverage_tools/log.py
+++ b/tools/code_coverage_tools/log.py
@@ -11,41 +11,41 @@
 class UnstructuredRenderer(structlog.processors.KeyValueRenderer):
-
     def __call__(self, logger, method_name, event_dict):
         event = None
-        if 'event' in event_dict:
-            event = event_dict.pop('event')
+        if "event" in event_dict:
+            event = event_dict.pop("event")
         if event_dict or event is None:
             # if there are other keys, use the parent class to render them
             # and append to the event
             rendered = super(UnstructuredRenderer, self).__call__(
-                logger, method_name, event_dict)
-            return f'{event} ({rendered})'
+                logger, method_name, event_dict
+            )
+            return f"{event} ({rendered})"
         else:
             return event
 
 
 def setup_papertrail(project_name, channel, PAPERTRAIL_HOST, PAPERTRAIL_PORT):
-    '''
+    """
     Setup papertrail account using taskcluster secrets
-    '''
+    """
     # Setup papertrail
     papertrail = logbook.SyslogHandler(
-        application_name=f'mozilla/release-services/{channel}/{project_name}',
+        application_name=f"mozilla/release-services/{channel}/{project_name}",
         address=(PAPERTRAIL_HOST, int(PAPERTRAIL_PORT)),
         level=logbook.INFO,
-        format_string='{record.time} {record.channel}: {record.message}',
+        format_string="{record.time} {record.channel}: {record.message}",
         bubble=True,
     )
     papertrail.push_application()
 
 
 def setup_sentry(project_name, channel, SENTRY_DSN):
-    '''
+    """
     Setup sentry account using taskcluster secrets
-    '''
+    """
     import raven
     import raven.handlers.logbook
@@ -53,7 +53,7 @@ def setup_sentry(project_name, channel, SENTRY_DSN):
     sentry_client = raven.Client(
         dsn=SENTRY_DSN,
         site=project_name,
-        name='mozilla/release-services',
+        name="mozilla/release-services",
         environment=channel,
         # TODO:
         # release=read(VERSION) we need to promote that as well via secrets
@@ -62,26 +62,25 @@
     )
 
     sentry_handler = raven.handlers.logbook.SentryHandler(
-        sentry_client,
-        level=logbook.WARNING,
-        bubble=True,
+        sentry_client, level=logbook.WARNING, bubble=True
     )
     sentry_handler.push_application()
 
 
-def init_logger(project_name,
-                channel=None,
-                level=logbook.INFO,
-                PAPERTRAIL_HOST=None,
-                PAPERTRAIL_PORT=None,
-                SENTRY_DSN=None
-                ):
+def init_logger(
+    project_name,
+    channel=None,
+    level=logbook.INFO,
+    PAPERTRAIL_HOST=None,
+    PAPERTRAIL_PORT=None,
+    SENTRY_DSN=None,
+):
     if not channel:
-        channel = os.environ.get('APP_CHANNEL')
+        channel = os.environ.get("APP_CHANNEL")
 
     # Output logs on stderr, with color support on consoles
-    fmt = '{record.time} [{record.level_name:<8}] {record.channel}: {record.message}'
+    fmt = "{record.time} [{record.level_name:<8}] {record.channel}: {record.message}"
     handler = logbook.more.ColorizedStderrHandler(level=level, format_string=fmt)
     handler.push_application()
diff --git a/tools/code_coverage_tools/taskcluster.py b/tools/code_coverage_tools/taskcluster.py
index 6cfdcb130..6bf7a74ce 100644
--- a/tools/code_coverage_tools/taskcluster.py
+++ b/tools/code_coverage_tools/taskcluster.py
@@ -9,6 +9,7 @@
 import structlog
 import taskcluster
+
 try:
     import toml
 except ImportError:
@@ -16,19 +17,19 @@
 logger = structlog.get_logger(__name__)
 
-TASKCLUSTER_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
+TASKCLUSTER_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
 
 
 def read_hosts():
-    '''
+    """
     Read /etc/hosts to get hostnames on a Nix env (used for taskclusterProxy)
     Only reads ipv4 entries to avoid duplicates
-    '''
+    """
     out = {}
-    regex = re.compile(r'([\w:\-\.]+)')
-    for line in open('/etc/hosts').readlines():
-        if ':' in line:  # only ipv4
+    regex = re.compile(r"([\w:\-\.]+)")
+    for line in open("/etc/hosts").readlines():
+        if ":" in line:  # only ipv4
             continue
         x = regex.findall(line)
         if not x:
@@ -40,79 +41,81 @@
 class TaskclusterConfig(object):
-    '''
+    """
     Local configuration used to access Taskcluster service and objects
-    '''
+    """
+
     def __init__(self):
         self.options = None
         self.secrets = None
 
     def auth(self, client_id=None, access_token=None):
-        '''
+        """
        Build Taskcluster credentials options
         Supports, by order of preference:
          * directly provided credentials
          * credentials from local configuration
          * credentials from environment variables
          * taskclusterProxy
-        '''
-        self.options = {
-            'maxRetries': 12,
-        }
+        """
+        self.options = {"maxRetries": 12}
 
         if client_id is None and access_token is None:
             # Credentials preference: Use local config from release-services
-            xdg = os.path.expanduser(os.environ.get('XDG_CONFIG_HOME', '~/.config'))
-            config = os.path.join(xdg, 'please', 'config.toml')
+            xdg = os.path.expanduser(os.environ.get("XDG_CONFIG_HOME", "~/.config"))
+            config = os.path.join(xdg, "please", "config.toml")
             try:
-                assert os.path.exists(config), 'No user config available'
+                assert os.path.exists(config), "No user config available"
                 data = toml.load(open(config))
-                client_id = data['common']['taskcluster_client_id']
-                access_token = data['common']['taskcluster_access_token']
-                assert client_id is not None and access_token is not None, \
-                    'Missing values in user folder'
-                logger.info('Using taskcluster credentials from local configuration')
+                client_id = data["common"]["taskcluster_client_id"]
+                access_token = data["common"]["taskcluster_access_token"]
+                assert (
+                    client_id is not None and access_token is not None
+                ), "Missing values in user folder"
+                logger.info("Using taskcluster credentials from local configuration")
             except Exception:
                 # Credentials preference: Use env. variables
-                client_id = os.environ.get('TASKCLUSTER_CLIENT_ID')
-                access_token = os.environ.get('TASKCLUSTER_ACCESS_TOKEN')
-                logger.info('Using taskcluster credentials from environment')
+                client_id = os.environ.get("TASKCLUSTER_CLIENT_ID")
+                access_token = os.environ.get("TASKCLUSTER_ACCESS_TOKEN")
+                logger.info("Using taskcluster credentials from environment")
         else:
-            logger.info('Using taskcluster credentials from cli')
+            logger.info("Using taskcluster credentials from cli")
 
         if client_id is not None and access_token is not None:
             # Use provided credentials
-            self.options['credentials'] = {
-                'clientId': client_id,
-                'accessToken': access_token,
+            self.options["credentials"] = {
+                "clientId": client_id,
+                "accessToken": access_token,
             }
-            self.options['rootUrl'] = 'https://taskcluster.net'
+            self.options["rootUrl"] = "https://taskcluster.net"
 
         else:
             # Get taskcluster proxy host
             # as /etc/hosts is not used in the Nix image (?)
            hosts = read_hosts()
-            if 'taskcluster' not in hosts:
-                raise Exception('Missing taskcluster in /etc/hosts')
+            if "taskcluster" not in hosts:
+                raise Exception("Missing taskcluster in /etc/hosts")
 
             # Load secrets from TC task context
             # with taskclusterProxy
             root_url = f"http://{hosts['taskcluster']}"
 
-            logger.info('Taskcluster Proxy enabled', url=root_url)
-            self.options['rootUrl'] = root_url
+            logger.info("Taskcluster Proxy enabled", url=root_url)
+            self.options["rootUrl"] = root_url
 
     def get_service(self, service_name):
-        '''
+        """
         Build a Taskcluster service instance using current authentication
-        '''
-        assert self.options is not None, 'Not authenticated'
+        """
+        assert self.options is not None, "Not authenticated"
         service = getattr(taskcluster, service_name.capitalize(), None)
-        assert service is not None, 'Invalid Taskcluster service {}'.format(service_name)
+        assert service is not None, "Invalid Taskcluster service {}".format(
+            service_name
+        )
         return service(self.options)
 
     def load_secrets(self, name, project_name, required=[], existing=dict()):
-        '''
+        """
         Fetch a specific set of secrets by name and verify that the required
         secrets exist.
@@ -122,17 +125,17 @@ def load_secrets(self, name, project_name, required=[], existing=dict()):
         object
         - project specific secrets, specified under the `project_name` key in
         the secrets object
-        '''
-        assert name is not None, 'Missing Taskcluster secret name'
+        """
+        assert name is not None, "Missing Taskcluster secret name"
         self.secrets = dict()
         if existing:
             self.secrets = copy.deepcopy(existing)
 
-        secrets_service = self.get_service('secrets')
-        all_secrets = secrets_service.get(name).get('secret', dict())
-        logger.info('Loaded Taskcluster secret', name=name)
+        secrets_service = self.get_service("secrets")
+        all_secrets = secrets_service.get(name).get("secret", dict())
+        logger.info("Loaded Taskcluster secret", name=name)
 
-        secrets_common = all_secrets.get('common', dict())
+        secrets_common = all_secrets.get("common", dict())
         self.secrets.update(secrets_common)
 
         secrets_app = all_secrets.get(project_name, dict())
@@ -140,4 +143,4 @@
 
         for required_secret in required:
             if required_secret not in self.secrets:
-                raise Exception(f'Missing value {required_secret} in secrets.')
+                raise Exception(f"Missing value {required_secret} in secrets.")
diff --git a/tools/setup.py b/tools/setup.py
index ec5dcbaa7..48e04cca7 100644
--- a/tools/setup.py
+++ b/tools/setup.py
@@ -11,27 +11,31 @@ def read_requirements(file_):
     with open(file_) as f:
         for line in f.readlines():
             line = line.strip()
-            if line.startswith('-e ') or line.startswith('http://') or line.startswith('https://'):
-                extras = ''
-                if '[' in line:
-                    extras = '[' + line.split('[')[1].split(']')[0] + ']'
-                line = line.split('#')[1].split('egg=')[1] + extras
-            elif line == '' or line.startswith('#') or line.startswith('-'):
+            if (
+                line.startswith("-e ")
+                or line.startswith("http://")
+                or line.startswith("https://")
+            ):
+                extras = ""
+                if "[" in line:
+                    extras = "[" + line.split("[")[1].split("]")[0] + "]"
+                line = line.split("#")[1].split("egg=")[1] + extras
+            elif line == "" or line.startswith("#") or line.startswith("-"):
                 continue
-            line = line.split('#')[0].strip()
+            line = line.split("#")[0].strip()
             lines.append(line)
     return sorted(list(set(lines)))
 
 
 setuptools.setup(
-    name='code-coverage-tools',
-    version='0.1.0',
-    description='Support tools for Mozilla code coverage',
-    author='Mozilla Release Management',
-    author_email='release-mgmt-analysis@mozilla.com',
-    install_requires=read_requirements('requirements.txt'),
+    name="code-coverage-tools",
+    version="0.1.0",
+    description="Support tools for Mozilla code coverage",
+    author="Mozilla Release Management",
+    author_email="release-mgmt-analysis@mozilla.com",
+    install_requires=read_requirements("requirements.txt"),
     packages=setuptools.find_packages(),
     include_package_data=True,
     zip_safe=False,
-    license='MPL2',
+    license="MPL2",
 )
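
For reference, a minimal sketch of how the `code_coverage_tools` helpers reformatted above are typically wired together; the secret path, project key and channel used here are hypothetical placeholders, not values taken from this patch:

```python
# Sketch only: illustrates the call order of init_logger and TaskclusterConfig,
# with hypothetical secret and project names.
from code_coverage_tools.log import init_logger
from code_coverage_tools.taskcluster import TaskclusterConfig

# Configure colorized stderr logging through logbook
init_logger("code-coverage", channel="dev")

taskcluster_config = TaskclusterConfig()
# Resolve credentials: cli args, then local config, then env vars, then taskclusterProxy
taskcluster_config.auth()
# Merge the `common` and project-specific sections of the named secret
taskcluster_config.load_secrets(
    "project/relman/code-coverage/dev",  # hypothetical secret name
    "backend",                           # hypothetical project key inside the secret
    required=["GOOGLE_CLOUD_STORAGE"],
)
# Build any Taskcluster service client from the authenticated options
queue = taskcluster_config.get_service("queue")
```

Note the ordering: `auth()` must run before `load_secrets()` or `get_service()`, since both assert that `self.options` is already populated.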