diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2652040eea..017e65ff62 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -41,7 +41,7 @@ repos:
           html,
         ]
   - repo: https://github.com/scop/pre-commit-shfmt
-    rev: v3.10.0-1
+    rev: v3.10.0-2
     hooks:
       - id: shfmt
   - repo: https://github.com/adrienverge/yamllint.git
@@ -74,7 +74,7 @@ repos:
           - ".*/generated/"
         additional_dependencies: ["gibberish-detector"]
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.7.2"
+    rev: "v0.9.1"
     hooks:
       - id: ruff-format
       - id: ruff
diff --git a/ai_chat/factories.py b/ai_chat/factories.py
index af9509947a..55fcd2b6e8 100644
--- a/ai_chat/factories.py
+++ b/ai_chat/factories.py
@@ -11,8 +11,8 @@ class ChatMessageFactory(factory.Factory):

     role = FuzzyChoice(MessageRole.USER, MessageRole.ASSISTANT)
     content = factory.Faker("sentence")
-    id = name = factory.Sequence(lambda n: "%d" % n)
-    index = factory.Sequence(lambda n: "%d" % n)
+    id = name = factory.Sequence(lambda n: str(n))
+    index = factory.Sequence(lambda n: str(n))

     class Meta:
         model = ChatMessage
diff --git a/authentication/middleware.py b/authentication/middleware.py
index a9c75facb1..e2c54c7283 100644
--- a/authentication/middleware.py
+++ b/authentication/middleware.py
@@ -38,7 +38,7 @@ def process_exception(self, request, exception):

         if url:
             url += (
-                "?" in url and "&" or "?"
+                ("?" in url and "&") or "?"
             ) + f"message={quote(message)}&backend={backend_name}"
             return redirect(url)
         return None
diff --git a/channels/serializers_test.py b/channels/serializers_test.py
index 2eb8882537..fd3068f2bd 100644
--- a/channels/serializers_test.py
+++ b/channels/serializers_test.py
@@ -155,7 +155,7 @@ def test_create_channel(base_channel_data, channel_detail, channel_type):
     """
     paths = sorted(
         (p.learning_resource for p in LearningPathFactory.create_batch(2)),
-        key=lambda list: list.id,  # noqa: A002
+        key=lambda lst: lst.id,
         reverse=True,
     )
diff --git a/conftest.py b/conftest.py
index c569bd3fce..c0ce4157ce 100644
--- a/conftest.py
+++ b/conftest.py
@@ -11,7 +11,7 @@


 @pytest.fixture(autouse=True)
-def prevent_requests(mocker, request):  # noqa: PT004
+def prevent_requests(mocker, request):
     """Patch requests to error on request by default"""
     if "mocked_responses" in request.fixturenames:
         return
diff --git a/fixtures/aws.py b/fixtures/aws.py
index a0967415ea..c724a18322 100644
--- a/fixtures/aws.py
+++ b/fixtures/aws.py
@@ -10,13 +10,13 @@


 @pytest.fixture(autouse=True)
-def silence_s3_logging():  # noqa: PT004
+def silence_s3_logging():
     """Only show S3 errors"""
     logging.getLogger("botocore").setLevel(logging.ERROR)


 @pytest.fixture
-def mock_s3_fixture():  # noqa: PT004
+def mock_s3_fixture():
     """Mock the S3 fixture for the duration of the test"""
     with mock_aws():
         yield
diff --git a/fixtures/common.py b/fixtures/common.py
index 5da3065c65..8b86e3d7d6 100644
--- a/fixtures/common.py
+++ b/fixtures/common.py
@@ -22,13 +22,13 @@


 @pytest.fixture(autouse=True)
-def silence_factory_logging():  # noqa: PT004
+def silence_factory_logging():
     """Only show factory errors"""
     logging.getLogger("factory").setLevel(logging.ERROR)


 @pytest.fixture(autouse=True)
-def warnings_as_errors():  # noqa: PT004
+def warnings_as_errors():
     """
     Convert warnings to errors. This should only affect unit tests,
     letting pylint and other plugins raise DeprecationWarnings without erroring.
@@ -53,7 +53,7 @@ def warnings_as_errors():  # noqa: PT004


 @pytest.fixture
-def randomness():  # noqa: PT004
+def randomness():
     """Ensure a fixed seed for factoryboy"""
     factory.fuzzy.reseed_random("happy little clouds")

@@ -95,7 +95,7 @@ def mocked_responses():


 @pytest.fixture
-def offeror_featured_lists():  # noqa: PT004
+def offeror_featured_lists():
     """Generate featured offeror lists for testing"""
     for offered_by in OfferedBy.names():
         offeror = LearningResourceOfferorFactory.create(code=offered_by)
diff --git a/learning_resources/etl/loaders_test.py b/learning_resources/etl/loaders_test.py
index 8c26c297a2..e3a7d0888c 100644
--- a/learning_resources/etl/loaders_test.py
+++ b/learning_resources/etl/loaders_test.py
@@ -374,8 +374,8 @@ def test_load_course(  # noqa: PLR0913,PLR0912,PLR0915
             "start_date": old_run.start_date,
             "end_date": old_run.end_date,
             "prices": [
-                {"amount": Decimal(30.00), "currency": CURRENCY_USD},
-                {"amount": Decimal(120.00), "currency": CURRENCY_USD},
+                {"amount": Decimal("30.00"), "currency": CURRENCY_USD},
+                {"amount": Decimal("120.00"), "currency": CURRENCY_USD},
             ],
         },
         {
@@ -384,8 +384,8 @@ def test_load_course(  # noqa: PLR0913,PLR0912,PLR0915
             "start_date": start_date,
             "end_date": run.end_date,
             "prices": [
-                {"amount": Decimal(0.00), "currency": CURRENCY_USD},
-                {"amount": Decimal(49.00), "currency": CURRENCY_USD},
+                {"amount": Decimal("0.00"), "currency": CURRENCY_USD},
+                {"amount": Decimal("49.00"), "currency": CURRENCY_USD},
             ],
         },
     ]
@@ -403,12 +403,12 @@ def test_load_course(  # noqa: PLR0913,PLR0912,PLR0915
     )
     assert result.next_start_date == expected_next_start_date
     assert result.prices == (
-        [Decimal(0.00), Decimal(49.00)]
+        [Decimal("0.00"), Decimal("49.00")]
         if is_run_published and result.certification
         else []
     )
     assert [price.amount for price in result.resource_prices.all()] == (
-        [Decimal(0.00), Decimal(49.00)]
+        [Decimal("0.00"), Decimal("49.00")]
         if is_run_published and result.certification
         else []
     )
@@ -563,9 +563,9 @@ def test_load_duplicate_course(

     for key, value in props.items():
         assert getattr(result, key) == value, f"Property {key} should equal {value}"
-        assert (
-            getattr(saved_course, key) == value
-        ), f"Property {key} should be updated to {value} in the database"
+        assert getattr(saved_course, key) == value, (
+            f"Property {key} should be updated to {value} in the database"
+        )


 @pytest.mark.parametrize("unique_url", [True, False])
@@ -695,7 +695,7 @@ def test_load_run(run_exists, status, certification):
         if run_exists
         else LearningResourceRunFactory.build()
     )
-    prices = [Decimal(70.00), Decimal(20.00)]
+    prices = [Decimal("70.00"), Decimal("20.00")]
     props = model_to_dict(
         LearningResourceRunFactory.build(
             run_id=learning_resource_run.run_id,
@@ -964,9 +964,9 @@ def test_load_content_file():
     assert loaded_file.run == learning_resource_run

     for key, value in props.items():
-        assert (
-            getattr(loaded_file, key) == value
-        ), f"Property {key} should equal {value}"
+        assert getattr(loaded_file, key) == value, (
+            f"Property {key} should equal {value}"
+        )


 def test_load_image():
diff --git a/learning_resources/etl/mitpe_test.py b/learning_resources/etl/mitpe_test.py
index 5481c62105..2609aeaa30 100644
--- a/learning_resources/etl/mitpe_test.py
+++ b/learning_resources/etl/mitpe_test.py
@@ -55,7 +55,7 @@
             2123, 4, 25, 4, 0, tzinfo=datetime.UTC
         ),
         "published": True,
-        "prices": [{"amount": Decimal(1870), "currency": "USD"}],
+        "prices": [{"amount": Decimal("1870"), "currency": "USD"}],
         "url": "https://professional.mit.edu/course-catalog/comunicacao-persuasiva-pensamento-critico-para-aprimorar-mensagem-portuguese",
         "instructors": [{"full_name": "Edward Schiappa"}, {"full_name": ""}],
         "format": [Format.asynchronous.name],
@@ -102,7 +102,7 @@
             2123, 6, 17, 4, 0, tzinfo=datetime.UTC
         ),
         "published": True,
-        "prices": [{"amount": Decimal(3600), "currency": "USD"}],
+        "prices": [{"amount": Decimal("3600"), "currency": "USD"}],
         "url": "https://professional.mit.edu/course-catalog/design-thinking-and-innovation-technical-leaders",
         "instructors": [
             {"full_name": "Blade Kotelly"},
@@ -154,7 +154,7 @@
             2123, 7, 6, 4, 0, tzinfo=datetime.UTC
         ),
         "published": True,
-        "prices": [{"amount": Decimal(1870), "currency": "USD"}],
+        "prices": [{"amount": Decimal("1870"), "currency": "USD"}],
         "url": "https://professional.mit.edu/course-catalog/manufatura-inteligente-producao-na-industria-40-portuguese",
         "instructors": [{"full_name": ""}, {"full_name": "Brian Anthony"}],
         "format": [Format.asynchronous.name],
diff --git a/learning_resources/etl/mitxonline.py b/learning_resources/etl/mitxonline.py
index b457731da0..4968666bc6 100644
--- a/learning_resources/etl/mitxonline.py
+++ b/learning_resources/etl/mitxonline.py
@@ -225,7 +225,7 @@ def _transform_run(course_run: dict, course: dict) -> dict:
             transform_price(price)
             for price in sorted(
                 {
-                    Decimal(0.00),
+                    Decimal("0.00"),
                     *[
                         Decimal(price)
                         for price in [
diff --git a/learning_resources/etl/openedx.py b/learning_resources/etl/openedx.py
index d4256d2d77..6ddd567b22 100644
--- a/learning_resources/etl/openedx.py
+++ b/learning_resources/etl/openedx.py
@@ -378,9 +378,9 @@ def _transform_course_commitment(course_run) -> CommitmentConfig:
     )
     if min_effort or max_effort:
         return CommitmentConfig(
-            commitment=f"{
-                commit_str_prefix}{max_effort or min_effort
-            } hour{'s' if max_effort > 1 else ''}/week",
+            commitment=f"{commit_str_prefix}{max_effort or min_effort} hour{
+                's' if max_effort > 1 else ''
+            }/week",
             min_weekly_hours=min(min_effort, max_effort),
             max_weekly_hours=max(min_effort, max_effort),
         )
@@ -411,7 +411,7 @@ def _get_course_price(course):
                 if seat["price"] != "0.00"
             ]
         ), run.get("seats", [{}])[0].get("currency", CURRENCY_USD)
-    return Decimal(0.00), CURRENCY_USD
+    return Decimal("0.00"), CURRENCY_USD

     prices_currencies = [
         _get_course_price(course) for course in program.get("courses", [])
@@ -464,7 +464,7 @@ def _transform_course_run(config, course_run, course_last_modified, marketing_ur
             transform_price(price, currency)
             for (price, currency) in sorted(
                 {
-                    (Decimal(0.00), CURRENCY_USD),
+                    (Decimal("0.00"), CURRENCY_USD),
                     *[
                         (Decimal(seat.get("price")), seat.get("currency"))
                         for seat in course_run.get("seats", [])
diff --git a/learning_resources/etl/podcast_test.py b/learning_resources/etl/podcast_test.py
index b48cb1b693..feb6948914 100644
--- a/learning_resources/etl/podcast_test.py
+++ b/learning_resources/etl/podcast_test.py
@@ -53,9 +53,9 @@ def mock_podcast_file(  # pylint: disable=too-many-arguments # noqa: PLR0913
     content = f"""---
 rss_url: {rss_url}
-{ "podcast_title: " + podcast_title if podcast_title else "" }
-{ "topics: " + topics if topics else "" }
-{ "offered_by: " + offered_by if offered_by else "" }
+{"podcast_title: " + podcast_title if podcast_title else ""}
+{"topics: " + topics if topics else ""}
+{"offered_by: " + offered_by if offered_by else ""}
 website: {website_url}
 google_podcasts_url: {google_podcasts_url}
 apple_podcasts_url: {apple_podcasts_url}
@@ -64,7 +64,7 @@ def mock_podcast_file(  # pylint: disable=too-many-arguments # noqa: PLR0913


 @pytest.fixture
-def mock_rss_request(mocker):  # noqa: PT004
+def mock_rss_request(mocker):
     """
     Mock request data
     """
@@ -76,7 +76,7 @@ def mock_rss_request(mocker):  # noqa: PT004


 @pytest.fixture
-def mock_rss_request_with_bad_rss_file(mocker):  # noqa: PT004
+def mock_rss_request_with_bad_rss_file(mocker):
     """
     Mock request data
     """
diff --git a/learning_resources/etl/posthog.py b/learning_resources/etl/posthog.py
index 362ac3abed..1f0f633850 100644
--- a/learning_resources/etl/posthog.py
+++ b/learning_resources/etl/posthog.py
@@ -246,8 +246,7 @@ def load_posthog_lrd_view_event(
         return None
     except ValueError:
         skip_warning = (
-            f"WARNING: skipping event for resource ID {event.resourceId}"
-            " - invalid ID"
+            f"WARNING: skipping event for resource ID {event.resourceId} - invalid ID"
         )
         log.warning(skip_warning)
         return None
diff --git a/learning_resources/etl/prolearn.py b/learning_resources/etl/prolearn.py
index 68ffc70fdf..b943b22eff 100644
--- a/learning_resources/etl/prolearn.py
+++ b/learning_resources/etl/prolearn.py
@@ -266,7 +266,7 @@ def transform_programs(programs: list[dict]) -> list[dict]:
         runs = _transform_runs(program)
         if platform and runs:
             transformed_program = {
-                "readable_id": f'prolearn-{platform}-{program["nid"]}',
+                "readable_id": f"prolearn-{platform}-{program['nid']}",
                 "title": program["title"],
                 "description": clean_data(program["body"]),
                 "offered_by": {"name": offered_by.name} if offered_by else None,
@@ -328,7 +328,7 @@ def _transform_runs(resource: dict) -> list[dict]:
         if start_date and start_date >= now_in_utc():
             runs.append(
                 {
-                    "run_id": f'{resource["nid"]}_{start_value}',
+                    "run_id": f"{resource['nid']}_{start_value}",
                     "title": resource["title"],
                     "image": parse_image(resource),
                     "description": clean_data(resource["body"]),
@@ -361,7 +361,7 @@ def _transform_course(
     runs = _transform_runs(course)
     if len(runs) > 0:
         return {
-            "readable_id": f'prolearn-{platform}-{course["nid"]}',
+            "readable_id": f"prolearn-{platform}-{course['nid']}",
             "offered_by": {"name": offered_by.name} if offered_by else None,
             "platform": platform,
             "etl_source": ETLSource.prolearn.name,
diff --git a/learning_resources/etl/prolearn_test.py b/learning_resources/etl/prolearn_test.py
index 8b4adb6e6b..260c3c639f 100644
--- a/learning_resources/etl/prolearn_test.py
+++ b/learning_resources/etl/prolearn_test.py
@@ -297,8 +297,8 @@ def test_parse_date(date_int, expected_dt):
 @pytest.mark.parametrize(
     ("price_str", "expected_price"),
     [
-        ["$5,342", round(Decimal(5342), 2)],  # noqa: PT007
-        ["5.34", round(Decimal(5.34), 2)],  # noqa: PT007
+        ["$5,342", round(Decimal("5342"), 2)],  # noqa: PT007
+        ["5.34", round(Decimal("5.34"), 2)],  # noqa: PT007
         [None, None],  # noqa: PT007
         ["", None],  # noqa: PT007
     ],
diff --git a/learning_resources/etl/sloan_test.py b/learning_resources/etl/sloan_test.py
index e2d2886a58..03dbe51f88 100644
--- a/learning_resources/etl/sloan_test.py
+++ b/learning_resources/etl/sloan_test.py
@@ -37,7 +37,7 @@


 @pytest.fixture(autouse=True)
-def mock_sloan_api_setting(settings):  # noqa: PT004
+def mock_sloan_api_setting(settings):
     """Set the prolearn api url"""
     settings.SEE_API_URL = "http://localhost/test/programs/api"
     settings.SEE_API_CLIENT_ID = "test"
diff --git a/learning_resources/etl/utils.py b/learning_resources/etl/utils.py
index e0aed9d992..504838ab12 100644
--- a/learning_resources/etl/utils.py
+++ b/learning_resources/etl/utils.py
@@ -367,7 +367,7 @@ def text_from_srt_content(content: str):

 def text_from_sjson_content(content: str):
""" - return text from sjson content + Return text from sjson content Args: content (str): The sjson content diff --git a/learning_resources/etl/youtube_test.py b/learning_resources/etl/youtube_test.py index 7f3b1ccefd..16f0724db3 100644 --- a/learning_resources/etl/youtube_test.py +++ b/learning_resources/etl/youtube_test.py @@ -111,7 +111,7 @@ def mock_channel_file(content): @pytest.fixture -def mocked_github_channel_response(mocker): # noqa: PT004 +def mocked_github_channel_response(mocker): """Mock response from github api requst to open-video-data""" mock_file = mock_channel_file( diff --git a/learning_resources/factories.py b/learning_resources/factories.py index e3d7c9ca47..9aaed9a1be 100644 --- a/learning_resources/factories.py +++ b/learning_resources/factories.py @@ -64,7 +64,7 @@ def _post_gen_tags(obj, create, extracted, **kwargs): # noqa: ARG001 class LearningResourceContentTagFactory(DjangoModelFactory): """Factory for LearningResourceContentTag objects""" - name = factory.Sequence(lambda n: "Tag %03d" % n) + name = factory.Sequence(lambda n: f"Tag {n:03d}") class Meta: model = models.LearningResourceContentTag @@ -86,7 +86,7 @@ class Meta: class LearningResourceTopicFactory(DjangoModelFactory): """Factory for learning resource topics""" - name = factory.Sequence(lambda n: "Topic %03d" % n) + name = factory.Sequence(lambda n: f"Topic {n:03d}") class Meta: model = models.LearningResourceTopic @@ -137,7 +137,7 @@ class Meta: class LearningResourceSchoolFactory(DjangoModelFactory): """Factory for LearningResourceDepartment""" - name = factory.Sequence(lambda n: "%03d name" % n) + name = factory.Sequence(lambda n: f"{n:03d} name") url = factory.Faker("url") class Meta: @@ -147,8 +147,8 @@ class Meta: class LearningResourceDepartmentFactory(DjangoModelFactory): """Factory for LearningResourceDepartment""" - department_id = factory.Sequence(lambda n: "%03d" % n) - name = factory.Sequence(lambda n: "%03d name" % n) + department_id = factory.Sequence(lambda n: f"{n:03d}") + name = factory.Sequence(lambda n: f"{n:03d} name") school = factory.SubFactory( "learning_resources.factories.LearningResourceSchoolFactory" ) @@ -198,7 +198,7 @@ class LearningResourceFactory(DjangoModelFactory): choices=constants.LearningResourceType.names() ) readable_id = factory.Sequence( - lambda n: "RESOURCEN%03d_%03d.MIT" % (n, random.randint(1, 1000)) # noqa: S311 + lambda n: f"RESOURCEN{n:03d}_{random.randint(1, 1000)}.MIT" # noqa: S311 ) etl_source = "mock" title = factory.Faker("word") @@ -405,7 +405,7 @@ class CourseFactory(DjangoModelFactory): course_numbers = factory.List( [ { - "value": f"{random.randint(1,20)}.0001", # noqa: S311 + "value": f"{random.randint(1, 20)}.0001", # noqa: S311 "department": None, "listing_type": CourseNumberType.primary.name, "primary": True, @@ -493,7 +493,7 @@ class LearningResourceRunFactory(DjangoModelFactory): ] ), ) - run_id = factory.Sequence(lambda n: "RUN%03d.MIT_run" % n) + run_id = factory.Sequence(lambda n: f"RUN{n:03d}.MIT_run") title = factory.Faker("word") description = factory.Faker("sentence") full_description = factory.Faker("text") @@ -839,7 +839,7 @@ class VideoFactory(DjangoModelFactory): is_video=True, create_video=False, ) - duration = factory.Sequence(lambda n: "PT%02dM%02dS" % (n, n)) + duration = factory.Sequence(lambda n: f"PT{n:02d}M{n:02d}S") class Meta: model = models.Video @@ -852,7 +852,7 @@ class Params: class VideoChannelFactory(DjangoModelFactory): """Factory for VideoChannels""" - channel_id = factory.Sequence(lambda n: 
"VIDEO-CHANNEL-%03d.MIT" % n) + channel_id = factory.Sequence(lambda n: f"VIDEO-CHANNEL-{n:03d}.MIT") title = factory.Faker("word") class Params: diff --git a/learning_resources/filters.py b/learning_resources/filters.py index bd23cd4929..38b65be0d5 100644 --- a/learning_resources/filters.py +++ b/learning_resources/filters.py @@ -132,7 +132,7 @@ def filter_free(self, queryset, _, value): free_filter = ( Q(runs__isnull=True) | Q(runs__resource_prices__isnull=True) - | Q(runs__resource_prices__amount=Decimal(0.00)) + | Q(runs__resource_prices__amount=Decimal("0.00")) ) & Q(professional=False) if value: # Free resources diff --git a/learning_resources/filters_test.py b/learning_resources/filters_test.py index 3f5aa10f3c..8564612a26 100644 --- a/learning_resources/filters_test.py +++ b/learning_resources/filters_test.py @@ -237,15 +237,15 @@ def test_learning_resource_filter_free(client): ) LearningResourceRunFactory.create( learning_resource=free_course - ).resource_prices.set([LearningResourcePriceFactory.create(amount=Decimal(0.00))]) + ).resource_prices.set([LearningResourcePriceFactory.create(amount=Decimal("0.00"))]) paid_course = LearningResourceFactory.create(is_course=True, runs=[]) LearningResourceRunFactory.create( learning_resource=paid_course ).resource_prices.set( [ - LearningResourcePriceFactory.create(amount=Decimal(50.00)), - LearningResourcePriceFactory.create(amount=Decimal(100.00)), + LearningResourcePriceFactory.create(amount=Decimal("50.00")), + LearningResourcePriceFactory.create(amount=Decimal("100.00")), ] ) @@ -256,8 +256,8 @@ def test_learning_resource_filter_free(client): learning_resource=free2pay_course ).resource_prices.set( [ - LearningResourcePriceFactory.create(amount=Decimal(0.00)), - LearningResourcePriceFactory.create(amount=Decimal(100.00)), + LearningResourcePriceFactory.create(amount=Decimal("0.00")), + LearningResourcePriceFactory.create(amount=Decimal("100.00")), ] ) @@ -312,7 +312,7 @@ def test_learning_resource_filter_readable_id(client): courses = CourseFactory.create_batch(5) resource = courses[0].learning_resource results = client.get( - f"{RESOURCE_API_URL}?{urlencode({'readable_id':resource.readable_id})}" + f"{RESOURCE_API_URL}?{urlencode({'readable_id': resource.readable_id})}" ).json()["results"] assert len(results) == 1 assert results[0]["readable_id"] == resource.readable_id @@ -323,7 +323,7 @@ def test_course_filter_readable_id(client): courses = CourseFactory.create_batch(5) resource = courses[0].learning_resource results = client.get( - f"{COURSE_API_URL}?{urlencode({'readable_id':resource.readable_id})}" + f"{COURSE_API_URL}?{urlencode({'readable_id': resource.readable_id})}" ).json()["results"] assert len(results) == 1 assert results[0]["readable_id"] == resource.readable_id @@ -334,7 +334,7 @@ def test_podcast_filter_readable_id(client): podcasts = PodcastFactory.create_batch(5) resource = podcasts[0].learning_resource results = client.get( - f"{PODCAST_API_URL}?{urlencode({'readable_id':resource.readable_id})}" + f"{PODCAST_API_URL}?{urlencode({'readable_id': resource.readable_id})}" ).json()["results"] assert len(results) == 1 assert results[0]["readable_id"] == resource.readable_id @@ -345,7 +345,7 @@ def test_podcast_episode_filter_readable_id(client): podcast_episodes = PodcastEpisodeFactory.create_batch(5) resource = podcast_episodes[0].learning_resource results = client.get( - f"{PODCAST_EPISODE_API_URL}?{urlencode({'readable_id':resource.readable_id})}" + f"{PODCAST_EPISODE_API_URL}?{urlencode({'readable_id': 
resource.readable_id})}" ).json()["results"] assert len(results) == 1 assert results[0]["readable_id"] == resource.readable_id @@ -356,7 +356,7 @@ def test_video_filter_readable_id(client): videos = VideoFactory.create_batch(5) resource = videos[0].learning_resource results = client.get( - f"{VIDEOS_API_URL}?{urlencode({'readable_id':resource.readable_id})}" + f"{VIDEOS_API_URL}?{urlencode({'readable_id': resource.readable_id})}" ).json()["results"] assert len(results) == 1 assert results[0]["readable_id"] == resource.readable_id @@ -367,7 +367,7 @@ def test_video_playlist_filter_readable_id(client): channels = VideoPlaylistFactory.create_batch(5) resource = channels[0].learning_resource results = client.get( - f"{VIDEO_PLAYLISTS_API_URL}?{urlencode({'readable_id':resource.readable_id})}" + f"{VIDEO_PLAYLISTS_API_URL}?{urlencode({'readable_id': resource.readable_id})}" ).json()["results"] assert len(results) == 1 assert results[0]["readable_id"] == resource.readable_id diff --git a/learning_resources/migrations/0040_add_lrd_view_collection_table.py b/learning_resources/migrations/0040_add_lrd_view_collection_table.py index 2db5b440ed..66f750e801 100644 --- a/learning_resources/migrations/0040_add_lrd_view_collection_table.py +++ b/learning_resources/migrations/0040_add_lrd_view_collection_table.py @@ -29,8 +29,7 @@ class Migration(migrations.Migration): models.DateTimeField( editable=False, help_text=( - "The date of the lrd_view event, as " - "collected by PostHog." + "The date of the lrd_view event, as collected by PostHog." ), ), ), diff --git a/learning_resources/permissions_test.py b/learning_resources/permissions_test.py index d094e51fa8..684a4eb506 100644 --- a/learning_resources/permissions_test.py +++ b/learning_resources/permissions_test.py @@ -24,7 +24,7 @@ @pytest.fixture(autouse=True) -def drf_settings(settings): # noqa: PT004 +def drf_settings(settings): """Default drf prefix setting""" # noqa: D401 settings.DRF_NESTED_PARENT_LOOKUP_PREFIX = "" diff --git a/learning_resources/serializers.py b/learning_resources/serializers.py index cfc241e360..01f3a23786 100644 --- a/learning_resources/serializers.py +++ b/learning_resources/serializers.py @@ -488,7 +488,7 @@ def get_free(self, instance) -> bool: ]: prices = [price.amount for price in instance.resource_prices.all()] return not instance.professional and ( - Decimal(0.00) in prices or not prices or prices == [] + Decimal("0.00") in prices or not prices or prices == [] ) else: return True diff --git a/learning_resources/serializers_test.py b/learning_resources/serializers_test.py index bd4622d91e..442cbad8ef 100644 --- a/learning_resources/serializers_test.py +++ b/learning_resources/serializers_test.py @@ -236,7 +236,7 @@ def test_learning_resource_serializer( # noqa: PLR0913 and ( not resource.resource_prices.all() or all( - price.amount == Decimal(0.00) + price.amount == Decimal("0.00") for price in resource.resource_prices.all() ) ) diff --git a/learning_resources/views_learningpath_test.py b/learning_resources/views_learningpath_test.py index a7b1d032f5..f7336d2f74 100644 --- a/learning_resources/views_learningpath_test.py +++ b/learning_resources/views_learningpath_test.py @@ -428,7 +428,7 @@ def test_set_learning_path_relationships(client, staff_user): ) client.force_login(staff_user) resp = client.patch( - f"{url}?{"".join([f"learning_path_id={learning_path.learning_resource.id}&" for learning_path in learning_paths])}" + f"{url}?{''.join([f'learning_path_id={learning_path.learning_resource.id}&' for learning_path in 
learning_paths])}" ) assert resp.status_code == 200 for learning_path in learning_paths: @@ -465,7 +465,7 @@ def test_adding_to_learning_path_not_effect_existing_membership(client, staff_us client.force_login(staff_user) lps = [existing_parent, new_additional_parent] resp = client.patch( - f"{url}?{"".join([f"learning_path_id={lp.learning_resource.id}&" for lp in lps])}" + f"{url}?{''.join([f'learning_path_id={lp.learning_resource.id}&' for lp in lps])}" ) assert resp.status_code == 200 diff --git a/learning_resources/views_userlist_test.py b/learning_resources/views_userlist_test.py index 773efd4d81..4102745f1b 100644 --- a/learning_resources/views_userlist_test.py +++ b/learning_resources/views_userlist_test.py @@ -323,7 +323,7 @@ def test_set_userlist_relationships(client, user): ) client.force_login(user) resp = client.patch( - f"{url}?{"".join([f"userlist_id={userlist.id}&" for userlist in userlists])}" + f"{url}?{''.join([f'userlist_id={userlist.id}&' for userlist in userlists])}" ) assert resp.status_code == 200 for userlist in userlists: @@ -342,7 +342,7 @@ def test_set_userlist_relationships_unauthorized(client, user): client.force_login(user) with pytest.raises(PermissionError): client.patch( - f"{url}?{"".join([f"userlist_id={userlist.id}&" for userlist in userlists])}" + f"{url}?{''.join([f'userlist_id={userlist.id}&' for userlist in userlists])}" ) for userlist in userlists: assert not userlist.resources.filter(id=course.learning_resource.id).exists() @@ -399,7 +399,7 @@ def test_adding_to_userlist_not_effect_existing_membership(client, user): client.force_login(user) lists = [existing_parent, new_additional_parent] resp = client.patch( - f"{url}?{"".join([f"userlist_id={userlist.id}&" for userlist in lists])}" + f"{url}?{''.join([f'userlist_id={userlist.id}&' for userlist in lists])}" ) assert resp.status_code == 200 diff --git a/learning_resources_search/indexing_api_test.py b/learning_resources_search/indexing_api_test.py index cfae97d13e..3931afd6ec 100644 --- a/learning_resources_search/indexing_api_test.py +++ b/learning_resources_search/indexing_api_test.py @@ -222,7 +222,7 @@ def test_index_learning_resources( index_types, ): """ - index functions should call bulk with correct arguments + Index functions should call bulk with correct arguments """ settings.OPENSEARCH_INDEXING_CHUNK_SIZE = 3 documents = ["doc1", "doc2", "doc3", "doc4", "doc5"] @@ -602,7 +602,7 @@ def test_bulk_index_content_files( # noqa: PLR0913 document_indexing_chunk_size, ): """ - index functions for content files should call bulk with correct arguments + Index functions for content files should call bulk with correct arguments """ settings.OPENSEARCH_INDEXING_CHUNK_SIZE = indexing_chunk_size settings.OPENSEARCH_DOCUMENT_INDEXING_CHUNK_SIZE = document_indexing_chunk_size @@ -685,7 +685,7 @@ def test_index_content_files( # noqa: PLR0913 doc, ): """ - index functions for content files should call bulk with correct arguments + Index functions for content files should call bulk with correct arguments """ settings.OPENSEARCH_INDEXING_CHUNK_SIZE = 6 course = CourseFactory.create() diff --git a/learning_resources_search/serializers.py b/learning_resources_search/serializers.py index a0903b0891..d993f87a0b 100644 --- a/learning_resources_search/serializers.py +++ b/learning_resources_search/serializers.py @@ -387,8 +387,7 @@ class LearningResourcesSearchRequestSerializer(SearchRequestSerializer): course_feature = serializers.ListField( required=False, child=serializers.CharField(), - help_text="The course 
feature. " - "Possible options are at api/v1/course_features/", + help_text="The course feature. Possible options are at api/v1/course_features/", ) aggregations = serializers.ListField( required=False, diff --git a/learning_resources_search/serializers_test.py b/learning_resources_search/serializers_test.py index de0430547c..8460d699dd 100644 --- a/learning_resources_search/serializers_test.py +++ b/learning_resources_search/serializers_test.py @@ -101,8 +101,7 @@ "image": None, "run_id": "course-v1:xPRO+MCPO+R1", "title": ( - "Project Management: Leading Organizations to" - " Success" + "Project Management: Leading Organizations to Success" ), "description": None, "full_description": None, diff --git a/main/middleware/feature_flags_test.py b/main/middleware/feature_flags_test.py index 11b6afca62..e6817d396f 100644 --- a/main/middleware/feature_flags_test.py +++ b/main/middleware/feature_flags_test.py @@ -14,7 +14,7 @@ @pytest.fixture -def middleware_settings(settings): # noqa: PT004 +def middleware_settings(settings): """Default settings for middleware""" # noqa: D401 settings.MIDDLEWARE_FEATURE_FLAG_QS_PREFIX = "ZZ" settings.MIDDLEWARE_FEATURE_FLAG_COOKIE_NAME = FEATURE_FLAG_COOKIE_NAME diff --git a/main/utils_test.py b/main/utils_test.py index 0a338bcecc..ff2e4a5319 100644 --- a/main/utils_test.py +++ b/main/utils_test.py @@ -60,7 +60,7 @@ def test_normalize_to_start_of_day(): def test_chunks(): """ - test for chunks + Test for chunks """ input_list = list(range(113)) output_list = [] @@ -81,7 +81,7 @@ def test_chunks(): def test_chunks_iterable(): """ - test that chunks works on non-list iterables too + Test that chunks works on non-list iterables too """ count = 113 input_range = range(count) diff --git a/news_events/etl/sloan_exec_news.py b/news_events/etl/sloan_exec_news.py index 6b3e2d3fe0..fec1ce149a 100644 --- a/news_events/etl/sloan_exec_news.py +++ b/news_events/etl/sloan_exec_news.py @@ -110,7 +110,7 @@ def transform_item(item_data: dict) -> dict: ), "url": urljoin( SLOAN_EXEC_ARTICLE_PREFIX_URL, - f'{item_data.get("contentUrlName")}-{item_data.get("managedContentId")}', + f"{item_data.get('contentUrlName')}-{item_data.get('managedContentId')}", ), "image": { "url": urljoin( diff --git a/news_events/etl/utils.py b/news_events/etl/utils.py index ba54fe9114..c1ae5c5871 100644 --- a/news_events/etl/utils.py +++ b/news_events/etl/utils.py @@ -260,8 +260,7 @@ def parse_date_time_range( f"{start_time.ampm} {start_time.tz}" ) or dateparser.parse(f"{start_date_str} {default_time_str}") end_date = dateparser.parse( - f"{end_date_str} {end_time.hour}{end_time.minute} " - f"{end_time.ampm} {end_time.tz}" + f"{end_date_str} {end_time.hour}{end_time.minute} {end_time.ampm} {end_time.tz}" ) or dateparser.parse(f"{end_date_str} {default_time_str}") if end_date and start_date and end_date < start_date: diff --git a/news_events/factories.py b/news_events/factories.py index 6b70e39051..688a434c98 100644 --- a/news_events/factories.py +++ b/news_events/factories.py @@ -53,7 +53,7 @@ class FeedItemFactory(factory.django.DjangoModelFactory): """Factory for feed items""" source = factory.SubFactory(FeedSourceFactory) - guid = factory.Sequence(lambda n: "http://feed.mit.edu/%03d/rss" % n) + guid = factory.Sequence(lambda n: f"http://feed.mit.edu/{n:03d}/rss") title = factory.Faker("word") url = factory.Faker("url") summary = factory.Faker("paragraph") diff --git a/vector_search/serializers.py b/vector_search/serializers.py index 75cc62ec63..22b416f143 100644 --- a/vector_search/serializers.py +++ 
b/vector_search/serializers.py @@ -123,8 +123,7 @@ class LearningResourcesVectorSearchRequestSerializer(serializers.Serializer): course_feature = serializers.ListField( required=False, child=serializers.CharField(), - help_text="The course feature. " - "Possible options are at api/v1/course_features/", + help_text="The course feature. Possible options are at api/v1/course_features/", ) delivery_choices = LearningResourceDelivery.as_list() @@ -227,8 +226,7 @@ class ContentFileVectorSearchRequestSerializer(serializers.Serializer): required=False, child=serializers.CharField(), help_text=( - "The readable_id value of the parent learning " - "resource for the content file" + "The readable_id value of the parent learning resource for the content file" ), ) diff --git a/vector_search/utils.py b/vector_search/utils.py index bc0b310e48..cb87ffede2 100644 --- a/vector_search/utils.py +++ b/vector_search/utils.py @@ -168,8 +168,7 @@ def _process_resource_embeddings(serialized_resources): metadata.append(doc) ids.append(vector_point_id(vector_point_key)) docs.append( - f'{doc.get("title")} {doc.get("description")} ' - f'{doc.get("full_description")}' + f"{doc.get('title')} {doc.get('description')} {doc.get('full_description')}" ) embeddings = encoder.encode_batch(docs) return points_generator(ids, metadata, embeddings, vector_name) @@ -239,7 +238,7 @@ def _process_content_embeddings(serialized_content): split_ids = [ vector_point_id( - f'{doc['resource_readable_id']}.{doc['run_readable_id']}.{doc['key']}.{md["chunk_number"]}' + f"{doc['resource_readable_id']}.{doc['run_readable_id']}.{doc['key']}.{md['chunk_number']}" ) for md in split_metadatas ]