diff --git a/docs/examples/api-reference/client.py b/docs/examples/api-reference/client.py
index 73ed77c91..554e5f260 100644
--- a/docs/examples/api-reference/client.py
+++ b/docs/examples/api-reference/client.py
@@ -1,9 +1,9 @@
 import unittest
 from datetime import datetime
+from typing import Optional
 
 import pandas as pd
 import requests
-from typing import Optional
 
 from fennel.datasets import dataset, field
 from fennel.featuresets import feature, featureset, extractor
@@ -102,12 +102,10 @@ def test_dag_resolution(self, client):
         assert response.status_code == requests.codes.OK, response.json()
         # /docsnip
 
-        # docsnip extract_features_api
-        feature_df = client.extract_features(
-            output_feature_list=[
-                UserFeatures,
-            ],
-            input_feature_list=[UserFeatures.userid],
+        # docsnip extract_api
+        feature_df = client.extract(
+            outputs=[UserFeatures],
+            inputs=[UserFeatures.userid],
             input_dataframe=pd.DataFrame(
                 {"UserFeatures.userid": [18232, 18234]}
             ),
@@ -115,12 +113,10 @@ def test_dag_resolution(self, client):
         self.assertEqual(feature_df.shape, (2, 7))
         # /docsnip
 
-        # docsnip extract_historical_features_api
-        response = client.extract_historical_features(
-            output_feature_list=[
-                UserFeatures,
-            ],
-            input_feature_list=[UserFeatures.userid],
+        # docsnip extract_historical_api
+        response = client.extract_historical(
+            outputs=[UserFeatures],
+            inputs=[UserFeatures.userid],
             format="pandas",
             input_dataframe=pd.DataFrame(
                 {"UserFeatures.userid": [18232, 18234], "timestamp": [now, now]}
             ),
@@ -130,7 +126,7 @@ def test_dag_resolution(self, client):
         # /docsnip
 
         with self.assertRaises(NotImplementedError) as e:
-            # docsnip extract_historical_features_s3
+            # docsnip extract_historical_s3
            from fennel.sources import S3
 
             s3 = S3(
@@ -143,11 +139,9 @@ def test_dag_resolution(self, client):
             )
             s3_output_connection = s3.bucket("bucket", prefix="output")
 
-            response = client.extract_historical_features(
-                output_feature_list=[
-                    UserFeatures,
-                ],
-                input_feature_list=[UserFeatures.userid],
+            response = client.extract_historical(
+                outputs=[UserFeatures],
+                inputs=[UserFeatures.userid],
                 format="csv",
                 timestamp_column="timestamp",
                 input_s3=s3_input_connection,
diff --git a/docs/examples/api-reference/rest-api.py b/docs/examples/api-reference/rest-api.py
index 2c9474fdb..953a75339 100644
--- a/docs/examples/api-reference/rest-api.py
+++ b/docs/examples/api-reference/rest-api.py
@@ -45,8 +45,8 @@ def test_log(self, mock_post):
         assert response.status_code == requests.codes.OK, response.json()
         # /docsnip
 
-        # docsnip rest_extract_features_api
-        url = "{}/api/v1/extract_features".format(SERVER)
+        # docsnip rest_extract_api
+        url = "{}/api/v1/extract".format(SERVER)
         headers = {"Content-Type": "application/json"}
         data = [
             {"UserFeatures.userid": 1},
@@ -54,8 +54,8 @@
             {"UserFeatures.userid": 3},
         ]
         req = {
-            "output_features": ["UserFeatures"],
-            "input_features": ["UserFeatures.userid"],
+            "outputs": ["UserFeatures"],
+            "inputs": ["UserFeatures.userid"],
             "data": data,
             "log": True,
             "workflow": "test",
@@ -65,13 +65,13 @@ def test_log(self, mock_post):
         assert response.status_code == requests.codes.OK, response.json()
         # /docsnip
 
-        # docsnip rest_extract_features_api_columnar
-        url = "{}/api/v1/extract_features".format(SERVER)
+        # docsnip rest_extract_api_columnar
+        url = "{}/api/v1/extract".format(SERVER)
         headers = {"Content-Type": "application/json"}
         data = {"UserFeatures.userid": [1, 2, 3]}
         req = {
-            "output_features": ["UserFeatures"],
-            "input_features": ["UserFeatures.userid"],
+            "outputs": ["UserFeatures"],
+            "inputs": ["UserFeatures.userid"],
             "data": data,
             "log": True,
             "workflow": "test",
diff --git a/docs/examples/datasets/lookups.py b/docs/examples/datasets/lookups.py
index db204775f..8afa6a363 100644
--- a/docs/examples/datasets/lookups.py
+++ b/docs/examples/datasets/lookups.py
@@ -61,9 +61,9 @@ def test_user_dataset_lookup(client):
     res = client.log("fennel_webhook", "User", df)
     assert res.status_code == 200, res.json()
 
-    feature_df = client.extract_features(
-        output_feature_list=[UserFeature.in_home_city],
-        input_feature_list=[UserFeature.uid],
+    feature_df = client.extract(
+        outputs=[UserFeature.in_home_city],
+        inputs=[UserFeature.uid],
         input_dataframe=pd.DataFrame({"UserFeature.uid": [1, 2, 3]}),
     )
     assert feature_df["UserFeature.in_home_city"].tolist() == [
diff --git a/docs/examples/datasets/operators.py b/docs/examples/datasets/operators.py
index bd3fd572c..7d40181e7 100644
--- a/docs/examples/datasets/operators.py
+++ b/docs/examples/datasets/operators.py
@@ -237,9 +237,9 @@ def test_aggregate(client):
     three_days_ago = dt - timedelta(days=3)
     ts_series = pd.Series([dt, yes, dt, three_days_ago, yes])
     uids = pd.Series([1, 1, 2, 2, 2])
-    df = client.extract_historical_features(
-        input_feature_list=[UserAdStatsFeatures.uid],
-        output_feature_list=[UserAdStatsFeatures],
+    df = client.extract_historical(
+        inputs=[UserAdStatsFeatures.uid],
+        outputs=[UserAdStatsFeatures],
         input_dataframe=pd.DataFrame(
             {"UserAdStatsFeatures.uid": uids, "timestamps": ts_series}
         ),
diff --git a/docs/examples/examples/ecommerce.py b/docs/examples/examples/ecommerce.py
index 2c8a15876..1568fcc1f 100644
--- a/docs/examples/examples/ecommerce.py
+++ b/docs/examples/examples/ecommerce.py
@@ -113,12 +113,12 @@ def test_feature(self, client):
         response = client.log("fennel_webhook", "Order", df)
         assert response.status_code == requests.codes.OK, response.json()
 
-        feature_df = client.extract_features(
-            output_feature_list=[
+        feature_df = client.extract(
+            outputs=[
                 "UserSeller.num_orders_1d",
                 "UserSeller.num_orders_1w",
             ],
-            input_feature_list=[
+            inputs=[
                 "UserSeller.uid",
                 "UserSeller.seller_id",
             ],
diff --git a/docs/examples/featuresets/e2e_extraction.py b/docs/examples/featuresets/e2e_extraction.py
index 22473f67c..994d02f8e 100644
--- a/docs/examples/featuresets/e2e_extraction.py
+++ b/docs/examples/featuresets/e2e_extraction.py
@@ -51,14 +51,14 @@ class Request:
 def test_e2e_extraction(client):
     client.sync(featuresets=[User, UserPost, Request])
     # docsnip e2e_extraction
-    feature_df = client.extract_features(
-        output_feature_list=[
+    feature_df = client.extract(
+        outputs=[
             "User.age",
             "UserPost.score",
             "UserPost.affinity"
             # there are 10 features in this list
         ],
-        input_feature_list=[
+        inputs=[
             "User.id",
             "UserPost.uid",
             "UserPost.pid",
diff --git a/docs/examples/featuresets/overview.py b/docs/examples/featuresets/overview.py
index 142c6846f..e811a6396 100644
--- a/docs/examples/featuresets/overview.py
+++ b/docs/examples/featuresets/overview.py
@@ -220,9 +220,9 @@ def test_multiple_features_extracted(client):
     res = client.log("fennel_webhook", "UserInfo", df)
     assert res.status_code == 200
 
-    df = client.extract_features(
-        output_feature_list=[UserLocationFeatures],
-        input_feature_list=[UserLocationFeatures.uid],
+    df = client.extract(
+        outputs=[UserLocationFeatures],
+        inputs=[UserLocationFeatures.uid],
         input_dataframe=pd.DataFrame(
             {"UserLocationFeatures.uid": [1, 2, 3]},
         ),
@@ -292,9 +292,9 @@ def test_extractors_across_featuresets(client):
     res = client.log("fennel_webhook", "UserInfo", df)
     assert res.status_code == 200
 
-    df = client.extract_features(
-        output_feature_list=[UserLocationFeaturesRefactored],
-        input_feature_list=[Request.uid],
+    df = client.extract(
+        outputs=[UserLocationFeaturesRefactored],
+        inputs=[Request.uid],
         input_dataframe=pd.DataFrame(
             {"Request.uid": [1, 2, 3]},
         ),
diff --git a/docs/examples/featuresets/reading_datasets.py b/docs/examples/featuresets/reading_datasets.py
index 9afc86ce3..b0b9066be 100644
--- a/docs/examples/featuresets/reading_datasets.py
+++ b/docs/examples/featuresets/reading_datasets.py
@@ -92,9 +92,9 @@ def test_lookup_in_extractor(client):
     res = client.log("fennel_webhook", "User", data)
     assert res.status_code == 200, res.json()
 
-    feature_df = client.extract_features(
-        output_feature_list=[UserFeatures.name],
-        input_feature_list=[UserFeatures.uid],
+    feature_df = client.extract(
+        outputs=[UserFeatures.name],
+        inputs=[UserFeatures.uid],
         input_dataframe=pd.DataFrame(
             {
                 "UserFeatures.uid": [1, 2, 3, 4],
@@ -105,9 +105,9 @@ def test_lookup_in_extractor(client):
     expected = ["Alice", "Bob", "Charlie", "Unknown"]
     assert feature_df["UserFeatures.name"].tolist() == expected
 
-    feature_df = client.extract_features(
-        output_feature_list=[UserFeaturesDerived.name],
-        input_feature_list=[Request.user_id],
+    feature_df = client.extract(
+        outputs=[UserFeaturesDerived.name],
+        inputs=[Request.user_id],
         input_dataframe=pd.DataFrame(
             {
                 "Request.user_id": [1, 2, 3, 4],
@@ -116,9 +116,9 @@ def test_lookup_in_extractor(client):
     )
     assert feature_df["UserFeaturesDerived.name"].tolist() == expected
 
-    feature_df = client.extract_features(
-        output_feature_list=[UserFeaturesDerived2.name],
-        input_feature_list=[Request2.uid],
+    feature_df = client.extract(
+        outputs=[UserFeaturesDerived2.name],
+        inputs=[Request2.uid],
         input_dataframe=pd.DataFrame(
             {
                 "Request2.uid": [1, 2, 3, 4],
diff --git a/docs/examples/getting-started/quickstart.py b/docs/examples/getting-started/quickstart.py
index 05a5491c2..12973f65e 100644
--- a/docs/examples/getting-started/quickstart.py
+++ b/docs/examples/getting-started/quickstart.py
@@ -1,9 +1,9 @@
 # docsnip imports
 from datetime import datetime, timedelta
+from typing import Optional
 
 import pandas as pd
 import requests
-from typing import Optional
 
 from fennel.datasets import dataset, pipeline, field, Dataset
 from fennel.featuresets import feature, featureset, extractor
@@ -145,12 +145,12 @@ def myextractor(cls, ts: pd.Series, uids: pd.Series, sellers: pd.Series):
 # /docsnip
 
 # docsnip query
-feature_df = client.extract_features(
-    output_feature_list=[
+feature_df = client.extract(
+    outputs=[
         UserSellerFeatures.num_orders_1d,
         UserSellerFeatures.num_orders_1w,
     ],
-    input_feature_list=[
+    inputs=[
         UserSellerFeatures.uid,
         UserSellerFeatures.seller_id,
     ],
@@ -170,12 +170,12 @@ def myextractor(cls, ts: pd.Series, uids: pd.Series, sellers: pd.Series):
 # /docsnip
 
 # docsnip historical
-feature_df = client.extract_historical_features(
-    output_feature_list=[
+feature_df = client.extract_historical(
+    outputs=[
         UserSellerFeatures.num_orders_1d,
         UserSellerFeatures.num_orders_1w,
     ],
-    input_feature_list=[
+    inputs=[
         UserSellerFeatures.uid,
         UserSellerFeatures.seller_id,
     ],
diff --git a/docs/examples/testing-and-ci-cd/ci_cd/test.py b/docs/examples/testing-and-ci-cd/ci_cd/test.py
index 7f57ce333..88eb6e9f5 100644
--- a/docs/examples/testing-and-ci-cd/ci_cd/test.py
+++ b/docs/examples/testing-and-ci-cd/ci_cd/test.py
@@ -1,9 +1,11 @@
 from datetime import datetime
+
 import pandas as pd
-from fennel.test_lib import mock
 
 from ci_cd.datasets import Ticket
 from ci_cd.featuresets import TicketFeatures
+from fennel.test_lib import mock
+
 
 @mock
 def test_featureset_metaflags(client):
@@ -17,9 +19,9 @@ def test_featureset_metaflags(client):
         columns=["ticket_id", "price", "at"],
     )
     client.log("example", "ticket_sale", df)
-    feature_df = client.extract_features(
-        input_feature_list=[TicketFeatures.ticket_id],
-        output_feature_list=[TicketFeatures.price, TicketFeatures.ticket_id],
+    feature_df = client.extract(
+        inputs=[TicketFeatures.ticket_id],
+        outputs=[TicketFeatures.price, TicketFeatures.ticket_id],
         input_dataframe=pd.DataFrame(
             data={"TicketFeatures.ticket_id": ["123", "456"]}
         ),
diff --git a/docs/examples/testing-and-ci-cd/unit_tests.py b/docs/examples/testing-and-ci-cd/unit_tests.py
index 57e1119df..2b6f7ebd9 100644
--- a/docs/examples/testing-and-ci-cd/unit_tests.py
+++ b/docs/examples/testing-and-ci-cd/unit_tests.py
@@ -1,8 +1,8 @@
 from datetime import datetime, timedelta
+from typing import Optional
 
 import pandas as pd
 import requests
-from typing import Optional
 
 # docsnip datasets
 from fennel.datasets import dataset, field, pipeline, Dataset
@@ -234,11 +234,9 @@ def test_dag_resolution(self, client):
         response = client.log("fennel_webhook", "UserInfoDataset", df)
         assert response.status_code == requests.codes.OK, response.json()
 
-        feature_df = client.extract_features(
-            output_feature_list=[
-                UserInfoMultipleExtractor,
-            ],
-            input_feature_list=[UserInfoMultipleExtractor.userid],
+        feature_df = client.extract(
+            outputs=[UserInfoMultipleExtractor],
+            inputs=[UserInfoMultipleExtractor.userid],
             input_dataframe=pd.DataFrame(
                 {"UserInfoMultipleExtractor.userid": [18232, 18234]}
             ),
diff --git a/docs/pages/api-reference/client.md b/docs/pages/api-reference/client.md
index 0acd8ef05..bdc5df7d3 100644
--- a/docs/pages/api-reference/client.md
+++ b/docs/pages/api-reference/client.md
@@ -8,14 +8,14 @@ status: WIP
 
 Fennel Client has the following methods on it:
 
-### extract\_features
+### extract
 
 Given some input and output features, extracts the current value of all the output features given the values of the input features.
 
 **Arguments:**
 
-* `output_feature_list: List[Union[Feature, Featureset]]`: list of features (written as fully qualified name of a feature along with the featureset) that should be extracted. Can also take featureset objects as input, in which case all features in the featureset are extracted.
-* `input_feature_list: List[Union[Feature, Featureset]]` : list of features/featuresets for which values are known
+* `inputs: List[Union[Feature, str]]`: list of feature objects or fully qualified feature names (as strings) to use as input. Featuresets are not allowed as inputs: if an engineer later added a feature to a featureset, every extract call running in production would break.
+* `outputs: List[Union[Feature, Featureset, str]]`: list of feature objects, featureset objects, or fully qualified feature names (as strings) to compute. Passing a featureset extracts all features in that featureset.
 * `input_dataframe: pd.DataFrame`: a pandas dataframe that contains the values of all features in the input list. Each row of the dataframe can be thought of as one entity for which features are desired.
 * `log: bool` - boolean which indicates if the extracted features should also be logged (for log-and-wait approach to training data generation). Default is False
 * `workflow: str` - the name of the workflow associated with the feature extraction. Only relevant when `log` is set to True
@@ -28,11 +28,11 @@ client = Client()
 
 @featureset
 class UserFeatures:
-userid: int = feature(id=1)
-... 6 more features
+    userid: int = feature(id=1)
+    ... 6 more features
 ```

+

 
 
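+For illustration, a minimal sketch of the call (assuming the `UserFeatures` featureset above and a `client` already connected to a Fennel server):
+
+```python
+import pandas as pd
+
+# extract every feature of UserFeatures for two users, keyed by userid
+feature_df = client.extract(
+    outputs=[UserFeatures],
+    inputs=[UserFeatures.userid],
+    input_dataframe=pd.DataFrame({"UserFeatures.userid": [18232, 18234]}),
+)
+```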
 ****
@@ -71,7 +71,7 @@ This method throws an error if the schema of the dataframe (i.e. column names an
 
 ****
 
-### **extract_historical_features**
+### **extract_historical**
 
 For offline training of models, users often need to extract features for a large number of entities.
 This method allows users to extract features for a large number of entities in a single call while ensuring
@@ -83,8 +83,8 @@ This api is an asynchronous api that returns a request id and the path to the ou
 **Arguments:**
 
 
-* `input_feature_list: List[Union[Feature, Featureset]]` - List of features or featuresets to use as input.
-* `output_feature_list: List[Union[Feature, Featureset]]` - List of features or featuresets to compute.
+* `inputs: List[Union[Feature, str]]`: list of feature objects or fully qualified feature names (as strings) to use as input. Featuresets are not allowed as inputs: if an engineer later added a feature to a featureset, every extract call running in production would break.
+* `outputs: List[Union[Feature, Featureset, str]]`: list of feature objects, featureset objects, or fully qualified feature names (as strings) to compute. Passing a featureset computes all features in that featureset.
 * `timestamp_column: str` - The name of the column containing the timestamps.
 * `format: str` - The format of the input data. Can be either "pandas", "csv", "json" or "parquet". Default is "pandas".
 * `input_dataframe: Optional[pd.DataFrame]` - Dataframe containing the input features. Only relevant when format is "pandas".
@@ -115,25 +115,25 @@ A completion rate of 1.0 and a failure rate of 0.0 indicates that all processing
 
 Here is an example with `format="pandas"` and the default output bucket
 
-<pre snippet="api-reference/client#extract_historical_features_api"></pre>
+<pre snippet="api-reference/client#extract_historical_api"></pre>
 
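+As a rough sketch (mirroring the snippet referenced above; `UserFeatures` and the two-row input are assumptions):
+
+```python
+from datetime import datetime
+
+import pandas as pd
+
+now = datetime.utcnow()
+response = client.extract_historical(
+    outputs=[UserFeatures],
+    inputs=[UserFeatures.userid],
+    format="pandas",
+    input_dataframe=pd.DataFrame(
+        {"UserFeatures.userid": [18232, 18234], "timestamp": [now, now]}
+    ),
+    timestamp_column="timestamp",
+)
+request_id = response["request_id"]  # poll it with extract_historical_progress
+```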
 
 Here is an example specifying input and output S3 buckets
 
-<pre snippet="api-reference/client#extract_historical_features_s3"></pre>
+<pre snippet="api-reference/client#extract_historical_s3"></pre>
 
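+A sketch of the S3 variant (bucket names, prefixes, and credentials are placeholders):
+
+```python
+from fennel.sources import S3
+
+s3 = S3(
+    name="extract_hist_input",
+    aws_access_key_id="<AWS_ACCESS_KEY_ID>",
+    aws_secret_access_key="<AWS_SECRET_ACCESS_KEY>",
+)
+s3_input_connection = s3.bucket("bucket", prefix="data")
+s3_output_connection = s3.bucket("bucket", prefix="output")
+
+response = client.extract_historical(
+    outputs=[UserFeatures],
+    inputs=[UserFeatures.userid],
+    format="csv",
+    timestamp_column="timestamp",
+    input_s3=s3_input_connection,
+    output_s3=s3_output_connection,
+)
+```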
 
 ****
 
-### **extract_historical_features_progress**
+### **extract_historical_progress**
 
-This method allows users to monitor the progress of the extract_historical_features asynchronous operation.
-It accepts the request ID that was returned by the `extract_historical_features` method and returns the current status of that operation.
+This method allows users to monitor the progress of the `extract_historical` asynchronous operation.
+It accepts the request ID that was returned by the `extract_historical` method and returns the current status of that operation.
 
-The response format of this function and the `extract_historical_features` function are identical. 
+The response formats of this function and the `extract_historical` function are identical.
 
 **Arguments:**
 
 
-* `request_id: str` - The request ID returned by the `extract_historical_features` method. This ID uniquely identifies the feature extraction operation
+* `request_id: str` - The request ID returned by the `extract_historical` method. This ID uniquely identifies the feature extraction operation
 
 **Returns:**
 
@@ -158,13 +158,13 @@ client.extract_historical_features_progress(request_id='bf5dfe5d-0040-4405-a224-
 
 ### **extract_historical_cancel_request**
 
-The `extract_historical_cancel_request` method allows users to cancel an extract_historical_features asynchronous operation.
-The response format of this function and the `extract_historical_features` function are identical. 
+The `extract_historical_cancel_request` method allows users to cancel an `extract_historical` asynchronous operation.
+The response formats of this function and the `extract_historical` function are identical.
 
 **Arguments:**
 
 
-* `request_id: str` - The request ID returned by the `extract_historical_features` method. This ID uniquely identifies the feature extraction operation
+* `request_id: str` - The request ID returned by the `extract_historical` method. This ID uniquely identifies the feature extraction operation.
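+
+For instance, a sketch of a cancellation (the request id is a placeholder):
+
+```python
+request_id = "bf5dfe5d-0040-4405-a224-..."  # as returned by extract_historical
+response = client.extract_historical_cancel_request(request_id=request_id)
+# the response carries the request_id, the output bucket and prefix,
+# the completion rate, the failure rate, and the request status
+```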
 
 **Returns:**
 
diff --git a/docs/pages/api-reference/rest-api.md b/docs/pages/api-reference/rest-api.md
index 376f46cf4..1c27e3a19 100644
--- a/docs/pages/api-reference/rest-api.md
+++ b/docs/pages/api-reference/rest-api.md
@@ -20,24 +20,24 @@ Used to log data to a dataset. It's a POST call with the following properties:
 
 

 
-### /api/v1/extract\_features
+### /api/v1/extract
 
 
 Used to extract a set of output features given known values of some input features. It's a POST call with the following parameters:
 
-* `input_features`: list of fully qualified names of input features
-* `output_features`: list of fully qualified names of desired output features
+* `inputs`: list of fully qualified names of input features.
+* `outputs`: list of fully qualified names of desired output features.
 * `data`: json representing the dataframe of input feature values. The json can either be an array of json objects, each representing a row; or it can be a single json object where each key maps to a list of values representing a column. Strings of json are also accepted.
-* `log`: boolean, true if the extracted features should also be logged to serve as future training data
-* `workflow`: string describing the name of the workflow to which extract features should be logged (only relevant when `log` is set to true)
-* `sampling_rate`: float between 0-1 describing the sampling to be done while logging the extracted features (only relevant when `log` is true)
+* `log`: boolean, true if the extracted features should also be logged to serve as future training data.
+* `workflow`: string describing the name of the workflow to which extract features should be logged (only relevant when `log` is set to true).
+* `sampling_rate`: float between 0-1 describing the sampling to be done while logging the extracted features (only relevant when `log` is true).
 
 The response dataframe is returned as column oriented json.
 
 **Example**
 
 With column oriented data
-<pre snippet="api-reference/rest-api#rest_extract_features_api_columnar"></pre>
+<pre snippet="api-reference/rest-api#rest_extract_api_columnar"></pre>
 
 
 With row oriented data
-<pre snippet="api-reference/rest-api#rest_extract_features_api"></pre>
+<pre snippet="api-reference/rest-api#rest_extract_api"></pre>
 
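+For instance, a sketch of the call from Python (the server URL is a placeholder):
+
+```python
+import requests
+
+SERVER = "http://localhost:8000"  # placeholder Fennel server URL
+url = "{}/api/v1/extract".format(SERVER)
+headers = {"Content-Type": "application/json"}
+req = {
+    "outputs": ["UserFeatures"],
+    "inputs": ["UserFeatures.userid"],
+    # column oriented; a list of row objects works too
+    "data": {"UserFeatures.userid": [1, 2, 3]},
+    "log": True,
+    "workflow": "test",
+}
+response = requests.post(url, json=req, headers=headers)
+assert response.status_code == requests.codes.OK, response.json()
+```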
diff --git a/docs/pages/development/unit-testing.md b/docs/pages/development/unit-testing.md
index d09c06312..92d8a6cc7 100644
--- a/docs/pages/development/unit-testing.md
+++ b/docs/pages/development/unit-testing.md
@@ -33,7 +33,7 @@ class TestDataset(unittest.TestCase):
         # ... some other stuff
         client.log("fennel_webhook", 'User', pd.DataFrame(...))
         # ... some other stuff
-        found = client.extract_features(...)
+        found = client.extract(...)
         self.assertEqual(found, expected)    
 ```
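+A slightly fuller sketch of the same pattern (the dataset, featureset, and column names here are illustrative, not from the docs):
+
+```python
+from datetime import datetime
+
+import pandas as pd
+from fennel.test_lib import mock
+
+
+@mock
+def test_user_features(client):
+    # sync the entities under test against the mock backend
+    client.sync(datasets=[UserInfoDataset], featuresets=[UserFeatures])
+    client.log("fennel_webhook", "User", pd.DataFrame({
+        "uid": [1, 2, 3],
+        "name": ["Alice", "Bob", "Charlie"],
+        "at": [datetime.utcnow()] * 3,
+    }))
+    found = client.extract(
+        outputs=[UserFeatures.name],
+        inputs=[UserFeatures.uid],
+        input_dataframe=pd.DataFrame({"UserFeatures.uid": [1, 2, 3]}),
+    )
+    assert found["UserFeatures.name"].tolist() == ["Alice", "Bob", "Charlie"]
+```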
 
diff --git a/fennel/CHANGELOG.md b/fennel/CHANGELOG.md
index 434b34a1d..c3174b73f 100644
--- a/fennel/CHANGELOG.md
+++ b/fennel/CHANGELOG.md
@@ -1,4 +1,8 @@
 # Changelog
+## [0.20.0] - 2024-01-12
+- Added client methods `extract`, `extract_historical`, and `extract_historical_progress`.
+- Deprecated client methods `extract_features`, `extract_historical_features`, and `extract_historical_features_progress`; they will be removed in a future release.
+
 ## [0.19.8] - 2024-01-11
 - Improved error reporting in case of join operator failure in Mock Client.
 
diff --git a/fennel/client/client.py b/fennel/client/client.py
index 65382885c..2355cd2a8 100644
--- a/fennel/client/client.py
+++ b/fennel/client/client.py
@@ -2,18 +2,19 @@
 import gzip
 import json
 import math
+import warnings
+from typing import Dict, Optional, Any, Set, List, Union, Tuple
 from urllib.parse import urljoin
 
 import pandas as pd
-from typing import Dict, Optional, Any, Set, List, Union, Tuple
 
 import fennel._vendor.requests as requests  # type: ignore
 from fennel.datasets import Dataset
 from fennel.featuresets import Featureset, Feature, is_valid_feature
 from fennel.lib.schema import parse_json
 from fennel.lib.to_proto import to_sync_request_proto
-from fennel.utils import check_response, to_columnar_json
 from fennel.sources import S3Connector
+from fennel.utils import check_response, to_columnar_json
 
 V1_API = "/api/v1"
 
@@ -318,10 +319,10 @@ def source_definition(self, source_uuid: str) -> Dict:
             "{}/definitions/sources/{}".format(V1_API, source_uuid)
         ).json()
 
-    def extract_features(
+    def extract(
         self,
-        input_feature_list: List[Union[Feature, Featureset, str]],
-        output_feature_list: List[Union[Feature, Featureset, str]],
+        inputs: List[Union[Feature, str]],
+        outputs: List[Union[Feature, Featureset, str]],
         input_dataframe: pd.DataFrame,
         log: bool = False,
         workflow: Optional[str] = None,
@@ -332,8 +333,8 @@ def extract_features(
         feature list. The features are computed for the current time.
 
         Parameters:
-        input_feature_list (List[Union[Feature, Featureset]]): List of features or featuresets to use as input.
-        output_feature_list (List[Union[Feature, Featureset]]): List of features or featuresets to compute.
+        inputs (List[Union[Feature, str]]): List of feature objects or fully qualified feature names (as strings) to use as input. Featuresets are not allowed as inputs: if an engineer later added a feature to a featureset, every extract call running in production would break.
+        outputs (List[Union[Feature, Featureset, str]]): List of feature objects, featureset objects, or fully qualified feature names (as strings) to compute.
         input_dataframe (pd.DataFrame): Dataframe containing the input features.
         log (bool): Boolean which indicates if the extracted features should also be logged (for log-and-wait approach to training data generation). Default is False.
         workflow (Optional[str]): The name of the workflow associated with the feature extraction. Only relevant when log is set to True.
@@ -342,11 +343,11 @@ def extract_features(
         Returns:
         Union[pd.DataFrame, pd.Series]: Pandas dataframe or series containing the output features.
         """
-        if input_dataframe.empty or len(output_feature_list) == 0:
+        if input_dataframe.empty or len(outputs) == 0:
             return pd.DataFrame()
 
         input_feature_names = []
-        for inp_feature in input_feature_list:
+        for inp_feature in inputs:
             if isinstance(inp_feature, Feature):
                 input_feature_names.append(inp_feature.fqn())
             elif isinstance(inp_feature, str) and is_valid_feature(inp_feature):
@@ -367,7 +368,7 @@ def extract_features(
 
         output_feature_names = []
         output_feature_name_to_type: Dict[str, Any] = {}
-        for out_feature in output_feature_list:
+        for out_feature in outputs:
             if isinstance(out_feature, Feature):
                 output_feature_names.append(out_feature.fqn())
                 output_feature_name_to_type[
@@ -384,8 +385,8 @@ def extract_features(
                     output_feature_name_to_type[f.fqn()] = f.dtype
 
         req = {
-            "input_features": input_feature_names,
-            "output_features": output_feature_names,
+            "inputs": input_feature_names,
+            "outputs": output_feature_names,
             "data": to_columnar_json(input_dataframe),
             "log": log,
         }  # type: Dict[str, Any]
@@ -394,7 +395,7 @@ def extract_features(
         if sampling_rate is not None:
             req["sampling_rate"] = sampling_rate
 
-        response = self._post_json("{}/extract_features".format(V1_API), req)
+        response = self._post_json("{}/extract".format(V1_API), req)
         df = pd.DataFrame(response.json())
         for col in df.columns:
             if df[col].dtype == "object":
@@ -403,10 +404,47 @@ def extract_features(
                 )
         return df
 
-    def extract_historical_features(
+    def extract_features(
         self,
-        input_feature_list: List[Union[Feature, Featureset, str]],
+        input_feature_list: List[Union[Feature, str]],
         output_feature_list: List[Union[Feature, Featureset, str]],
+        input_dataframe: pd.DataFrame,
+        log: bool = False,
+        workflow: Optional[str] = None,
+        sampling_rate: Optional[float] = None,
+    ) -> Union[pd.DataFrame, pd.Series]:
+        """
+        Deprecated in favor of `extract`; this method will be removed in a future release.
+        Extract features for a given output feature list from an input
+        feature list. The features are computed for the current time.
+
+        Parameters:
+        input_feature_list (List[Union[Feature, str]]): List of feature objects or fully qualified feature names (as strings) to use as input. Featuresets are not allowed as inputs: if an engineer later added a feature to a featureset, every extract call running in production would break.
+        output_feature_list (List[Union[Feature, Featureset, str]]): List of feature objects, featureset objects, or fully qualified feature names (as strings) to compute.
+        input_dataframe (pd.DataFrame): Dataframe containing the input features.
+        log (bool): Boolean which indicates if the extracted features should also be logged (for log-and-wait approach to training data generation). Default is False.
+        workflow (Optional[str]): The name of the workflow associated with the feature extraction. Only relevant when log is set to True.
+        sampling_rate (float): The rate at which feature data should be sampled before logging. Only relevant when log is set to True. The default value is 1.0.
+
+        Returns:
+        Union[pd.DataFrame, pd.Series]: Pandas dataframe or series containing the output features.
+        """
+        warnings.warn(
+            "extract_features is deprecated; use extract instead.",
+            DeprecationWarning,
+        )
+        return self.extract(
+            inputs=input_feature_list,
+            outputs=output_feature_list,
+            input_dataframe=input_dataframe,
+            log=log,
+            workflow=workflow,
+            sampling_rate=sampling_rate,
+        )
+
+    def extract_historical(
+        self,
+        inputs: List[Union[Feature, str]],
+        outputs: List[Union[Feature, Featureset, str]],
         timestamp_column: str,
         format: str = "pandas",
         input_dataframe: Optional[pd.DataFrame] = None,
@@ -419,8 +457,8 @@ def extract_historical_features(
         timestamps are provided by the timestamp_column parameter.
 
         Parameters:
-        input_feature_list (List[Union[Feature, Featureset]]): List of features or featuresets to use as input.
-        output_feature_list (List[Union[Feature, Featureset]]): List of features or featuresets to compute.
+        inputs (List[Union[Feature, str]]): List of feature objects or fully qualified feature names (as strings) to use as input. Featuresets are not allowed as inputs: if an engineer later added a feature to a featureset, every extract call running in production would break.
+        outputs (List[Union[Feature, Featureset, str]]): List of feature objects, featureset objects, or fully qualified feature names (as strings) to compute.
         timestamp_column (str): The name of the column containing the timestamps.
         format (str): The format of the input data. Can be either "pandas",
             "csv", "json" or "parquet". Default is "pandas".
@@ -441,7 +479,6 @@ def extract_historical_features(
                         A failure rate of 0.0 indicates that all processing has been completed successfully.
                         The status of the request.
         """
-
         if format not in ["pandas", "csv", "json", "parquet"]:
             raise Exception(
                 "Invalid input format. "
@@ -450,7 +487,7 @@ def extract_historical_features(
             )
 
         input_feature_names = []
-        for input_feature in input_feature_list:
+        for input_feature in inputs:
             if isinstance(input_feature, Feature):
                 input_feature_names.append(input_feature.fqn())
             elif isinstance(input_feature, str):
@@ -535,7 +572,7 @@ def extract_historical_features(
             extract_historical_input["S3"] = input_info
 
         output_feature_names = []
-        for output_feature in output_feature_list:
+        for output_feature in outputs:
             if isinstance(output_feature, Feature):
                 output_feature_names.append(output_feature.fqn())
             elif isinstance(output_feature, str):
@@ -557,15 +594,83 @@ def extract_historical_features(
             "timestamp_column": timestamp_column,
             "s3_output": _s3_connector_dict(output_s3) if output_s3 else None,
         }
-        return self._post_json(
-            "{}/extract_historical_features".format(V1_API), req
+        return self._post_json("{}/extract_historical".format(V1_API), req)
+
+    def extract_historical_features(
+        self,
+        input_feature_list: List[Union[Feature, str]],
+        output_feature_list: List[Union[Feature, Featureset, str]],
+        timestamp_column: str,
+        format: str = "pandas",
+        input_dataframe: Optional[pd.DataFrame] = None,
+        input_s3: Optional[S3Connector] = None,
+        output_s3: Optional[S3Connector] = None,
+        feature_to_column_map: Optional[Dict[Feature, str]] = None,
+    ) -> Dict[str, Any]:
+        """
+        Deprecated in favor of `extract_historical`; this method will be removed in a future release.
+        Extract point in time correct features from a dataframe, where the
+        timestamps are provided by the timestamp_column parameter.
+
+        Parameters:
+        input_feature_list (List[Union[Feature, str]]): List of feature objects or fully qualified feature names (as strings) to use as input. Featuresets are not allowed as inputs: if an engineer later added a feature to a featureset, every extract call running in production would break.
+        output_feature_list (List[Union[Feature, Featureset, str]]): List of feature objects, featureset objects, or fully qualified feature names (as strings) to compute.
+        timestamp_column (str): The name of the column containing the timestamps.
+        format (str): The format of the input data. Can be either "pandas",
+            "csv", "json" or "parquet". Default is "pandas".
+        input_dataframe (Optional[pd.DataFrame]): Dataframe containing the input features. Only relevant when format is "pandas".
+        output_s3 (Optional[S3Connector]): Contains the S3 info -- bucket, prefix, and optional access key id
+            and secret key -- used for storing the output of the extract historical request
+
+        The following parameters are only relevant when format is "csv", "json" or "parquet".
+
+        input_s3 (Optional[sources.S3Connector]): The info for the input S3 data, containing bucket, prefix, and optional access key id
+            and secret key
+        feature_to_column_map (Optional[Dict[Feature, str]]): A dictionary that maps columns in the S3 data to the required features.
+
+
+        Returns:
+        Dict[str, Any]: A dictionary containing the request_id, the output s3 bucket and prefix, the completion rate and the failure rate.
+                        A completion rate of 1.0 indicates that all processing has been completed.
+                        A failure rate of 0.0 indicates that all processing has been completed successfully.
+                        The status of the request.
+        """
+        warnings.warn(
+            "extract_historical_features is deprecated; use extract_historical instead.",
+            DeprecationWarning,
+        )
+        return self.extract_historical(
+            inputs=input_feature_list,
+            outputs=output_feature_list,
+            timestamp_column=timestamp_column,
+            format=format,
+            input_dataframe=input_dataframe,
+            input_s3=input_s3,
+            output_s3=output_s3,
+            feature_to_column_map=feature_to_column_map,
+        )
+
+    def extract_historical_progress(self, request_id):
+        """
+        Get the status of an extract_historical request.
+
+        :param request_id: The request id returned by extract_historical.
+
+        Returns:
+        Dict[str, Any]: A dictionary containing the request_id, the output s3 bucket and prefix, the completion rate and the failure rate.
+                        A completion rate of 1.0 indicates that all processing has been completed.
+                        A failure rate of 0.0 indicates that all processing has been completed successfully.
+                        The status of the request.
+        """
+        return self._get(
+            f"{V1_API}/extract_historical_request/status?request_id={request_id}"
         )
 
     def extract_historical_features_progress(self, request_id):
         """
+        Deprecated in favor of `extract_historical_progress`; this method will be removed in a future release.
         Get the status of an extract historical features request.
 
-        :param request_id: The request id returned by extract_historical_features.
+        :param request_id: The request id returned by extract_historical.
 
         Returns:
         Dict[str, Any]: A dictionary containing the request_id, the output s3 bucket and prefix, the completion rate and the failure rate.
@@ -581,7 +686,7 @@ def extract_historical_cancel_request(self, request_id):
         """
         Cancel the extract historical features request.
 
-        :param request_id: The request id returned by extract_historical_features.
+        :param request_id: The request id returned by extract_historical.
 
         Returns:
         Dict[str, Any]: A dictionary containing the request_id, the output s3 bucket and prefix, the completion rate and the failure rate.
diff --git a/fennel/client_tests/test_complex_autogen_extractor.py b/fennel/client_tests/test_complex_autogen_extractor.py
index dbf3946f1..a1b5745ea 100644
--- a/fennel/client_tests/test_complex_autogen_extractor.py
+++ b/fennel/client_tests/test_complex_autogen_extractor.py
@@ -1,21 +1,15 @@
-import sys
-
 from datetime import datetime, timedelta
 from typing import Optional
 
 import pandas as pd
 import pytest
 
-import fennel
-
 from fennel import meta, Count, Window, featureset, feature, extractor
-from fennel.client import Client
-from fennel.lib.schema import inputs, oneof, outputs
-from fennel.lib.aggregate import Max, Min
-from fennel.sources import Webhook, S3, MySQL
 from fennel.datasets import dataset, field, pipeline, Dataset
+from fennel.lib.schema import inputs, outputs
+from fennel.sources import Webhook
 from fennel.sources import source
-from fennel.test_lib import MockClient, mock
+from fennel.test_lib import mock
 
 webhook = Webhook(name="fennel_webhook")
 
@@ -322,9 +316,9 @@ def test_complex_auto_gen_extractors(client):
     )
     assert log_response.status_code == 200
 
-    extracted_df = client.extract_features(
-        input_feature_list=[RequestFeatures0.rider_id],
-        output_feature_list=[RiderFeatures],
+    extracted_df = client.extract(
+        inputs=[RequestFeatures0.rider_id],
+        outputs=[RiderFeatures],
         input_dataframe=pd.DataFrame({"RequestFeatures0.rider_id": [1, 2]}),
     )
     assert extracted_df.shape[0] == 2
diff --git a/fennel/client_tests/test_featureset.py b/fennel/client_tests/test_featureset.py
index d855965a7..fb914dac3 100644
--- a/fennel/client_tests/test_featureset.py
+++ b/fennel/client_tests/test_featureset.py
@@ -1,10 +1,10 @@
 import unittest
 from datetime import datetime, timedelta
+from typing import Optional, Dict, List
 
 import numpy as np
 import pandas as pd
 import pytest
-from typing import Optional, Dict, List
 
 import fennel._vendor.requests as requests
 from fennel.datasets import dataset, field
@@ -340,8 +340,8 @@ def test_derived_extractor(self, client):
         response = client.log("fennel_webhook", "FlightDataset", df)
         assert response.status_code == requests.codes.OK, response.json()
         client.sleep()
-        feature_df = client.extract_features(
-            output_feature_list=[
+        feature_df = client.extract(
+            outputs=[
                 UserInfoSingleExtractor.userid,
                 GeneratedFeatures.user_id,
                 GeneratedFeatures.country,
@@ -350,7 +350,7 @@ def test_derived_extractor(self, client):
                 GeneratedFeatures.pilots,
                 GeneratedFeatures.base_region,
             ],
-            input_feature_list=[
+            inputs=[
                 UserInfoSingleExtractor.userid,
                 FlightRequest.id,
                 FlightCrewRequest.id,
@@ -416,8 +416,8 @@ def test_dag_resolution2(self, client):
         response = client.log("fennel_webhook", "UserInfoDataset", df)
         assert response.status_code == requests.codes.OK, response.json()
         client.sleep()
-        feature_df = client.extract_features(
-            output_feature_list=[
+        feature_df = client.extract(
+            outputs=[
                 UserInfoMultipleExtractor.userid,
                 UserInfoMultipleExtractor.name,
                 UserInfoMultipleExtractor.country_geoid,
@@ -426,18 +426,16 @@ def test_dag_resolution2(self, client):
                 UserInfoMultipleExtractor.age_cubed,
                 UserInfoMultipleExtractor.is_name_common,
             ],
-            input_feature_list=[UserInfoMultipleExtractor.userid],
+            inputs=[UserInfoMultipleExtractor.userid],
             input_dataframe=pd.DataFrame(
                 {"UserInfoMultipleExtractor.userid": [18232, 18234]}
             ),
         )
         self.assertEqual(feature_df.shape, (2, 7))
 
-        feature_df = client.extract_features(
-            output_feature_list=[
-                UserInfoMultipleExtractor,
-            ],
-            input_feature_list=[UserInfoMultipleExtractor.userid],
+        feature_df = client.extract(
+            outputs=[UserInfoMultipleExtractor],
+            inputs=[UserInfoMultipleExtractor.userid],
             input_dataframe=pd.DataFrame(
                 {"UserInfoMultipleExtractor.userid": [18232, 18234]}
             ),
@@ -503,13 +501,13 @@ def test_dag_resolution_complex(self, client):
         assert response.status_code == requests.codes.OK, response.json()
         client.sleep()
 
-        feature_df = client.extract_features(
-            output_feature_list=[
+        feature_df = client.extract(
+            outputs=[
                 UserInfoTransformedFeatures.age_power_four,
                 UserInfoTransformedFeatures.is_name_common,
                 UserInfoTransformedFeatures.country_geoid_square,
             ],
-            input_feature_list=[UserInfoMultipleExtractor.userid],
+            inputs=[UserInfoMultipleExtractor.userid],
             input_dataframe=pd.DataFrame(
                 {"UserInfoMultipleExtractor.userid": [18232, 18234]}
             ),
@@ -534,13 +532,13 @@ def test_dag_resolution_complex(self, client):
         if client.is_integration_client():
             return
 
-        feature_df = client.extract_historical_features(
-            output_feature_list=[
+        feature_df = client.extract_historical(
+            outputs=[
                 "UserInfoTransformedFeatures.age_power_four",
                 "UserInfoTransformedFeatures.is_name_common",
                 "UserInfoTransformedFeatures.country_geoid_square",
             ],
-            input_feature_list=["UserInfoMultipleExtractor.userid"],
+            inputs=["UserInfoMultipleExtractor.userid"],
             input_dataframe=pd.DataFrame(
                 {
                     "UserInfoMultipleExtractor.userid": [18232, 18234],
@@ -641,11 +639,9 @@ def test_document_featureset(self, client):
         response = client.log("fennel_webhook", "DocumentContentDataset", df)
         assert response.status_code == requests.codes.OK, response.json()
         client.sleep()
-        feature_df = client.extract_features(
-            output_feature_list=[
-                DocumentFeatures,
-            ],
-            input_feature_list=[DocumentFeatures.doc_id],
+        feature_df = client.extract(
+            outputs=[DocumentFeatures],
+            inputs=[DocumentFeatures.doc_id],
             input_dataframe=pd.DataFrame(
                 {"DocumentFeatures.doc_id": [18232, 18234]}
             ),
@@ -675,11 +671,9 @@ def test_document_featureset(self, client):
         if client.is_integration_client():
             return
 
-        feature_df = client.extract_historical_features(
-            output_feature_list=[
-                DocumentFeatures,
-            ],
-            input_feature_list=[DocumentFeatures.doc_id],
+        feature_df = client.extract_historical(
+            outputs=[DocumentFeatures],
+            inputs=[DocumentFeatures.doc_id],
             input_dataframe=pd.DataFrame(
                 {
                     "DocumentFeatures.doc_id": [18232, 18234],
diff --git a/fennel/client_tests/test_fraud_detection.py b/fennel/client_tests/test_fraud_detection.py
index 831e5201a..0086befc5 100644
--- a/fennel/client_tests/test_fraud_detection.py
+++ b/fennel/client_tests/test_fraud_detection.py
@@ -186,10 +186,10 @@ def test_fraud_detection_pipeline(client):
         "UserTransactionSumsFeatures.cc_num": 99
     }
 
-    df = client.extract_features(
-        input_feature_list=[UserTransactionSumsFeatures.cc_num],
+    df = client.extract(
+        inputs=[UserTransactionSumsFeatures.cc_num],
         # Input from featureset,
-        output_feature_list=[
+        outputs=[
             UserTransactionSumsFeatures.cc_num,
             UserTransactionSumsFeatures.sum_amt_1d,
             UserTransactionSumsFeatures.sum_amt_7d,
diff --git a/fennel/client_tests/test_invalid.py b/fennel/client_tests/test_invalid.py
index aa2b8ba97..acb76b956 100644
--- a/fennel/client_tests/test_invalid.py
+++ b/fennel/client_tests/test_invalid.py
@@ -186,9 +186,9 @@ def get_domain_feature(cls, ts: pd.Series, domain: pd.Series):
                 ],
                 featuresets=[DomainFeatures, Query],
             )
-            client.extract_features(
-                output_feature_list=[DomainFeatures2],
-                input_feature_list=[Query.member_id],
+            client.extract(
+                outputs=[DomainFeatures2],
+                inputs=[Query.member_id],
                 input_dataframe=pd.DataFrame(
                     {
                         "Query.domain": [
@@ -210,9 +210,9 @@ def test_missing_dataset(self, client):
             datasets=[MemberDataset], featuresets=[DomainFeatures2, Query]
         )
         with pytest.raises(Exception) as e:
-            client.extract_features(
-                output_feature_list=[DomainFeatures2],
-                input_feature_list=[Query.domain],
+            client.extract(
+                outputs=[DomainFeatures2],
+                inputs=[Query.domain],
                 input_dataframe=pd.DataFrame(
                     {
                         "Query.domain": [
@@ -242,9 +242,9 @@ def test_no_access(self, client):
                 datasets=[MemberDataset, MemberActivityDatasetCopy],
                 featuresets=[DomainFeatures2, Query],
             )
-            client.extract_features(
-                output_feature_list=[DomainFeatures2],
-                input_feature_list=[Query.domain],
+            client.extract(
+                outputs=[DomainFeatures2],
+                inputs=[Query.domain],
                 input_dataframe=pd.DataFrame(
                     {
                         "Query.domain": [
diff --git a/fennel/client_tests/test_movie_tickets.py b/fennel/client_tests/test_movie_tickets.py
index 92dfad61f..4f1038321 100644
--- a/fennel/client_tests/test_movie_tickets.py
+++ b/fennel/client_tests/test_movie_tickets.py
@@ -1,23 +1,21 @@
 import unittest
 from datetime import datetime, timedelta
+from typing import List, Optional
 
 import pandas as pd
-
 import requests
 
 from fennel import featureset, extractor, feature
 from fennel.datasets import dataset, field
-from fennel.lib.metadata import meta
-from fennel.lib.schema import inputs, outputs, between
-from fennel.sources import source
 from fennel.datasets import pipeline, Dataset
 from fennel.lib.aggregate import Sum, LastK, Distinct
+from fennel.lib.metadata import meta
+from fennel.lib.schema import inputs, outputs
 from fennel.lib.window import Window
 from fennel.sources import Webhook
+from fennel.sources import source
 from fennel.test_lib import mock, MockClient
 
-from typing import List, Optional
-
 client = MockClient()
 
 webhook = Webhook(name="fennel_webhook")
@@ -219,9 +217,9 @@ def test_movie_ticket_sale(self, client):
             response.status_code == requests.codes.OK
         ), response.json()  # noqa
 
-        features = client.extract_features(
-            input_feature_list=[RequestFeatures.name],  # type: ignore
-            output_feature_list=[ActorFeatures.revenue],  # type: ignore
+        features = client.extract(
+            inputs=[RequestFeatures.name],  # type: ignore
+            outputs=[ActorFeatures.revenue],  # type: ignore
             input_dataframe=pd.DataFrame(
                 {
                     "RequestFeatures.name": [
diff --git a/fennel/client_tests/test_outbrain.py b/fennel/client_tests/test_outbrain.py
index 191d3e2e8..7efe4c791 100644
--- a/fennel/client_tests/test_outbrain.py
+++ b/fennel/client_tests/test_outbrain.py
@@ -140,12 +140,12 @@ def test_outbrain(client):
         inplace=True,
     )
     input_df = input_df.reset_index(drop=True)
-    feature_df = client.extract_features(
-        output_feature_list=[
+    feature_df = client.extract(
+        outputs=[
             Request,
             UserPageViewFeatures,
         ],
-        input_feature_list=[Request.uuid, Request.document_id],
+        inputs=[Request.uuid, Request.document_id],
         input_dataframe=input_df,
     )
     assert feature_df.shape[0] == 347
diff --git a/fennel/client_tests/test_search.py b/fennel/client_tests/test_search.py
index b8d983e90..251c2646d 100644
--- a/fennel/client_tests/test_search.py
+++ b/fennel/client_tests/test_search.py
@@ -1,11 +1,11 @@
 import unittest
 from collections import defaultdict
 from datetime import datetime
+from typing import Dict, List
 
 import numpy as np
 import pandas as pd
 import pytest
-from typing import Dict, List, Optional
 
 import fennel._vendor.requests as requests
 from fennel import sources
@@ -579,13 +579,13 @@ def test_search_e2e(self, client):
                 "Query.doc_id": [31234, 33234],
             }
         )
-        df = client.extract_features(
-            output_feature_list=[
+        df = client.extract(
+            outputs=[
                 UserBehaviorFeatures,
                 DocumentFeatures,
                 DocumentContentFeatures,
             ],
-            input_feature_list=[Query.doc_id, Query.user_id],
+            inputs=[Query.doc_id, Query.user_id],
             input_dataframe=input_df,
         )
         assert df.shape == (2, 15)
@@ -634,9 +634,9 @@ def test_search_e2e(self, client):
                 "TopWordsFeatures.word": ["This", "Coda"],
             }
         )
-        df = client.extract_features(
-            output_feature_list=[TopWordsFeatures.count],
-            input_feature_list=[TopWordsFeatures.word],
+        df = client.extract(
+            outputs=[TopWordsFeatures.count],
+            inputs=[TopWordsFeatures.word],
             input_dataframe=input_df,
         )
         assert df.shape == (2, 1)
diff --git a/fennel/client_tests/test_social_network.py b/fennel/client_tests/test_social_network.py
index 2a41991e6..447b6b734 100644
--- a/fennel/client_tests/test_social_network.py
+++ b/fennel/client_tests/test_social_network.py
@@ -191,9 +191,9 @@ def test_social_network(client):
     )
     assert found.to_list() == [True, True, True]
 
-    feature_df = client.extract_features(
-        output_feature_list=[UserFeatures],
-        input_feature_list=[Request.user_id, Request.category],
+    feature_df = client.extract(
+        outputs=[UserFeatures],
+        inputs=[Request.user_id, Request.category],
         input_dataframe=pd.DataFrame(
             {
                 "Request.user_id": [
diff --git a/fennel/client_tests/test_struct_type.py b/fennel/client_tests/test_struct_type.py
index 1aa36c3a2..55e66b9ca 100644
--- a/fennel/client_tests/test_struct_type.py
+++ b/fennel/client_tests/test_struct_type.py
@@ -1,8 +1,8 @@
 from datetime import datetime
+from typing import List
 
 import pandas as pd
 import pytest
-from typing import List
 
 import fennel._vendor.requests as requests
 from fennel import sources
@@ -140,12 +140,12 @@ def test_struct_type(client):
             ],
         }
     )
-    df = client.extract_features(
-        output_feature_list=[
+    df = client.extract(
+        outputs=[
             MovieFeatures.cast_list,
             MovieFeatures.average_cast_age,
         ],
-        input_feature_list=[MovieFeatures.movie],
+        inputs=[MovieFeatures.movie],
         input_dataframe=input_df,
     )
 
diff --git a/fennel/lib/graph_algorithms/extractor_order.py b/fennel/lib/graph_algorithms/extractor_order.py
index 5b9320614..cef51c311 100644
--- a/fennel/lib/graph_algorithms/extractor_order.py
+++ b/fennel/lib/graph_algorithms/extractor_order.py
@@ -112,7 +112,7 @@ def get_feature_vertex(f: Union[Feature, Featureset, str]) -> str:
 
 
 def get_extractor_order(
-    input_features: List[Union[Feature, Featureset, str]],
+    input_features: List[Union[Feature, str]],
     output_features: List[Union[Feature, Featureset, str]],
     extractors: List[Extractor],
 ) -> List[Extractor]:
@@ -129,7 +129,7 @@ def get_extractor_order(
     for f in input_features:
         resolved_features.update(_get_features(f))
     to_find: Set[str] = set()
-    for f in output_features:
+    for f in output_features:  # type: ignore
         to_find.update(_get_features(f))
     # Find the extractors that need to be run to produce the output features.
     extractor_names: Set[str] = set()
diff --git a/fennel/lib/includes/test_includes.py b/fennel/lib/includes/test_includes.py
index 166b757b5..f06b5a078 100644
--- a/fennel/lib/includes/test_includes.py
+++ b/fennel/lib/includes/test_includes.py
@@ -1,9 +1,9 @@
 from datetime import datetime
+from typing import Optional
 
 import pandas as pd
 import pytest
 from google.protobuf.json_format import ParseDict  # type: ignore
-from typing import Optional
 
 import fennel._vendor.requests as requests
 from fennel.datasets import dataset, field
@@ -117,9 +117,9 @@ def test_simple_extractor(client):
     assert response.status_code == requests.codes.OK, response.json()
     if client.is_integration_client():
         client.sleep()
-    feature_df = client.extract_features(
-        output_feature_list=[UserInfoSingleExtractor],
-        input_feature_list=[UserInfoSingleExtractor.userid],
+    feature_df = client.extract(
+        outputs=[UserInfoSingleExtractor],
+        inputs=[UserInfoSingleExtractor.userid],
         input_dataframe=pd.DataFrame(
             {"UserInfoSingleExtractor.userid": [18232, 18234]}
         ),
diff --git a/fennel/lib/includes/test_includes_invalid.py b/fennel/lib/includes/test_includes_invalid.py
index 58dbc63bd..4bc2e0f18 100644
--- a/fennel/lib/includes/test_includes_invalid.py
+++ b/fennel/lib/includes/test_includes_invalid.py
@@ -1,8 +1,8 @@
 from datetime import datetime
+from typing import Optional
 
 import pandas as pd
 import pytest
-from typing import Optional
 
 import fennel._vendor.requests as requests
 from fennel.datasets import dataset, field
@@ -92,9 +92,9 @@ def test_simple_invalid_extractor(client):
         client.sleep()
 
     with pytest.raises(Exception) as e:
-        client.extract_features(
-            output_feature_list=[UserInfoExtractor],
-            input_feature_list=[UserInfoExtractor.userid],
+        client.extract(
+            outputs=[UserInfoExtractor],
+            inputs=[UserInfoExtractor.userid],
             input_dataframe=pd.DataFrame(
                 {"UserInfoExtractor.userid": [18232, 18234]}
             ),
diff --git a/fennel/test_lib/mock_client.py b/fennel/test_lib/mock_client.py
index 52002e521..c4b02f1b8 100644
--- a/fennel/test_lib/mock_client.py
+++ b/fennel/test_lib/mock_client.py
@@ -2,6 +2,7 @@
 
 import copy
 import json
+import logging
 import os
 import sys
 import types
@@ -9,13 +10,11 @@
 from dataclasses import dataclass
 from datetime import datetime
 from functools import partial
-import logging
-from fennel.sources.sources import S3Connector
+from typing import Callable, Dict, List, Tuple, Optional, Union
 
 import numpy as np
 import pandas as pd
 from frozendict import frozendict
-from typing import Callable, Dict, List, Tuple, Optional, Union
 
 import fennel.datasets.datasets
 import fennel.sources as sources
@@ -24,7 +23,6 @@
 from fennel.datasets import Dataset, field, Pipeline, OnDemand  # noqa
 from fennel.datasets.datasets import sync_validation_for_pipelines
 from fennel.featuresets import Featureset, Feature, Extractor, is_valid_feature
-from fennel.featuresets.featureset import sync_validation_for_extractors
 from fennel.gen.dataset_pb2 import CoreDataset
 from fennel.gen.featureset_pb2 import CoreFeatureset
 from fennel.gen.featureset_pb2 import (
@@ -45,6 +43,7 @@
     extractors_from_fs,
     featureset_to_proto,
 )
+from fennel.sources.sources import S3Connector
 from fennel.test_lib.executor import Executor
 from fennel.test_lib.integration_client import IntegrationClient
 from fennel.test_lib.test_utils import cast_col_to_dtype
@@ -473,9 +472,27 @@ def sync(
             raise Exception("Cyclic graph detected in extractors")
         return FakeResponse(200, "OK")
 
+    def extract(
+        self,
+        inputs: List[Union[Feature, str]],
+        outputs: List[Union[Feature, Featureset, str]],
+        input_dataframe: pd.DataFrame,
+        log: bool = False,
+        workflow: Optional[str] = None,
+        sampling_rate: Optional[float] = None,
+    ) -> pd.DataFrame:
+        return self.extract_features(
+            input_feature_list=inputs,
+            output_feature_list=outputs,
+            input_dataframe=input_dataframe,
+            log=log,
+            workflow=workflow,
+            sampling_rate=sampling_rate,
+        )
+
     def extract_features(
         self,
-        input_feature_list: List[Union[Feature, Featureset, str]],
+        input_feature_list: List[Union[Feature, str]],
         output_feature_list: List[Union[Feature, Featureset, str]],
         input_dataframe: pd.DataFrame,
         log: bool = False,
@@ -524,9 +541,31 @@ def extract_features(
             extractors, input_dataframe, output_feature_list, timestamps
         )
 
+    def extract_historical(
+        self,
+        inputs: List[Union[Feature, str]],
+        outputs: List[Union[Feature, Featureset, str]],
+        timestamp_column: str,
+        format: str = "pandas",
+        input_dataframe: Optional[pd.DataFrame] = None,
+        input_s3: Optional[S3Connector] = None,
+        output_s3: Optional[S3Connector] = None,
+        feature_to_column_map: Optional[Dict[Feature, str]] = None,
+    ) -> Union[pd.DataFrame, pd.Series]:
+        return self.extract_historical_features(
+            input_feature_list=inputs,
+            output_feature_list=outputs,
+            timestamp_column=timestamp_column,
+            format=format,
+            input_dataframe=input_dataframe,
+            input_s3=input_s3,
+            output_s3=output_s3,
+            feature_to_column_map=feature_to_column_map,
+        )
+
     def extract_historical_features(
         self,
-        input_feature_list: List[Union[Feature, Featureset, str]],
+        input_feature_list: List[Union[Feature, str]],
         output_feature_list: List[Union[Feature, Featureset, str]],
         timestamp_column: str,
         format: str = "pandas",
@@ -591,6 +630,9 @@ def extract_historical_features(
         output_df[timestamp_column] = timestamps
         return output_df
 
+    def extract_historical_progress(self, request_id):
+        return FakeResponse(404, "Extract historical features not supported")
+
     def extract_historical_features_progress(self, request_id):
         return FakeResponse(404, "Extract historical features not supported")
 
diff --git a/pyproject.toml b/pyproject.toml
index 40e1b68bf..1c1ba07c1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "fennel-ai"
-version = "0.19.9"
+version = "0.20.0"
 description = "The modern realtime feature engineering platform"
 authors = ["Fennel AI "]
 packages = [{ include = "fennel" }]