-
Notifications
You must be signed in to change notification settings - Fork 1
/
request.d.ts
243 lines (243 loc) · 9.99 KB
/
request.d.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
import type { BinaryLike } from 'node:crypto';
import type { Dictionary } from '@crawlee/types';
import type { EnqueueLinksOptions } from './enqueue_links/enqueue_links';
import type { AllowedHttpMethods } from './typedefs';
/**
 * Lifecycle state of a {@apilink Request}, exposed via the `Request.state` accessor.
 * The names mirror the stages of request processing: navigation hooks, the request
 * handler, and error handling.
 */
export declare enum RequestState {
    /** The request has not been processed yet. */
    UNPROCESSED = 0,
    /** Processing reached the pre-navigation stage. */
    BEFORE_NAV = 1,
    /** Processing reached the post-navigation stage. */
    AFTER_NAV = 2,
    /** The request handler is processing the request. */
    REQUEST_HANDLER = 3,
    /** Processing of the request finished. */
    DONE = 4,
    /** The error handler is processing a failure of the request. */
    ERROR_HANDLER = 5,
    /** Processing of the request failed. */
    ERROR = 6,
    /** The request was skipped. */
    SKIPPED = 7
}
/**
* Represents a URL to be crawled, optionally including HTTP method, headers, payload and other metadata.
* The `Request` object also stores information about errors that occurred during processing of the request.
*
* Each `Request` instance has the `uniqueKey` property, which can be either specified
* manually in the constructor or generated automatically from the URL. Two requests with the same `uniqueKey`
* are considered as pointing to the same web resource. This behavior applies to all Crawlee classes,
* such as {@apilink RequestList}, {@apilink RequestQueue}, {@apilink PuppeteerCrawler} or {@apilink PlaywrightCrawler}.
*
* > To access and examine the actual request sent over http, with all autofilled headers you can access
* `response.request` object from the request handler
*
* Example use:
*
* ```javascript
* const request = new Request({
* url: 'http://www.example.com',
* headers: { Accept: 'application/json' },
* });
*
* ...
*
* request.userData.foo = 'bar';
* request.pushErrorMessage(new Error('Request failed!'));
*
* ...
*
* const foo = request.userData.foo;
* ```
* @category Sources
*/
export declare class Request<UserData extends Dictionary = Dictionary> {
    /** Request ID */
    id?: string;
    /** URL of the web page to crawl. */
    url: string;
    /**
     * An actually loaded URL after redirects, if present. HTTP redirects are guaranteed
     * to be included.
     *
     * When using {@apilink PuppeteerCrawler} or {@apilink PlaywrightCrawler}, meta tag and JavaScript redirects may,
     * or may not be included, depending on their nature. This generally means that redirects,
     * which happen immediately will most likely be included, but delayed redirects will not.
     */
    loadedUrl?: string;
    /**
     * A unique key identifying the request.
     * Two requests with the same `uniqueKey` are considered as pointing to the same URL.
     */
    uniqueKey: string;
    /** HTTP method, e.g. `GET` or `POST`. */
    method: AllowedHttpMethods;
    /** HTTP request payload, e.g. for POST requests. */
    payload?: string;
    /** The `true` value indicates that the request will not be automatically retried on error. */
    noRetry: boolean;
    /** Indicates the number of times the crawling of the request has been retried on error. */
    retryCount: number;
    /** An array of error messages from request processing. */
    errorMessages: string[];
    /** Object with HTTP headers. Key is header name, value is the value. */
    headers?: Record<string, string>;
    /** Private store for the custom user data assigned to the request. */
    private _userData;
    /** Custom user data assigned to the request. */
    userData: UserData;
    /**
     * ISO datetime string that indicates the time when the request has been processed.
     * Is `null` if the request has not been crawled yet.
     */
    handledAt?: string;
    /**
     * `Request` parameters including the URL, HTTP method and headers, and others.
     */
    constructor(options: RequestOptions<UserData>);
    /** Tells the crawler processing this request to skip the navigation and process the request directly. */
    get skipNavigation(): boolean;
    /** Tells the crawler processing this request to skip the navigation and process the request directly. */
    set skipNavigation(value: boolean);
    /** Indicates the number of times the crawling of the request has rotated the session due to a session or a proxy error. */
    get sessionRotationCount(): number;
    /** Indicates the number of times the crawling of the request has rotated the session due to a session or a proxy error. */
    set sessionRotationCount(value: number);
    /** shortcut for getting `request.userData.label` */
    get label(): string | undefined;
    /** shortcut for setting `request.userData.label` */
    set label(value: string | undefined);
    /** Maximum number of retries for this request. Allows to override the global `maxRequestRetries` option of `BasicCrawler`. */
    get maxRetries(): number | undefined;
    /** Maximum number of retries for this request. Allows to override the global `maxRequestRetries` option of `BasicCrawler`. */
    set maxRetries(value: number | undefined);
    /** Describes the request's current lifecycle state. */
    get state(): RequestState;
    /** Describes the request's current lifecycle state. */
    set state(value: RequestState);
    /**
     * Accessor pair backing the internal `enqueueStrategy` request option
     * (see {@apilink RequestOptions} and {@apilink EnqueueLinksOptions}). Private — not part of the public API.
     */
    private get enqueueStrategy();
    /** Setter counterpart of the private `enqueueStrategy` accessor. */
    private set enqueueStrategy(value);
    /**
     * Stores information about an error that occurred during processing of this request.
     *
     * You should always use Error instances when throwing errors in JavaScript.
     *
     * Nevertheless, to improve the debugging experience when using third party libraries
     * that may not always throw an Error instance, the function performs a type
     * inspection of the passed argument and attempts to extract as much information
     * as possible, since just throwing a bad type error makes any debugging rather difficult.
     *
     * @param errorOrMessage Error object or error message to be stored in the request.
     * @param [options]
     */
    pushErrorMessage(errorOrMessage: unknown, options?: PushErrorMessageOptions): void;
    /** Overridable instance counterpart of the static `computeUniqueKey` helper (protected extension point for subclasses). */
    protected _computeUniqueKey(options: ComputeUniqueKeyOptions): string;
    /** Overridable instance counterpart of the static `hashPayload` helper (protected extension point for subclasses). */
    protected _hashPayload(payload: BinaryLike): string;
    /**
     * Computes the `uniqueKey` for a request from its URL, method and payload —
     * see {@apilink RequestOptions.uniqueKey} for the normalization and extended-key rules.
     * @internal
     */
    static computeUniqueKey({ url, method, payload, keepUrlFragment, useExtendedUniqueKey, }: ComputeUniqueKeyOptions): string;
    /**
     * Hashes a request payload, used when building extended unique keys
     * in the `METHOD(payloadHash):normalizedUrl` format.
     * @internal
     */
    static hashPayload(payload: BinaryLike): string;
}
/**
* Specifies required and optional fields for constructing a {@apilink Request}.
*/
export interface RequestOptions<UserData extends Dictionary = Dictionary> {
    /** URL of the web page to crawl. It must be a non-empty string. */
    url: string;
    /**
     * A unique key identifying the request.
     * Two requests with the same `uniqueKey` are considered as pointing to the same URL.
     *
     * If `uniqueKey` is not provided, then it is automatically generated by normalizing the URL.
     * For example, the URL of `HTTP://www.EXAMPLE.com/something/` will produce the `uniqueKey`
     * of `http://www.example.com/something`.
     *
     * The `keepUrlFragment` option determines whether URL hash fragment is included in the `uniqueKey` or not.
     *
     * The `useExtendedUniqueKey` options determines whether method and payload are included in the `uniqueKey`,
     * producing a `uniqueKey` in the following format: `METHOD(payloadHash):normalizedUrl`. This is useful
     * when requests point to the same URL, but with different methods and payloads. For example: form submits.
     *
     * Pass an arbitrary non-empty text value to the `uniqueKey` property
     * to override the default behavior and specify which URLs shall be considered equal.
     */
    uniqueKey?: string;
    /**
     * HTTP method, e.g. `GET` or `POST`. Both uppercase and lowercase
     * spellings are accepted (note the `Lowercase<AllowedHttpMethods>` union).
     * @default 'GET'
     */
    method?: AllowedHttpMethods | Lowercase<AllowedHttpMethods>;
    /** HTTP request payload, e.g. for POST requests. */
    payload?: string;
    /**
     * HTTP headers in the following format:
     * ```
     * {
     *     Accept: 'text/html',
     *     'Content-Type': 'application/json'
     * }
     * ```
     */
    headers?: Record<string, string>;
    /**
     * Custom user data assigned to the request. Use this to save any request related data to the
     * request's scope, keeping them accessible on retries, failures etc.
     */
    userData?: UserData;
    /**
     * Shortcut for setting `userData: { label: '...' }`.
     */
    label?: string;
    /**
     * If `false` then the hash part of a URL is removed when computing the `uniqueKey` property.
     * For example, this causes the `http://www.example.com#foo` and `http://www.example.com#bar` URLs
     * to have the same `uniqueKey` of `http://www.example.com` and thus the URLs are considered equal.
     * Note that this option only has an effect if `uniqueKey` is not set.
     * @default false
     */
    keepUrlFragment?: boolean;
    /**
     * If `true` then the `uniqueKey` is computed not only from the URL, but also from the method and payload
     * properties. This is useful when making requests to the same URL that are differentiated by method
     * or payload, such as form submit navigations in browsers.
     * @default false
     */
    useExtendedUniqueKey?: boolean;
    /**
     * The `true` value indicates that the request will not be automatically retried on error.
     * @default false
     */
    noRetry?: boolean;
    /**
     * If set to `true` then the crawler processing this request evaluates
     * the `requestHandler` immediately without prior browser navigation.
     * @default false
     */
    skipNavigation?: boolean;
    /**
     * Maximum number of retries for this request. Allows to override the global `maxRequestRetries` option of `BasicCrawler`.
     */
    maxRetries?: number;
    /** Request ID — assigned by the framework, not by user code. @internal */
    id?: string;
    /** ISO datetime string set when the request has been processed. @internal */
    handledAt?: string;
    /** Expiration time of the request's lock — presumably managed by the request queue; verify against the queue implementation. @internal */
    lockExpiresAt?: Date;
    /** Enqueueing strategy carried over from {@apilink EnqueueLinksOptions}. @internal */
    enqueueStrategy?: EnqueueLinksOptions['strategy'];
}
/**
 * Options for {@apilink Request.pushErrorMessage}.
 */
export interface PushErrorMessageOptions {
    /**
     * Only push the error message without stack trace when true.
     * @default false
     */
    omitStack?: boolean;
}
/**
 * Input for {@apilink Request.computeUniqueKey} and the protected `_computeUniqueKey` hook.
 * See {@apilink RequestOptions.uniqueKey} for how these fields influence the resulting key.
 */
interface ComputeUniqueKeyOptions {
    /** URL the key is derived from. */
    url: string;
    /** HTTP method — included in the key only when `useExtendedUniqueKey` is set. */
    method: AllowedHttpMethods;
    /** Request payload — hashed into the key only when `useExtendedUniqueKey` is set. */
    payload?: string | Buffer;
    /** Keep the URL hash fragment in the key. */
    keepUrlFragment?: boolean;
    /** Produce a `METHOD(payloadHash):normalizedUrl`-style key. */
    useExtendedUniqueKey?: boolean;
}
/**
 * A source of requests: either a (partial) {@apilink RequestOptions} object — optionally
 * referencing a remote list of URLs via `requestsFromUrl`, filtered by `regex` —
 * or an already constructed {@apilink Request} instance.
 */
export type Source = (Partial<RequestOptions> & {
    requestsFromUrl?: string;
    regex?: RegExp;
}) | Request;
/**
 * A {@apilink Source} whose requests come from the resource at `requestsFromUrl`
 * (here the property is required rather than optional).
 * @internal
 */
export interface InternalSource {
    /** URL of a resource listing the requests to enqueue. */
    requestsFromUrl: string;
    /** Optional filter applied to the fetched list — NOTE(review): exact matching semantics live in the loader; confirm there. */
    regex?: RegExp;
}
export {};
//# sourceMappingURL=request.d.ts.map