@@ -166,24 +166,27 @@ def list_and_lock_head(self, *, lock_duration: timedelta, limit: int | None = No
166166 result = response_to_dict (response )
167167 return HeadAndLockResponse .model_validate (result ).data
168168
169- def add_request (self , request : dict , * , forefront : bool | None = None ) -> RequestRegistration :
169+ def add_request (self , request : dict | RequestDraft , * , forefront : bool | None = None ) -> RequestRegistration :
170170 """Add a request to the queue.
171171
172172 https://docs.apify.com/api/v2#/reference/request-queues/request-collection/add-request
173173
174174 Args:
175- request: The request to add to the queue.
 175+ request: The request to add to the queue, as a dictionary or `RequestDraft` model.
176176 forefront: Whether to add the request to the head or the end of the queue.
177177
178178 Returns:
179179 The added request.
180180 """
181+ if isinstance (request , dict ):
182+ request = RequestDraft .model_validate (request )
183+
181184 request_params = self ._build_params (forefront = forefront , clientKey = self .client_key )
182185
183186 response = self ._http_client .call (
184187 url = self ._build_url ('requests' ),
185188 method = 'POST' ,
186- json = request ,
189+ json = request . model_dump ( by_alias = True , exclude_none = True ) ,
187190 params = request_params ,
188191 timeout = FAST_OPERATION_TIMEOUT ,
189192 )
@@ -217,26 +220,29 @@ def get_request(self, request_id: str) -> Request | None:
217220
218221 return None
219222
220- def update_request (self , request : dict , * , forefront : bool | None = None ) -> RequestRegistration :
223+ def update_request (self , request : dict | Request , * , forefront : bool | None = None ) -> RequestRegistration :
221224 """Update a request in the queue.
222225
223226 https://docs.apify.com/api/v2#/reference/request-queues/request/update-request
224227
225228 Args:
226- request: The updated request.
 229+ request: The updated request, as a dictionary or `Request` model.
227230 forefront: Whether to put the updated request in the beginning or the end of the queue.
228231
229232 Returns:
230233 The updated request.
231234 """
232- request_id = request ['id' ]
235+ if isinstance (request , dict ):
236+ request = Request .model_validate (request )
237+
238+ request_id = request .id
233239
234240 request_params = self ._build_params (forefront = forefront , clientKey = self .client_key )
235241
236242 response = self ._http_client .call (
237243 url = self ._build_url (f'requests/{ request_id } ' ),
238244 method = 'PUT' ,
239- json = request ,
245+ json = request . model_dump ( by_alias = True , exclude_none = True ) ,
240246 params = request_params ,
241247 timeout = STANDARD_OPERATION_TIMEOUT ,
242248 )
@@ -315,7 +321,7 @@ def delete_request_lock(self, request_id: str, *, forefront: bool | None = None)
315321
316322 def batch_add_requests (
317323 self ,
318- requests : list [dict ],
324+ requests : list [dict | RequestDraft ],
319325 * ,
320326 forefront : bool = False ,
321327 max_parallel : int = 1 ,
@@ -329,7 +335,7 @@ def batch_add_requests(
329335 https://docs.apify.com/api/v2#/reference/request-queues/batch-request-operations/add-requests
330336
331337 Args:
332- requests: List of requests to be added to the queue.
 338+ requests: List of requests to be added to the queue, each as a dictionary or `RequestDraft` model.
333339 forefront: Whether to add requests to the front of the queue.
334340 max_parallel: Specifies the maximum number of parallel tasks for API calls. This is only applicable
335341 to the async client. For the sync client, this value must be set to 1, as parallel execution
@@ -348,14 +354,19 @@ def batch_add_requests(
348354 if max_parallel != 1 :
349355 raise NotImplementedError ('max_parallel is only supported in async client' )
350356
357+ requests_as_dicts : list [dict ] = [
358+ (RequestDraft .model_validate (r ) if isinstance (r , dict ) else r ).model_dump (by_alias = True , exclude_none = True )
359+ for r in requests
360+ ]
361+
351362 request_params = self ._build_params (clientKey = self .client_key , forefront = forefront )
352363
353364 # Compute the payload size limit to ensure it doesn't exceed the maximum allowed size.
354365 payload_size_limit_bytes = _MAX_PAYLOAD_SIZE_BYTES - math .ceil (_MAX_PAYLOAD_SIZE_BYTES * _SAFETY_BUFFER_PERCENT )
355366
356367 # Split the requests into batches, constrained by the max payload size and max requests per batch.
357368 batches = constrained_batches (
358- requests ,
369+ requests_as_dicts ,
359370 max_size = payload_size_limit_bytes ,
360371 max_count = _RQ_MAX_REQUESTS_PER_BATCH ,
361372 )
@@ -580,24 +591,27 @@ async def list_and_lock_head(self, *, lock_duration: timedelta, limit: int | Non
580591 result = response_to_dict (response )
581592 return HeadAndLockResponse .model_validate (result ).data
582593
583- async def add_request (self , request : dict , * , forefront : bool | None = None ) -> RequestRegistration :
594+ async def add_request (self , request : dict | RequestDraft , * , forefront : bool | None = None ) -> RequestRegistration :
584595 """Add a request to the queue.
585596
586597 https://docs.apify.com/api/v2#/reference/request-queues/request-collection/add-request
587598
588599 Args:
589- request: The request to add to the queue.
 600+ request: The request to add to the queue, as a dictionary or `RequestDraft` model.
590601 forefront: Whether to add the request to the head or the end of the queue.
591602
592603 Returns:
593604 The added request.
594605 """
606+ if isinstance (request , dict ):
607+ request = RequestDraft .model_validate (request )
608+
595609 request_params = self ._build_params (forefront = forefront , clientKey = self .client_key )
596610
597611 response = await self ._http_client .call (
598612 url = self ._build_url ('requests' ),
599613 method = 'POST' ,
600- json = request ,
614+ json = request . model_dump ( by_alias = True , exclude_none = True ) ,
601615 params = request_params ,
602616 timeout = FAST_OPERATION_TIMEOUT ,
603617 )
@@ -629,26 +643,29 @@ async def get_request(self, request_id: str) -> Request | None:
629643 catch_not_found_or_throw (exc )
630644 return None
631645
632- async def update_request (self , request : dict , * , forefront : bool | None = None ) -> RequestRegistration :
646+ async def update_request (self , request : dict | Request , * , forefront : bool | None = None ) -> RequestRegistration :
633647 """Update a request in the queue.
634648
635649 https://docs.apify.com/api/v2#/reference/request-queues/request/update-request
636650
637651 Args:
638- request: The updated request.
 652+ request: The updated request, as a dictionary or `Request` model.
639653 forefront: Whether to put the updated request in the beginning or the end of the queue.
640654
641655 Returns:
642656 The updated request.
643657 """
644- request_id = request ['id' ]
658+ if isinstance (request , dict ):
659+ request = Request .model_validate (request )
660+
661+ request_id = request .id
645662
646663 request_params = self ._build_params (forefront = forefront , clientKey = self .client_key )
647664
648665 response = await self ._http_client .call (
649666 url = self ._build_url (f'requests/{ request_id } ' ),
650667 method = 'PUT' ,
651- json = request ,
668+ json = request . model_dump ( by_alias = True , exclude_none = True ) ,
652669 params = request_params ,
653670 timeout = STANDARD_OPERATION_TIMEOUT ,
654671 )
@@ -777,7 +794,7 @@ async def _batch_add_requests_worker(
777794
778795 async def batch_add_requests (
779796 self ,
780- requests : list [dict ],
797+ requests : list [dict | RequestDraft ],
781798 * ,
782799 forefront : bool = False ,
783800 max_parallel : int = 5 ,
@@ -791,7 +808,7 @@ async def batch_add_requests(
791808 https://docs.apify.com/api/v2#/reference/request-queues/batch-request-operations/add-requests
792809
793810 Args:
794- requests: List of requests to be added to the queue.
 811+ requests: List of requests to be added to the queue, each as a dictionary or `RequestDraft` model.
795812 forefront: Whether to add requests to the front of the queue.
796813 max_parallel: Specifies the maximum number of parallel tasks for API calls. This is only applicable
797814 to the async client. For the sync client, this value must be set to 1, as parallel execution
@@ -807,6 +824,11 @@ async def batch_add_requests(
807824 if min_delay_between_unprocessed_requests_retries :
808825 logger .warning ('`min_delay_between_unprocessed_requests_retries` is deprecated and not used anymore.' )
809826
827+ requests_as_dicts : list [dict ] = [
828+ (RequestDraft .model_validate (r ) if isinstance (r , dict ) else r ).model_dump (by_alias = True , exclude_none = True )
829+ for r in requests
830+ ]
831+
810832 asyncio_queue : asyncio .Queue [Iterable [dict ]] = asyncio .Queue ()
811833 request_params = self ._build_params (clientKey = self .client_key , forefront = forefront )
812834
@@ -815,7 +837,7 @@ async def batch_add_requests(
815837
816838 # Split the requests into batches, constrained by the max payload size and max requests per batch.
817839 batches = constrained_batches (
818- requests ,
840+ requests_as_dicts ,
819841 max_size = payload_size_limit_bytes ,
820842 max_count = _RQ_MAX_REQUESTS_PER_BATCH ,
821843 )
0 commit comments