Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add support for performing a selenium action before returning the response from the middleware. #89

Open
wants to merge 3 commits into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -97,3 +97,26 @@ yield SeleniumRequest(
script='window.scrollTo(0, document.body.scrollHeight);',
)
```

#### interact

When used, the middleware will call this function with the driver as its parameter, and the return value of that call will be added to the response `meta`:

```python
def interact_on_page(driver):
radio_all = driver.find_element(By.CSS_SELECTOR, '.some-class a')
ActionChains(driver).move_to_element(radio_all).click(radio_all).perform()
    WebDriverWait(driver, timeout=30).until_not(lambda d: d.find_element(By.CLASS_NAME, 'loading'))
data = driver.execute_script('const data={}; ********; return data;')
return data

yield SeleniumRequest(
url=url,
callback=self.parse_result,
interact=interact_on_page
)

def parse_result(self, response):
dynamic_data = response.request.meta['interact_data']
```

3 changes: 2 additions & 1 deletion scrapy_selenium/http.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
class SeleniumRequest(Request):
"""Scrapy ``Request`` subclass providing additional arguments"""

def __init__(self, wait_time=None, wait_until=None, screenshot=False, script=None, *args, **kwargs):
def __init__(self, wait_time=None, wait_until=None, screenshot=False, script=None, interact=None,*args, **kwargs):
"""Initialize a new selenium request

Parameters
Expand All @@ -28,5 +28,6 @@ def __init__(self, wait_time=None, wait_until=None, screenshot=False, script=Non
self.wait_until = wait_until
self.screenshot = screenshot
self.script = script
self.interact = interact

super().__init__(*args, **kwargs)
9 changes: 4 additions & 5 deletions scrapy_selenium/middlewares.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,18 +46,14 @@ def __init__(self, driver_name, driver_executable_path,
for argument in driver_arguments:
driver_options.add_argument(argument)

driver_kwargs = {
'executable_path': driver_executable_path,
f'{driver_name}_options': driver_options
}

# locally installed driver
if driver_executable_path is not None:
driver_kwargs = {
'executable_path': driver_executable_path,
f'{driver_name}_options': driver_options
}
self.driver = driver_klass(**driver_kwargs)

# remote driver
elif command_executor is not None:
from selenium import webdriver
Expand Down Expand Up @@ -121,6 +117,9 @@ def process_request(self, request, spider):
if request.script:
self.driver.execute_script(request.script)

if interact_func := request.interact:
request.meta['interact_data'] = interact_func(self.driver)

body = str.encode(self.driver.page_source)

# Expose the driver via the "meta" attribute
Expand Down
28 changes: 28 additions & 0 deletions tests/test_middlewares.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,12 @@

from unittest.mock import patch

from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains

from scrapy import Request
from scrapy.crawler import Crawler
from selenium.webdriver.support.wait import WebDriverWait

from scrapy_selenium.http import SeleniumRequest
from scrapy_selenium.middlewares import SeleniumMiddleware
Expand Down Expand Up @@ -135,3 +139,27 @@ def test_process_request_should_execute_script_if_script_option(self):
html_response.selector.xpath('//title/text()').extract_first(),
'scrapy_selenium'
)

def test_process_request_should_execute_interact_if_interact_option(self):
    """Test that ``process_request`` calls the ``interact`` function with the
    driver and stores its return value under ``meta['interact_data']``."""

    def page_interact(driver):
        # Interact with the live page: hover over the downloads section.
        el = driver.find_element(By.CSS_SELECTOR, '#downloads')
        ActionChains(driver).move_to_element(el).perform()

        # fake dynamic loading
        # NOTE(review): dl_url is never used in the returned data — this wait
        # apparently only exercises WebDriverWait against the driver; the test
        # asserts on the hard-coded 'fake_url' below, not on dl_url.
        dl_url = WebDriverWait(driver, timeout=30).until(lambda d: d.find_element(By.CSS_SELECTOR, '#downloads .element-1 a')).get_attribute('href')

        return {'dl_link': 'fake_url'}

    # Build a request carrying the interact callback; the middleware is
    # expected to invoke it after page load and before building the response.
    selenium_request = SeleniumRequest(
        url='http://www.python.org',
        interact=page_interact
    )

    html_response = self.selenium_middleware.process_request(
        request=selenium_request,
        spider=None
    )

    # The callback's return value must be exposed on the request meta.
    self.assertEqual(html_response.request.meta['interact_data']['dl_link'], 'fake_url')