second commit
19
venv/Lib/site-packages/selenium/__init__.py
Normal file
@@ -0,0 +1,19 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.


__version__ = "4.1.5"
18
venv/Lib/site-packages/selenium/common/__init__.py
Normal file
@@ -0,0 +1,18 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from . import exceptions  # noqa
326
venv/Lib/site-packages/selenium/common/exceptions.py
Normal file
@@ -0,0 +1,326 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""
Exceptions that may happen in all the webdriver code.
"""

from typing import Optional, Sequence


class WebDriverException(Exception):
    """
    Base webdriver exception.
    """

    def __init__(self, msg: Optional[str] = None, screen: Optional[str] = None, stacktrace: Optional[Sequence[str]] = None) -> None:
        self.msg = msg
        self.screen = screen
        self.stacktrace = stacktrace

    def __str__(self) -> str:
        exception_msg = "Message: %s\n" % self.msg
        if self.screen:
            exception_msg += "Screenshot: available via screen\n"
        if self.stacktrace:
            stacktrace = "\n".join(self.stacktrace)
            exception_msg += "Stacktrace:\n%s" % stacktrace
        return exception_msg


class InvalidSwitchToTargetException(WebDriverException):
    """
    Thrown when frame or window target to be switched doesn't exist.
    """
    pass


class NoSuchFrameException(InvalidSwitchToTargetException):
    """
    Thrown when frame target to be switched doesn't exist.
    """
    pass


class NoSuchWindowException(InvalidSwitchToTargetException):
    """
    Thrown when window target to be switched doesn't exist.

    To find the current set of active window handles, you can get a list
    of the active window handles in the following way::

        print driver.window_handles

    """
    pass


class NoSuchElementException(WebDriverException):
    """
    Thrown when element could not be found.

    If you encounter this exception, you may want to check the following:
        * Check your selector used in your find_by...
        * Element may not yet be on the screen at the time of the find operation,
          (webpage is still loading) see selenium.webdriver.support.wait.WebDriverWait()
          for how to write a wait wrapper to wait for an element to appear.
    """
    pass


class NoSuchAttributeException(WebDriverException):
    """
    Thrown when the attribute of element could not be found.

    You may want to check if the attribute exists in the particular browser you are
    testing against. Some browsers may have different property names for the same
    property. (IE8's .innerText vs. Firefox .textContent)
    """
    pass


class NoSuchShadowRootException(WebDriverException):
    """
    Thrown when trying to access the shadow root of an element when it does not
    have a shadow root attached.
    """
    pass


class StaleElementReferenceException(WebDriverException):
    """
    Thrown when a reference to an element is now "stale".

    Stale means the element no longer appears on the DOM of the page.


    Possible causes of StaleElementReferenceException include, but not limited to:
        * You are no longer on the same page, or the page may have refreshed since the element
          was located.
        * The element may have been removed and re-added to the screen, since it was located.
          Such as an element being relocated.
          This can happen typically with a javascript framework when values are updated and the
          node is rebuilt.
        * Element may have been inside an iframe or another context which was refreshed.
    """
    pass


class InvalidElementStateException(WebDriverException):
    """
    Thrown when a command could not be completed because the element is in an invalid state.

    This can be caused by attempting to clear an element that isn't both editable and resettable.
    """
    pass


class UnexpectedAlertPresentException(WebDriverException):
    """
    Thrown when an unexpected alert has appeared.

    Usually raised when an unexpected modal is blocking the webdriver from executing
    commands.
    """

    def __init__(self, msg: Optional[str] = None, screen: Optional[str] = None, stacktrace: Optional[Sequence[str]] = None, alert_text: Optional[str] = None) -> None:
        super(UnexpectedAlertPresentException, self).__init__(msg, screen, stacktrace)
        self.alert_text = alert_text

    def __str__(self) -> str:
        return "Alert Text: %s\n%s" % (self.alert_text, super(UnexpectedAlertPresentException, self).__str__())


class NoAlertPresentException(WebDriverException):
    """
    Thrown when switching to no presented alert.

    This can be caused by calling an operation on the Alert() class when an alert is
    not yet on the screen.
    """
    pass


class ElementNotVisibleException(InvalidElementStateException):
    """
    Thrown when an element is present on the DOM, but
    it is not visible, and so is not able to be interacted with.

    Most commonly encountered when trying to click or read text
    of an element that is hidden from view.
    """
    pass


class ElementNotInteractableException(InvalidElementStateException):
    """
    Thrown when an element is present in the DOM but interactions
    with that element will hit another element due to paint order
    """
    pass


class ElementNotSelectableException(InvalidElementStateException):
    """
    Thrown when trying to select an unselectable element.

    For example, selecting a 'script' element.
    """
    pass


class InvalidCookieDomainException(WebDriverException):
    """
    Thrown when attempting to add a cookie under a different domain
    than the current URL.
    """
    pass


class UnableToSetCookieException(WebDriverException):
    """
    Thrown when a driver fails to set a cookie.
    """
    pass


class RemoteDriverServerException(WebDriverException):
    """
    """
    pass


class TimeoutException(WebDriverException):
    """
    Thrown when a command does not complete in enough time.
    """
    pass


class MoveTargetOutOfBoundsException(WebDriverException):
    """
    Thrown when the target provided to the `ActionsChains` move()
    method is invalid, i.e. out of document.
    """
    pass


class UnexpectedTagNameException(WebDriverException):
    """
    Thrown when a support class did not get an expected web element.
    """
    pass


class InvalidSelectorException(WebDriverException):
    """
    Thrown when the selector which is used to find an element does not return
    a WebElement. Currently this only happens when the selector is an xpath
    expression and it is either syntactically invalid (i.e. it is not a
    xpath expression) or the expression does not select WebElements
    (e.g. "count(//input)").
    """
    pass


class ImeNotAvailableException(WebDriverException):
    """
    Thrown when IME support is not available. This exception is thrown for every IME-related
    method call if IME support is not available on the machine.
    """
    pass


class ImeActivationFailedException(WebDriverException):
    """
    Thrown when activating an IME engine has failed.
    """
    pass


class InvalidArgumentException(WebDriverException):
    """
    The arguments passed to a command are either invalid or malformed.
    """
    pass


class JavascriptException(WebDriverException):
    """
    An error occurred while executing JavaScript supplied by the user.
    """
    pass


class NoSuchCookieException(WebDriverException):
    """
    No cookie matching the given path name was found amongst the associated cookies of the
    current browsing context's active document.
    """
    pass


class ScreenshotException(WebDriverException):
    """
    A screen capture was made impossible.
    """
    pass


class ElementClickInterceptedException(WebDriverException):
    """
    The Element Click command could not be completed because the element receiving the events
    is obscuring the element that was requested to be clicked.
    """
    pass


class InsecureCertificateException(WebDriverException):
    """
    Navigation caused the user agent to hit a certificate warning, which is usually the result
    of an expired or invalid TLS certificate.
    """
    pass


class InvalidCoordinatesException(WebDriverException):
    """
    The coordinates provided to an interaction's operation are invalid.
    """
    pass


class InvalidSessionIdException(WebDriverException):
    """
    Occurs if the given session id is not in the list of active sessions, meaning the session
    either does not exist or that it's not active.
    """
    pass


class SessionNotCreatedException(WebDriverException):
    """
    A new session could not be created.
    """
    pass


class UnknownMethodException(WebDriverException):
    """
    The requested command matched a known URL but did not match any methods for that URL.
    """
    pass
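In practice the two exceptions from this module that test code handles most often are NoSuchElementException and TimeoutException. The following is a minimal sketch, not part of the diff, of how they are usually caught; it assumes a live `driver` instance, and the element id "login" is a hypothetical example.

    from selenium.common.exceptions import NoSuchElementException, TimeoutException
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC

    def find_login_button(driver):
        try:
            # Raises NoSuchElementException immediately if the element is absent.
            return driver.find_element(By.ID, "login")
        except NoSuchElementException:
            # Fall back to an explicit wait; raises TimeoutException after 10 s.
            return WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.ID, "login")))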
23
venv/Lib/site-packages/selenium/types.py
Normal file
@@ -0,0 +1,23 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Selenium type definitions."""

from typing import Union


AnyKey = Union[str, int, float]
40
venv/Lib/site-packages/selenium/webdriver/__init__.py
Normal file
@@ -0,0 +1,40 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from .firefox.webdriver import WebDriver as Firefox  # noqa
from .firefox.firefox_profile import FirefoxProfile  # noqa
from .firefox.options import Options as FirefoxOptions  # noqa
from .chrome.webdriver import WebDriver as Chrome  # noqa
from .chrome.options import Options as ChromeOptions  # noqa
from .ie.webdriver import WebDriver as Ie  # noqa
from .ie.options import Options as IeOptions  # noqa
from .edge.webdriver import WebDriver as Edge  # noqa
from .edge.webdriver import WebDriver as ChromiumEdge  # noqa
from .edge.options import Options as EdgeOptions  # noqa
from .opera.webdriver import WebDriver as Opera  # noqa
from .safari.webdriver import WebDriver as Safari  # noqa
from .webkitgtk.webdriver import WebDriver as WebKitGTK  # noqa
from .webkitgtk.options import Options as WebKitGTKOptions  # noqa
from .wpewebkit.webdriver import WebDriver as WPEWebKit  # noqa
from .wpewebkit.options import Options as WPEWebKitOptions  # noqa
from .remote.webdriver import WebDriver as Remote  # noqa
from .common.desired_capabilities import DesiredCapabilities  # noqa
from .common.action_chains import ActionChains  # noqa
from .common.proxy import Proxy  # noqa
from .common.keys import Keys  # noqa

__version__ = '4.1.5'
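The aliases exported here (Chrome, Firefox, Remote, ActionChains, ...) are the usual entry points for user code. A hedged usage sketch, not part of the diff, using the Service/Options objects that the deprecation warnings later in this commit steer toward; the chromedriver path is an assumption.

    from selenium import webdriver
    from selenium.webdriver.chrome.service import Service
    from selenium.webdriver.chrome.options import Options

    options = Options()
    options.headless = True  # ChromiumOptions.headless setter adds --headless
    service = Service(executable_path="/usr/local/bin/chromedriver")  # assumed path
    driver = webdriver.Chrome(service=service, options=options)
    try:
        driver.get("https://example.com")
        print(driver.title)
    finally:
        driver.quit()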
16
venv/Lib/site-packages/selenium/webdriver/chrome/__init__.py
Normal file
@@ -0,0 +1,16 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
34
venv/Lib/site-packages/selenium/webdriver/chrome/options.py
Normal file
@@ -0,0 +1,34 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from selenium.webdriver.chromium.options import ChromiumOptions
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from typing import Optional


class Options(ChromiumOptions):

    @property
    def default_capabilities(self) -> dict:
        return DesiredCapabilities.CHROME.copy()

    def enable_mobile(self,
                      android_package: str = "com.android.chrome",
                      android_activity: Optional[str] = None,
                      device_serial: Optional[str] = None
                      ) -> None:
        super().enable_mobile(android_package, android_activity, device_serial)
48
venv/Lib/site-packages/selenium/webdriver/chrome/service.py
Normal file
@@ -0,0 +1,48 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from typing import List
from selenium.webdriver.chromium import service


DEFAULT_EXECUTABLE_PATH = "chromedriver"


class Service(service.ChromiumService):
    """
    Object that manages the starting and stopping of the ChromeDriver
    """

    def __init__(self, executable_path: str = DEFAULT_EXECUTABLE_PATH,
                 port: int = 0, service_args: List[str] = None,
                 log_path: str = None, env: dict = None):
        """
        Creates a new instance of the Service

        :Args:
         - executable_path : Path to the ChromeDriver
         - port : Port the service is running on
         - service_args : List of args to pass to the chromedriver service
         - log_path : Path for the chromedriver service to log to"""

        super(Service, self).__init__(
            executable_path,
            port,
            service_args,
            log_path,
            env,
            "Please see https://chromedriver.chromium.org/home")
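The Service wrapper above simply forwards its arguments to ChromiumService (shown later in this commit), which appends log_path as a --log-path flag. A short sketch, not part of the diff, of passing extra chromedriver flags and a log file:

    from selenium.webdriver.chrome.service import Service

    service = Service(
        executable_path="chromedriver",      # resolved via PATH by default
        service_args=["--verbose"],          # passed through to chromedriver
        log_path="/tmp/chromedriver.log")    # appended as --log-path=...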
@@ -0,0 +1,73 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from selenium.webdriver.chromium.webdriver import ChromiumDriver
from .options import Options
from .service import DEFAULT_EXECUTABLE_PATH, Service
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities


DEFAULT_PORT = 0
DEFAULT_SERVICE_LOG_PATH = None
DEFAULT_KEEP_ALIVE = None


class WebDriver(ChromiumDriver):
    """
    Controls the ChromeDriver and allows you to drive the browser.
    You will need to download the ChromeDriver executable from
    http://chromedriver.storage.googleapis.com/index.html
    """

    def __init__(self, executable_path=DEFAULT_EXECUTABLE_PATH, port=DEFAULT_PORT,
                 options: Options = None, service_args=None,
                 desired_capabilities=None, service_log_path=DEFAULT_SERVICE_LOG_PATH,
                 chrome_options=None, service: Service = None, keep_alive=DEFAULT_KEEP_ALIVE):
        """
        Creates a new instance of the chrome driver.
        Starts the service and then creates new instance of chrome driver.

        :Args:
         - executable_path - Deprecated: path to the executable. If the default is used it assumes the executable is in the $PATH
         - port - Deprecated: port you would like the service to run, if left as 0, a free port will be found.
         - options - this takes an instance of ChromeOptions
         - service - Service object for handling the browser driver if you need to pass extra details
         - service_args - Deprecated: List of args to pass to the driver service
         - desired_capabilities - Deprecated: Dictionary object with non-browser specific
           capabilities only, such as "proxy" or "loggingPref".
         - service_log_path - Deprecated: Where to log information from the driver.
         - keep_alive - Deprecated: Whether to configure ChromeRemoteConnection to use HTTP keep-alive.
        """
        if executable_path != 'chromedriver':
            warnings.warn('executable_path has been deprecated, please pass in a Service object',
                          DeprecationWarning, stacklevel=2)
        if chrome_options:
            warnings.warn('use options instead of chrome_options',
                          DeprecationWarning, stacklevel=2)
            options = chrome_options
        if keep_alive != DEFAULT_KEEP_ALIVE:
            warnings.warn('keep_alive has been deprecated, please pass in a Service object',
                          DeprecationWarning, stacklevel=2)
        else:
            keep_alive = True
        if not service:
            service = Service(executable_path, port, service_args, service_log_path)

        super(WebDriver, self).__init__(DesiredCapabilities.CHROME['browserName'], "goog",
                                        port, options,
                                        service_args, desired_capabilities,
                                        service_log_path, service, keep_alive)
@@ -0,0 +1,16 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
179
venv/Lib/site-packages/selenium/webdriver/chromium/options.py
Normal file
@@ -0,0 +1,179 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import base64
import os
import warnings
from typing import List, Union

from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.options import ArgOptions


class ChromiumOptions(ArgOptions):
    KEY = "goog:chromeOptions"

    def __init__(self) -> None:
        super(ChromiumOptions, self).__init__()
        self._binary_location = ''
        self._extension_files = []
        self._extensions = []
        self._experimental_options = {}
        self._debugger_address = None

    @property
    def binary_location(self) -> str:
        """
        :Returns: The location of the binary, otherwise an empty string
        """
        return self._binary_location

    @binary_location.setter
    def binary_location(self, value: str) -> None:
        """
        Allows you to set where the chromium binary lives
        :Args:
         - value: path to the Chromium binary
        """
        self._binary_location = value

    @property
    def debugger_address(self: str) -> str:
        """
        :Returns: The address of the remote devtools instance
        """
        return self._debugger_address

    @debugger_address.setter
    def debugger_address(self, value: str) -> None:
        """
        Allows you to set the address of the remote devtools instance
        that the ChromeDriver instance will try to connect to during an
        active wait.
        :Args:
         - value: address of remote devtools instance if any (hostname[:port])
        """
        self._debugger_address = value

    @property
    def extensions(self) -> List[str]:
        """
        :Returns: A list of encoded extensions that will be loaded
        """
        encoded_extensions = []
        for ext in self._extension_files:
            file_ = open(ext, 'rb')
            # Should not use base64.encodestring() which inserts newlines every
            # 76 characters (per RFC 1521). Chromedriver has to remove those
            # unnecessary newlines before decoding, causing performance hit.
            encoded_extensions.append(base64.b64encode(file_.read()).decode('UTF-8'))

            file_.close()
        return encoded_extensions + self._extensions

    def add_extension(self, extension: str) -> None:
        """
        Adds the path to the extension to a list that will be used to extract it
        to the ChromeDriver

        :Args:
         - extension: path to the \\*.crx file
        """
        if extension:
            extension_to_add = os.path.abspath(os.path.expanduser(extension))
            if os.path.exists(extension_to_add):
                self._extension_files.append(extension_to_add)
            else:
                raise IOError("Path to the extension doesn't exist")
        else:
            raise ValueError("argument can not be null")

    def add_encoded_extension(self, extension: str) -> None:
        """
        Adds Base64 encoded string with extension data to a list that will be used to extract it
        to the ChromeDriver

        :Args:
         - extension: Base64 encoded string with extension data
        """
        if extension:
            self._extensions.append(extension)
        else:
            raise ValueError("argument can not be null")

    @property
    def experimental_options(self) -> dict:
        """
        :Returns: A dictionary of experimental options for chromium
        """
        return self._experimental_options

    def add_experimental_option(self, name: str, value: Union[str, int, dict, List[str]]) -> None:
        """
        Adds an experimental option which is passed to chromium.

        :Args:
          name: The experimental option name.
          value: The option value.
        """
        if name.lower() == "w3c" and (value == "false" or value is False):
            warnings.warn(UserWarning("Manipulating `w3c` setting can have unintended consequences."))
        self._experimental_options[name] = value

    @property
    def headless(self) -> bool:
        """
        :Returns: True if the headless argument is set, else False
        """
        return '--headless' in self._arguments

    @headless.setter
    def headless(self, value: bool) -> None:
        """
        Sets the headless argument
        :Args:
          value: boolean value indicating to set the headless option
        """
        args = {'--headless'}
        if value is True:
            self._arguments.extend(args)
        else:
            self._arguments = list(set(self._arguments) - args)

    def to_capabilities(self) -> dict:
        """
        Creates a capabilities with all the options that have been set
        :Returns: A dictionary with everything
        """
        caps = self._caps
        chrome_options = self.experimental_options.copy()
        if self.mobile_options:
            chrome_options.update(self.mobile_options)
        chrome_options["extensions"] = self.extensions
        if self.binary_location:
            chrome_options["binary"] = self.binary_location
        chrome_options["args"] = self._arguments
        if self.debugger_address:
            chrome_options["debuggerAddress"] = self.debugger_address

        caps[self.KEY] = chrome_options

        return caps

    @property
    def default_capabilities(self) -> dict:
        return DesiredCapabilities.CHROME.copy()
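ChromiumOptions gathers arguments, extensions and experimental options and folds them into the "goog:chromeOptions" capability in to_capabilities(). An illustrative sketch, not part of the diff; the extension path and the "prefs" values are assumptions, and add_argument comes from the ArgOptions base class.

    from selenium.webdriver.chrome.options import Options

    opts = Options()
    opts.binary_location = "/usr/bin/chromium"               # optional custom binary (assumed path)
    opts.add_argument("--window-size=1280,800")
    opts.add_experimental_option("prefs", {"download.default_directory": "/tmp"})
    opts.add_extension("/path/to/extension.crx")             # hypothetical .crx path
    caps = opts.to_capabilities()
    # caps["goog:chromeOptions"] now holds "args", "extensions", "prefs" and "binary".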
@@ -0,0 +1,36 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from selenium.webdriver.remote.remote_connection import RemoteConnection


class ChromiumRemoteConnection(RemoteConnection):
    def __init__(self, remote_server_addr, vendor_prefix, browser_name, keep_alive=True, ignore_proxy=False):
        RemoteConnection.__init__(self, remote_server_addr, keep_alive, ignore_proxy=ignore_proxy)
        self.browser_name = browser_name
        self._commands["launchApp"] = ('POST', '/session/$sessionId/chromium/launch_app')
        self._commands["setPermissions"] = ('POST', '/session/$sessionId/permissions')
        self._commands["setNetworkConditions"] = ('POST', '/session/$sessionId/chromium/network_conditions')
        self._commands["getNetworkConditions"] = ('GET', '/session/$sessionId/chromium/network_conditions')
        self._commands["deleteNetworkConditions"] = ('DELETE', '/session/$sessionId/chromium/network_conditions')
        self._commands['executeCdpCommand'] = ('POST', '/session/$sessionId/{}/cdp/execute'.format(vendor_prefix))
        self._commands['getSinks'] = ('GET', '/session/$sessionId/{}/cast/get_sinks'.format(vendor_prefix))
        self._commands['getIssueMessage'] = ('GET', '/session/$sessionId/{}/cast/get_issue_message'.format(vendor_prefix))
        self._commands['setSinkToUse'] = ('POST', '/session/$sessionId/{}/cast/set_sink_to_use'.format(vendor_prefix))
        self._commands['startDesktopMirroring'] = ('POST', '/session/$sessionId/{}/cast/start_desktop_mirroring'.format(vendor_prefix))
        self._commands['startTabMirroring'] = ('POST', '/session/$sessionId/{}/cast/start_tab_mirroring'.format(vendor_prefix))
        self._commands['stopCasting'] = ('POST', '/session/$sessionId/{}/cast/stop_casting'.format(vendor_prefix))
@@ -0,0 +1,48 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from typing import List
from selenium.webdriver.common import service


class ChromiumService(service.Service):
    """
    Object that manages the starting and stopping the WebDriver instance of the ChromiumDriver
    """

    def __init__(self, executable_path: str, port: int = 0, service_args: List[str] = None,
                 log_path: str = None, env: dict = None, start_error_message: str = None):
        """
        Creates a new instance of the Service

        :Args:
         - executable_path : Path to the WebDriver executable
         - port : Port the service is running on
         - service_args : List of args to pass to the WebDriver service
         - log_path : Path for the WebDriver service to log to"""

        self.service_args = service_args or []
        if log_path:
            self.service_args.append('--log-path=%s' % log_path)

        if not start_error_message:
            raise AttributeError("start_error_message should not be empty")

        service.Service.__init__(self, executable_path, port=port, env=env, start_error_message=start_error_message)

    def command_line_args(self) -> List[str]:
        return ["--port=%d" % self.port] + self.service_args
245
venv/Lib/site-packages/selenium/webdriver/chromium/webdriver.py
Normal file
@@ -0,0 +1,245 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from selenium.webdriver.common.options import BaseOptions
from selenium.webdriver.common.service import Service
from selenium.webdriver.edge.options import Options as EdgeOptions
from selenium.webdriver.chrome.options import Options as ChromeOptions
import warnings

from selenium.webdriver.chromium.remote_connection import ChromiumRemoteConnection
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver

DEFAULT_PORT = 0
DEFAULT_SERVICE_LOG_PATH = None
DEFAULT_KEEP_ALIVE = None


class ChromiumDriver(RemoteWebDriver):
    """
    Controls the WebDriver instance of ChromiumDriver and allows you to drive the browser.
    """

    def __init__(self, browser_name, vendor_prefix,
                 port=DEFAULT_PORT, options: BaseOptions = None, service_args=None,
                 desired_capabilities=None, service_log_path=DEFAULT_SERVICE_LOG_PATH,
                 service: Service = None, keep_alive=DEFAULT_KEEP_ALIVE):
        """
        Creates a new WebDriver instance of the ChromiumDriver.
        Starts the service and then creates new WebDriver instance of ChromiumDriver.

        :Args:
         - browser_name - Browser name used when matching capabilities.
         - vendor_prefix - Company prefix to apply to vendor-specific WebDriver extension commands.
         - port - Deprecated: port you would like the service to run, if left as 0, a free port will be found.
         - options - this takes an instance of ChromiumOptions
         - service_args - Deprecated: List of args to pass to the driver service
         - desired_capabilities - Deprecated: Dictionary object with non-browser specific
           capabilities only, such as "proxy" or "loggingPref".
         - service_log_path - Deprecated: Where to log information from the driver.
         - keep_alive - Deprecated: Whether to configure ChromiumRemoteConnection to use HTTP keep-alive.
        """
        if desired_capabilities:
            warnings.warn('desired_capabilities has been deprecated, please pass in a Service object',
                          DeprecationWarning, stacklevel=2)
        if port != DEFAULT_PORT:
            warnings.warn('port has been deprecated, please pass in a Service object',
                          DeprecationWarning, stacklevel=2)
        self.port = port
        if service_log_path != DEFAULT_SERVICE_LOG_PATH:
            warnings.warn('service_log_path has been deprecated, please pass in a Service object',
                          DeprecationWarning, stacklevel=2)
        if keep_alive != DEFAULT_KEEP_ALIVE and type(self) == __class__:
            warnings.warn('keep_alive has been deprecated, please pass in a Service object',
                          DeprecationWarning, stacklevel=2)
        else:
            keep_alive = True

        self.vendor_prefix = vendor_prefix

        _ignore_proxy = None
        if not options:
            options = self.create_options()

        if desired_capabilities:
            for key, value in desired_capabilities.items():
                options.set_capability(key, value)

        if options._ignore_local_proxy:
            _ignore_proxy = options._ignore_local_proxy

        if not service:
            raise AttributeError('service cannot be None')

        self.service = service
        self.service.start()

        try:
            RemoteWebDriver.__init__(
                self,
                command_executor=ChromiumRemoteConnection(
                    remote_server_addr=self.service.service_url,
                    browser_name=browser_name, vendor_prefix=vendor_prefix,
                    keep_alive=keep_alive, ignore_proxy=_ignore_proxy),
                options=options)
        except Exception:
            self.quit()
            raise
        self._is_remote = False

    def launch_app(self, id):
        """Launches Chromium app specified by id."""
        return self.execute("launchApp", {'id': id})

    def get_network_conditions(self):
        """
        Gets Chromium network emulation settings.

        :Returns:
            A dict. For example:
            {'latency': 4, 'download_throughput': 2, 'upload_throughput': 2,
            'offline': False}
        """
        return self.execute("getNetworkConditions")['value']

    def set_network_conditions(self, **network_conditions) -> None:
        """
        Sets Chromium network emulation settings.

        :Args:
         - network_conditions: A dict with conditions specification.

        :Usage:
            ::

                driver.set_network_conditions(
                    offline=False,
                    latency=5,  # additional latency (ms)
                    download_throughput=500 * 1024,  # maximal throughput
                    upload_throughput=500 * 1024)  # maximal throughput

            Note: 'throughput' can be used to set both (for download and upload).
        """
        self.execute("setNetworkConditions", {
            'network_conditions': network_conditions
        })

    def delete_network_conditions(self) -> None:
        """
        Resets Chromium network emulation settings.
        """
        self.execute("deleteNetworkConditions")

    def set_permissions(self, name: str, value: str) -> None:
        """
        Sets Applicable Permission.

        :Args:
         - name: The item to set the permission on.
         - value: The value to set on the item

        :Usage:
            ::
                driver.set_permissions('clipboard-read', 'denied')
        """
        self.execute("setPermissions", {'descriptor': {'name': name}, 'state': value})

    def execute_cdp_cmd(self, cmd: str, cmd_args: dict):
        """
        Execute Chrome Devtools Protocol command and get returned result
        The command and command args should follow chrome devtools protocol domains/commands, refer to link
        https://chromedevtools.github.io/devtools-protocol/

        :Args:
         - cmd: A str, command name
         - cmd_args: A dict, command args. empty dict {} if there is no command args
        :Usage:
            ::
                driver.execute_cdp_cmd('Network.getResponseBody', {'requestId': requestId})
        :Returns:
            A dict, empty dict {} if there is no result to return.
            For example to getResponseBody:
            {'base64Encoded': False, 'body': 'response body string'}
        """
        return self.execute("executeCdpCommand", {'cmd': cmd, 'params': cmd_args})['value']

    def get_sinks(self) -> list:
        """
        :Returns: A list of sinks available for Cast.
        """
        return self.execute('getSinks')['value']

    def get_issue_message(self):
        """
        :Returns: An error message when there is any issue in a Cast session.
        """
        return self.execute('getIssueMessage')['value']

    def set_sink_to_use(self, sink_name: str) -> str:
        """
        Sets a specific sink, using its name, as a Cast session receiver target.

        :Args:
         - sink_name: Name of the sink to use as the target.
        """
        return self.execute('setSinkToUse', {'sinkName': sink_name})

    def start_desktop_mirroring(self, sink_name: str) -> str:
        """
        Starts a desktop mirroring session on a specific receiver target.

        :Args:
         - sink_name: Name of the sink to use as the target.
        """
        return self.execute('startDesktopMirroring', {'sinkName': sink_name})

    def start_tab_mirroring(self, sink_name: str) -> str:
        """
        Starts a tab mirroring session on a specific receiver target.

        :Args:
         - sink_name: Name of the sink to use as the target.
        """
        return self.execute('startTabMirroring', {'sinkName': sink_name})

    def stop_casting(self, sink_name: str) -> str:
        """
        Stops the existing Cast session on a specific receiver target.

        :Args:
         - sink_name: Name of the sink to stop the Cast session.
        """
        return self.execute('stopCasting', {'sinkName': sink_name})

    def quit(self) -> None:
        """
        Closes the browser and shuts down the ChromiumDriver executable
        that is started when starting the ChromiumDriver
        """
        try:
            RemoteWebDriver.quit(self)
        except Exception:
            # We don't care about the message because something probably has gone wrong
            pass
        finally:
            self.service.stop()

    def create_options(self) -> BaseOptions:
        if self.vendor_prefix == "ms":
            return EdgeOptions()
        else:
            return ChromeOptions()
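The vendor-specific commands registered in ChromiumRemoteConnection surface here as methods such as set_network_conditions() and execute_cdp_cmd(). A short sketch, not part of the diff, assuming a running Chrome/Chromium driver named `driver`:

    driver.set_network_conditions(
        offline=False,
        latency=100,                      # ms of added latency
        download_throughput=256 * 1024,   # bytes/s
        upload_throughput=256 * 1024)     # bytes/s
    print(driver.get_network_conditions())

    # Any DevTools command can be sent through the generic CDP endpoint:
    version_info = driver.execute_cdp_cmd("Browser.getVersion", {})
    print(version_info.get("product"))

    driver.delete_network_conditions()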
16
venv/Lib/site-packages/selenium/webdriver/common/__init__.py
Normal file
@@ -0,0 +1,16 @@
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
@@ -0,0 +1,344 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
The ActionChains implementation,
|
||||
"""
|
||||
|
||||
from .utils import keys_to_typing
|
||||
from .actions.action_builder import ActionBuilder
|
||||
|
||||
|
||||
class ActionChains(object):
|
||||
"""
|
||||
ActionChains are a way to automate low level interactions such as
|
||||
mouse movements, mouse button actions, key press, and context menu interactions.
|
||||
This is useful for doing more complex actions like hover over and drag and drop.
|
||||
|
||||
Generate user actions.
|
||||
When you call methods for actions on the ActionChains object,
|
||||
the actions are stored in a queue in the ActionChains object.
|
||||
When you call perform(), the events are fired in the order they
|
||||
are queued up.
|
||||
|
||||
ActionChains can be used in a chain pattern::
|
||||
|
||||
menu = driver.find_element(By.CSS_SELECTOR, ".nav")
|
||||
hidden_submenu = driver.find_element(By.CSS_SELECTOR, ".nav #submenu1")
|
||||
|
||||
ActionChains(driver).move_to_element(menu).click(hidden_submenu).perform()
|
||||
|
||||
Or actions can be queued up one by one, then performed.::
|
||||
|
||||
menu = driver.find_element(By.CSS_SELECTOR, ".nav")
|
||||
hidden_submenu = driver.find_element(By.CSS_SELECTOR, ".nav #submenu1")
|
||||
|
||||
actions = ActionChains(driver)
|
||||
actions.move_to_element(menu)
|
||||
actions.click(hidden_submenu)
|
||||
actions.perform()
|
||||
|
||||
Either way, the actions are performed in the order they are called, one after
|
||||
another.
|
||||
"""
|
||||
|
||||
def __init__(self, driver, duration=250):
|
||||
"""
|
||||
Creates a new ActionChains.
|
||||
|
||||
:Args:
|
||||
- driver: The WebDriver instance which performs user actions.
|
||||
- duration: override the default 250 msecs of DEFAULT_MOVE_DURATION in PointerInput
|
||||
"""
|
||||
self._driver = driver
|
||||
self._actions = []
|
||||
self.w3c_actions = ActionBuilder(driver, duration=duration)
|
||||
|
||||
def perform(self):
|
||||
"""
|
||||
Performs all stored actions.
|
||||
"""
|
||||
self.w3c_actions.perform()
|
||||
|
||||
def reset_actions(self):
|
||||
"""
|
||||
Clears actions that are already stored locally and on the remote end
|
||||
"""
|
||||
self.w3c_actions.clear_actions()
|
||||
for device in self.w3c_actions.devices:
|
||||
device.clear_actions()
|
||||
self._actions = []
|
||||
|
||||
def click(self, on_element=None):
|
||||
"""
|
||||
Clicks an element.
|
||||
|
||||
:Args:
|
||||
- on_element: The element to click.
|
||||
If None, clicks on current mouse position.
|
||||
"""
|
||||
if on_element:
|
||||
self.move_to_element(on_element)
|
||||
|
||||
self.w3c_actions.pointer_action.click()
|
||||
self.w3c_actions.key_action.pause()
|
||||
self.w3c_actions.key_action.pause()
|
||||
|
||||
return self
|
||||
|
||||
def click_and_hold(self, on_element=None):
|
||||
"""
|
||||
Holds down the left mouse button on an element.
|
||||
|
||||
:Args:
|
||||
- on_element: The element to mouse down.
|
||||
If None, clicks on current mouse position.
|
||||
"""
|
||||
if on_element:
|
||||
self.move_to_element(on_element)
|
||||
|
||||
self.w3c_actions.pointer_action.click_and_hold()
|
||||
self.w3c_actions.key_action.pause()
|
||||
|
||||
return self
|
||||
|
||||
def context_click(self, on_element=None):
|
||||
"""
|
||||
Performs a context-click (right click) on an element.
|
||||
|
||||
:Args:
|
||||
- on_element: The element to context-click.
|
||||
If None, clicks on current mouse position.
|
||||
"""
|
||||
if on_element:
|
||||
self.move_to_element(on_element)
|
||||
|
||||
self.w3c_actions.pointer_action.context_click()
|
||||
self.w3c_actions.key_action.pause()
|
||||
self.w3c_actions.key_action.pause()
|
||||
|
||||
return self
|
||||
|
||||
def double_click(self, on_element=None):
|
||||
"""
|
||||
Double-clicks an element.
|
||||
|
||||
:Args:
|
||||
- on_element: The element to double-click.
|
||||
If None, clicks on current mouse position.
|
||||
"""
|
||||
if on_element:
|
||||
self.move_to_element(on_element)
|
||||
|
||||
self.w3c_actions.pointer_action.double_click()
|
||||
for _ in range(4):
|
||||
self.w3c_actions.key_action.pause()
|
||||
|
||||
return self
|
||||
|
||||
def drag_and_drop(self, source, target):
|
||||
"""
|
||||
Holds down the left mouse button on the source element,
|
||||
then moves to the target element and releases the mouse button.
|
||||
|
||||
:Args:
|
||||
- source: The element to mouse down.
|
||||
- target: The element to mouse up.
|
||||
"""
|
||||
self.click_and_hold(source)
|
||||
self.release(target)
|
||||
return self
|
||||
|
||||
def drag_and_drop_by_offset(self, source, xoffset, yoffset):
|
||||
"""
|
||||
Holds down the left mouse button on the source element,
|
||||
then moves to the target offset and releases the mouse button.
|
||||
|
||||
:Args:
|
||||
- source: The element to mouse down.
|
||||
- xoffset: X offset to move to.
|
||||
- yoffset: Y offset to move to.
|
||||
"""
|
||||
self.click_and_hold(source)
|
||||
self.move_by_offset(xoffset, yoffset)
|
||||
self.release()
|
||||
return self
|
||||
|
||||
def key_down(self, value, element=None):
|
||||
"""
|
||||
Sends a key press only, without releasing it.
|
||||
Should only be used with modifier keys (Control, Alt and Shift).
|
||||
|
||||
:Args:
|
||||
- value: The modifier key to send. Values are defined in `Keys` class.
|
||||
- element: The element to send keys.
|
||||
If None, sends a key to current focused element.
|
||||
|
||||
Example, pressing ctrl+c::
|
||||
|
||||
ActionChains(driver).key_down(Keys.CONTROL).send_keys('c').key_up(Keys.CONTROL).perform()
|
||||
|
||||
"""
|
||||
if element:
|
||||
self.click(element)
|
||||
|
||||
self.w3c_actions.key_action.key_down(value)
|
||||
self.w3c_actions.pointer_action.pause()
|
||||
|
||||
return self
|
||||
|
||||
def key_up(self, value, element=None):
|
||||
"""
|
||||
Releases a modifier key.
|
||||
|
||||
:Args:
|
||||
- value: The modifier key to send. Values are defined in Keys class.
|
||||
- element: The element to send keys.
|
||||
If None, sends a key to current focused element.
|
||||
|
||||
Example, pressing ctrl+c::
|
||||
|
||||
ActionChains(driver).key_down(Keys.CONTROL).send_keys('c').key_up(Keys.CONTROL).perform()
|
||||
|
||||
"""
|
||||
if element:
|
||||
self.click(element)
|
||||
|
||||
self.w3c_actions.key_action.key_up(value)
|
||||
self.w3c_actions.pointer_action.pause()
|
||||
|
||||
return self
|
||||
|
||||
def move_by_offset(self, xoffset, yoffset):
|
||||
"""
|
||||
Moving the mouse to an offset from current mouse position.
|
||||
|
||||
:Args:
|
||||
- xoffset: X offset to move to, as a positive or negative integer.
|
||||
- yoffset: Y offset to move to, as a positive or negative integer.
|
||||
"""
|
||||
|
||||
self.w3c_actions.pointer_action.move_by(xoffset, yoffset)
|
||||
self.w3c_actions.key_action.pause()
|
||||
|
||||
return self
|
||||
|
||||
def move_to_element(self, to_element):
|
||||
"""
|
||||
Moving the mouse to the middle of an element.
|
||||
|
||||
:Args:
|
||||
- to_element: The WebElement to move to.
|
||||
"""
|
||||
|
||||
self.w3c_actions.pointer_action.move_to(to_element)
|
||||
self.w3c_actions.key_action.pause()
|
||||
|
||||
return self
|
||||
|
||||
def move_to_element_with_offset(self, to_element, xoffset, yoffset):
|
||||
"""
|
||||
Move the mouse by an offset of the specified element.
|
||||
Offsets are relative to the top-left corner of the element.
|
||||
|
||||
:Args:
|
||||
- to_element: The WebElement to move to.
|
||||
- xoffset: X offset to move to.
|
||||
- yoffset: Y offset to move to.
|
||||
"""
|
||||
|
||||
self.w3c_actions.pointer_action.move_to(to_element,
|
||||
int(xoffset),
|
||||
int(yoffset))
|
||||
self.w3c_actions.key_action.pause()
|
||||
|
||||
return self
|
||||
|
||||
def pause(self, seconds):
|
||||
""" Pause all inputs for the specified duration in seconds """
|
||||
|
||||
self.w3c_actions.pointer_action.pause(seconds)
|
||||
self.w3c_actions.key_action.pause(seconds)
|
||||
|
||||
return self
|
||||
|
||||
def release(self, on_element=None):
|
||||
"""
|
||||
Releases a held mouse button on an element.
|
||||
|
||||
:Args:
|
||||
- on_element: The element to mouse up.
|
||||
If None, releases on current mouse position.
|
||||
"""
|
||||
if on_element:
|
||||
self.move_to_element(on_element)
|
||||
|
||||
self.w3c_actions.pointer_action.release()
|
||||
self.w3c_actions.key_action.pause()
|
||||
|
||||
return self
|
||||
|
||||
def send_keys(self, *keys_to_send):
|
||||
"""
|
||||
Sends keys to current focused element.
|
||||
|
||||
:Args:
|
||||
- keys_to_send: The keys to send. Modifier key constants can be found in the
|
||||
'Keys' class.
|
||||
"""
|
||||
typing = keys_to_typing(keys_to_send)
|
||||
|
||||
for key in typing:
|
||||
self.key_down(key)
|
||||
self.key_up(key)
|
||||
|
||||
return self
|
||||
|
||||
def send_keys_to_element(self, element, *keys_to_send):
|
||||
"""
|
||||
Sends keys to an element.
|
||||
|
||||
:Args:
|
||||
- element: The element to send keys.
|
||||
- keys_to_send: The keys to send. Modifier key constants can be found in the
|
||||
'Keys' class.
|
||||
"""
|
||||
self.click(element)
|
||||
self.send_keys(*keys_to_send)
|
||||
return self
|
||||
|
||||
def scroll(self, x: int, y: int, delta_x: int, delta_y: int, duration: int = 0, origin: str = "viewport"):
|
||||
"""
|
||||
Sends wheel scroll information to the browser to be processed.
|
||||
|
||||
:Args:
|
||||
- x: starting X coordinate
|
||||
- y: starting Y coordinate
|
||||
- delta_x: the distance the mouse will scroll on the x axis
|
||||
- delta_y: the distance the mouse will scroll on the y axis
- duration: duration of the scroll action, defaults to 0
- origin: where the scroll originates, defaults to "viewport"
"""
|
||||
self.w3c_actions.wheel_action.scroll(x=x, y=y, delta_x=delta_x, delta_y=delta_y,
|
||||
duration=duration, origin=origin)
|
||||
return self
|
||||
|
||||
# Context manager so ActionChains can be used in 'with ... as' statements.
|
||||
|
||||
def __enter__(self):
|
||||
return self # Return created instance of self.
|
||||
|
||||
def __exit__(self, _type, _value, _traceback):
|
||||
pass # Do nothing, does not require additional cleanup.
|
||||
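
The drag-and-drop helpers above only queue a click_and_hold/release (or move_by_offset) pair; nothing reaches the browser until perform() runs. A minimal usage sketch, not part of the diffed file, assuming a local Chrome driver and a hypothetical page with #draggable/#droppable elements::

    from selenium import webdriver
    from selenium.webdriver.common.action_chains import ActionChains
    from selenium.webdriver.common.by import By

    driver = webdriver.Chrome()                        # any locally installed driver works
    driver.get("https://example.com/sortable")         # hypothetical page with draggable content
    source = driver.find_element(By.ID, "draggable")   # hypothetical element ids
    target = driver.find_element(By.ID, "droppable")

    # queues click_and_hold(source) + release(target), then flushes the queue
    ActionChains(driver).drag_and_drop(source, target).perform()
    driver.quit()
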
@@ -0,0 +1,16 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
@@ -0,0 +1,97 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from typing import Union, List
|
||||
from selenium.webdriver.remote.command import Command
|
||||
from . import interaction
|
||||
from .key_actions import KeyActions
|
||||
from .key_input import KeyInput
|
||||
from .pointer_actions import PointerActions
|
||||
from .pointer_input import PointerInput
|
||||
from .wheel_input import WheelInput
|
||||
from .wheel_actions import WheelActions
|
||||
|
||||
|
||||
class ActionBuilder(object):
|
||||
def __init__(self, driver, mouse=None, wheel=None, keyboard=None, duration=250) -> None:
|
||||
if not mouse:
|
||||
mouse = PointerInput(interaction.POINTER_MOUSE, "mouse")
|
||||
if not keyboard:
|
||||
keyboard = KeyInput(interaction.KEY)
|
||||
if not wheel:
|
||||
wheel = WheelInput(interaction.WHEEL)
|
||||
self.devices = [mouse, keyboard, wheel]
|
||||
self._key_action = KeyActions(keyboard)
|
||||
self._pointer_action = PointerActions(mouse, duration=duration)
|
||||
self._wheel_action = WheelActions(wheel)
|
||||
self.driver = driver
|
||||
|
||||
def get_device_with(self, name) -> Union["WheelInput", "PointerInput", "KeyInput"]:
|
||||
return next(filter(lambda x: x == name, self.devices), None)
|
||||
|
||||
@property
|
||||
def pointer_inputs(self) -> List[PointerInput]:
|
||||
return [device for device in self.devices if device.type == interaction.POINTER]
|
||||
|
||||
@property
|
||||
def key_inputs(self) -> List[KeyInput]:
|
||||
return [device for device in self.devices if device.type == interaction.KEY]
|
||||
|
||||
@property
|
||||
def key_action(self) -> KeyActions:
|
||||
return self._key_action
|
||||
|
||||
@property
|
||||
def pointer_action(self) -> PointerActions:
|
||||
return self._pointer_action
|
||||
|
||||
@property
|
||||
def wheel_action(self) -> WheelActions:
|
||||
return self._wheel_action
|
||||
|
||||
def add_key_input(self, name) -> KeyInput:
|
||||
new_input = KeyInput(name)
|
||||
self._add_input(new_input)
|
||||
return new_input
|
||||
|
||||
def add_pointer_input(self, kind, name) -> PointerInput:
|
||||
new_input = PointerInput(kind, name)
|
||||
self._add_input(new_input)
|
||||
return new_input
|
||||
|
||||
def add_wheel_input(self, kind, name) -> WheelInput:
|
||||
new_input = WheelInput(kind, name)
|
||||
self._add_input(new_input)
|
||||
return new_input
|
||||
|
||||
def perform(self) -> None:
|
||||
enc = {"actions": []}
|
||||
for device in self.devices:
|
||||
encoded = device.encode()
|
||||
if encoded['actions']:
|
||||
enc["actions"].append(encoded)
|
||||
device.actions = []
|
||||
self.driver.execute(Command.W3C_ACTIONS, enc)
|
||||
|
||||
def clear_actions(self) -> None:
|
||||
"""
|
||||
Clears actions that are already stored on the remote end
|
||||
"""
|
||||
self.driver.execute(Command.W3C_CLEAR_ACTIONS)
|
||||
|
||||
def _add_input(self, input) -> None:
|
||||
self.devices.append(input)
|
||||
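
ActionBuilder is the low-level tick builder that ActionChains drives through w3c_actions: each input device queues its own actions, and perform() encodes every device into a single W3C_ACTIONS command. A rough sketch of driving it directly, assuming a live driver::

    from selenium import webdriver
    from selenium.webdriver.common.actions.action_builder import ActionBuilder

    driver = webdriver.Firefox()                      # Firefox is only an example
    driver.get("https://example.com")

    builder = ActionBuilder(driver)                   # default mouse, keyboard and wheel inputs
    builder.pointer_action.move_to_location(80, 120)  # absolute viewport coordinates
    builder.pointer_action.click()
    builder.perform()                                 # one W3C_ACTIONS payload for all devices
    driver.quit()

Note that, unlike the ActionChains wrappers, nothing pads the other devices with pauses here, so mixed keyboard-and-pointer sequences have to be kept in step manually.
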
@@ -0,0 +1,43 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import uuid
|
||||
|
||||
|
||||
class InputDevice(object):
|
||||
"""
|
||||
Describes the input device being used for the action.
|
||||
"""
|
||||
def __init__(self, name=None):
|
||||
if not name:
|
||||
self.name = uuid.uuid4()
|
||||
else:
|
||||
self.name = name
|
||||
|
||||
self.actions = []
|
||||
|
||||
def add_action(self, action):
|
||||
"""
|
||||
|
||||
"""
|
||||
self.actions.append(action)
|
||||
|
||||
def clear_actions(self):
|
||||
self.actions = []
|
||||
|
||||
def create_pause(self, duration=0):
|
||||
pass
|
||||
@@ -0,0 +1,51 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
KEY = "key"
|
||||
POINTER = "pointer"
|
||||
NONE = "none"
|
||||
WHEEL = "wheel"
|
||||
SOURCE_TYPES = set([KEY, POINTER, NONE])
|
||||
|
||||
POINTER_MOUSE = "mouse"
|
||||
POINTER_TOUCH = "touch"
|
||||
POINTER_PEN = "pen"
|
||||
|
||||
POINTER_KINDS = set([POINTER_MOUSE, POINTER_TOUCH, POINTER_PEN])
|
||||
|
||||
|
||||
class Interaction(object):
|
||||
|
||||
PAUSE = "pause"
|
||||
|
||||
def __init__(self, source):
|
||||
self.source = source
|
||||
|
||||
|
||||
class Pause(Interaction):
|
||||
|
||||
def __init__(self, source, duration=0):
|
||||
super(Interaction, self).__init__()
|
||||
self.source = source
|
||||
self.duration = duration
|
||||
|
||||
def encode(self):
|
||||
return {
|
||||
"type": self.PAUSE,
|
||||
"duration": int(self.duration * 1000)
|
||||
}
|
||||
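
Pause durations are given in seconds and encoded as milliseconds, which is easy to check in isolation (a throwaway sketch, not part of the diff)::

    from selenium.webdriver.common.actions.interaction import Pause
    from selenium.webdriver.common.actions.key_input import KeyInput

    source = KeyInput("keyboard")
    print(Pause(source, 0.5).encode())   # {'type': 'pause', 'duration': 500}
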
@@ -0,0 +1,50 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from .interaction import Interaction, KEY
|
||||
from .key_input import KeyInput
|
||||
from ..utils import keys_to_typing
|
||||
|
||||
|
||||
class KeyActions(Interaction):
|
||||
|
||||
def __init__(self, source=None):
|
||||
if not source:
|
||||
source = KeyInput(KEY)
|
||||
self.source = source
|
||||
super(KeyActions, self).__init__(source)
|
||||
|
||||
def key_down(self, letter):
|
||||
return self._key_action("create_key_down", letter)
|
||||
|
||||
def key_up(self, letter):
|
||||
return self._key_action("create_key_up", letter)
|
||||
|
||||
def pause(self, duration=0):
|
||||
return self._key_action("create_pause", duration)
|
||||
|
||||
def send_keys(self, text):
|
||||
if not isinstance(text, list):
|
||||
text = keys_to_typing(text)
|
||||
for letter in text:
|
||||
self.key_down(letter)
|
||||
self.key_up(letter)
|
||||
return self
|
||||
|
||||
def _key_action(self, action, letter):
|
||||
meth = getattr(self.source, action)
|
||||
meth(letter)
|
||||
return self
|
||||
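
send_keys() above expands a string into keyDown/keyUp pairs on the source device, which can be inspected without a browser (illustrative sketch)::

    from selenium.webdriver.common.actions.key_actions import KeyActions

    keys = KeyActions()                      # builds its own KeyInput source when none is given
    keys.send_keys("hi")                     # queues keyDown/keyUp for 'h' and then 'i'
    print(len(keys.source.actions))          # 4
    print(keys.source.actions[0].encode())   # {'type': 'keyDown', 'value': 'h'}
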
@@ -0,0 +1,51 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from . import interaction
|
||||
|
||||
from .input_device import InputDevice
|
||||
from .interaction import (Interaction,
|
||||
Pause)
|
||||
|
||||
|
||||
class KeyInput(InputDevice):
|
||||
def __init__(self, name) -> None:
|
||||
super(KeyInput, self).__init__()
|
||||
self.name = name
|
||||
self.type = interaction.KEY
|
||||
|
||||
def encode(self) -> dict:
|
||||
return {"type": self.type, "id": self.name, "actions": [acts.encode() for acts in self.actions]}
|
||||
|
||||
def create_key_down(self, key) -> None:
|
||||
self.add_action(TypingInteraction(self, "keyDown", key))
|
||||
|
||||
def create_key_up(self, key) -> None:
|
||||
self.add_action(TypingInteraction(self, "keyUp", key))
|
||||
|
||||
def create_pause(self, pause_duration=0) -> None:
|
||||
self.add_action(Pause(self, pause_duration))
|
||||
|
||||
|
||||
class TypingInteraction(Interaction):
|
||||
|
||||
def __init__(self, source, type_, key) -> None:
|
||||
super(TypingInteraction, self).__init__(source)
|
||||
self.type = type_
|
||||
self.key = key
|
||||
|
||||
def encode(self) -> dict:
|
||||
return {"type": self.type, "value": self.key}
|
||||
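
encode() produces the per-device entry that ActionBuilder.perform() collects under "actions"; for a key device it looks like this (illustrative sketch)::

    from selenium.webdriver.common.actions.key_input import KeyInput

    kb = KeyInput("keyboard")
    kb.create_key_down("a")
    kb.create_key_up("a")
    print(kb.encode())
    # {'type': 'key', 'id': 'keyboard',
    #  'actions': [{'type': 'keyDown', 'value': 'a'}, {'type': 'keyUp', 'value': 'a'}]}
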
@@ -0,0 +1,23 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
class MouseButton(object):
|
||||
|
||||
LEFT = 0
|
||||
MIDDLE = 1
|
||||
RIGHT = 2
|
||||
@@ -0,0 +1,124 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from . import interaction
|
||||
|
||||
from .interaction import Interaction
|
||||
from .mouse_button import MouseButton
|
||||
from .pointer_input import PointerInput
|
||||
|
||||
from selenium.webdriver.remote.webelement import WebElement
|
||||
|
||||
|
||||
class PointerActions(Interaction):
|
||||
|
||||
def __init__(self, source=None, duration=250):
|
||||
"""
|
||||
Args:
|
||||
- source: PointerInput instance
|
||||
- duration: override the default 250 msecs of DEFAULT_MOVE_DURATION in source
|
||||
"""
|
||||
if not source:
|
||||
source = PointerInput(interaction.POINTER_MOUSE, "mouse")
|
||||
self.source = source
|
||||
self._duration = duration
|
||||
super(PointerActions, self).__init__(source)
|
||||
|
||||
def pointer_down(self, button=MouseButton.LEFT, width=None, height=None, pressure=None,
|
||||
tangential_pressure=None, tilt_x=None, tilt_y=None, twist=None,
|
||||
altitude_angle=None, azimuth_angle=None):
|
||||
self._button_action("create_pointer_down", button=button, width=width, height=height,
|
||||
pressure=pressure, tangential_pressure=tangential_pressure,
|
||||
tilt_x=tilt_x, tilt_y=tilt_y, twist=twist,
|
||||
altitude_angle=altitude_angle, azimuth_angle=azimuth_angle)
|
||||
return self
|
||||
|
||||
def pointer_up(self, button=MouseButton.LEFT):
|
||||
self._button_action("create_pointer_up", button=button)
|
||||
return self
|
||||
|
||||
def move_to(self, element, x=0, y=0, width=None, height=None, pressure=None,
|
||||
tangential_pressure=None, tilt_x=None, tilt_y=None, twist=None,
|
||||
altitude_angle=None, azimuth_angle=None):
|
||||
if not isinstance(element, WebElement):
|
||||
raise AttributeError("move_to requires a WebElement")
|
||||
|
||||
if x or y:
|
||||
el_rect = element.rect
|
||||
left_offset = el_rect['width'] / 2
|
||||
top_offset = el_rect['height'] / 2
|
||||
left = -left_offset + (x or 0)
|
||||
top = -top_offset + (y or 0)
|
||||
else:
|
||||
left = 0
|
||||
top = 0
|
||||
|
||||
self.source.create_pointer_move(origin=element, duration=self._duration, x=int(left), y=int(top),
|
||||
width=width, height=height, pressure=pressure,
|
||||
tangential_pressure=tangential_pressure,
|
||||
tilt_x=tilt_x, tilt_y=tilt_y, twist=twist,
|
||||
altitude_angle=altitude_angle, azimuth_angle=azimuth_angle)
|
||||
return self
|
||||
|
||||
def move_by(self, x, y):
|
||||
self.source.create_pointer_move(origin=interaction.POINTER, duration=self._duration, x=int(x), y=int(y))
|
||||
return self
|
||||
|
||||
def move_to_location(self, x, y):
|
||||
self.source.create_pointer_move(origin='viewport', duration=self._duration, x=int(x), y=int(y))
|
||||
return self
|
||||
|
||||
def click(self, element=None):
|
||||
if element:
|
||||
self.move_to(element)
|
||||
self.pointer_down(MouseButton.LEFT)
|
||||
self.pointer_up(MouseButton.LEFT)
|
||||
return self
|
||||
|
||||
def context_click(self, element=None):
|
||||
if element:
|
||||
self.move_to(element)
|
||||
self.pointer_down(MouseButton.RIGHT)
|
||||
self.pointer_up(MouseButton.RIGHT)
|
||||
return self
|
||||
|
||||
def click_and_hold(self, element=None):
|
||||
if element:
|
||||
self.move_to(element)
|
||||
self.pointer_down()
|
||||
return self
|
||||
|
||||
def release(self):
|
||||
self.pointer_up()
|
||||
return self
|
||||
|
||||
def double_click(self, element=None):
|
||||
if element:
|
||||
self.move_to(element)
|
||||
self.pointer_down(MouseButton.LEFT)
|
||||
self.pointer_up(MouseButton.LEFT)
|
||||
self.pointer_down(MouseButton.LEFT)
|
||||
self.pointer_up(MouseButton.LEFT)
|
||||
return self
|
||||
|
||||
def pause(self, duration=0):
|
||||
self.source.create_pause(duration)
|
||||
return self
|
||||
|
||||
def _button_action(self, action, **kwargs):
|
||||
meth = getattr(self.source, action)
|
||||
meth(**kwargs)
|
||||
return self
|
||||
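
In move_to(), a non-zero x/y is treated as an offset from the element's top-left corner: half of the element's size is subtracted before the pointerMove is issued with the element as its origin (whose W3C reference point is the element's in-view centre). The arithmetic for a hypothetical 100x40 element::

    el_rect = {"width": 100, "height": 40}   # stand-in for element.rect
    x, y = 10, 5                             # requested offset from the top-left corner
    left = -el_rect["width"] / 2 + x         # -40.0
    top = -el_rect["height"] / 2 + y         # -15.0
    # the pointerMove is sent with origin=<element>, x=-40, y=-15
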
@@ -0,0 +1,79 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from .input_device import InputDevice
|
||||
from .interaction import POINTER, POINTER_KINDS
|
||||
|
||||
from selenium.common.exceptions import InvalidArgumentException
|
||||
from selenium.webdriver.remote.webelement import WebElement
|
||||
|
||||
|
||||
class PointerInput(InputDevice):
|
||||
|
||||
DEFAULT_MOVE_DURATION = 250
|
||||
|
||||
def __init__(self, kind, name):
|
||||
super(PointerInput, self).__init__()
|
||||
if kind not in POINTER_KINDS:
|
||||
raise InvalidArgumentException("Invalid PointerInput kind '%s'" % kind)
|
||||
self.type = POINTER
|
||||
self.kind = kind
|
||||
self.name = name
|
||||
|
||||
def create_pointer_move(self, duration=DEFAULT_MOVE_DURATION, x=0, y=0, origin=None, **kwargs):
|
||||
action = dict(type="pointerMove", duration=duration)
|
||||
action["x"] = x
|
||||
action["y"] = y
|
||||
action.update(**kwargs)
|
||||
if isinstance(origin, WebElement):
|
||||
action["origin"] = {"element-6066-11e4-a52e-4f735466cecf": origin.id}
|
||||
elif origin:
|
||||
action["origin"] = origin
|
||||
|
||||
self.add_action(self._convert_keys(action))
|
||||
|
||||
def create_pointer_down(self, **kwargs):
|
||||
data = dict(type="pointerDown", duration=0)
|
||||
data.update(**kwargs)
|
||||
self.add_action(self._convert_keys(data))
|
||||
|
||||
def create_pointer_up(self, button):
|
||||
self.add_action({"type": "pointerUp", "duration": 0, "button": button})
|
||||
|
||||
def create_pointer_cancel(self):
|
||||
self.add_action({"type": "pointerCancel"})
|
||||
|
||||
def create_pause(self, pause_duration):
|
||||
self.add_action({"type": "pause", "duration": int(pause_duration * 1000)})
|
||||
|
||||
def encode(self):
|
||||
return {"type": self.type,
|
||||
"parameters": {"pointerType": self.kind},
|
||||
"id": self.name,
|
||||
"actions": [acts for acts in self.actions]}
|
||||
|
||||
def _convert_keys(self, actions):
|
||||
out = {}
|
||||
for k in actions.keys():
|
||||
if actions[k] is None:
|
||||
continue
|
||||
if k == "x" or k == "y":
|
||||
out[k] = int(actions[k])
|
||||
continue
|
||||
splits = k.split('_')
|
||||
new_key = splits[0] + ''.join(v.title() for v in splits[1:])
|
||||
out[new_key] = actions[k]
|
||||
return out
|
||||
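
_convert_keys() drops None values and rewrites snake_case keyword arguments into the camelCase names the wire protocol expects, e.g. tilt_x becomes tiltX (a small illustrative sketch)::

    from selenium.webdriver.common.actions import interaction
    from selenium.webdriver.common.actions.pointer_input import PointerInput

    pen = PointerInput(interaction.POINTER_PEN, "stylus")
    pen.create_pointer_down(tilt_x=10, tilt_y=-5, pressure=0.5)
    print(pen.actions[-1])
    # {'type': 'pointerDown', 'duration': 0, 'tiltX': 10, 'tiltY': -5, 'pressure': 0.5}
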
@@ -0,0 +1,34 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from .wheel_input import WheelInput
|
||||
from .interaction import Interaction
|
||||
|
||||
|
||||
class WheelActions(Interaction):
|
||||
|
||||
def __init__(self, source: WheelInput = None):
|
||||
if not source:
|
||||
source = WheelInput("wheel")
|
||||
super(WheelActions, self).__init__(source)
|
||||
|
||||
def pause(self, duration=0):
|
||||
self.source.create_pause(duration)
|
||||
return self
|
||||
|
||||
def scroll(self, x, y, delta_x, delta_y, duration, origin):
|
||||
self.source.create_scroll(x, y, delta_x, delta_y, duration, origin)
|
||||
return self
|
||||
@@ -0,0 +1,46 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from . import interaction
|
||||
from .input_device import InputDevice
|
||||
from typing import Union
|
||||
|
||||
from selenium.webdriver.remote.webelement import WebElement
|
||||
|
||||
|
||||
class WheelInput(InputDevice):
|
||||
|
||||
def __init__(self, name) -> None:
|
||||
super().__init__(name=name)
|
||||
self.name = name
|
||||
self.type = interaction.WHEEL
|
||||
|
||||
def encode(self) -> dict:
|
||||
return {"type": self.type,
|
||||
"id": self.name,
|
||||
"actions": [acts for acts in self.actions]}
|
||||
|
||||
def create_scroll(self, x: int, y: int, delta_x: int,
|
||||
delta_y: int, duration: int, origin) -> None:
|
||||
if isinstance(origin, WebElement):
|
||||
origin = {"element-6066-11e4-a52e-4f735466cecf": origin.id}
|
||||
self.add_action({"type": "scroll", "x": x, "y": y, "deltaX": delta_x,
|
||||
"deltaY": delta_y, "duration": duration,
|
||||
"origin": origin})
|
||||
|
||||
def create_pause(self, pause_duration: Union[int, float]) -> None:
|
||||
self.add_action(
|
||||
{"type": "pause", "duration": int(pause_duration * 1000)})
|
||||
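
A scroll action encodes the starting point, the deltas and the origin in a single dictionary; built in isolation it looks like this (illustrative sketch)::

    from selenium.webdriver.common.actions.wheel_input import WheelInput

    wheel = WheelInput("wheel")
    wheel.create_scroll(x=0, y=0, delta_x=0, delta_y=200, duration=0, origin="viewport")
    print(wheel.encode()["actions"][0])
    # {'type': 'scroll', 'x': 0, 'y': 0, 'deltaX': 0, 'deltaY': 200,
    #  'duration': 0, 'origin': 'viewport'}
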
90
venv/Lib/site-packages/selenium/webdriver/common/alert.py
Normal file
@@ -0,0 +1,90 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
The Alert implementation.
|
||||
"""
|
||||
|
||||
from selenium.webdriver.common.utils import keys_to_typing
|
||||
from selenium.webdriver.remote.command import Command
|
||||
|
||||
|
||||
class Alert(object):
|
||||
"""
|
||||
Allows interaction with alerts.
|
||||
|
||||
Use this class to interact with alert prompts. It contains methods for dismissing,
|
||||
accepting, inputting, and getting text from alert prompts.
|
||||
|
||||
Accepting / Dismissing alert prompts::
|
||||
|
||||
Alert(driver).accept()
|
||||
Alert(driver).dismiss()
|
||||
|
||||
Inputting a value into an alert prompt::
|
||||
|
||||
name_prompt = Alert(driver)
|
||||
name_prompt.send_keys("Willian Shakesphere")
|
||||
name_prompt.accept()
|
||||
|
||||
|
||||
Reading the text of a prompt for verification::
|
||||
|
||||
alert_text = Alert(driver).text
|
||||
self.assertEqual("Do you wish to quit?", alert_text)
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, driver):
|
||||
"""
|
||||
Creates a new Alert.
|
||||
|
||||
:Args:
|
||||
- driver: The WebDriver instance which performs user actions.
|
||||
"""
|
||||
self.driver = driver
|
||||
|
||||
@property
|
||||
def text(self):
|
||||
"""
|
||||
Gets the text of the Alert.
|
||||
"""
|
||||
return self.driver.execute(Command.W3C_GET_ALERT_TEXT)["value"]
|
||||
|
||||
def dismiss(self):
|
||||
"""
|
||||
Dismisses the alert available.
|
||||
"""
|
||||
self.driver.execute(Command.W3C_DISMISS_ALERT)
|
||||
|
||||
def accept(self):
|
||||
"""
|
||||
Accepts the alert available.
|
||||
|
||||
Usage::
|
||||
Alert(driver).accept() # Confirm an alert dialog.
|
||||
"""
|
||||
self.driver.execute(Command.W3C_ACCEPT_ALERT)
|
||||
|
||||
def send_keys(self, keysToSend):
|
||||
"""
|
||||
Send Keys to the Alert.
|
||||
|
||||
:Args:
|
||||
- keysToSend: The text to be sent to Alert.
|
||||
"""
|
||||
self.driver.execute(Command.W3C_SET_ALERT_VALUE, {'value': keys_to_typing(keysToSend), 'text': keysToSend})
|
||||
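
In practice an Alert is usually obtained through driver.switch_to.alert rather than constructed directly. A minimal sketch, not part of the diffed file, assuming a hypothetical page on which a prompt is triggered::

    from selenium import webdriver
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC

    driver = webdriver.Chrome()
    driver.get("https://example.com")                                         # hypothetical page
    driver.execute_script("setTimeout(function () { prompt('Name?'); }, 0)")  # raise a prompt

    WebDriverWait(driver, 5).until(EC.alert_is_present())
    alert = driver.switch_to.alert       # an Alert bound to the active prompt
    print(alert.text)                    # "Name?"
    alert.send_keys("Ada Lovelace")
    alert.accept()                       # or alert.dismiss() to cancel it
    driver.quit()
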
@@ -0,0 +1,16 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
483
venv/Lib/site-packages/selenium/webdriver/common/bidi/cdp.py
Normal file
@@ -0,0 +1,483 @@
|
||||
# The MIT License(MIT)
|
||||
#
|
||||
# Copyright(c) 2018 Hyperion Gray
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files(the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
# THE SOFTWARE.
|
||||
#
|
||||
# This code comes from https://github.com/HyperionGray/trio-chrome-devtools-protocol/tree/master/trio_cdp
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
from trio_websocket import (
|
||||
ConnectionClosed as WsConnectionClosed,
|
||||
connect_websocket_url,
|
||||
)
|
||||
import trio
|
||||
from collections import defaultdict
|
||||
from contextlib import (contextmanager, asynccontextmanager)
|
||||
from dataclasses import dataclass
|
||||
import contextvars
|
||||
import importlib
|
||||
import itertools
|
||||
import json
|
||||
import logging
|
||||
import typing
|
||||
|
||||
|
||||
logger = logging.getLogger('trio_cdp')
|
||||
T = typing.TypeVar('T')
|
||||
MAX_WS_MESSAGE_SIZE = 2**24
|
||||
|
||||
devtools = None
|
||||
version = None
|
||||
|
||||
|
||||
def import_devtools(ver):
|
||||
global devtools
|
||||
global version
|
||||
version = ver
|
||||
devtools = importlib.import_module("selenium.webdriver.common.devtools.v{}".format(version))
|
||||
|
||||
|
||||
_connection_context: contextvars.ContextVar = contextvars.ContextVar('connection_context')
|
||||
_session_context: contextvars.ContextVar = contextvars.ContextVar('session_context')
|
||||
|
||||
|
||||
def get_connection_context(fn_name):
|
||||
'''
|
||||
Look up the current connection. If there is no current connection, raise a
|
||||
``RuntimeError`` with a helpful message.
|
||||
'''
|
||||
try:
|
||||
return _connection_context.get()
|
||||
except LookupError:
|
||||
raise RuntimeError(f'{fn_name}() must be called in a connection context.')
|
||||
|
||||
|
||||
def get_session_context(fn_name):
|
||||
'''
|
||||
Look up the current session. If there is no current session, raise a
|
||||
``RuntimeError`` with a helpful message.
|
||||
'''
|
||||
try:
|
||||
return _session_context.get()
|
||||
except LookupError:
|
||||
raise RuntimeError(f'{fn_name}() must be called in a session context.')
|
||||
|
||||
|
||||
@contextmanager
|
||||
def connection_context(connection):
|
||||
''' This context manager installs ``connection`` as the connection context for the current
|
||||
Trio task. '''
|
||||
token = _connection_context.set(connection)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
_connection_context.reset(token)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def session_context(session):
|
||||
''' This context manager installs ``session`` as the session context for the current
|
||||
Trio task. '''
|
||||
token = _session_context.set(session)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
_session_context.reset(token)
|
||||
|
||||
|
||||
def set_global_connection(connection):
|
||||
'''
|
||||
Install ``connection`` in the root context so that it will become the default
|
||||
connection for all tasks. This is generally not recommended, except it may be
|
||||
necessary in certain use cases, such as running inside a Jupyter notebook.
|
||||
'''
|
||||
global _connection_context
|
||||
_connection_context = contextvars.ContextVar('_connection_context',
|
||||
default=connection)
|
||||
|
||||
|
||||
def set_global_session(session):
|
||||
'''
|
||||
Install ``session`` in the root context so that it will become the default
|
||||
session for all tasks. This is generally not recommended, except it may be
|
||||
necessary in certain use cases, such as running inside a Jupyter notebook.
|
||||
'''
|
||||
global _session_context
|
||||
_session_context = contextvars.ContextVar('_session_context', default=session)
|
||||
|
||||
|
||||
class BrowserError(Exception):
|
||||
''' This exception is raised when the browser's response to a command
|
||||
indicates that an error occurred. '''
|
||||
|
||||
def __init__(self, obj):
|
||||
self.code = obj['code']
|
||||
self.message = obj['message']
|
||||
self.detail = obj.get('data')
|
||||
|
||||
def __str__(self):
|
||||
return 'BrowserError<code={} message={}> {}'.format(self.code,
|
||||
self.message, self.detail)
|
||||
|
||||
|
||||
class CdpConnectionClosed(WsConnectionClosed):
|
||||
''' Raised when a public method is called on a closed CDP connection. '''
|
||||
|
||||
def __init__(self, reason):
|
||||
'''
|
||||
Constructor.
|
||||
:param reason:
|
||||
:type reason: wsproto.frame_protocol.CloseReason
|
||||
'''
|
||||
self.reason = reason
|
||||
|
||||
def __repr__(self):
|
||||
''' Return representation. '''
|
||||
return '{}<{}>'.format(self.__class__.__name__, self.reason)
|
||||
|
||||
|
||||
class InternalError(Exception):
|
||||
''' This exception is only raised when there is faulty logic in TrioCDP or
|
||||
the integration with PyCDP. '''
|
||||
|
||||
|
||||
@dataclass
|
||||
class CmEventProxy:
|
||||
''' A proxy object returned by :meth:`CdpBase.wait_for`. After the
|
||||
context manager executes, this proxy object will have a value set that
|
||||
contains the returned event. '''
|
||||
value: typing.Any = None
|
||||
|
||||
|
||||
class CdpBase:
|
||||
|
||||
def __init__(self, ws, session_id, target_id):
|
||||
self.ws = ws
|
||||
self.session_id = session_id
|
||||
self.target_id = target_id
|
||||
self.channels = defaultdict(set)
|
||||
self.id_iter = itertools.count()
|
||||
self.inflight_cmd = dict()
|
||||
self.inflight_result = dict()
|
||||
|
||||
async def execute(self, cmd: typing.Generator[dict, T, typing.Any]) -> T:
|
||||
'''
|
||||
Execute a command on the server and wait for the result.
|
||||
:param cmd: any CDP command
|
||||
:returns: a CDP result
|
||||
'''
|
||||
cmd_id = next(self.id_iter)
|
||||
cmd_event = trio.Event()
|
||||
self.inflight_cmd[cmd_id] = cmd, cmd_event
|
||||
request = next(cmd)
|
||||
request['id'] = cmd_id
|
||||
if self.session_id:
|
||||
request['sessionId'] = self.session_id
|
||||
request_str = json.dumps(request)
|
||||
try:
|
||||
await self.ws.send_message(request_str)
|
||||
except WsConnectionClosed as wcc:
|
||||
raise CdpConnectionClosed(wcc.reason) from None
|
||||
await cmd_event.wait()
|
||||
response = self.inflight_result.pop(cmd_id)
|
||||
if isinstance(response, Exception):
|
||||
raise response
|
||||
return response
|
||||
|
||||
def listen(self, *event_types, buffer_size=10):
|
||||
''' Return an async iterator that iterates over events matching the
|
||||
indicated types. '''
|
||||
sender, receiver = trio.open_memory_channel(buffer_size)
|
||||
for event_type in event_types:
|
||||
self.channels[event_type].add(sender)
|
||||
return receiver
|
||||
|
||||
@asynccontextmanager
|
||||
async def wait_for(self, event_type: typing.Type[T], buffer_size=10) -> \
|
||||
typing.AsyncGenerator[CmEventProxy, None]:
|
||||
'''
|
||||
Wait for an event of the given type and return it.
|
||||
This is an async context manager, so you should open it inside an async
|
||||
with block. The block will not exit until the indicated event is
|
||||
received.
|
||||
'''
|
||||
sender, receiver = trio.open_memory_channel(buffer_size)
|
||||
self.channels[event_type].add(sender)
|
||||
proxy = CmEventProxy()
|
||||
yield proxy
|
||||
async with receiver:
|
||||
event = await receiver.receive()
|
||||
proxy.value = event
|
||||
|
||||
def _handle_data(self, data):
|
||||
'''
|
||||
Handle incoming WebSocket data.
|
||||
:param dict data: a JSON dictionary
|
||||
'''
|
||||
if 'id' in data:
|
||||
self._handle_cmd_response(data)
|
||||
else:
|
||||
self._handle_event(data)
|
||||
|
||||
def _handle_cmd_response(self, data):
|
||||
'''
|
||||
Handle a response to a command. This will set an event flag that will
|
||||
return control to the task that called the command.
|
||||
:param dict data: response as a JSON dictionary
|
||||
'''
|
||||
cmd_id = data['id']
|
||||
try:
|
||||
cmd, event = self.inflight_cmd.pop(cmd_id)
|
||||
except KeyError:
|
||||
logger.warning('Got a message with a command ID that does'
|
||||
' not exist: {}'.format(data))
|
||||
return
|
||||
if 'error' in data:
|
||||
# If the server reported an error, convert it to an exception and do
|
||||
# not process the response any further.
|
||||
self.inflight_result[cmd_id] = BrowserError(data['error'])
|
||||
else:
|
||||
# Otherwise, continue the generator to parse the JSON result
|
||||
# into a CDP object.
|
||||
try:
|
||||
response = cmd.send(data['result'])
|
||||
raise InternalError("The command's generator function "
|
||||
"did not exit when expected!")
|
||||
except StopIteration as exit:
|
||||
return_ = exit.value
|
||||
self.inflight_result[cmd_id] = return_
|
||||
event.set()
|
||||
|
||||
def _handle_event(self, data):
|
||||
'''
|
||||
Handle an event.
|
||||
:param dict data: event as a JSON dictionary
|
||||
'''
|
||||
global devtools
|
||||
event = devtools.util.parse_json_event(data)
|
||||
logger.debug('Received event: %s', event)
|
||||
to_remove = set()
|
||||
for sender in self.channels[type(event)]:
|
||||
try:
|
||||
sender.send_nowait(event)
|
||||
except trio.WouldBlock:
|
||||
logger.error('Unable to send event "%r" due to full channel %s',
|
||||
event, sender)
|
||||
except trio.BrokenResourceError:
|
||||
to_remove.add(sender)
|
||||
if to_remove:
|
||||
self.channels[type(event)] -= to_remove
|
||||
|
||||
|
||||
class CdpSession(CdpBase):
|
||||
'''
|
||||
Contains the state for a CDP session.
|
||||
Generally you should not instantiate this object yourself; you should call
|
||||
:meth:`CdpConnection.open_session`.
|
||||
'''
|
||||
|
||||
def __init__(self, ws, session_id, target_id):
|
||||
'''
|
||||
Constructor.
|
||||
:param trio_websocket.WebSocketConnection ws:
|
||||
:param devtools.target.SessionID session_id:
|
||||
:param devtools.target.TargetID target_id:
|
||||
'''
|
||||
super().__init__(ws, session_id, target_id)
|
||||
|
||||
self._dom_enable_count = 0
|
||||
self._dom_enable_lock = trio.Lock()
|
||||
self._page_enable_count = 0
|
||||
self._page_enable_lock = trio.Lock()
|
||||
|
||||
@asynccontextmanager
|
||||
async def dom_enable(self):
|
||||
'''
|
||||
A context manager that executes ``dom.enable()`` when it enters and then
|
||||
calls ``dom.disable()`` when it exits.
|
||||
This keeps track of concurrent callers and only disables DOM events when
|
||||
all callers have exited.
|
||||
'''
|
||||
global devtools
|
||||
async with self._dom_enable_lock:
|
||||
self._dom_enable_count += 1
|
||||
if self._dom_enable_count == 1:
|
||||
await self.execute(devtools.dom.enable())
|
||||
|
||||
yield
|
||||
|
||||
async with self._dom_enable_lock:
|
||||
self._dom_enable_count -= 1
|
||||
if self._dom_enable_count == 0:
|
||||
await self.execute(devtools.dom.disable())
|
||||
|
||||
@asynccontextmanager
|
||||
async def page_enable(self):
|
||||
'''
|
||||
A context manager that executes ``page.enable()`` when it enters and
|
||||
then calls ``page.disable()`` when it exits.
|
||||
This keeps track of concurrent callers and only disables page events
|
||||
when all callers have exited.
|
||||
'''
|
||||
global devtools
|
||||
async with self._page_enable_lock:
|
||||
self._page_enable_count += 1
|
||||
if self._page_enable_count == 1:
|
||||
await self.execute(devtools.page.enable())
|
||||
|
||||
yield
|
||||
|
||||
async with self._page_enable_lock:
|
||||
self._page_enable_count -= 1
|
||||
if self._page_enable_count == 0:
|
||||
await self.execute(devtools.page.disable())
|
||||
|
||||
|
||||
class CdpConnection(CdpBase, trio.abc.AsyncResource):
|
||||
'''
|
||||
Contains the connection state for a Chrome DevTools Protocol server.
|
||||
CDP can multiplex multiple "sessions" over a single connection. This class
|
||||
corresponds to the "root" session, i.e. the implicitly created session that
|
||||
has no session ID. This class is responsible for reading incoming WebSocket
|
||||
messages and forwarding them to the corresponding session, as well as
|
||||
handling messages targeted at the root session itself.
|
||||
You should generally call :func:`open_cdp` instead of
|
||||
instantiating this class directly.
|
||||
'''
|
||||
|
||||
def __init__(self, ws):
|
||||
'''
|
||||
Constructor
|
||||
:param trio_websocket.WebSocketConnection ws:
|
||||
'''
|
||||
super().__init__(ws, session_id=None, target_id=None)
|
||||
self.sessions = dict()
|
||||
|
||||
async def aclose(self):
|
||||
'''
|
||||
Close the underlying WebSocket connection.
|
||||
This will cause the reader task to gracefully exit when it tries to read
|
||||
the next message from the WebSocket. All of the public APIs
|
||||
(``execute()``, ``listen()``, etc.) will raise
|
||||
``CdpConnectionClosed`` after the CDP connection is closed.
|
||||
It is safe to call this multiple times.
|
||||
'''
|
||||
await self.ws.aclose()
|
||||
|
||||
@asynccontextmanager
|
||||
async def open_session(self, target_id) -> \
|
||||
typing.AsyncIterator[CdpSession]:
|
||||
'''
|
||||
This context manager opens a session and enables the "simple" style of calling
|
||||
CDP APIs.
|
||||
For example, inside a session context, you can call ``await dom.get_document()``
|
||||
and it will execute on the current session automatically.
|
||||
'''
|
||||
session = await self.connect_session(target_id)
|
||||
with session_context(session):
|
||||
yield session
|
||||
|
||||
async def connect_session(self, target_id) -> 'CdpSession':
|
||||
'''
|
||||
Returns a new :class:`CdpSession` connected to the specified target.
|
||||
'''
|
||||
global devtools
|
||||
session_id = await self.execute(devtools.target.attach_to_target(
|
||||
target_id, True))
|
||||
session = CdpSession(self.ws, session_id, target_id)
|
||||
self.sessions[session_id] = session
|
||||
return session
|
||||
|
||||
async def _reader_task(self):
|
||||
'''
|
||||
Runs in the background and handles incoming messages: dispatching
|
||||
responses to commands and events to listeners.
|
||||
'''
|
||||
global devtools
|
||||
while True:
|
||||
try:
|
||||
message = await self.ws.get_message()
|
||||
except WsConnectionClosed:
|
||||
# If the WebSocket is closed, we don't want to throw an
|
||||
# exception from the reader task. Instead we will throw
|
||||
# exceptions from the public API methods, and we can quietly
|
||||
# exit the reader task here.
|
||||
break
|
||||
try:
|
||||
data = json.loads(message)
|
||||
except json.JSONDecodeError:
|
||||
raise BrowserError({
|
||||
'code': -32700,
|
||||
'message': 'Client received invalid JSON',
|
||||
'data': message
|
||||
})
|
||||
logger.debug('Received message %r', data)
|
||||
if 'sessionId' in data:
|
||||
session_id = devtools.target.SessionID(data['sessionId'])
|
||||
try:
|
||||
session = self.sessions[session_id]
|
||||
except KeyError:
|
||||
raise BrowserError('Browser sent a message for an invalid '
|
||||
'session: {!r}'.format(session_id))
|
||||
session._handle_data(data)
|
||||
else:
|
||||
self._handle_data(data)
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def open_cdp(url) -> typing.AsyncIterator[CdpConnection]:
|
||||
'''
|
||||
This async context manager opens a connection to the browser specified by
|
||||
``url`` before entering the block, then closes the connection when the block
|
||||
exits.
|
||||
The context manager also sets the connection as the default connection for the
|
||||
current task, so that commands like ``await target.get_targets()`` will run on this
|
||||
connection automatically. If you want to use multiple connections concurrently, it
|
||||
is recommended to open each one in a separate task.
|
||||
'''
|
||||
|
||||
async with trio.open_nursery() as nursery:
|
||||
conn = await connect_cdp(nursery, url)
|
||||
try:
|
||||
with connection_context(conn):
|
||||
yield conn
|
||||
finally:
|
||||
await conn.aclose()
|
||||
|
||||
|
||||
async def connect_cdp(nursery, url) -> CdpConnection:
|
||||
'''
|
||||
Connect to the browser specified by ``url`` and spawn a background task in the
|
||||
specified nursery.
|
||||
The ``open_cdp()`` context manager is preferred in most situations. You should only
|
||||
use this function if you need to specify a custom nursery.
|
||||
This connection is not automatically closed! You can either use the connection
|
||||
object as a context manager (``async with conn:``) or else call ``await
|
||||
conn.aclose()`` on it when you are done with it.
|
||||
|
||||
'''
|
||||
ws = await connect_websocket_url(nursery, url,
|
||||
max_message_size=MAX_WS_MESSAGE_SIZE)
|
||||
cdp_conn = CdpConnection(ws)
|
||||
nursery.start_soon(cdp_conn._reader_task)
|
||||
return cdp_conn
|
||||
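
Putting the pieces together: import_devtools() must be called before any CDP command is built, and open_cdp() then installs the connection as the default for the current task. A rough sketch under Trio, assuming the "85" devtools version bundled with this release and a placeholder DevTools websocket URL::

    import trio
    from selenium.webdriver.common.bidi import cdp

    async def dump_targets(ws_url):
        cdp.import_devtools("85")                 # version is an assumption; match your browser
        devtools = cdp.devtools
        async with cdp.open_cdp(ws_url) as conn:  # conn becomes the default for this task
            infos = await conn.execute(devtools.target.get_targets())
            for info in infos:
                print(info.target_id, info.url)

    # trio.run(dump_targets, "ws://localhost:9222/devtools/browser/<id>")  # placeholder URL
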
@@ -0,0 +1,25 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class Console(Enum):
|
||||
|
||||
ALL = "all"
|
||||
LOG = "log"
|
||||
ERROR = "error"
|
||||
35
venv/Lib/site-packages/selenium/webdriver/common/by.py
Normal file
@@ -0,0 +1,35 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
The By implementation.
|
||||
"""
|
||||
|
||||
|
||||
class By(object):
|
||||
"""
|
||||
Set of supported locator strategies.
|
||||
"""
|
||||
|
||||
ID = "id"
|
||||
XPATH = "xpath"
|
||||
LINK_TEXT = "link text"
|
||||
PARTIAL_LINK_TEXT = "partial link text"
|
||||
NAME = "name"
|
||||
TAG_NAME = "tag name"
|
||||
CLASS_NAME = "class name"
|
||||
CSS_SELECTOR = "css selector"
|
||||
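
Each constant names a locator strategy; they are normally passed straight to find_element / find_elements. A short sketch, not part of the diffed file, assuming a live driver::

    from selenium import webdriver
    from selenium.webdriver.common.by import By

    driver = webdriver.Chrome()
    driver.get("https://example.com")
    heading = driver.find_element(By.TAG_NAME, "h1")            # first match or NoSuchElementException
    links = driver.find_elements(By.CSS_SELECTOR, "a[href]")    # possibly empty list
    print(heading.text, len(links))
    driver.quit()
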
@@ -0,0 +1,113 @@
|
||||
# Licensed to the Software Freedom Conservancy (SFC) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The SFC licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
The Desired Capabilities implementation.
|
||||
"""
|
||||
|
||||
|
||||
class DesiredCapabilities(object):
|
||||
"""
|
||||
Set of default supported desired capabilities.
|
||||
|
||||
Use this as a starting point for creating a desired capabilities object for
|
||||
requesting a remote WebDriver when connecting to a Selenium server or Selenium Grid.
|
||||
|
||||
Usage Example::
|
||||
|
||||
from selenium import webdriver
|
||||
|
||||
selenium_grid_url = "http://198.0.0.1:4444/wd/hub"
|
||||
|
||||
# Create a desired capabilities object as a starting point.
|
||||
capabilities = DesiredCapabilities.FIREFOX.copy()
|
||||
capabilities['platform'] = "WINDOWS"
|
||||
capabilities['version'] = "10"
|
||||
|
||||
# Instantiate an instance of Remote WebDriver with the desired capabilities.
|
||||
driver = webdriver.Remote(desired_capabilities=capabilities,
|
||||
command_executor=selenium_grid_url)
|
||||
|
||||
Note: Always use '.copy()' on the DesiredCapabilities object to avoid the side
|
||||
effects of altering the Global class instance.
|
||||
|
||||
"""
|
||||
|
||||
FIREFOX = {
|
||||
"browserName": "firefox",
|
||||
"acceptInsecureCerts": True,
|
||||
"moz:debuggerAddress": True,
|
||||
}
|
||||
|
||||
INTERNETEXPLORER = {
|
||||
"browserName": "internet explorer",
|
||||
"platformName": "windows",
|
||||
}
|
||||
|
||||
EDGE = {
|
||||
"browserName": "MicrosoftEdge",
|
||||
}
|
||||
|
||||
CHROME = {
|
||||
"browserName": "chrome",
|
||||
}
|
||||
|
||||
OPERA = {
|
||||
"browserName": "opera",
|
||||
}
|
||||
|
||||
SAFARI = {
|
||||
"browserName": "safari",
|
||||
"platformName": "mac",
|
||||
}
|
||||
|
||||
HTMLUNIT = {
|
||||
"browserName": "htmlunit",
|
||||
"version": "",
|
||||
"platform": "ANY",
|
||||
}
|
||||
|
||||
HTMLUNITWITHJS = {
|
||||
"browserName": "htmlunit",
|
||||
"version": "firefox",
|
||||
"platform": "ANY",
|
||||
"javascriptEnabled": True,
|
||||
}
|
||||
|
||||
IPHONE = {
|
||||
"browserName": "iPhone",
|
||||
"version": "",
|
||||
"platform": "mac",
|
||||
}
|
||||
|
||||
IPAD = {
|
||||
"browserName": "iPad",
|
||||
"version": "",
|
||||
"platform": "mac",
|
||||
}
|
||||
|
||||
WEBKITGTK = {
|
||||
"browserName": "MiniBrowser",
|
||||
"version": "",
|
||||
"platform": "ANY",
|
||||
}
|
||||
|
||||
WPEWEBKIT = {
|
||||
"browserName": "MiniBrowser",
|
||||
"version": "",
|
||||
"platform": "ANY",
|
||||
}
|
||||
@@ -0,0 +1,52 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
from . import accessibility
|
||||
from . import animation
|
||||
from . import audits
|
||||
from . import background_service
|
||||
from . import browser
|
||||
from . import css
|
||||
from . import cache_storage
|
||||
from . import cast
|
||||
from . import console
|
||||
from . import dom
|
||||
from . import dom_debugger
|
||||
from . import dom_snapshot
|
||||
from . import dom_storage
|
||||
from . import database
|
||||
from . import debugger
|
||||
from . import device_orientation
|
||||
from . import emulation
|
||||
from . import event_breakpoints
|
||||
from . import fetch
|
||||
from . import headless_experimental
|
||||
from . import heap_profiler
|
||||
from . import io
|
||||
from . import indexed_db
|
||||
from . import input_
|
||||
from . import inspector
|
||||
from . import layer_tree
|
||||
from . import log
|
||||
from . import media
|
||||
from . import memory
|
||||
from . import network
|
||||
from . import overlay
|
||||
from . import page
|
||||
from . import performance
|
||||
from . import performance_timeline
|
||||
from . import profiler
|
||||
from . import runtime
|
||||
from . import schema
|
||||
from . import security
|
||||
from . import service_worker
|
||||
from . import storage
|
||||
from . import system_info
|
||||
from . import target
|
||||
from . import tethering
|
||||
from . import tracing
|
||||
from . import web_audio
|
||||
from . import web_authn
|
||||
from . import util
|
||||
|
||||
@@ -0,0 +1,644 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Accessibility (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import page
|
||||
from . import runtime
|
||||
|
||||
|
||||
class AXNodeId(str):
|
||||
'''
|
||||
Unique accessibility node identifier.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> AXNodeId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'AXNodeId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class AXValueType(enum.Enum):
|
||||
'''
|
||||
Enum of possible property types.
|
||||
'''
|
||||
BOOLEAN = "boolean"
|
||||
TRISTATE = "tristate"
|
||||
BOOLEAN_OR_UNDEFINED = "booleanOrUndefined"
|
||||
IDREF = "idref"
|
||||
IDREF_LIST = "idrefList"
|
||||
INTEGER = "integer"
|
||||
NODE = "node"
|
||||
NODE_LIST = "nodeList"
|
||||
NUMBER = "number"
|
||||
STRING = "string"
|
||||
COMPUTED_STRING = "computedString"
|
||||
TOKEN = "token"
|
||||
TOKEN_LIST = "tokenList"
|
||||
DOM_RELATION = "domRelation"
|
||||
ROLE = "role"
|
||||
INTERNAL_ROLE = "internalRole"
|
||||
VALUE_UNDEFINED = "valueUndefined"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class AXValueSourceType(enum.Enum):
|
||||
'''
|
||||
Enum of possible property sources.
|
||||
'''
|
||||
ATTRIBUTE = "attribute"
|
||||
IMPLICIT = "implicit"
|
||||
STYLE = "style"
|
||||
CONTENTS = "contents"
|
||||
PLACEHOLDER = "placeholder"
|
||||
RELATED_ELEMENT = "relatedElement"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class AXValueNativeSourceType(enum.Enum):
|
||||
'''
|
||||
Enum of possible native property sources (as a subtype of a particular AXValueSourceType).
|
||||
'''
|
||||
DESCRIPTION = "description"
|
||||
FIGCAPTION = "figcaption"
|
||||
LABEL = "label"
|
||||
LABELFOR = "labelfor"
|
||||
LABELWRAPPED = "labelwrapped"
|
||||
LEGEND = "legend"
|
||||
RUBYANNOTATION = "rubyannotation"
|
||||
TABLECAPTION = "tablecaption"
|
||||
TITLE = "title"
|
||||
OTHER = "other"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AXValueSource:
|
||||
'''
|
||||
A single source for a computed AX property.
|
||||
'''
|
||||
#: What type of source this is.
|
||||
type_: AXValueSourceType
|
||||
|
||||
#: The value of this property source.
|
||||
value: typing.Optional[AXValue] = None
|
||||
|
||||
#: The name of the relevant attribute, if any.
|
||||
attribute: typing.Optional[str] = None
|
||||
|
||||
#: The value of the relevant attribute, if any.
|
||||
attribute_value: typing.Optional[AXValue] = None
|
||||
|
||||
#: Whether this source is superseded by a higher priority source.
|
||||
superseded: typing.Optional[bool] = None
|
||||
|
||||
#: The native markup source for this value, e.g. a <label> element.
|
||||
native_source: typing.Optional[AXValueNativeSourceType] = None
|
||||
|
||||
#: The value, such as a node or node list, of the native source.
|
||||
native_source_value: typing.Optional[AXValue] = None
|
||||
|
||||
#: Whether the value for this property is invalid.
|
||||
invalid: typing.Optional[bool] = None
|
||||
|
||||
#: Reason for the value being invalid, if it is.
|
||||
invalid_reason: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_.to_json()
|
||||
if self.value is not None:
|
||||
json['value'] = self.value.to_json()
|
||||
if self.attribute is not None:
|
||||
json['attribute'] = self.attribute
|
||||
if self.attribute_value is not None:
|
||||
json['attributeValue'] = self.attribute_value.to_json()
|
||||
if self.superseded is not None:
|
||||
json['superseded'] = self.superseded
|
||||
if self.native_source is not None:
|
||||
json['nativeSource'] = self.native_source.to_json()
|
||||
if self.native_source_value is not None:
|
||||
json['nativeSourceValue'] = self.native_source_value.to_json()
|
||||
if self.invalid is not None:
|
||||
json['invalid'] = self.invalid
|
||||
if self.invalid_reason is not None:
|
||||
json['invalidReason'] = self.invalid_reason
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=AXValueSourceType.from_json(json['type']),
|
||||
value=AXValue.from_json(json['value']) if 'value' in json else None,
|
||||
attribute=str(json['attribute']) if 'attribute' in json else None,
|
||||
attribute_value=AXValue.from_json(json['attributeValue']) if 'attributeValue' in json else None,
|
||||
superseded=bool(json['superseded']) if 'superseded' in json else None,
|
||||
native_source=AXValueNativeSourceType.from_json(json['nativeSource']) if 'nativeSource' in json else None,
|
||||
native_source_value=AXValue.from_json(json['nativeSourceValue']) if 'nativeSourceValue' in json else None,
|
||||
invalid=bool(json['invalid']) if 'invalid' in json else None,
|
||||
invalid_reason=str(json['invalidReason']) if 'invalidReason' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AXRelatedNode:
|
||||
#: The BackendNodeId of the related DOM node.
|
||||
backend_dom_node_id: dom.BackendNodeId
|
||||
|
||||
#: The IDRef value provided, if any.
|
||||
idref: typing.Optional[str] = None
|
||||
|
||||
#: The text alternative of this node in the current context.
|
||||
text: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['backendDOMNodeId'] = self.backend_dom_node_id.to_json()
|
||||
if self.idref is not None:
|
||||
json['idref'] = self.idref
|
||||
if self.text is not None:
|
||||
json['text'] = self.text
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
backend_dom_node_id=dom.BackendNodeId.from_json(json['backendDOMNodeId']),
|
||||
idref=str(json['idref']) if 'idref' in json else None,
|
||||
text=str(json['text']) if 'text' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AXProperty:
|
||||
#: The name of this property.
|
||||
name: AXPropertyName
|
||||
|
||||
#: The value of this property.
|
||||
value: AXValue
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name.to_json()
|
||||
json['value'] = self.value.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=AXPropertyName.from_json(json['name']),
|
||||
value=AXValue.from_json(json['value']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AXValue:
|
||||
'''
|
||||
A single computed AX property.
|
||||
'''
|
||||
#: The type of this value.
|
||||
type_: AXValueType
|
||||
|
||||
#: The computed value of this property.
|
||||
value: typing.Optional[typing.Any] = None
|
||||
|
||||
#: One or more related nodes, if applicable.
|
||||
related_nodes: typing.Optional[typing.List[AXRelatedNode]] = None
|
||||
|
||||
#: The sources which contributed to the computation of this property.
|
||||
sources: typing.Optional[typing.List[AXValueSource]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_.to_json()
|
||||
if self.value is not None:
|
||||
json['value'] = self.value
|
||||
if self.related_nodes is not None:
|
||||
json['relatedNodes'] = [i.to_json() for i in self.related_nodes]
|
||||
if self.sources is not None:
|
||||
json['sources'] = [i.to_json() for i in self.sources]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=AXValueType.from_json(json['type']),
|
||||
value=json['value'] if 'value' in json else None,
|
||||
related_nodes=[AXRelatedNode.from_json(i) for i in json['relatedNodes']] if 'relatedNodes' in json else None,
|
||||
sources=[AXValueSource.from_json(i) for i in json['sources']] if 'sources' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class AXPropertyName(enum.Enum):
|
||||
'''
|
||||
Values of AXProperty name:
|
||||
- from 'busy' to 'roledescription': states which apply to every AX node
|
||||
- from 'live' to 'root': attributes which apply to nodes in live regions
|
||||
- from 'autocomplete' to 'valuetext': attributes which apply to widgets
|
||||
- from 'checked' to 'selected': states which apply to widgets
|
||||
- from 'activedescendant' to 'owns' - relationships between elements other than parent/child/sibling.
|
||||
'''
|
||||
BUSY = "busy"
|
||||
DISABLED = "disabled"
|
||||
EDITABLE = "editable"
|
||||
FOCUSABLE = "focusable"
|
||||
FOCUSED = "focused"
|
||||
HIDDEN = "hidden"
|
||||
HIDDEN_ROOT = "hiddenRoot"
|
||||
INVALID = "invalid"
|
||||
KEYSHORTCUTS = "keyshortcuts"
|
||||
SETTABLE = "settable"
|
||||
ROLEDESCRIPTION = "roledescription"
|
||||
LIVE = "live"
|
||||
ATOMIC = "atomic"
|
||||
RELEVANT = "relevant"
|
||||
ROOT = "root"
|
||||
AUTOCOMPLETE = "autocomplete"
|
||||
HAS_POPUP = "hasPopup"
|
||||
LEVEL = "level"
|
||||
MULTISELECTABLE = "multiselectable"
|
||||
ORIENTATION = "orientation"
|
||||
MULTILINE = "multiline"
|
||||
READONLY = "readonly"
|
||||
REQUIRED = "required"
|
||||
VALUEMIN = "valuemin"
|
||||
VALUEMAX = "valuemax"
|
||||
VALUETEXT = "valuetext"
|
||||
CHECKED = "checked"
|
||||
EXPANDED = "expanded"
|
||||
MODAL = "modal"
|
||||
PRESSED = "pressed"
|
||||
SELECTED = "selected"
|
||||
ACTIVEDESCENDANT = "activedescendant"
|
||||
CONTROLS = "controls"
|
||||
DESCRIBEDBY = "describedby"
|
||||
DETAILS = "details"
|
||||
ERRORMESSAGE = "errormessage"
|
||||
FLOWTO = "flowto"
|
||||
LABELLEDBY = "labelledby"
|
||||
OWNS = "owns"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AXNode:
|
||||
'''
|
||||
A node in the accessibility tree.
|
||||
'''
|
||||
#: Unique identifier for this node.
|
||||
node_id: AXNodeId
|
||||
|
||||
#: Whether this node is ignored for accessibility
|
||||
ignored: bool
|
||||
|
||||
#: Collection of reasons why this node is hidden.
|
||||
ignored_reasons: typing.Optional[typing.List[AXProperty]] = None
|
||||
|
||||
#: This ``Node``'s role, whether explicit or implicit.
|
||||
role: typing.Optional[AXValue] = None
|
||||
|
||||
#: The accessible name for this ``Node``.
|
||||
name: typing.Optional[AXValue] = None
|
||||
|
||||
#: The accessible description for this ``Node``.
|
||||
description: typing.Optional[AXValue] = None
|
||||
|
||||
#: The value for this ``Node``.
|
||||
value: typing.Optional[AXValue] = None
|
||||
|
||||
#: All other properties
|
||||
properties: typing.Optional[typing.List[AXProperty]] = None
|
||||
|
||||
#: ID for this node's parent.
|
||||
parent_id: typing.Optional[AXNodeId] = None
|
||||
|
||||
#: IDs for each of this node's child nodes.
|
||||
child_ids: typing.Optional[typing.List[AXNodeId]] = None
|
||||
|
||||
#: The backend ID for the associated DOM node, if any.
|
||||
backend_dom_node_id: typing.Optional[dom.BackendNodeId] = None
|
||||
|
||||
#: The frame ID for the frame associated with this node's document.
|
||||
frame_id: typing.Optional[page.FrameId] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['nodeId'] = self.node_id.to_json()
|
||||
json['ignored'] = self.ignored
|
||||
if self.ignored_reasons is not None:
|
||||
json['ignoredReasons'] = [i.to_json() for i in self.ignored_reasons]
|
||||
if self.role is not None:
|
||||
json['role'] = self.role.to_json()
|
||||
if self.name is not None:
|
||||
json['name'] = self.name.to_json()
|
||||
if self.description is not None:
|
||||
json['description'] = self.description.to_json()
|
||||
if self.value is not None:
|
||||
json['value'] = self.value.to_json()
|
||||
if self.properties is not None:
|
||||
json['properties'] = [i.to_json() for i in self.properties]
|
||||
if self.parent_id is not None:
|
||||
json['parentId'] = self.parent_id.to_json()
|
||||
if self.child_ids is not None:
|
||||
json['childIds'] = [i.to_json() for i in self.child_ids]
|
||||
if self.backend_dom_node_id is not None:
|
||||
json['backendDOMNodeId'] = self.backend_dom_node_id.to_json()
|
||||
if self.frame_id is not None:
|
||||
json['frameId'] = self.frame_id.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
node_id=AXNodeId.from_json(json['nodeId']),
|
||||
ignored=bool(json['ignored']),
|
||||
ignored_reasons=[AXProperty.from_json(i) for i in json['ignoredReasons']] if 'ignoredReasons' in json else None,
|
||||
role=AXValue.from_json(json['role']) if 'role' in json else None,
|
||||
name=AXValue.from_json(json['name']) if 'name' in json else None,
|
||||
description=AXValue.from_json(json['description']) if 'description' in json else None,
|
||||
value=AXValue.from_json(json['value']) if 'value' in json else None,
|
||||
properties=[AXProperty.from_json(i) for i in json['properties']] if 'properties' in json else None,
|
||||
parent_id=AXNodeId.from_json(json['parentId']) if 'parentId' in json else None,
|
||||
child_ids=[AXNodeId.from_json(i) for i in json['childIds']] if 'childIds' in json else None,
|
||||
backend_dom_node_id=dom.BackendNodeId.from_json(json['backendDOMNodeId']) if 'backendDOMNodeId' in json else None,
|
||||
frame_id=page.FrameId.from_json(json['frameId']) if 'frameId' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables the accessibility domain.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables the accessibility domain which causes ``AXNodeId``'s to remain consistent between method calls.
|
||||
This turns on accessibility for the page, which can impact performance until accessibility is disabled.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_partial_ax_tree(
|
||||
node_id: typing.Optional[dom.NodeId] = None,
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None,
|
||||
object_id: typing.Optional[runtime.RemoteObjectId] = None,
|
||||
fetch_relatives: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
|
||||
'''
|
||||
Fetches the accessibility node and partial accessibility tree for this DOM node, if it exists.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param node_id: *(Optional)* Identifier of the node to get the partial accessibility tree for.
|
||||
:param backend_node_id: *(Optional)* Identifier of the backend node to get the partial accessibility tree for.
|
||||
:param object_id: *(Optional)* JavaScript object id of the node wrapper to get the partial accessibility tree for.
|
||||
:param fetch_relatives: *(Optional)* Whether to fetch this node's ancestors, siblings and children. Defaults to true.
|
||||
:returns: The ``Accessibility.AXNode`` for this DOM node, if it exists, plus its ancestors, siblings and children, if requested.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if node_id is not None:
|
||||
params['nodeId'] = node_id.to_json()
|
||||
if backend_node_id is not None:
|
||||
params['backendNodeId'] = backend_node_id.to_json()
|
||||
if object_id is not None:
|
||||
params['objectId'] = object_id.to_json()
|
||||
if fetch_relatives is not None:
|
||||
params['fetchRelatives'] = fetch_relatives
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.getPartialAXTree',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [AXNode.from_json(i) for i in json['nodes']]
|
||||
|
||||
|
||||
def get_full_ax_tree(
|
||||
depth: typing.Optional[int] = None,
|
||||
max_depth: typing.Optional[int] = None,
|
||||
frame_id: typing.Optional[page.FrameId] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
|
||||
'''
|
||||
Fetches the entire accessibility tree for the root Document
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param depth: *(Optional)* The maximum depth at which descendants of the root node should be retrieved. If omitted, the full tree is returned.
|
||||
:param max_depth: *(Optional)* Deprecated. This parameter has been renamed to ```depth```. If depth is not provided, max_depth will be used.
|
||||
:param frame_id: *(Optional)* The frame for whose document the AX tree should be retrieved. If omitted, the root frame is used.
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if depth is not None:
|
||||
params['depth'] = depth
|
||||
if max_depth is not None:
|
||||
params['max_depth'] = max_depth
|
||||
if frame_id is not None:
|
||||
params['frameId'] = frame_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.getFullAXTree',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [AXNode.from_json(i) for i in json['nodes']]
|
||||
|
||||
|
||||
def get_root_ax_node(
|
||||
frame_id: typing.Optional[page.FrameId] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,AXNode]:
|
||||
'''
|
||||
Fetches the root node.
|
||||
Requires ``enable()`` to have been called previously.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param frame_id: *(Optional)* The frame in whose document the node resides. If omitted, the root frame is used.
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if frame_id is not None:
|
||||
params['frameId'] = frame_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.getRootAXNode',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return AXNode.from_json(json['node'])
|
||||
|
||||
|
||||
def get_ax_node_and_ancestors(
|
||||
node_id: typing.Optional[dom.NodeId] = None,
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None,
|
||||
object_id: typing.Optional[runtime.RemoteObjectId] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
|
||||
'''
|
||||
Fetches a node and all ancestors up to and including the root.
|
||||
Requires ``enable()`` to have been called previously.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param node_id: *(Optional)* Identifier of the node to get.
|
||||
:param backend_node_id: *(Optional)* Identifier of the backend node to get.
|
||||
:param object_id: *(Optional)* JavaScript object id of the node wrapper to get.
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if node_id is not None:
|
||||
params['nodeId'] = node_id.to_json()
|
||||
if backend_node_id is not None:
|
||||
params['backendNodeId'] = backend_node_id.to_json()
|
||||
if object_id is not None:
|
||||
params['objectId'] = object_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.getAXNodeAndAncestors',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [AXNode.from_json(i) for i in json['nodes']]
|
||||
|
||||
|
||||
def get_child_ax_nodes(
|
||||
id_: AXNodeId,
|
||||
frame_id: typing.Optional[page.FrameId] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
|
||||
'''
|
||||
Fetches a particular accessibility node by AXNodeId.
|
||||
Requires ``enable()`` to have been called previously.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param id_:
|
||||
:param frame_id: *(Optional)* The frame in whose document the node resides. If omitted, the root frame is used.
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['id'] = id_.to_json()
|
||||
if frame_id is not None:
|
||||
params['frameId'] = frame_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.getChildAXNodes',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [AXNode.from_json(i) for i in json['nodes']]
|
||||
|
||||
|
||||
def query_ax_tree(
|
||||
node_id: typing.Optional[dom.NodeId] = None,
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None,
|
||||
object_id: typing.Optional[runtime.RemoteObjectId] = None,
|
||||
accessible_name: typing.Optional[str] = None,
|
||||
role: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
|
||||
'''
|
||||
Query a DOM node's accessibility subtree for accessible name and role.
|
||||
This command computes the name and role for all nodes in the subtree, including those that are
|
||||
ignored for accessibility, and returns those that match the specified name and role. If no DOM
|
||||
node is specified, or the DOM node does not exist, the command returns an error. If neither
|
||||
``accessibleName`` nor ``role`` is specified, it returns all the accessibility nodes in the subtree.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param node_id: *(Optional)* Identifier of the node for the root to query.
|
||||
:param backend_node_id: *(Optional)* Identifier of the backend node for the root to query.
|
||||
:param object_id: *(Optional)* JavaScript object id of the node wrapper for the root to query.
|
||||
:param accessible_name: *(Optional)* Find nodes with this computed name.
|
||||
:param role: *(Optional)* Find nodes with this computed role.
|
||||
:returns: A list of ``Accessibility.AXNode`` matching the specified attributes, including nodes that are ignored for accessibility.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if node_id is not None:
|
||||
params['nodeId'] = node_id.to_json()
|
||||
if backend_node_id is not None:
|
||||
params['backendNodeId'] = backend_node_id.to_json()
|
||||
if object_id is not None:
|
||||
params['objectId'] = object_id.to_json()
|
||||
if accessible_name is not None:
|
||||
params['accessibleName'] = accessible_name
|
||||
if role is not None:
|
||||
params['role'] = role
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.queryAXTree',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [AXNode.from_json(i) for i in json['nodes']]
|
||||
|
||||
|
||||
@event_class('Accessibility.loadComplete')
|
||||
@dataclass
|
||||
class LoadComplete:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
The loadComplete event mirrors the load complete event sent by the browser to assistive
|
||||
technology when the web page has finished loading.
|
||||
'''
|
||||
#: New document root node.
|
||||
root: AXNode
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> LoadComplete:
|
||||
return cls(
|
||||
root=AXNode.from_json(json['root'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Accessibility.nodesUpdated')
|
||||
@dataclass
|
||||
class NodesUpdated:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
The nodesUpdated event is sent every time a previously requested node has changed in the tree.
|
||||
'''
|
||||
#: Updated node data.
|
||||
nodes: typing.List[AXNode]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> NodesUpdated:
|
||||
return cls(
|
||||
nodes=[AXNode.from_json(i) for i in json['nodes']]
|
||||
)
|
||||
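A hedged sketch of how the Accessibility commands defined above might be driven. It assumes an execute(...) helper like the one sketched after the package __init__ imports and an already-connected send transport; the versioned import path is also an assumption, since the exact devtools module name depends on the bundled CDP milestone.

from selenium.webdriver.common.devtools.v100 import accessibility  # version path is an assumption

execute(accessibility.enable(), send)                          # keep AXNodeIds stable between calls
root = execute(accessibility.get_root_ax_node(), send)         # AXNode of the root document
tree = execute(accessibility.get_full_ax_tree(depth=2), send)  # shallow slice of the tree
for node in tree:
    role = node.role.value if node.role else None
    print(node.node_id, role, node.ignored)
execute(accessibility.disable(), send)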
@@ -0,0 +1,415 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Animation (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import runtime
|
||||
|
||||
|
||||
@dataclass
|
||||
class Animation:
|
||||
'''
|
||||
Animation instance.
|
||||
'''
|
||||
#: ``Animation``'s id.
|
||||
id_: str
|
||||
|
||||
#: ``Animation``'s name.
|
||||
name: str
|
||||
|
||||
#: ``Animation``'s internal paused state.
|
||||
paused_state: bool
|
||||
|
||||
#: ``Animation``'s play state.
|
||||
play_state: str
|
||||
|
||||
#: ``Animation``'s playback rate.
|
||||
playback_rate: float
|
||||
|
||||
#: ``Animation``'s start time.
|
||||
start_time: float
|
||||
|
||||
#: ``Animation``'s current time.
|
||||
current_time: float
|
||||
|
||||
#: Animation type of ``Animation``.
|
||||
type_: str
|
||||
|
||||
#: ``Animation``'s source animation node.
|
||||
source: typing.Optional[AnimationEffect] = None
|
||||
|
||||
#: A unique ID for ``Animation`` representing the sources that triggered this CSS
|
||||
#: animation/transition.
|
||||
css_id: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['id'] = self.id_
|
||||
json['name'] = self.name
|
||||
json['pausedState'] = self.paused_state
|
||||
json['playState'] = self.play_state
|
||||
json['playbackRate'] = self.playback_rate
|
||||
json['startTime'] = self.start_time
|
||||
json['currentTime'] = self.current_time
|
||||
json['type'] = self.type_
|
||||
if self.source is not None:
|
||||
json['source'] = self.source.to_json()
|
||||
if self.css_id is not None:
|
||||
json['cssId'] = self.css_id
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
id_=str(json['id']),
|
||||
name=str(json['name']),
|
||||
paused_state=bool(json['pausedState']),
|
||||
play_state=str(json['playState']),
|
||||
playback_rate=float(json['playbackRate']),
|
||||
start_time=float(json['startTime']),
|
||||
current_time=float(json['currentTime']),
|
||||
type_=str(json['type']),
|
||||
source=AnimationEffect.from_json(json['source']) if 'source' in json else None,
|
||||
css_id=str(json['cssId']) if 'cssId' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AnimationEffect:
|
||||
'''
|
||||
AnimationEffect instance
|
||||
'''
|
||||
#: ``AnimationEffect``'s delay.
|
||||
delay: float
|
||||
|
||||
#: ``AnimationEffect``'s end delay.
|
||||
end_delay: float
|
||||
|
||||
#: ``AnimationEffect``'s iteration start.
|
||||
iteration_start: float
|
||||
|
||||
#: ``AnimationEffect``'s iterations.
|
||||
iterations: float
|
||||
|
||||
#: ``AnimationEffect``'s iteration duration.
|
||||
duration: float
|
||||
|
||||
#: ``AnimationEffect``'s playback direction.
|
||||
direction: str
|
||||
|
||||
#: ``AnimationEffect``'s fill mode.
|
||||
fill: str
|
||||
|
||||
#: ``AnimationEffect``'s timing function.
|
||||
easing: str
|
||||
|
||||
#: ``AnimationEffect``'s target node.
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None
|
||||
|
||||
#: ``AnimationEffect``'s keyframes.
|
||||
keyframes_rule: typing.Optional[KeyframesRule] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['delay'] = self.delay
|
||||
json['endDelay'] = self.end_delay
|
||||
json['iterationStart'] = self.iteration_start
|
||||
json['iterations'] = self.iterations
|
||||
json['duration'] = self.duration
|
||||
json['direction'] = self.direction
|
||||
json['fill'] = self.fill
|
||||
json['easing'] = self.easing
|
||||
if self.backend_node_id is not None:
|
||||
json['backendNodeId'] = self.backend_node_id.to_json()
|
||||
if self.keyframes_rule is not None:
|
||||
json['keyframesRule'] = self.keyframes_rule.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
delay=float(json['delay']),
|
||||
end_delay=float(json['endDelay']),
|
||||
iteration_start=float(json['iterationStart']),
|
||||
iterations=float(json['iterations']),
|
||||
duration=float(json['duration']),
|
||||
direction=str(json['direction']),
|
||||
fill=str(json['fill']),
|
||||
easing=str(json['easing']),
|
||||
backend_node_id=dom.BackendNodeId.from_json(json['backendNodeId']) if 'backendNodeId' in json else None,
|
||||
keyframes_rule=KeyframesRule.from_json(json['keyframesRule']) if 'keyframesRule' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class KeyframesRule:
|
||||
'''
|
||||
Keyframes Rule
|
||||
'''
|
||||
#: List of animation keyframes.
|
||||
keyframes: typing.List[KeyframeStyle]
|
||||
|
||||
#: CSS keyframed animation's name.
|
||||
name: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['keyframes'] = [i.to_json() for i in self.keyframes]
|
||||
if self.name is not None:
|
||||
json['name'] = self.name
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
keyframes=[KeyframeStyle.from_json(i) for i in json['keyframes']],
|
||||
name=str(json['name']) if 'name' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class KeyframeStyle:
|
||||
'''
|
||||
Keyframe Style
|
||||
'''
|
||||
#: Keyframe's time offset.
|
||||
offset: str
|
||||
|
||||
#: ``AnimationEffect``'s timing function.
|
||||
easing: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['offset'] = self.offset
|
||||
json['easing'] = self.easing
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
offset=str(json['offset']),
|
||||
easing=str(json['easing']),
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables animation domain notifications.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables animation domain notifications.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_current_time(
|
||||
id_: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,float]:
|
||||
'''
|
||||
Returns the current time of an animation.
|
||||
|
||||
:param id_: Id of animation.
|
||||
:returns: Current time of the page.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['id'] = id_
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.getCurrentTime',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return float(json['currentTime'])
|
||||
|
||||
|
||||
def get_playback_rate() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,float]:
|
||||
'''
|
||||
Gets the playback rate of the document timeline.
|
||||
|
||||
:returns: Playback rate for animations on page.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.getPlaybackRate',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return float(json['playbackRate'])
|
||||
|
||||
|
||||
def release_animations(
|
||||
animations: typing.List[str]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Releases a set of animations to no longer be manipulated.
|
||||
|
||||
:param animations: List of animation ids to seek.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['animations'] = [i for i in animations]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.releaseAnimations',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def resolve_animation(
|
||||
animation_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,runtime.RemoteObject]:
|
||||
'''
|
||||
Gets the remote object of the Animation.
|
||||
|
||||
:param animation_id: Animation id.
|
||||
:returns: Corresponding remote object.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['animationId'] = animation_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.resolveAnimation',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return runtime.RemoteObject.from_json(json['remoteObject'])
|
||||
|
||||
|
||||
def seek_animations(
|
||||
animations: typing.List[str],
|
||||
current_time: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Seek a set of animations to a particular time within each animation.
|
||||
|
||||
:param animations: List of animation ids to seek.
|
||||
:param current_time: Set the current time of each animation.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['animations'] = [i for i in animations]
|
||||
params['currentTime'] = current_time
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.seekAnimations',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_paused(
|
||||
animations: typing.List[str],
|
||||
paused: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets the paused state of a set of animations.
|
||||
|
||||
:param animations: Animations to set the pause state of.
|
||||
:param paused: Paused state to set to.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['animations'] = [i for i in animations]
|
||||
params['paused'] = paused
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.setPaused',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_playback_rate(
|
||||
playback_rate: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets the playback rate of the document timeline.
|
||||
|
||||
:param playback_rate: Playback rate for animations on page
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['playbackRate'] = playback_rate
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.setPlaybackRate',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_timing(
|
||||
animation_id: str,
|
||||
duration: float,
|
||||
delay: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets the timing of an animation node.
|
||||
|
||||
:param animation_id: Animation id.
|
||||
:param duration: Duration of the animation.
|
||||
:param delay: Delay of the animation.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['animationId'] = animation_id
|
||||
params['duration'] = duration
|
||||
params['delay'] = delay
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.setTiming',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Animation.animationCanceled')
|
||||
@dataclass
|
||||
class AnimationCanceled:
|
||||
'''
|
||||
Event for when an animation has been cancelled.
|
||||
'''
|
||||
#: Id of the animation that was cancelled.
|
||||
id_: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AnimationCanceled:
|
||||
return cls(
|
||||
id_=str(json['id'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Animation.animationCreated')
|
||||
@dataclass
|
||||
class AnimationCreated:
|
||||
'''
|
||||
Event for each animation that has been created.
|
||||
'''
|
||||
#: Id of the animation that was created.
|
||||
id_: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AnimationCreated:
|
||||
return cls(
|
||||
id_=str(json['id'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Animation.animationStarted')
|
||||
@dataclass
|
||||
class AnimationStarted:
|
||||
'''
|
||||
Event for animation that has been started.
|
||||
'''
|
||||
#: Animation that was started.
|
||||
animation: Animation
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AnimationStarted:
|
||||
return cls(
|
||||
animation=Animation.from_json(json['animation'])
|
||||
)
|
||||
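A similar hedged sketch for the Animation commands above, reusing the assumed execute(...) helper, send transport, and versioned import path from the earlier examples.

from selenium.webdriver.common.devtools.v100 import animation  # version path is an assumption

execute(animation.enable(), send)                    # start animation domain notifications
rate = execute(animation.get_playback_rate(), send)  # document timeline rate, typically 1.0
execute(animation.set_playback_rate(playback_rate=rate * 2), send)  # run animations twice as fast
# Ids gathered from AnimationStarted events (the id below is illustrative) can be paused in bulk:
execute(animation.set_paused(animations=["example-animation-id"], paused=True), send)
execute(animation.disable(), send)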
File diff suppressed because it is too large
@@ -0,0 +1,208 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: BackgroundService (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import network
|
||||
from . import service_worker
|
||||
|
||||
|
||||
class ServiceName(enum.Enum):
|
||||
'''
|
||||
The Background Service that will be associated with the commands/events.
|
||||
Every Background Service operates independently, but they share the same
|
||||
API.
|
||||
'''
|
||||
BACKGROUND_FETCH = "backgroundFetch"
|
||||
BACKGROUND_SYNC = "backgroundSync"
|
||||
PUSH_MESSAGING = "pushMessaging"
|
||||
NOTIFICATIONS = "notifications"
|
||||
PAYMENT_HANDLER = "paymentHandler"
|
||||
PERIODIC_BACKGROUND_SYNC = "periodicBackgroundSync"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class EventMetadata:
|
||||
'''
|
||||
A key-value pair for additional event information to pass along.
|
||||
'''
|
||||
key: str
|
||||
|
||||
value: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['key'] = self.key
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
key=str(json['key']),
|
||||
value=str(json['value']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class BackgroundServiceEvent:
|
||||
#: Timestamp of the event (in seconds).
|
||||
timestamp: network.TimeSinceEpoch
|
||||
|
||||
#: The origin this event belongs to.
|
||||
origin: str
|
||||
|
||||
#: The Service Worker ID that initiated the event.
|
||||
service_worker_registration_id: service_worker.RegistrationID
|
||||
|
||||
#: The Background Service this event belongs to.
|
||||
service: ServiceName
|
||||
|
||||
#: A description of the event.
|
||||
event_name: str
|
||||
|
||||
#: An identifier that groups related events together.
|
||||
instance_id: str
|
||||
|
||||
#: A list of event-specific information.
|
||||
event_metadata: typing.List[EventMetadata]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['timestamp'] = self.timestamp.to_json()
|
||||
json['origin'] = self.origin
|
||||
json['serviceWorkerRegistrationId'] = self.service_worker_registration_id.to_json()
|
||||
json['service'] = self.service.to_json()
|
||||
json['eventName'] = self.event_name
|
||||
json['instanceId'] = self.instance_id
|
||||
json['eventMetadata'] = [i.to_json() for i in self.event_metadata]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
timestamp=network.TimeSinceEpoch.from_json(json['timestamp']),
|
||||
origin=str(json['origin']),
|
||||
service_worker_registration_id=service_worker.RegistrationID.from_json(json['serviceWorkerRegistrationId']),
|
||||
service=ServiceName.from_json(json['service']),
|
||||
event_name=str(json['eventName']),
|
||||
instance_id=str(json['instanceId']),
|
||||
event_metadata=[EventMetadata.from_json(i) for i in json['eventMetadata']],
|
||||
)
|
||||
|
||||
|
||||
def start_observing(
|
||||
service: ServiceName
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables event updates for the service.
|
||||
|
||||
:param service:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['service'] = service.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'BackgroundService.startObserving',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def stop_observing(
|
||||
service: ServiceName
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables event updates for the service.
|
||||
|
||||
:param service:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['service'] = service.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'BackgroundService.stopObserving',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_recording(
|
||||
should_record: bool,
|
||||
service: ServiceName
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set the recording state for the service.
|
||||
|
||||
:param should_record:
|
||||
:param service:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['shouldRecord'] = should_record
|
||||
params['service'] = service.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'BackgroundService.setRecording',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def clear_events(
|
||||
service: ServiceName
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears all stored data for the service.
|
||||
|
||||
:param service:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['service'] = service.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'BackgroundService.clearEvents',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('BackgroundService.recordingStateChanged')
|
||||
@dataclass
|
||||
class RecordingStateChanged:
|
||||
'''
|
||||
Called when the recording state for the service has been updated.
|
||||
'''
|
||||
is_recording: bool
|
||||
service: ServiceName
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> RecordingStateChanged:
|
||||
return cls(
|
||||
is_recording=bool(json['isRecording']),
|
||||
service=ServiceName.from_json(json['service'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('BackgroundService.backgroundServiceEventReceived')
|
||||
@dataclass
|
||||
class BackgroundServiceEventReceived:
|
||||
'''
|
||||
Called with all existing backgroundServiceEvents when enabled, and all new
|
||||
events afterwards if enabled and recording.
|
||||
'''
|
||||
background_service_event: BackgroundServiceEvent
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> BackgroundServiceEventReceived:
|
||||
return cls(
|
||||
background_service_event=BackgroundServiceEvent.from_json(json['backgroundServiceEvent'])
|
||||
)
|
||||
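A hedged sketch for the BackgroundService commands above, under the same assumptions (execute helper, send transport, versioned import path).

from selenium.webdriver.common.devtools.v100 import background_service  # version path is an assumption

service = background_service.ServiceName.BACKGROUND_FETCH
execute(background_service.set_recording(should_record=True, service=service), send)
execute(background_service.start_observing(service=service), send)
# Incoming BackgroundService.backgroundServiceEventReceived params can be decoded with
# BackgroundServiceEventReceived.from_json(...) once read off the transport.
execute(background_service.clear_events(service=service), send)
execute(background_service.stop_observing(service=service), send)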
@@ -0,0 +1,697 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Browser
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import page
|
||||
from . import target
|
||||
|
||||
|
||||
class BrowserContextID(str):
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> BrowserContextID:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'BrowserContextID({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class WindowID(int):
|
||||
def to_json(self) -> int:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: int) -> WindowID:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'WindowID({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class WindowState(enum.Enum):
|
||||
'''
|
||||
The state of the browser window.
|
||||
'''
|
||||
NORMAL = "normal"
|
||||
MINIMIZED = "minimized"
|
||||
MAXIMIZED = "maximized"
|
||||
FULLSCREEN = "fullscreen"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Bounds:
|
||||
'''
|
||||
Browser window bounds information
|
||||
'''
|
||||
#: The offset from the left edge of the screen to the window in pixels.
|
||||
left: typing.Optional[int] = None
|
||||
|
||||
#: The offset from the top edge of the screen to the window in pixels.
|
||||
top: typing.Optional[int] = None
|
||||
|
||||
#: The window width in pixels.
|
||||
width: typing.Optional[int] = None
|
||||
|
||||
#: The window height in pixels.
|
||||
height: typing.Optional[int] = None
|
||||
|
||||
#: The window state. Default to normal.
|
||||
window_state: typing.Optional[WindowState] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
if self.left is not None:
|
||||
json['left'] = self.left
|
||||
if self.top is not None:
|
||||
json['top'] = self.top
|
||||
if self.width is not None:
|
||||
json['width'] = self.width
|
||||
if self.height is not None:
|
||||
json['height'] = self.height
|
||||
if self.window_state is not None:
|
||||
json['windowState'] = self.window_state.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
left=int(json['left']) if 'left' in json else None,
|
||||
top=int(json['top']) if 'top' in json else None,
|
||||
width=int(json['width']) if 'width' in json else None,
|
||||
height=int(json['height']) if 'height' in json else None,
|
||||
window_state=WindowState.from_json(json['windowState']) if 'windowState' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class PermissionType(enum.Enum):
|
||||
ACCESSIBILITY_EVENTS = "accessibilityEvents"
|
||||
AUDIO_CAPTURE = "audioCapture"
|
||||
BACKGROUND_SYNC = "backgroundSync"
|
||||
BACKGROUND_FETCH = "backgroundFetch"
|
||||
CLIPBOARD_READ_WRITE = "clipboardReadWrite"
|
||||
CLIPBOARD_SANITIZED_WRITE = "clipboardSanitizedWrite"
|
||||
DISPLAY_CAPTURE = "displayCapture"
|
||||
DURABLE_STORAGE = "durableStorage"
|
||||
FLASH = "flash"
|
||||
GEOLOCATION = "geolocation"
|
||||
MIDI = "midi"
|
||||
MIDI_SYSEX = "midiSysex"
|
||||
NFC = "nfc"
|
||||
NOTIFICATIONS = "notifications"
|
||||
PAYMENT_HANDLER = "paymentHandler"
|
||||
PERIODIC_BACKGROUND_SYNC = "periodicBackgroundSync"
|
||||
PROTECTED_MEDIA_IDENTIFIER = "protectedMediaIdentifier"
|
||||
SENSORS = "sensors"
|
||||
VIDEO_CAPTURE = "videoCapture"
|
||||
VIDEO_CAPTURE_PAN_TILT_ZOOM = "videoCapturePanTiltZoom"
|
||||
IDLE_DETECTION = "idleDetection"
|
||||
WAKE_LOCK_SCREEN = "wakeLockScreen"
|
||||
WAKE_LOCK_SYSTEM = "wakeLockSystem"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class PermissionSetting(enum.Enum):
|
||||
GRANTED = "granted"
|
||||
DENIED = "denied"
|
||||
PROMPT = "prompt"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PermissionDescriptor:
|
||||
'''
|
||||
Definition of PermissionDescriptor defined in the Permissions API:
|
||||
https://w3c.github.io/permissions/#dictdef-permissiondescriptor.
|
||||
'''
|
||||
#: Name of permission.
|
||||
#: See https://cs.chromium.org/chromium/src/third_party/blink/renderer/modules/permissions/permission_descriptor.idl for valid permission names.
|
||||
name: str
|
||||
|
||||
#: For "midi" permission, may also specify sysex control.
|
||||
sysex: typing.Optional[bool] = None
|
||||
|
||||
#: For "push" permission, may specify userVisibleOnly.
|
||||
#: Note that userVisibleOnly = true is the only currently supported type.
|
||||
user_visible_only: typing.Optional[bool] = None
|
||||
|
||||
#: For "clipboard" permission, may specify allowWithoutSanitization.
|
||||
allow_without_sanitization: typing.Optional[bool] = None
|
||||
|
||||
#: For "camera" permission, may specify panTiltZoom.
|
||||
pan_tilt_zoom: typing.Optional[bool] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
if self.sysex is not None:
|
||||
json['sysex'] = self.sysex
|
||||
if self.user_visible_only is not None:
|
||||
json['userVisibleOnly'] = self.user_visible_only
|
||||
if self.allow_without_sanitization is not None:
|
||||
json['allowWithoutSanitization'] = self.allow_without_sanitization
|
||||
if self.pan_tilt_zoom is not None:
|
||||
json['panTiltZoom'] = self.pan_tilt_zoom
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
sysex=bool(json['sysex']) if 'sysex' in json else None,
|
||||
user_visible_only=bool(json['userVisibleOnly']) if 'userVisibleOnly' in json else None,
|
||||
allow_without_sanitization=bool(json['allowWithoutSanitization']) if 'allowWithoutSanitization' in json else None,
|
||||
pan_tilt_zoom=bool(json['panTiltZoom']) if 'panTiltZoom' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class BrowserCommandId(enum.Enum):
|
||||
'''
|
||||
Browser command ids used by executeBrowserCommand.
|
||||
'''
|
||||
OPEN_TAB_SEARCH = "openTabSearch"
|
||||
CLOSE_TAB_SEARCH = "closeTabSearch"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Bucket:
|
||||
'''
|
||||
Chrome histogram bucket.
|
||||
'''
|
||||
#: Minimum value (inclusive).
|
||||
low: int
|
||||
|
||||
#: Maximum value (exclusive).
|
||||
high: int
|
||||
|
||||
#: Number of samples.
|
||||
count: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['low'] = self.low
|
||||
json['high'] = self.high
|
||||
json['count'] = self.count
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
low=int(json['low']),
|
||||
high=int(json['high']),
|
||||
count=int(json['count']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Histogram:
|
||||
'''
|
||||
Chrome histogram.
|
||||
'''
|
||||
#: Name.
|
||||
name: str
|
||||
|
||||
#: Sum of sample values.
|
||||
sum_: int
|
||||
|
||||
#: Total number of samples.
|
||||
count: int
|
||||
|
||||
#: Buckets.
|
||||
buckets: typing.List[Bucket]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['sum'] = self.sum_
|
||||
json['count'] = self.count
|
||||
json['buckets'] = [i.to_json() for i in self.buckets]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
sum_=int(json['sum']),
|
||||
count=int(json['count']),
|
||||
buckets=[Bucket.from_json(i) for i in json['buckets']],
|
||||
)
|
||||
|
||||
|
||||
def set_permission(
|
||||
permission: PermissionDescriptor,
|
||||
setting: PermissionSetting,
|
||||
origin: typing.Optional[str] = None,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set permission settings for given origin.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param permission: Descriptor of permission to override.
|
||||
:param setting: Setting of the permission.
|
||||
:param origin: *(Optional)* Origin the permission applies to, all origins if not specified.
|
||||
:param browser_context_id: *(Optional)* Context to override. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['permission'] = permission.to_json()
|
||||
params['setting'] = setting.to_json()
|
||||
if origin is not None:
|
||||
params['origin'] = origin
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setPermission',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def grant_permissions(
|
||||
permissions: typing.List[PermissionType],
|
||||
origin: typing.Optional[str] = None,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Grant specific permissions to the given origin and reject all others.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param permissions:
|
||||
:param origin: *(Optional)* Origin the permission applies to, all origins if not specified.
|
||||
:param browser_context_id: *(Optional)* BrowserContext to override permissions. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['permissions'] = [i.to_json() for i in permissions]
|
||||
if origin is not None:
|
||||
params['origin'] = origin
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.grantPermissions',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def reset_permissions(
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Reset all permission management for all origins.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param browser_context_id: *(Optional)* BrowserContext to reset permissions. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.resetPermissions',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_download_behavior(
|
||||
behavior: str,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None,
|
||||
download_path: typing.Optional[str] = None,
|
||||
events_enabled: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set the behavior when downloading a file.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param behavior: Whether to allow all or deny all download requests, or use default Chrome behavior if available (otherwise deny). ``allowAndName`` allows download and names files according to their download guids.
|
||||
:param browser_context_id: *(Optional)* BrowserContext to set download behavior. When omitted, default browser context is used.
|
||||
:param download_path: *(Optional)* The default path to save downloaded files to. This is required if behavior is set to 'allow' or 'allowAndName'.
|
||||
:param events_enabled: *(Optional)* Whether to emit download events (defaults to false).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['behavior'] = behavior
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
if download_path is not None:
|
||||
params['downloadPath'] = download_path
|
||||
if events_enabled is not None:
|
||||
params['eventsEnabled'] = events_enabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setDownloadBehavior',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def cancel_download(
|
||||
guid: str,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Cancel a download if in progress
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param guid: Global unique identifier of the download.
|
||||
:param browser_context_id: *(Optional)* BrowserContext to perform the action in. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['guid'] = guid
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.cancelDownload',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def close() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Close browser gracefully.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.close',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def crash() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Crashes browser on the main thread.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.crash',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def crash_gpu_process() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Crashes GPU process.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.crashGpuProcess',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_version() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[str, str, str, str, str]]:
|
||||
'''
|
||||
Returns version information.
|
||||
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **protocolVersion** - Protocol version.
|
||||
1. **product** - Product name.
|
||||
2. **revision** - Product revision.
|
||||
3. **userAgent** - User-Agent.
|
||||
4. **jsVersion** - V8 version.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getVersion',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
str(json['protocolVersion']),
|
||||
str(json['product']),
|
||||
str(json['revision']),
|
||||
str(json['userAgent']),
|
||||
str(json['jsVersion'])
|
||||
)
|
||||
|
||||
|
||||
def get_browser_command_line() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[str]]:
|
||||
'''
|
||||
Returns the command line switches for the browser process if, and only if
|
||||
--enable-automation is on the commandline.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:returns: Commandline parameters
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getBrowserCommandLine',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [str(i) for i in json['arguments']]
|
||||
|
||||
|
||||
def get_histograms(
|
||||
query: typing.Optional[str] = None,
|
||||
delta: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Histogram]]:
|
||||
'''
|
||||
Get Chrome histograms.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param query: *(Optional)* Requested substring in name. Only histograms which have query as a substring in their name are extracted. An empty or absent query returns all histograms.
|
||||
:param delta: *(Optional)* If true, retrieve delta since last call.
|
||||
:returns: Histograms.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if query is not None:
|
||||
params['query'] = query
|
||||
if delta is not None:
|
||||
params['delta'] = delta
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getHistograms',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [Histogram.from_json(i) for i in json['histograms']]
|
||||
|
||||
|
||||
def get_histogram(
|
||||
name: str,
|
||||
delta: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Histogram]:
|
||||
'''
|
||||
Get a Chrome histogram by name.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param name: Requested histogram name.
|
||||
:param delta: *(Optional)* If true, retrieve delta since last call.
|
||||
:returns: Histogram.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['name'] = name
|
||||
if delta is not None:
|
||||
params['delta'] = delta
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getHistogram',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return Histogram.from_json(json['histogram'])
|
||||
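# --- Usage sketch (added for illustration; not part of the generated file) ---
# Fetching all histograms whose name contains a substring, reusing the
# hypothetical ``_example_run`` driver above (``send`` is the same assumed
# transport callable). This assumes Histogram exposes a ``name`` field, as in
# the CDP specification.
def _example_memory_histograms(send):
    histograms = _example_run(get_histograms(query='Memory', delta=True), send)
    return {h.name: h for h in histograms}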
|
||||
|
||||
def get_window_bounds(
|
||||
window_id: WindowID
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Bounds]:
|
||||
'''
|
||||
Get position and size of the browser window.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param window_id: Browser window id.
|
||||
:returns: Bounds information of the window. When window state is 'minimized', the restored window position and size are returned.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['windowId'] = window_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getWindowBounds',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return Bounds.from_json(json['bounds'])
|
||||
|
||||
|
||||
def get_window_for_target(
|
||||
target_id: typing.Optional[target.TargetID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[WindowID, Bounds]]:
|
||||
'''
|
||||
Get the browser window that contains the devtools target.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param target_id: *(Optional)* Devtools agent host id. If called as a part of the session, associated targetId is used.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **windowId** - Browser window id.
|
||||
1. **bounds** - Bounds information of the window. When window state is 'minimized', the restored window position and size are returned.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if target_id is not None:
|
||||
params['targetId'] = target_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getWindowForTarget',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
WindowID.from_json(json['windowId']),
|
||||
Bounds.from_json(json['bounds'])
|
||||
)
|
||||
|
||||
|
||||
def set_window_bounds(
|
||||
window_id: WindowID,
|
||||
bounds: Bounds
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set position and/or size of the browser window.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param window_id: Browser window id.
|
||||
:param bounds: New window bounds. The 'minimized', 'maximized' and 'fullscreen' states cannot be combined with 'left', 'top', 'width' or 'height'. Leaves unspecified fields unchanged.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['windowId'] = window_id.to_json()
|
||||
params['bounds'] = bounds.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setWindowBounds',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_dock_tile(
|
||||
badge_label: typing.Optional[str] = None,
|
||||
image: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set dock tile details, platform-specific.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param badge_label: *(Optional)*
|
||||
:param image: *(Optional)* Png encoded image.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if badge_label is not None:
|
||||
params['badgeLabel'] = badge_label
|
||||
if image is not None:
|
||||
params['image'] = image
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setDockTile',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def execute_browser_command(
|
||||
command_id: BrowserCommandId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Invoke custom browser commands used by telemetry.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param command_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['commandId'] = command_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.executeBrowserCommand',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Browser.downloadWillBegin')
|
||||
@dataclass
|
||||
class DownloadWillBegin:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
Fired when page is about to start a download.
|
||||
'''
|
||||
#: Id of the frame that caused the download to begin.
|
||||
frame_id: page.FrameId
|
||||
#: Global unique identifier of the download.
|
||||
guid: str
|
||||
#: URL of the resource being downloaded.
|
||||
url: str
|
||||
#: Suggested file name of the resource (the actual name of the file saved on disk may differ).
|
||||
suggested_filename: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DownloadWillBegin:
|
||||
return cls(
|
||||
frame_id=page.FrameId.from_json(json['frameId']),
|
||||
guid=str(json['guid']),
|
||||
url=str(json['url']),
|
||||
suggested_filename=str(json['suggestedFilename'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Browser.downloadProgress')
|
||||
@dataclass
|
||||
class DownloadProgress:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
Fired when download makes progress. Last call has ``done`` == true.
|
||||
'''
|
||||
#: Global unique identifier of the download.
|
||||
guid: str
|
||||
#: Total expected bytes to download.
|
||||
total_bytes: float
|
||||
#: Total bytes received.
|
||||
received_bytes: float
|
||||
#: Download status.
|
||||
state: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DownloadProgress:
|
||||
return cls(
|
||||
guid=str(json['guid']),
|
||||
total_bytes=float(json['totalBytes']),
|
||||
received_bytes=float(json['receivedBytes']),
|
||||
state=str(json['state'])
|
||||
)
|
||||
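# --- Usage sketch (added for illustration; not part of the generated file) ---
# Decoding a raw ``Browser.downloadProgress`` event payload. The payload shown
# here is made up and only uses the fields parsed by from_json above.
def _example_parse_progress() -> DownloadProgress:
    payload = {
        'guid': '1234-5678',
        'totalBytes': 1048576.0,
        'receivedBytes': 524288.0,
        'state': 'inProgress',
    }
    event = DownloadProgress.from_json(payload)
    assert event.received_bytes <= event.total_bytes
    return event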
@@ -0,0 +1,287 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: CacheStorage (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class CacheId(str):
|
||||
'''
|
||||
Unique identifier of the Cache object.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> CacheId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'CacheId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class CachedResponseType(enum.Enum):
|
||||
'''
|
||||
type of HTTP response cached
|
||||
'''
|
||||
BASIC = "basic"
|
||||
CORS = "cors"
|
||||
DEFAULT = "default"
|
||||
ERROR = "error"
|
||||
OPAQUE_RESPONSE = "opaqueResponse"
|
||||
OPAQUE_REDIRECT = "opaqueRedirect"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
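# --- Usage sketch (added for illustration; not part of the generated file) ---
# Enum values round-trip through their CDP wire strings.
def _example_response_type_roundtrip() -> bool:
    wire = 'cors'
    parsed = CachedResponseType.from_json(wire)
    return parsed is CachedResponseType.CORS and parsed.to_json() == wire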
|
||||
|
||||
@dataclass
|
||||
class DataEntry:
|
||||
'''
|
||||
Data entry.
|
||||
'''
|
||||
#: Request URL.
|
||||
request_url: str
|
||||
|
||||
#: Request method.
|
||||
request_method: str
|
||||
|
||||
#: Request headers
|
||||
request_headers: typing.List[Header]
|
||||
|
||||
#: Number of seconds since epoch.
|
||||
response_time: float
|
||||
|
||||
#: HTTP response status code.
|
||||
response_status: int
|
||||
|
||||
#: HTTP response status text.
|
||||
response_status_text: str
|
||||
|
||||
#: HTTP response type
|
||||
response_type: CachedResponseType
|
||||
|
||||
#: Response headers
|
||||
response_headers: typing.List[Header]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['requestURL'] = self.request_url
|
||||
json['requestMethod'] = self.request_method
|
||||
json['requestHeaders'] = [i.to_json() for i in self.request_headers]
|
||||
json['responseTime'] = self.response_time
|
||||
json['responseStatus'] = self.response_status
|
||||
json['responseStatusText'] = self.response_status_text
|
||||
json['responseType'] = self.response_type.to_json()
|
||||
json['responseHeaders'] = [i.to_json() for i in self.response_headers]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
request_url=str(json['requestURL']),
|
||||
request_method=str(json['requestMethod']),
|
||||
request_headers=[Header.from_json(i) for i in json['requestHeaders']],
|
||||
response_time=float(json['responseTime']),
|
||||
response_status=int(json['responseStatus']),
|
||||
response_status_text=str(json['responseStatusText']),
|
||||
response_type=CachedResponseType.from_json(json['responseType']),
|
||||
response_headers=[Header.from_json(i) for i in json['responseHeaders']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Cache:
|
||||
'''
|
||||
Cache identifier.
|
||||
'''
|
||||
#: An opaque unique id of the cache.
|
||||
cache_id: CacheId
|
||||
|
||||
#: Security origin of the cache.
|
||||
security_origin: str
|
||||
|
||||
#: The name of the cache.
|
||||
cache_name: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['cacheId'] = self.cache_id.to_json()
|
||||
json['securityOrigin'] = self.security_origin
|
||||
json['cacheName'] = self.cache_name
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
cache_id=CacheId.from_json(json['cacheId']),
|
||||
security_origin=str(json['securityOrigin']),
|
||||
cache_name=str(json['cacheName']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
class Header:
    name: str

    value: str

    def to_json(self):
        json = dict()
        json['name'] = self.name
        json['value'] = self.value
        return json

    @classmethod
    def from_json(cls, json):
        return cls(
            name=str(json['name']),
            value=str(json['value']),
        )
|
||||
|
||||
|
||||
@dataclass
|
||||
class CachedResponse:
|
||||
'''
|
||||
Cached response
|
||||
'''
|
||||
#: Entry content, base64-encoded.
|
||||
body: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['body'] = self.body
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
body=str(json['body']),
|
||||
)
|
||||
|
||||
|
||||
def delete_cache(
|
||||
cache_id: CacheId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Deletes a cache.
|
||||
|
||||
:param cache_id: Id of cache for deletion.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.deleteCache',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def delete_entry(
|
||||
cache_id: CacheId,
|
||||
request: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Deletes a cache entry.
|
||||
|
||||
:param cache_id: Id of cache where the entry will be deleted.
|
||||
:param request: URL spec of the request.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
params['request'] = request
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.deleteEntry',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def request_cache_names(
|
||||
security_origin: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Cache]]:
|
||||
'''
|
||||
Requests cache names.
|
||||
|
||||
:param security_origin: Security origin.
|
||||
:returns: Caches for the security origin.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['securityOrigin'] = security_origin
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.requestCacheNames',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [Cache.from_json(i) for i in json['caches']]
|
||||
|
||||
|
||||
def request_cached_response(
|
||||
cache_id: CacheId,
|
||||
request_url: str,
|
||||
request_headers: typing.List[Header]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,CachedResponse]:
|
||||
'''
|
||||
Fetches cache entry.
|
||||
|
||||
:param cache_id: Id of cache that contains the entry.
|
||||
:param request_url: URL spec of the request.
|
||||
:param request_headers: headers of the request.
|
||||
:returns: Response read from the cache.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
params['requestURL'] = request_url
|
||||
params['requestHeaders'] = [i.to_json() for i in request_headers]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.requestCachedResponse',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return CachedResponse.from_json(json['response'])
|
||||
|
||||
|
||||
def request_entries(
|
||||
cache_id: CacheId,
|
||||
skip_count: typing.Optional[int] = None,
|
||||
page_size: typing.Optional[int] = None,
|
||||
path_filter: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[DataEntry], float]]:
|
||||
'''
|
||||
Requests data from cache.
|
||||
|
||||
:param cache_id: ID of cache to get entries from.
|
||||
:param skip_count: *(Optional)* Number of records to skip.
|
||||
:param page_size: *(Optional)* Number of records to fetch.
|
||||
:param path_filter: *(Optional)* If present, only return the entries containing this substring in the path
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **cacheDataEntries** - Array of object store data entries.
|
||||
1. **returnCount** - Count of returned entries from this storage. If pathFilter is empty, it is the count of all entries from this storage.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
if skip_count is not None:
|
||||
params['skipCount'] = skip_count
|
||||
if page_size is not None:
|
||||
params['pageSize'] = page_size
|
||||
if path_filter is not None:
|
||||
params['pathFilter'] = path_filter
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.requestEntries',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[DataEntry.from_json(i) for i in json['cacheDataEntries']],
|
||||
float(json['returnCount'])
|
||||
)
|
||||
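# --- Usage sketch (added for illustration; not part of the generated file) ---
# Driving ``request_entries`` without a browser by feeding the generator a
# made-up result payload, to show how the returned tuple is produced.
def _example_request_entries_offline():
    gen = request_entries(CacheId('cache-1'), skip_count=0, page_size=10)
    _ = next(gen)  # the CacheStorage.requestEntries command dict
    fake_result = {'cacheDataEntries': [], 'returnCount': 0}
    try:
        gen.send(fake_result)
    except StopIteration as exc:
        entries, total = exc.value
        return entries, total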
@@ -0,0 +1,170 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Cast (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class Sink:
|
||||
name: str
|
||||
|
||||
id_: str
|
||||
|
||||
#: Text describing the current session. Present only if there is an active
|
||||
#: session on the sink.
|
||||
session: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['id'] = self.id_
|
||||
if self.session is not None:
|
||||
json['session'] = self.session
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
id_=str(json['id']),
|
||||
session=str(json['session']) if 'session' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def enable(
|
||||
presentation_url: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Starts observing for sinks that can be used for tab mirroring, and if set,
|
||||
sinks compatible with ``presentationUrl`` as well. When sinks are found, a
|
||||
``sinksUpdated`` event is fired.
|
||||
Also starts observing for issue messages. When an issue is added or removed,
|
||||
an ``issueUpdated`` event is fired.
|
||||
|
||||
:param presentation_url: *(Optional)*
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if presentation_url is not None:
|
||||
params['presentationUrl'] = presentation_url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.enable',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Stops observing for sinks and issues.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_sink_to_use(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets a sink to be used when the web page requests the browser to choose a
|
||||
sink via Presentation API, Remote Playback API, or Cast SDK.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.setSinkToUse',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_desktop_mirroring(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Starts mirroring the desktop to the sink.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.startDesktopMirroring',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_tab_mirroring(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Starts mirroring the tab to the sink.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.startTabMirroring',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def stop_casting(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Stops the active Cast session on the sink.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.stopCasting',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Cast.sinksUpdated')
|
||||
@dataclass
|
||||
class SinksUpdated:
|
||||
'''
|
||||
This is fired whenever the list of available sinks changes. A sink is a
|
||||
device or a software surface that you can cast to.
|
||||
'''
|
||||
sinks: typing.List[Sink]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> SinksUpdated:
|
||||
return cls(
|
||||
sinks=[Sink.from_json(i) for i in json['sinks']]
|
||||
)
|
||||
|
||||
|
||||
@event_class('Cast.issueUpdated')
|
||||
@dataclass
|
||||
class IssueUpdated:
|
||||
'''
|
||||
This is fired whenever the outstanding issue/error message changes.
|
||||
``issueMessage`` is empty if there is no issue.
|
||||
'''
|
||||
issue_message: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> IssueUpdated:
|
||||
return cls(
|
||||
issue_message=str(json['issueMessage'])
|
||||
)
|
||||
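# --- Usage sketch (added for illustration; not part of the generated file) ---
# Decoding a raw ``Cast.sinksUpdated`` event payload; the sink data is
# invented purely to exercise Sink.from_json.
def _example_parse_sinks() -> SinksUpdated:
    payload = {'sinks': [{'name': 'Living Room TV', 'id': 'sink-1'}]}
    event = SinksUpdated.from_json(payload)
    assert event.sinks[0].session is None  # no active session in this payload
    return event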
@@ -0,0 +1,105 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Console
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class ConsoleMessage:
|
||||
'''
|
||||
Console message.
|
||||
'''
|
||||
#: Message source.
|
||||
source: str
|
||||
|
||||
#: Message severity.
|
||||
level: str
|
||||
|
||||
#: Message text.
|
||||
text: str
|
||||
|
||||
#: URL of the message origin.
|
||||
url: typing.Optional[str] = None
|
||||
|
||||
#: Line number in the resource that generated this message (1-based).
|
||||
line: typing.Optional[int] = None
|
||||
|
||||
#: Column number in the resource that generated this message (1-based).
|
||||
column: typing.Optional[int] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['source'] = self.source
|
||||
json['level'] = self.level
|
||||
json['text'] = self.text
|
||||
if self.url is not None:
|
||||
json['url'] = self.url
|
||||
if self.line is not None:
|
||||
json['line'] = self.line
|
||||
if self.column is not None:
|
||||
json['column'] = self.column
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
source=str(json['source']),
|
||||
level=str(json['level']),
|
||||
text=str(json['text']),
|
||||
url=str(json['url']) if 'url' in json else None,
|
||||
line=int(json['line']) if 'line' in json else None,
|
||||
column=int(json['column']) if 'column' in json else None,
|
||||
)
|
||||
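# --- Usage sketch (added for illustration; not part of the generated file) ---
# Decoding a raw ``Console.messageAdded`` message object; the values are
# invented and only use the fields handled by from_json above.
def _example_parse_console_message() -> ConsoleMessage:
    payload = {
        'source': 'console-api',
        'level': 'error',
        'text': 'Uncaught TypeError: x is not a function',
        'url': 'https://example.com/app.js',
        'line': 42,
        'column': 13,
    }
    return ConsoleMessage.from_json(payload)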
|
||||
|
||||
def clear_messages() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Does nothing.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Console.clearMessages',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables console domain, prevents further console messages from being reported to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Console.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables console domain, sends the messages collected so far to the client by means of the
|
||||
``messageAdded`` notification.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Console.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Console.messageAdded')
|
||||
@dataclass
|
||||
class MessageAdded:
|
||||
'''
|
||||
Issued when new console message is added.
|
||||
'''
|
||||
#: Console message that has been added.
|
||||
message: ConsoleMessage
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> MessageAdded:
|
||||
return cls(
|
||||
message=ConsoleMessage.from_json(json['message'])
|
||||
)
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,162 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Database (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class DatabaseId(str):
|
||||
'''
|
||||
Unique identifier of Database object.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> DatabaseId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'DatabaseId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class Database:
|
||||
'''
|
||||
Database object.
|
||||
'''
|
||||
#: Database ID.
|
||||
id_: DatabaseId
|
||||
|
||||
#: Database domain.
|
||||
domain: str
|
||||
|
||||
#: Database name.
|
||||
name: str
|
||||
|
||||
#: Database version.
|
||||
version: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['id'] = self.id_.to_json()
|
||||
json['domain'] = self.domain
|
||||
json['name'] = self.name
|
||||
json['version'] = self.version
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
id_=DatabaseId.from_json(json['id']),
|
||||
domain=str(json['domain']),
|
||||
name=str(json['name']),
|
||||
version=str(json['version']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Error:
|
||||
'''
|
||||
Database error.
|
||||
'''
|
||||
#: Error message.
|
||||
message: str
|
||||
|
||||
#: Error code.
|
||||
code: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['message'] = self.message
|
||||
json['code'] = self.code
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
message=str(json['message']),
|
||||
code=int(json['code']),
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables database tracking, prevents database events from being sent to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables database tracking, database events will now be delivered to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def execute_sql(
|
||||
database_id: DatabaseId,
|
||||
query: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.Optional[typing.List[str]], typing.Optional[typing.List[typing.Any]], typing.Optional[Error]]]:
|
||||
'''
|
||||
:param database_id:
|
||||
:param query:
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **columnNames** -
|
||||
1. **values** -
|
||||
2. **sqlError** -
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['databaseId'] = database_id.to_json()
|
||||
params['query'] = query
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.executeSQL',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[str(i) for i in json['columnNames']] if 'columnNames' in json else None,
|
||||
[i for i in json['values']] if 'values' in json else None,
|
||||
Error.from_json(json['sqlError']) if 'sqlError' in json else None
|
||||
)
|
||||
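# --- Usage sketch (added for illustration; not part of the generated file) ---
# Driving ``execute_sql`` by hand with a made-up result payload, to show how
# the optional members of the returned tuple behave.
def _example_execute_sql_offline():
    gen = execute_sql(DatabaseId('db-1'), 'SELECT name FROM sqlite_master')
    _ = next(gen)  # the Database.executeSQL command dict
    fake_result = {'columnNames': ['name'], 'values': ['users', 'orders']}
    try:
        gen.send(fake_result)
    except StopIteration as exc:
        column_names, values, sql_error = exc.value
        assert sql_error is None  # 'sqlError' absent in the payload
        return column_names, values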
|
||||
|
||||
def get_database_table_names(
|
||||
database_id: DatabaseId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[str]]:
|
||||
'''
|
||||
:param database_id:
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['databaseId'] = database_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.getDatabaseTableNames',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [str(i) for i in json['tableNames']]
|
||||
|
||||
|
||||
@event_class('Database.addDatabase')
|
||||
@dataclass
|
||||
class AddDatabase:
|
||||
database: Database
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AddDatabase:
|
||||
return cls(
|
||||
database=Database.from_json(json['database'])
|
||||
)
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,43 @@
# DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: DeviceOrientation (experimental)
from __future__ import annotations
from .util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing


def clear_device_orientation_override() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Clears the overridden Device Orientation.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'DeviceOrientation.clearDeviceOrientationOverride',
    }
    json = yield cmd_dict


def set_device_orientation_override(
        alpha: float,
        beta: float,
        gamma: float
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Overrides the Device Orientation.

    :param alpha: Mock alpha
    :param beta: Mock beta
    :param gamma: Mock gamma
    '''
    params: T_JSON_DICT = dict()
    params['alpha'] = alpha
    params['beta'] = beta
    params['gamma'] = gamma
    cmd_dict: T_JSON_DICT = {
        'method': 'DeviceOrientation.setDeviceOrientationOverride',
        'params': params,
    }
    json = yield cmd_dict
File diff suppressed because it is too large
@@ -0,0 +1,312 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DOMDebugger
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import runtime
|
||||
|
||||
|
||||
class DOMBreakpointType(enum.Enum):
|
||||
'''
|
||||
DOM breakpoint type.
|
||||
'''
|
||||
SUBTREE_MODIFIED = "subtree-modified"
|
||||
ATTRIBUTE_MODIFIED = "attribute-modified"
|
||||
NODE_REMOVED = "node-removed"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
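# --- Usage sketch (added for illustration; not part of the generated file) ---
# Building a DOMDebugger.setDOMBreakpoint command that pauses when a node's
# subtree is modified. ``dom.NodeId`` is assumed to behave like the other
# simple CDP id wrappers in these modules (providing to_json()).
def _example_subtree_breakpoint_cmd(node_id: dom.NodeId) -> T_JSON_DICT:
    gen = set_dom_breakpoint(node_id, DOMBreakpointType.SUBTREE_MODIFIED)
    return next(gen)  # {'method': 'DOMDebugger.setDOMBreakpoint', 'params': {...}}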
|
||||
|
||||
class CSPViolationType(enum.Enum):
|
||||
'''
|
||||
CSP Violation type.
|
||||
'''
|
||||
TRUSTEDTYPE_SINK_VIOLATION = "trustedtype-sink-violation"
|
||||
TRUSTEDTYPE_POLICY_VIOLATION = "trustedtype-policy-violation"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class EventListener:
|
||||
'''
|
||||
Object event listener.
|
||||
'''
|
||||
#: ``EventListener``'s type.
|
||||
type_: str
|
||||
|
||||
#: ``EventListener``'s useCapture.
|
||||
use_capture: bool
|
||||
|
||||
#: ``EventListener``'s passive flag.
|
||||
passive: bool
|
||||
|
||||
#: ``EventListener``'s once flag.
|
||||
once: bool
|
||||
|
||||
#: Script id of the handler code.
|
||||
script_id: runtime.ScriptId
|
||||
|
||||
#: Line number in the script (0-based).
|
||||
line_number: int
|
||||
|
||||
#: Column number in the script (0-based).
|
||||
column_number: int
|
||||
|
||||
#: Event handler function value.
|
||||
handler: typing.Optional[runtime.RemoteObject] = None
|
||||
|
||||
#: Event original handler function value.
|
||||
original_handler: typing.Optional[runtime.RemoteObject] = None
|
||||
|
||||
#: Node the listener is added to (if any).
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_
|
||||
json['useCapture'] = self.use_capture
|
||||
json['passive'] = self.passive
|
||||
json['once'] = self.once
|
||||
json['scriptId'] = self.script_id.to_json()
|
||||
json['lineNumber'] = self.line_number
|
||||
json['columnNumber'] = self.column_number
|
||||
if self.handler is not None:
|
||||
json['handler'] = self.handler.to_json()
|
||||
if self.original_handler is not None:
|
||||
json['originalHandler'] = self.original_handler.to_json()
|
||||
if self.backend_node_id is not None:
|
||||
json['backendNodeId'] = self.backend_node_id.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=str(json['type']),
|
||||
use_capture=bool(json['useCapture']),
|
||||
passive=bool(json['passive']),
|
||||
once=bool(json['once']),
|
||||
script_id=runtime.ScriptId.from_json(json['scriptId']),
|
||||
line_number=int(json['lineNumber']),
|
||||
column_number=int(json['columnNumber']),
|
||||
handler=runtime.RemoteObject.from_json(json['handler']) if 'handler' in json else None,
|
||||
original_handler=runtime.RemoteObject.from_json(json['originalHandler']) if 'originalHandler' in json else None,
|
||||
backend_node_id=dom.BackendNodeId.from_json(json['backendNodeId']) if 'backendNodeId' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def get_event_listeners(
|
||||
object_id: runtime.RemoteObjectId,
|
||||
depth: typing.Optional[int] = None,
|
||||
pierce: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[EventListener]]:
|
||||
'''
|
||||
Returns event listeners of the given object.
|
||||
|
||||
:param object_id: Identifier of the object to return listeners for.
|
||||
:param depth: *(Optional)* The maximum depth at which Node children should be retrieved, defaults to 1. Use -1 for the entire subtree or provide an integer larger than 0.
|
||||
:param pierce: *(Optional)* Whether or not iframes and shadow roots should be traversed when returning the subtree (default is false). Reports listeners for all contexts if pierce is enabled.
|
||||
:returns: Array of relevant listeners.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['objectId'] = object_id.to_json()
|
||||
if depth is not None:
|
||||
params['depth'] = depth
|
||||
if pierce is not None:
|
||||
params['pierce'] = pierce
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.getEventListeners',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [EventListener.from_json(i) for i in json['listeners']]
|
||||
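# --- Usage sketch (added for illustration; not part of the generated file) ---
# Listing listeners for an object and the whole subtree beneath it.
# ``run_cmd`` is an assumed helper that drives one of these generators to
# completion and returns its value (see the Browser-domain sketch).
def _example_all_listeners(object_id: runtime.RemoteObjectId, run_cmd):
    listeners = run_cmd(get_event_listeners(object_id, depth=-1, pierce=True))
    return [(l.type_, l.line_number, l.column_number) for l in listeners]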
|
||||
|
||||
def remove_dom_breakpoint(
|
||||
node_id: dom.NodeId,
|
||||
type_: DOMBreakpointType
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes DOM breakpoint that was set using ``setDOMBreakpoint``.
|
||||
|
||||
:param node_id: Identifier of the node to remove breakpoint from.
|
||||
:param type_: Type of the breakpoint to remove.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['nodeId'] = node_id.to_json()
|
||||
params['type'] = type_.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeDOMBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def remove_event_listener_breakpoint(
|
||||
event_name: str,
|
||||
target_name: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes breakpoint on particular DOM event.
|
||||
|
||||
:param event_name: Event name.
|
||||
:param target_name: **(EXPERIMENTAL)** *(Optional)* EventTarget interface name.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
if target_name is not None:
|
||||
params['targetName'] = target_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeEventListenerBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def remove_instrumentation_breakpoint(
|
||||
event_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes breakpoint on particular native event.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param event_name: Instrumentation name to stop on.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeInstrumentationBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def remove_xhr_breakpoint(
|
||||
url: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes breakpoint from XMLHttpRequest.
|
||||
|
||||
:param url: Resource URL substring.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['url'] = url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeXHRBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_break_on_csp_violation(
|
||||
violation_types: typing.List[CSPViolationType]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular CSP violations.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param violation_types: CSP Violations to stop upon.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['violationTypes'] = [i.to_json() for i in violation_types]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setBreakOnCSPViolation',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_dom_breakpoint(
|
||||
node_id: dom.NodeId,
|
||||
type_: DOMBreakpointType
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular operation with DOM.
|
||||
|
||||
:param node_id: Identifier of the node to set breakpoint on.
|
||||
:param type_: Type of the operation to stop upon.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['nodeId'] = node_id.to_json()
|
||||
params['type'] = type_.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setDOMBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_event_listener_breakpoint(
|
||||
event_name: str,
|
||||
target_name: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular DOM event.
|
||||
|
||||
:param event_name: DOM Event name to stop on (any DOM event will do).
|
||||
:param target_name: **(EXPERIMENTAL)** *(Optional)* EventTarget interface name to stop on. If equal to ```"*"``` or not provided, will stop on any EventTarget.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
if target_name is not None:
|
||||
params['targetName'] = target_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setEventListenerBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_instrumentation_breakpoint(
|
||||
event_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular native event.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param event_name: Instrumentation name to stop on.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setInstrumentationBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_xhr_breakpoint(
|
||||
url: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on XMLHttpRequest.
|
||||
|
||||
:param url: Resource URL substring. All XHRs having this substring in the URL will get stopped upon.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['url'] = url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setXHRBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
@@ -0,0 +1,863 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DOMSnapshot (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import dom_debugger
|
||||
from . import page
|
||||
|
||||
|
||||
@dataclass
|
||||
class DOMNode:
|
||||
'''
|
||||
A Node in the DOM tree.
|
||||
'''
|
||||
#: ``Node``'s nodeType.
|
||||
node_type: int
|
||||
|
||||
#: ``Node``'s nodeName.
|
||||
node_name: str
|
||||
|
||||
#: ``Node``'s nodeValue.
|
||||
node_value: str
|
||||
|
||||
#: ``Node``'s id, corresponds to DOM.Node.backendNodeId.
|
||||
backend_node_id: dom.BackendNodeId
|
||||
|
||||
#: Only set for textarea elements, contains the text value.
|
||||
text_value: typing.Optional[str] = None
|
||||
|
||||
#: Only set for input elements, contains the input's associated text value.
|
||||
input_value: typing.Optional[str] = None
|
||||
|
||||
#: Only set for radio and checkbox input elements, indicates if the element has been checked
|
||||
input_checked: typing.Optional[bool] = None
|
||||
|
||||
#: Only set for option elements, indicates if the element has been selected
|
||||
option_selected: typing.Optional[bool] = None
|
||||
|
||||
#: The indexes of the node's child nodes in the ``domNodes`` array returned by ``getSnapshot``, if
|
||||
#: any.
|
||||
child_node_indexes: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: Attributes of an ``Element`` node.
|
||||
attributes: typing.Optional[typing.List[NameValue]] = None
|
||||
|
||||
#: Indexes of pseudo elements associated with this node in the ``domNodes`` array returned by
|
||||
#: ``getSnapshot``, if any.
|
||||
pseudo_element_indexes: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: The index of the node's related layout tree node in the ``layoutTreeNodes`` array returned by
|
||||
#: ``getSnapshot``, if any.
|
||||
layout_node_index: typing.Optional[int] = None
|
||||
|
||||
#: Document URL that ``Document`` or ``FrameOwner`` node points to.
|
||||
document_url: typing.Optional[str] = None
|
||||
|
||||
#: Base URL that ``Document`` or ``FrameOwner`` node uses for URL completion.
|
||||
base_url: typing.Optional[str] = None
|
||||
|
||||
#: Only set for documents, contains the document's content language.
|
||||
content_language: typing.Optional[str] = None
|
||||
|
||||
#: Only set for documents, contains the document's character set encoding.
|
||||
document_encoding: typing.Optional[str] = None
|
||||
|
||||
#: ``DocumentType`` node's publicId.
|
||||
public_id: typing.Optional[str] = None
|
||||
|
||||
#: ``DocumentType`` node's systemId.
|
||||
system_id: typing.Optional[str] = None
|
||||
|
||||
#: Frame ID for frame owner elements and also for the document node.
|
||||
frame_id: typing.Optional[page.FrameId] = None
|
||||
|
||||
#: The index of a frame owner element's content document in the ``domNodes`` array returned by
|
||||
#: ``getSnapshot``, if any.
|
||||
content_document_index: typing.Optional[int] = None
|
||||
|
||||
#: Type of a pseudo element node.
|
||||
pseudo_type: typing.Optional[dom.PseudoType] = None
|
||||
|
||||
#: Shadow root type.
|
||||
shadow_root_type: typing.Optional[dom.ShadowRootType] = None
|
||||
|
||||
#: Whether this DOM node responds to mouse clicks. This includes nodes that have had click
|
||||
#: event listeners attached via JavaScript as well as anchor tags that naturally navigate when
|
||||
#: clicked.
|
||||
is_clickable: typing.Optional[bool] = None
|
||||
|
||||
#: Details of the node's event listeners, if any.
|
||||
event_listeners: typing.Optional[typing.List[dom_debugger.EventListener]] = None
|
||||
|
||||
#: The selected url for nodes with a srcset attribute.
|
||||
current_source_url: typing.Optional[str] = None
|
||||
|
||||
#: The url of the script (if any) that generates this node.
|
||||
origin_url: typing.Optional[str] = None
|
||||
|
||||
#: Scroll offsets, set when this node is a Document.
|
||||
scroll_offset_x: typing.Optional[float] = None
|
||||
|
||||
scroll_offset_y: typing.Optional[float] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['nodeType'] = self.node_type
|
||||
json['nodeName'] = self.node_name
|
||||
json['nodeValue'] = self.node_value
|
||||
json['backendNodeId'] = self.backend_node_id.to_json()
|
||||
if self.text_value is not None:
|
||||
json['textValue'] = self.text_value
|
||||
if self.input_value is not None:
|
||||
json['inputValue'] = self.input_value
|
||||
if self.input_checked is not None:
|
||||
json['inputChecked'] = self.input_checked
|
||||
if self.option_selected is not None:
|
||||
json['optionSelected'] = self.option_selected
|
||||
if self.child_node_indexes is not None:
|
||||
json['childNodeIndexes'] = [i for i in self.child_node_indexes]
|
||||
if self.attributes is not None:
|
||||
json['attributes'] = [i.to_json() for i in self.attributes]
|
||||
if self.pseudo_element_indexes is not None:
|
||||
json['pseudoElementIndexes'] = [i for i in self.pseudo_element_indexes]
|
||||
if self.layout_node_index is not None:
|
||||
json['layoutNodeIndex'] = self.layout_node_index
|
||||
if self.document_url is not None:
|
||||
json['documentURL'] = self.document_url
|
||||
if self.base_url is not None:
|
||||
json['baseURL'] = self.base_url
|
||||
if self.content_language is not None:
|
||||
json['contentLanguage'] = self.content_language
|
||||
if self.document_encoding is not None:
|
||||
json['documentEncoding'] = self.document_encoding
|
||||
if self.public_id is not None:
|
||||
json['publicId'] = self.public_id
|
||||
if self.system_id is not None:
|
||||
json['systemId'] = self.system_id
|
||||
if self.frame_id is not None:
|
||||
json['frameId'] = self.frame_id.to_json()
|
||||
if self.content_document_index is not None:
|
||||
json['contentDocumentIndex'] = self.content_document_index
|
||||
if self.pseudo_type is not None:
|
||||
json['pseudoType'] = self.pseudo_type.to_json()
|
||||
if self.shadow_root_type is not None:
|
||||
json['shadowRootType'] = self.shadow_root_type.to_json()
|
||||
if self.is_clickable is not None:
|
||||
json['isClickable'] = self.is_clickable
|
||||
if self.event_listeners is not None:
|
||||
json['eventListeners'] = [i.to_json() for i in self.event_listeners]
|
||||
if self.current_source_url is not None:
|
||||
json['currentSourceURL'] = self.current_source_url
|
||||
if self.origin_url is not None:
|
||||
json['originURL'] = self.origin_url
|
||||
if self.scroll_offset_x is not None:
|
||||
json['scrollOffsetX'] = self.scroll_offset_x
|
||||
if self.scroll_offset_y is not None:
|
||||
json['scrollOffsetY'] = self.scroll_offset_y
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
node_type=int(json['nodeType']),
|
||||
node_name=str(json['nodeName']),
|
||||
node_value=str(json['nodeValue']),
|
||||
backend_node_id=dom.BackendNodeId.from_json(json['backendNodeId']),
|
||||
text_value=str(json['textValue']) if 'textValue' in json else None,
|
||||
input_value=str(json['inputValue']) if 'inputValue' in json else None,
|
||||
input_checked=bool(json['inputChecked']) if 'inputChecked' in json else None,
|
||||
option_selected=bool(json['optionSelected']) if 'optionSelected' in json else None,
|
||||
child_node_indexes=[int(i) for i in json['childNodeIndexes']] if 'childNodeIndexes' in json else None,
|
||||
attributes=[NameValue.from_json(i) for i in json['attributes']] if 'attributes' in json else None,
|
||||
pseudo_element_indexes=[int(i) for i in json['pseudoElementIndexes']] if 'pseudoElementIndexes' in json else None,
|
||||
layout_node_index=int(json['layoutNodeIndex']) if 'layoutNodeIndex' in json else None,
|
||||
document_url=str(json['documentURL']) if 'documentURL' in json else None,
|
||||
base_url=str(json['baseURL']) if 'baseURL' in json else None,
|
||||
content_language=str(json['contentLanguage']) if 'contentLanguage' in json else None,
|
||||
document_encoding=str(json['documentEncoding']) if 'documentEncoding' in json else None,
|
||||
public_id=str(json['publicId']) if 'publicId' in json else None,
|
||||
system_id=str(json['systemId']) if 'systemId' in json else None,
|
||||
frame_id=page.FrameId.from_json(json['frameId']) if 'frameId' in json else None,
|
||||
content_document_index=int(json['contentDocumentIndex']) if 'contentDocumentIndex' in json else None,
|
||||
pseudo_type=dom.PseudoType.from_json(json['pseudoType']) if 'pseudoType' in json else None,
|
||||
shadow_root_type=dom.ShadowRootType.from_json(json['shadowRootType']) if 'shadowRootType' in json else None,
|
||||
is_clickable=bool(json['isClickable']) if 'isClickable' in json else None,
|
||||
event_listeners=[dom_debugger.EventListener.from_json(i) for i in json['eventListeners']] if 'eventListeners' in json else None,
|
||||
current_source_url=str(json['currentSourceURL']) if 'currentSourceURL' in json else None,
|
||||
origin_url=str(json['originURL']) if 'originURL' in json else None,
|
||||
scroll_offset_x=float(json['scrollOffsetX']) if 'scrollOffsetX' in json else None,
|
||||
scroll_offset_y=float(json['scrollOffsetY']) if 'scrollOffsetY' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class InlineTextBox:
|
||||
'''
|
||||
Details of post layout rendered text positions. The exact layout should not be regarded as
|
||||
stable and may change between versions.
|
||||
'''
|
||||
#: The bounding box in document coordinates. Note that scroll offset of the document is ignored.
|
||||
bounding_box: dom.Rect
|
||||
|
||||
#: The starting index in characters, for this post layout textbox substring. Characters that
|
||||
#: would be represented as a surrogate pair in UTF-16 have length 2.
|
||||
start_character_index: int
|
||||
|
||||
#: The number of characters in this post layout textbox substring. Characters that would be
|
||||
#: represented as a surrogate pair in UTF-16 have length 2.
|
||||
num_characters: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['boundingBox'] = self.bounding_box.to_json()
|
||||
json['startCharacterIndex'] = self.start_character_index
|
||||
json['numCharacters'] = self.num_characters
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
bounding_box=dom.Rect.from_json(json['boundingBox']),
|
||||
start_character_index=int(json['startCharacterIndex']),
|
||||
num_characters=int(json['numCharacters']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class LayoutTreeNode:
|
||||
'''
|
||||
Details of an element in the DOM tree with a LayoutObject.
|
||||
'''
|
||||
#: The index of the related DOM node in the ``domNodes`` array returned by ``getSnapshot``.
|
||||
dom_node_index: int
|
||||
|
||||
#: The bounding box in document coordinates. Note that scroll offset of the document is ignored.
|
||||
bounding_box: dom.Rect
|
||||
|
||||
#: Contents of the LayoutText, if any.
|
||||
layout_text: typing.Optional[str] = None
|
||||
|
||||
#: The post-layout inline text nodes, if any.
|
||||
inline_text_nodes: typing.Optional[typing.List[InlineTextBox]] = None
|
||||
|
||||
#: Index into the ``computedStyles`` array returned by ``getSnapshot``.
|
||||
style_index: typing.Optional[int] = None
|
||||
|
||||
#: Global paint order index, which is determined by the stacking order of the nodes. Nodes
|
||||
#: that are painted together will have the same index. Only provided if includePaintOrder in
|
||||
#: getSnapshot was true.
|
||||
paint_order: typing.Optional[int] = None
|
||||
|
||||
#: Set to true to indicate the element begins a new stacking context.
|
||||
is_stacking_context: typing.Optional[bool] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['domNodeIndex'] = self.dom_node_index
|
||||
json['boundingBox'] = self.bounding_box.to_json()
|
||||
if self.layout_text is not None:
|
||||
json['layoutText'] = self.layout_text
|
||||
if self.inline_text_nodes is not None:
|
||||
json['inlineTextNodes'] = [i.to_json() for i in self.inline_text_nodes]
|
||||
if self.style_index is not None:
|
||||
json['styleIndex'] = self.style_index
|
||||
if self.paint_order is not None:
|
||||
json['paintOrder'] = self.paint_order
|
||||
if self.is_stacking_context is not None:
|
||||
json['isStackingContext'] = self.is_stacking_context
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
dom_node_index=int(json['domNodeIndex']),
|
||||
bounding_box=dom.Rect.from_json(json['boundingBox']),
|
||||
layout_text=str(json['layoutText']) if 'layoutText' in json else None,
|
||||
inline_text_nodes=[InlineTextBox.from_json(i) for i in json['inlineTextNodes']] if 'inlineTextNodes' in json else None,
|
||||
style_index=int(json['styleIndex']) if 'styleIndex' in json else None,
|
||||
paint_order=int(json['paintOrder']) if 'paintOrder' in json else None,
|
||||
is_stacking_context=bool(json['isStackingContext']) if 'isStackingContext' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ComputedStyle:
|
||||
'''
|
||||
A subset of the full ComputedStyle as defined by the request whitelist.
|
||||
'''
|
||||
#: Name/value pairs of computed style properties.
|
||||
properties: typing.List[NameValue]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['properties'] = [i.to_json() for i in self.properties]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
properties=[NameValue.from_json(i) for i in json['properties']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class NameValue:
|
||||
'''
|
||||
A name/value pair.
|
||||
'''
|
||||
#: Attribute/property name.
|
||||
name: str
|
||||
|
||||
#: Attribute/property value.
|
||||
value: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
value=str(json['value']),
|
||||
)
|
||||
|
||||
|
||||
class StringIndex(int):
|
||||
'''
|
||||
Index of the string in the strings table.
|
||||
'''
|
||||
def to_json(self) -> int:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: int) -> StringIndex:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'StringIndex({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class ArrayOfStrings(list):
|
||||
'''
|
||||
Index of the string in the strings table.
|
||||
'''
|
||||
def to_json(self) -> typing.List[StringIndex]:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: typing.List[StringIndex]) -> ArrayOfStrings:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'ArrayOfStrings({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class RareStringData:
|
||||
'''
|
||||
Data that is only present on rare nodes.
|
||||
'''
|
||||
index: typing.List[int]
|
||||
|
||||
value: typing.List[StringIndex]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['index'] = [i for i in self.index]
|
||||
json['value'] = [i.to_json() for i in self.value]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
index=[int(i) for i in json['index']],
|
||||
value=[StringIndex.from_json(i) for i in json['value']],
|
||||
)
|
||||
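# --- Usage sketch (added for illustration; not part of the generated file) ---
# Rare string data is stored column-wise: ``index[i]`` is assumed to be the
# position of a node in its snapshot and ``value[i]`` the index of that node's
# string in the snapshot's shared strings table (per the CDP specification).
def _example_decode_rare_strings(rare: RareStringData,
                                 strings: typing.List[str]) -> typing.Dict[int, str]:
    return {node_index: strings[string_index]
            for node_index, string_index in zip(rare.index, rare.value)}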
|
||||
|
||||
@dataclass
|
||||
class RareBooleanData:
|
||||
index: typing.List[int]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['index'] = [i for i in self.index]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
index=[int(i) for i in json['index']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class RareIntegerData:
|
||||
index: typing.List[int]
|
||||
|
||||
value: typing.List[int]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['index'] = [i for i in self.index]
|
||||
json['value'] = [i for i in self.value]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
index=[int(i) for i in json['index']],
|
||||
value=[int(i) for i in json['value']],
|
||||
)
|
||||
|
||||
|
||||
class Rectangle(list):
    def to_json(self) -> typing.List[float]:
        return self

    @classmethod
    def from_json(cls, json: typing.List[float]) -> Rectangle:
        return cls(json)

    def __repr__(self):
        return 'Rectangle({})'.format(super().__repr__())
|
||||
|
||||
@dataclass
|
||||
class DocumentSnapshot:
|
||||
'''
|
||||
Document snapshot.
|
||||
'''
|
||||
#: Document URL that ``Document`` or ``FrameOwner`` node points to.
|
||||
document_url: StringIndex
|
||||
|
||||
#: Document title.
|
||||
title: StringIndex
|
||||
|
||||
#: Base URL that ``Document`` or ``FrameOwner`` node uses for URL completion.
|
||||
base_url: StringIndex
|
||||
|
||||
#: Contains the document's content language.
|
||||
content_language: StringIndex
|
||||
|
||||
#: Contains the document's character set encoding.
|
||||
encoding_name: StringIndex
|
||||
|
||||
#: ``DocumentType`` node's publicId.
|
||||
public_id: StringIndex
|
||||
|
||||
#: ``DocumentType`` node's systemId.
|
||||
system_id: StringIndex
|
||||
|
||||
#: Frame ID for frame owner elements and also for the document node.
|
||||
frame_id: StringIndex
|
||||
|
||||
#: A table with dom nodes.
|
||||
nodes: NodeTreeSnapshot
|
||||
|
||||
#: The nodes in the layout tree.
|
||||
layout: LayoutTreeSnapshot
|
||||
|
||||
#: The post-layout inline text nodes.
|
||||
text_boxes: TextBoxSnapshot
|
||||
|
||||
#: Horizontal scroll offset.
|
||||
scroll_offset_x: typing.Optional[float] = None
|
||||
|
||||
#: Vertical scroll offset.
|
||||
scroll_offset_y: typing.Optional[float] = None
|
||||
|
||||
#: Document content width.
|
||||
content_width: typing.Optional[float] = None
|
||||
|
||||
#: Document content height.
|
||||
content_height: typing.Optional[float] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['documentURL'] = self.document_url.to_json()
|
||||
json['title'] = self.title.to_json()
|
||||
json['baseURL'] = self.base_url.to_json()
|
||||
json['contentLanguage'] = self.content_language.to_json()
|
||||
json['encodingName'] = self.encoding_name.to_json()
|
||||
json['publicId'] = self.public_id.to_json()
|
||||
json['systemId'] = self.system_id.to_json()
|
||||
json['frameId'] = self.frame_id.to_json()
|
||||
json['nodes'] = self.nodes.to_json()
|
||||
json['layout'] = self.layout.to_json()
|
||||
json['textBoxes'] = self.text_boxes.to_json()
|
||||
if self.scroll_offset_x is not None:
|
||||
json['scrollOffsetX'] = self.scroll_offset_x
|
||||
if self.scroll_offset_y is not None:
|
||||
json['scrollOffsetY'] = self.scroll_offset_y
|
||||
if self.content_width is not None:
|
||||
json['contentWidth'] = self.content_width
|
||||
if self.content_height is not None:
|
||||
json['contentHeight'] = self.content_height
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
document_url=StringIndex.from_json(json['documentURL']),
|
||||
title=StringIndex.from_json(json['title']),
|
||||
base_url=StringIndex.from_json(json['baseURL']),
|
||||
content_language=StringIndex.from_json(json['contentLanguage']),
|
||||
encoding_name=StringIndex.from_json(json['encodingName']),
|
||||
public_id=StringIndex.from_json(json['publicId']),
|
||||
system_id=StringIndex.from_json(json['systemId']),
|
||||
frame_id=StringIndex.from_json(json['frameId']),
|
||||
nodes=NodeTreeSnapshot.from_json(json['nodes']),
|
||||
layout=LayoutTreeSnapshot.from_json(json['layout']),
|
||||
text_boxes=TextBoxSnapshot.from_json(json['textBoxes']),
|
||||
scroll_offset_x=float(json['scrollOffsetX']) if 'scrollOffsetX' in json else None,
|
||||
scroll_offset_y=float(json['scrollOffsetY']) if 'scrollOffsetY' in json else None,
|
||||
content_width=float(json['contentWidth']) if 'contentWidth' in json else None,
|
||||
content_height=float(json['contentHeight']) if 'contentHeight' in json else None,
|
||||
)
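# Hedged helper sketch (not part of the generated module): every ``StringIndex``
# field on ``DocumentSnapshot`` is an index into the shared ``strings`` table
# returned by ``captureSnapshot``, so string values are resolved by plain list
# indexing. The helper name is illustrative only.
def _resolve_document_title(doc: DocumentSnapshot, strings: typing.List[str]) -> str:
    # ``doc.title`` is a StringIndex, i.e. an int position in the shared table.
    return strings[doc.title]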
|
||||
|
||||
|
||||
@dataclass
|
||||
class NodeTreeSnapshot:
|
||||
'''
|
||||
Table containing nodes.
|
||||
'''
|
||||
#: Parent node index.
|
||||
parent_index: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: ``Node``'s nodeType.
|
||||
node_type: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: Type of the shadow root the ``Node`` is in. String values are equal to the ``ShadowRootType`` enum.
|
||||
shadow_root_type: typing.Optional[RareStringData] = None
|
||||
|
||||
#: ``Node``'s nodeName.
|
||||
node_name: typing.Optional[typing.List[StringIndex]] = None
|
||||
|
||||
#: ``Node``'s nodeValue.
|
||||
node_value: typing.Optional[typing.List[StringIndex]] = None
|
||||
|
||||
#: ``Node``'s id, corresponds to DOM.Node.backendNodeId.
|
||||
backend_node_id: typing.Optional[typing.List[dom.BackendNodeId]] = None
|
||||
|
||||
#: Attributes of an ``Element`` node. Flatten name, value pairs.
|
||||
attributes: typing.Optional[typing.List[ArrayOfStrings]] = None
|
||||
|
||||
#: Only set for textarea elements, contains the text value.
|
||||
text_value: typing.Optional[RareStringData] = None
|
||||
|
||||
#: Only set for input elements, contains the input's associated text value.
|
||||
input_value: typing.Optional[RareStringData] = None
|
||||
|
||||
#: Only set for radio and checkbox input elements, indicates if the element has been checked
|
||||
input_checked: typing.Optional[RareBooleanData] = None
|
||||
|
||||
#: Only set for option elements, indicates if the element has been selected
|
||||
option_selected: typing.Optional[RareBooleanData] = None
|
||||
|
||||
#: The index of the document in the list of the snapshot documents.
|
||||
content_document_index: typing.Optional[RareIntegerData] = None
|
||||
|
||||
#: Type of a pseudo element node.
|
||||
pseudo_type: typing.Optional[RareStringData] = None
|
||||
|
||||
#: Whether this DOM node responds to mouse clicks. This includes nodes that have had click
|
||||
#: event listeners attached via JavaScript as well as anchor tags that naturally navigate when
|
||||
#: clicked.
|
||||
is_clickable: typing.Optional[RareBooleanData] = None
|
||||
|
||||
#: The selected url for nodes with a srcset attribute.
|
||||
current_source_url: typing.Optional[RareStringData] = None
|
||||
|
||||
#: The url of the script (if any) that generates this node.
|
||||
origin_url: typing.Optional[RareStringData] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
if self.parent_index is not None:
|
||||
json['parentIndex'] = [i for i in self.parent_index]
|
||||
if self.node_type is not None:
|
||||
json['nodeType'] = [i for i in self.node_type]
|
||||
if self.shadow_root_type is not None:
|
||||
json['shadowRootType'] = self.shadow_root_type.to_json()
|
||||
if self.node_name is not None:
|
||||
json['nodeName'] = [i.to_json() for i in self.node_name]
|
||||
if self.node_value is not None:
|
||||
json['nodeValue'] = [i.to_json() for i in self.node_value]
|
||||
if self.backend_node_id is not None:
|
||||
json['backendNodeId'] = [i.to_json() for i in self.backend_node_id]
|
||||
if self.attributes is not None:
|
||||
json['attributes'] = [i.to_json() for i in self.attributes]
|
||||
if self.text_value is not None:
|
||||
json['textValue'] = self.text_value.to_json()
|
||||
if self.input_value is not None:
|
||||
json['inputValue'] = self.input_value.to_json()
|
||||
if self.input_checked is not None:
|
||||
json['inputChecked'] = self.input_checked.to_json()
|
||||
if self.option_selected is not None:
|
||||
json['optionSelected'] = self.option_selected.to_json()
|
||||
if self.content_document_index is not None:
|
||||
json['contentDocumentIndex'] = self.content_document_index.to_json()
|
||||
if self.pseudo_type is not None:
|
||||
json['pseudoType'] = self.pseudo_type.to_json()
|
||||
if self.is_clickable is not None:
|
||||
json['isClickable'] = self.is_clickable.to_json()
|
||||
if self.current_source_url is not None:
|
||||
json['currentSourceURL'] = self.current_source_url.to_json()
|
||||
if self.origin_url is not None:
|
||||
json['originURL'] = self.origin_url.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
parent_index=[int(i) for i in json['parentIndex']] if 'parentIndex' in json else None,
|
||||
node_type=[int(i) for i in json['nodeType']] if 'nodeType' in json else None,
|
||||
shadow_root_type=RareStringData.from_json(json['shadowRootType']) if 'shadowRootType' in json else None,
|
||||
node_name=[StringIndex.from_json(i) for i in json['nodeName']] if 'nodeName' in json else None,
|
||||
node_value=[StringIndex.from_json(i) for i in json['nodeValue']] if 'nodeValue' in json else None,
|
||||
backend_node_id=[dom.BackendNodeId.from_json(i) for i in json['backendNodeId']] if 'backendNodeId' in json else None,
|
||||
attributes=[ArrayOfStrings.from_json(i) for i in json['attributes']] if 'attributes' in json else None,
|
||||
text_value=RareStringData.from_json(json['textValue']) if 'textValue' in json else None,
|
||||
input_value=RareStringData.from_json(json['inputValue']) if 'inputValue' in json else None,
|
||||
input_checked=RareBooleanData.from_json(json['inputChecked']) if 'inputChecked' in json else None,
|
||||
option_selected=RareBooleanData.from_json(json['optionSelected']) if 'optionSelected' in json else None,
|
||||
content_document_index=RareIntegerData.from_json(json['contentDocumentIndex']) if 'contentDocumentIndex' in json else None,
|
||||
pseudo_type=RareStringData.from_json(json['pseudoType']) if 'pseudoType' in json else None,
|
||||
is_clickable=RareBooleanData.from_json(json['isClickable']) if 'isClickable' in json else None,
|
||||
current_source_url=RareStringData.from_json(json['currentSourceURL']) if 'currentSourceURL' in json else None,
|
||||
origin_url=RareStringData.from_json(json['originURL']) if 'originURL' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class LayoutTreeSnapshot:
|
||||
'''
|
||||
Table of details of an element in the DOM tree with a LayoutObject.
|
||||
'''
|
||||
#: Index of the corresponding node in the ``NodeTreeSnapshot`` array returned by ``captureSnapshot``.
|
||||
node_index: typing.List[int]
|
||||
|
||||
#: Array of indexes specifying computed style strings, filtered according to the ``computedStyles`` parameter passed to ``captureSnapshot``.
|
||||
styles: typing.List[ArrayOfStrings]
|
||||
|
||||
#: The absolute position bounding box.
|
||||
bounds: typing.List[Rectangle]
|
||||
|
||||
#: Contents of the LayoutText, if any.
|
||||
text: typing.List[StringIndex]
|
||||
|
||||
#: Stacking context information.
|
||||
stacking_contexts: RareBooleanData
|
||||
|
||||
#: Global paint order index, which is determined by the stacking order of the nodes. Nodes
|
||||
#: that are painted together will have the same index. Only provided if includePaintOrder in
|
||||
#: captureSnapshot was true.
|
||||
paint_orders: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: The offset rect of nodes. Only available when includeDOMRects is set to true
|
||||
offset_rects: typing.Optional[typing.List[Rectangle]] = None
|
||||
|
||||
#: The scroll rect of nodes. Only available when includeDOMRects is set to true
|
||||
scroll_rects: typing.Optional[typing.List[Rectangle]] = None
|
||||
|
||||
#: The client rect of nodes. Only available when includeDOMRects is set to true
|
||||
client_rects: typing.Optional[typing.List[Rectangle]] = None
|
||||
|
||||
#: The list of background colors that are blended with colors of overlapping elements.
|
||||
blended_background_colors: typing.Optional[typing.List[StringIndex]] = None
|
||||
|
||||
#: The list of computed text opacities.
|
||||
text_color_opacities: typing.Optional[typing.List[float]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['nodeIndex'] = [i for i in self.node_index]
|
||||
json['styles'] = [i.to_json() for i in self.styles]
|
||||
json['bounds'] = [i.to_json() for i in self.bounds]
|
||||
json['text'] = [i.to_json() for i in self.text]
|
||||
json['stackingContexts'] = self.stacking_contexts.to_json()
|
||||
if self.paint_orders is not None:
|
||||
json['paintOrders'] = [i for i in self.paint_orders]
|
||||
if self.offset_rects is not None:
|
||||
json['offsetRects'] = [i.to_json() for i in self.offset_rects]
|
||||
if self.scroll_rects is not None:
|
||||
json['scrollRects'] = [i.to_json() for i in self.scroll_rects]
|
||||
if self.client_rects is not None:
|
||||
json['clientRects'] = [i.to_json() for i in self.client_rects]
|
||||
if self.blended_background_colors is not None:
|
||||
json['blendedBackgroundColors'] = [i.to_json() for i in self.blended_background_colors]
|
||||
if self.text_color_opacities is not None:
|
||||
json['textColorOpacities'] = [i for i in self.text_color_opacities]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
node_index=[int(i) for i in json['nodeIndex']],
|
||||
styles=[ArrayOfStrings.from_json(i) for i in json['styles']],
|
||||
bounds=[Rectangle.from_json(i) for i in json['bounds']],
|
||||
text=[StringIndex.from_json(i) for i in json['text']],
|
||||
stacking_contexts=RareBooleanData.from_json(json['stackingContexts']),
|
||||
paint_orders=[int(i) for i in json['paintOrders']] if 'paintOrders' in json else None,
|
||||
offset_rects=[Rectangle.from_json(i) for i in json['offsetRects']] if 'offsetRects' in json else None,
|
||||
scroll_rects=[Rectangle.from_json(i) for i in json['scrollRects']] if 'scrollRects' in json else None,
|
||||
client_rects=[Rectangle.from_json(i) for i in json['clientRects']] if 'clientRects' in json else None,
|
||||
blended_background_colors=[StringIndex.from_json(i) for i in json['blendedBackgroundColors']] if 'blendedBackgroundColors' in json else None,
|
||||
text_color_opacities=[float(i) for i in json['textColorOpacities']] if 'textColorOpacities' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class TextBoxSnapshot:
|
||||
'''
|
||||
Table of details of the post layout rendered text positions. The exact layout should not be regarded as
|
||||
stable and may change between versions.
|
||||
'''
|
||||
#: Index of the layout tree node that owns this box collection.
|
||||
layout_index: typing.List[int]
|
||||
|
||||
#: The absolute position bounding box.
|
||||
bounds: typing.List[Rectangle]
|
||||
|
||||
#: The starting index in characters, for this post layout textbox substring. Characters that
|
||||
#: would be represented as a surrogate pair in UTF-16 have length 2.
|
||||
start: typing.List[int]
|
||||
|
||||
#: The number of characters in this post layout textbox substring. Characters that would be
|
||||
#: represented as a surrogate pair in UTF-16 have length 2.
|
||||
length: typing.List[int]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['layoutIndex'] = [i for i in self.layout_index]
|
||||
json['bounds'] = [i.to_json() for i in self.bounds]
|
||||
json['start'] = [i for i in self.start]
|
||||
json['length'] = [i for i in self.length]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
layout_index=[int(i) for i in json['layoutIndex']],
|
||||
bounds=[Rectangle.from_json(i) for i in json['bounds']],
|
||||
start=[int(i) for i in json['start']],
|
||||
length=[int(i) for i in json['length']],
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables DOM snapshot agent for the given page.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMSnapshot.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables DOM snapshot agent for the given page.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMSnapshot.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_snapshot(
|
||||
computed_style_whitelist: typing.List[str],
|
||||
include_event_listeners: typing.Optional[bool] = None,
|
||||
include_paint_order: typing.Optional[bool] = None,
|
||||
include_user_agent_shadow_tree: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[DOMNode], typing.List[LayoutTreeNode], typing.List[ComputedStyle]]]:
|
||||
'''
|
||||
Returns a document snapshot, including the full DOM tree of the root node (including iframes,
|
||||
template contents, and imported documents) in a flattened array, as well as layout and
|
||||
white-listed computed style information for the nodes. Shadow DOM in the returned DOM tree is
|
||||
flattened.
|
||||
|
||||
:param computed_style_whitelist: Whitelist of computed styles to return.
|
||||
:param include_event_listeners: *(Optional)* Whether or not to retrieve details of DOM listeners (default false).
|
||||
:param include_paint_order: *(Optional)* Whether to determine and include the paint order index of LayoutTreeNodes (default false).
|
||||
:param include_user_agent_shadow_tree: *(Optional)* Whether to include UA shadow tree in the snapshot (default false).
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **domNodes** - The nodes in the DOM tree. The DOMNode at index 0 corresponds to the root document.
|
||||
1. **layoutTreeNodes** - The nodes in the layout tree.
|
||||
2. **computedStyles** - Whitelisted ComputedStyle properties for each node in the layout tree.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['computedStyleWhitelist'] = [i for i in computed_style_whitelist]
|
||||
if include_event_listeners is not None:
|
||||
params['includeEventListeners'] = include_event_listeners
|
||||
if include_paint_order is not None:
|
||||
params['includePaintOrder'] = include_paint_order
|
||||
if include_user_agent_shadow_tree is not None:
|
||||
params['includeUserAgentShadowTree'] = include_user_agent_shadow_tree
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMSnapshot.getSnapshot',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[DOMNode.from_json(i) for i in json['domNodes']],
|
||||
[LayoutTreeNode.from_json(i) for i in json['layoutTreeNodes']],
|
||||
[ComputedStyle.from_json(i) for i in json['computedStyles']]
|
||||
)
|
||||
|
||||
|
||||
def capture_snapshot(
|
||||
computed_styles: typing.List[str],
|
||||
include_paint_order: typing.Optional[bool] = None,
|
||||
include_dom_rects: typing.Optional[bool] = None,
|
||||
include_blended_background_colors: typing.Optional[bool] = None,
|
||||
include_text_color_opacities: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[DocumentSnapshot], typing.List[str]]]:
|
||||
'''
|
||||
Returns a document snapshot, including the full DOM tree of the root node (including iframes,
|
||||
template contents, and imported documents) in a flattened array, as well as layout and
|
||||
white-listed computed style information for the nodes. Shadow DOM in the returned DOM tree is
|
||||
flattened.
|
||||
|
||||
:param computed_styles: Whitelist of computed styles to return.
|
||||
:param include_paint_order: *(Optional)* Whether to include layout object paint orders into the snapshot.
|
||||
:param include_dom_rects: *(Optional)* Whether to include DOM rectangles (offsetRects, clientRects, scrollRects) into the snapshot
|
||||
:param include_blended_background_colors: **(EXPERIMENTAL)** *(Optional)* Whether to include blended background colors in the snapshot (default: false). Blended background color is achieved by blending background colors of all elements that overlap with the current element.
|
||||
:param include_text_color_opacities: **(EXPERIMENTAL)** *(Optional)* Whether to include text color opacity in the snapshot (default: false). An element might have the opacity property set that affects the text color of the element. The final text color opacity is computed based on the opacity of all overlapping elements.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **documents** - The nodes in the DOM tree. The DOMNode at index 0 corresponds to the root document.
|
||||
1. **strings** - Shared string table that all string properties refer to with indexes.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['computedStyles'] = [i for i in computed_styles]
|
||||
if include_paint_order is not None:
|
||||
params['includePaintOrder'] = include_paint_order
|
||||
if include_dom_rects is not None:
|
||||
params['includeDOMRects'] = include_dom_rects
|
||||
if include_blended_background_colors is not None:
|
||||
params['includeBlendedBackgroundColors'] = include_blended_background_colors
|
||||
if include_text_color_opacities is not None:
|
||||
params['includeTextColorOpacities'] = include_text_color_opacities
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMSnapshot.captureSnapshot',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[DocumentSnapshot.from_json(i) for i in json['documents']],
|
||||
[str(i) for i in json['strings']]
|
||||
)
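# Hedged driver sketch (not part of the generated module): every command wrapper in
# this module is a generator that yields one JSON-RPC request dict and expects the
# CDP response payload to be sent back into it. ``send_over_websocket`` is a
# placeholder for whatever transport the client actually provides.
def _run_command(command, send_over_websocket):
    request = next(command)              # the dict produced by ``yield cmd_dict``
    response = send_over_websocket(request)
    try:
        command.send(response)           # resumes execution after ``json = yield cmd_dict``
    except StopIteration as stop:
        return stop.value                # the command's parsed return value, if any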
|
||||
@@ -0,0 +1,201 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DOMStorage (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class StorageId:
|
||||
'''
|
||||
DOM Storage identifier.
|
||||
'''
|
||||
#: Security origin for the storage.
|
||||
security_origin: str
|
||||
|
||||
#: Whether the storage is local storage (not session storage).
|
||||
is_local_storage: bool
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['securityOrigin'] = self.security_origin
|
||||
json['isLocalStorage'] = self.is_local_storage
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
security_origin=str(json['securityOrigin']),
|
||||
is_local_storage=bool(json['isLocalStorage']),
|
||||
)
|
||||
|
||||
|
||||
class Item(list):
|
||||
'''
|
||||
DOM Storage item.
|
||||
'''
|
||||
def to_json(self) -> typing.List[str]:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: typing.List[str]) -> Item:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'Item({})'.format(super().__repr__())
|
||||
|
||||
|
||||
def clear(
|
||||
storage_id: StorageId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param storage_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['storageId'] = storage_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.clear',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables storage tracking, preventing storage events from being sent to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables storage tracking; storage events will now be delivered to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_dom_storage_items(
|
||||
storage_id: StorageId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Item]]:
|
||||
'''
|
||||
:param storage_id:
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['storageId'] = storage_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.getDOMStorageItems',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [Item.from_json(i) for i in json['entries']]
|
||||
|
||||
|
||||
def remove_dom_storage_item(
|
||||
storage_id: StorageId,
|
||||
key: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param storage_id:
|
||||
:param key:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['storageId'] = storage_id.to_json()
|
||||
params['key'] = key
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.removeDOMStorageItem',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_dom_storage_item(
|
||||
storage_id: StorageId,
|
||||
key: str,
|
||||
value: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param storage_id:
|
||||
:param key:
|
||||
:param value:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['storageId'] = storage_id.to_json()
|
||||
params['key'] = key
|
||||
params['value'] = value
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.setDOMStorageItem',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemAdded')
|
||||
@dataclass
|
||||
class DomStorageItemAdded:
|
||||
storage_id: StorageId
|
||||
key: str
|
||||
new_value: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemAdded:
|
||||
return cls(
|
||||
storage_id=StorageId.from_json(json['storageId']),
|
||||
key=str(json['key']),
|
||||
new_value=str(json['newValue'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemRemoved')
|
||||
@dataclass
|
||||
class DomStorageItemRemoved:
|
||||
storage_id: StorageId
|
||||
key: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemRemoved:
|
||||
return cls(
|
||||
storage_id=StorageId.from_json(json['storageId']),
|
||||
key=str(json['key'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemUpdated')
|
||||
@dataclass
|
||||
class DomStorageItemUpdated:
|
||||
storage_id: StorageId
|
||||
key: str
|
||||
old_value: str
|
||||
new_value: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemUpdated:
|
||||
return cls(
|
||||
storage_id=StorageId.from_json(json['storageId']),
|
||||
key=str(json['key']),
|
||||
old_value=str(json['oldValue']),
|
||||
new_value=str(json['newValue'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemsCleared')
|
||||
@dataclass
|
||||
class DomStorageItemsCleared:
|
||||
storage_id: StorageId
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemsCleared:
|
||||
return cls(
|
||||
storage_id=StorageId.from_json(json['storageId'])
|
||||
)
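# Hedged usage sketch (not part of the generated module): composing the DOMStorage
# wrappers defined above. ``execute`` stands in for whatever command executor the
# client provides; it must drive the generator protocol these wrappers use.
def _local_storage_roundtrip(execute) -> typing.List[Item]:
    storage = StorageId(security_origin='https://example.com', is_local_storage=True)
    execute(set_dom_storage_item(storage, 'theme', 'dark'))
    items = execute(get_dom_storage_items(storage))
    execute(remove_dom_storage_item(storage, 'theme'))
    return items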
|
||||
@@ -0,0 +1,785 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Emulation
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import network
|
||||
from . import page
|
||||
|
||||
|
||||
@dataclass
|
||||
class ScreenOrientation:
|
||||
'''
|
||||
Screen orientation.
|
||||
'''
|
||||
#: Orientation type.
|
||||
type_: str
|
||||
|
||||
#: Orientation angle.
|
||||
angle: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_
|
||||
json['angle'] = self.angle
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=str(json['type']),
|
||||
angle=int(json['angle']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class DisplayFeature:
|
||||
#: Orientation of a display feature in relation to screen
|
||||
orientation: str
|
||||
|
||||
#: The offset from the screen origin in either the x (for vertical
|
||||
#: orientation) or y (for horizontal orientation) direction.
|
||||
offset: int
|
||||
|
||||
#: A display feature may mask content such that it is not physically
|
||||
#: displayed - this length along with the offset describes this area.
|
||||
#: A display feature that only splits content will have a 0 mask_length.
|
||||
mask_length: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['orientation'] = self.orientation
|
||||
json['offset'] = self.offset
|
||||
json['maskLength'] = self.mask_length
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
orientation=str(json['orientation']),
|
||||
offset=int(json['offset']),
|
||||
mask_length=int(json['maskLength']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class MediaFeature:
|
||||
name: str
|
||||
|
||||
value: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
value=str(json['value']),
|
||||
)
|
||||
|
||||
|
||||
class VirtualTimePolicy(enum.Enum):
|
||||
'''
|
||||
advance: If the scheduler runs out of immediate work, the virtual time base may fast forward to
|
||||
allow the next delayed task (if any) to run; pause: The virtual time base may not advance;
|
||||
pauseIfNetworkFetchesPending: The virtual time base may not advance if there are any pending
|
||||
resource fetches.
|
||||
'''
|
||||
ADVANCE = "advance"
|
||||
PAUSE = "pause"
|
||||
PAUSE_IF_NETWORK_FETCHES_PENDING = "pauseIfNetworkFetchesPending"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class UserAgentBrandVersion:
|
||||
'''
|
||||
Used to specify User Agent Client Hints to emulate. See https://wicg.github.io/ua-client-hints
|
||||
'''
|
||||
brand: str
|
||||
|
||||
version: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['brand'] = self.brand
|
||||
json['version'] = self.version
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
brand=str(json['brand']),
|
||||
version=str(json['version']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class UserAgentMetadata:
|
||||
'''
|
||||
Used to specify User Agent Client Hints to emulate. See https://wicg.github.io/ua-client-hints
|
||||
Missing optional values will be filled in by the target with what it would normally use.
|
||||
'''
|
||||
platform: str
|
||||
|
||||
platform_version: str
|
||||
|
||||
architecture: str
|
||||
|
||||
model: str
|
||||
|
||||
mobile: bool
|
||||
|
||||
brands: typing.Optional[typing.List[UserAgentBrandVersion]] = None
|
||||
|
||||
full_version_list: typing.Optional[typing.List[UserAgentBrandVersion]] = None
|
||||
|
||||
full_version: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['platform'] = self.platform
|
||||
json['platformVersion'] = self.platform_version
|
||||
json['architecture'] = self.architecture
|
||||
json['model'] = self.model
|
||||
json['mobile'] = self.mobile
|
||||
if self.brands is not None:
|
||||
json['brands'] = [i.to_json() for i in self.brands]
|
||||
if self.full_version_list is not None:
|
||||
json['fullVersionList'] = [i.to_json() for i in self.full_version_list]
|
||||
if self.full_version is not None:
|
||||
json['fullVersion'] = self.full_version
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
platform=str(json['platform']),
|
||||
platform_version=str(json['platformVersion']),
|
||||
architecture=str(json['architecture']),
|
||||
model=str(json['model']),
|
||||
mobile=bool(json['mobile']),
|
||||
brands=[UserAgentBrandVersion.from_json(i) for i in json['brands']] if 'brands' in json else None,
|
||||
full_version_list=[UserAgentBrandVersion.from_json(i) for i in json['fullVersionList']] if 'fullVersionList' in json else None,
|
||||
full_version=str(json['fullVersion']) if 'fullVersion' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class DisabledImageType(enum.Enum):
|
||||
'''
|
||||
Enum of image types that can be disabled.
|
||||
'''
|
||||
AVIF = "avif"
|
||||
JXL = "jxl"
|
||||
WEBP = "webp"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
def can_emulate() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,bool]:
|
||||
'''
|
||||
Tells whether emulation is supported.
|
||||
|
||||
:returns: True if emulation is supported.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.canEmulate',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return bool(json['result'])
|
||||
|
||||
|
||||
def clear_device_metrics_override() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears the overridden device metrics.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.clearDeviceMetricsOverride',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def clear_geolocation_override() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears the overridden Geolocation Position and Error.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.clearGeolocationOverride',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def reset_page_scale_factor() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Requests that page scale factor is reset to initial values.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.resetPageScaleFactor',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_focus_emulation_enabled(
|
||||
enabled: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables or disables simulating a focused and active page.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param enabled: Whether to enable or disable focus emulation.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['enabled'] = enabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setFocusEmulationEnabled',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_auto_dark_mode_override(
|
||||
enabled: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Automatically render all web contents using a dark theme.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param enabled: *(Optional)* Whether to enable or disable automatic dark mode. If not specified, any existing override will be cleared.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if enabled is not None:
|
||||
params['enabled'] = enabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setAutoDarkModeOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_cpu_throttling_rate(
|
||||
rate: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables CPU throttling to emulate slow CPUs.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param rate: Throttling rate as a slowdown factor (1 is no throttle, 2 is 2x slowdown, etc).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['rate'] = rate
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setCPUThrottlingRate',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_default_background_color_override(
|
||||
color: typing.Optional[dom.RGBA] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets or clears an override of the default background color of the frame. This override is used
|
||||
if the content does not specify one.
|
||||
|
||||
:param color: *(Optional)* RGBA of the default background color. If not specified, any existing override will be cleared.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if color is not None:
|
||||
params['color'] = color.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setDefaultBackgroundColorOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_device_metrics_override(
|
||||
width: int,
|
||||
height: int,
|
||||
device_scale_factor: float,
|
||||
mobile: bool,
|
||||
scale: typing.Optional[float] = None,
|
||||
screen_width: typing.Optional[int] = None,
|
||||
screen_height: typing.Optional[int] = None,
|
||||
position_x: typing.Optional[int] = None,
|
||||
position_y: typing.Optional[int] = None,
|
||||
dont_set_visible_size: typing.Optional[bool] = None,
|
||||
screen_orientation: typing.Optional[ScreenOrientation] = None,
|
||||
viewport: typing.Optional[page.Viewport] = None,
|
||||
display_feature: typing.Optional[DisplayFeature] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides the values of device screen dimensions (window.screen.width, window.screen.height,
|
||||
window.innerWidth, window.innerHeight, and "device-width"/"device-height"-related CSS media
|
||||
query results).
|
||||
|
||||
:param width: Overriding width value in pixels (minimum 0, maximum 10000000). 0 disables the override.
|
||||
:param height: Overriding height value in pixels (minimum 0, maximum 10000000). 0 disables the override.
|
||||
:param device_scale_factor: Overriding device scale factor value. 0 disables the override.
|
||||
:param mobile: Whether to emulate mobile device. This includes viewport meta tag, overlay scrollbars, text autosizing and more.
|
||||
:param scale: **(EXPERIMENTAL)** *(Optional)* Scale to apply to resulting view image.
|
||||
:param screen_width: **(EXPERIMENTAL)** *(Optional)* Overriding screen width value in pixels (minimum 0, maximum 10000000).
|
||||
:param screen_height: **(EXPERIMENTAL)** *(Optional)* Overriding screen height value in pixels (minimum 0, maximum 10000000).
|
||||
:param position_x: **(EXPERIMENTAL)** *(Optional)* Overriding view X position on screen in pixels (minimum 0, maximum 10000000).
|
||||
:param position_y: **(EXPERIMENTAL)** *(Optional)* Overriding view Y position on screen in pixels (minimum 0, maximum 10000000).
|
||||
:param dont_set_visible_size: **(EXPERIMENTAL)** *(Optional)* Do not set visible view size, rely upon explicit setVisibleSize call.
|
||||
:param screen_orientation: *(Optional)* Screen orientation override.
|
||||
:param viewport: **(EXPERIMENTAL)** *(Optional)* If set, the visible area of the page will be overridden to this viewport. This viewport change is not observed by the page, e.g. viewport-relative elements do not change positions.
|
||||
:param display_feature: **(EXPERIMENTAL)** *(Optional)* If set, the display feature of a multi-segment screen. If not set, multi-segment support is turned-off.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['width'] = width
|
||||
params['height'] = height
|
||||
params['deviceScaleFactor'] = device_scale_factor
|
||||
params['mobile'] = mobile
|
||||
if scale is not None:
|
||||
params['scale'] = scale
|
||||
if screen_width is not None:
|
||||
params['screenWidth'] = screen_width
|
||||
if screen_height is not None:
|
||||
params['screenHeight'] = screen_height
|
||||
if position_x is not None:
|
||||
params['positionX'] = position_x
|
||||
if position_y is not None:
|
||||
params['positionY'] = position_y
|
||||
if dont_set_visible_size is not None:
|
||||
params['dontSetVisibleSize'] = dont_set_visible_size
|
||||
if screen_orientation is not None:
|
||||
params['screenOrientation'] = screen_orientation.to_json()
|
||||
if viewport is not None:
|
||||
params['viewport'] = viewport.to_json()
|
||||
if display_feature is not None:
|
||||
params['displayFeature'] = display_feature.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setDeviceMetricsOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
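# Hedged usage sketch (not part of the generated module): emulating a portrait,
# phone-sized viewport with the command above. ``execute`` is a placeholder for
# the client's command executor.
def _emulate_phone_viewport(execute):
    execute(set_device_metrics_override(
        width=390,
        height=844,
        device_scale_factor=3.0,
        mobile=True,
        screen_orientation=ScreenOrientation(type_='portraitPrimary', angle=0),
    ))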
|
||||
|
||||
|
||||
def set_scrollbars_hidden(
|
||||
hidden: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param hidden: Whether scrollbars should be always hidden.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['hidden'] = hidden
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setScrollbarsHidden',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_document_cookie_disabled(
|
||||
disabled: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param disabled: Whether document.cookie API should be disabled.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['disabled'] = disabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setDocumentCookieDisabled',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_emit_touch_events_for_mouse(
|
||||
enabled: bool,
|
||||
configuration: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param enabled: Whether touch emulation based on mouse input should be enabled.
|
||||
:param configuration: *(Optional)* Touch/gesture events configuration. Default: current platform.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['enabled'] = enabled
|
||||
if configuration is not None:
|
||||
params['configuration'] = configuration
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setEmitTouchEventsForMouse',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_emulated_media(
|
||||
media: typing.Optional[str] = None,
|
||||
features: typing.Optional[typing.List[MediaFeature]] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Emulates the given media type or media feature for CSS media queries.
|
||||
|
||||
:param media: *(Optional)* Media type to emulate. Empty string disables the override.
|
||||
:param features: *(Optional)* Media features to emulate.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if media is not None:
|
||||
params['media'] = media
|
||||
if features is not None:
|
||||
params['features'] = [i.to_json() for i in features]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setEmulatedMedia',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_emulated_vision_deficiency(
|
||||
type_: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Emulates the given vision deficiency.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param type_: Vision deficiency to emulate.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['type'] = type_
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setEmulatedVisionDeficiency',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_geolocation_override(
|
||||
latitude: typing.Optional[float] = None,
|
||||
longitude: typing.Optional[float] = None,
|
||||
accuracy: typing.Optional[float] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides the Geolocation Position or Error. Omitting any of the parameters emulates position
|
||||
unavailable.
|
||||
|
||||
:param latitude: *(Optional)* Mock latitude
|
||||
:param longitude: *(Optional)* Mock longitude
|
||||
:param accuracy: *(Optional)* Mock accuracy
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if latitude is not None:
|
||||
params['latitude'] = latitude
|
||||
if longitude is not None:
|
||||
params['longitude'] = longitude
|
||||
if accuracy is not None:
|
||||
params['accuracy'] = accuracy
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setGeolocationOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_idle_override(
|
||||
is_user_active: bool,
|
||||
is_screen_unlocked: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides the Idle state.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param is_user_active: Mock isUserActive
|
||||
:param is_screen_unlocked: Mock isScreenUnlocked
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['isUserActive'] = is_user_active
|
||||
params['isScreenUnlocked'] = is_screen_unlocked
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setIdleOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def clear_idle_override() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears Idle state overrides.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.clearIdleOverride',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_navigator_overrides(
|
||||
platform: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides the value returned by the JavaScript navigator object.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param platform: The platform navigator.platform should return.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['platform'] = platform
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setNavigatorOverrides',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_page_scale_factor(
|
||||
page_scale_factor: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets a specified page scale factor.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param page_scale_factor: Page scale factor.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['pageScaleFactor'] = page_scale_factor
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setPageScaleFactor',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_script_execution_disabled(
|
||||
value: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Switches script execution in the page.
|
||||
|
||||
:param value: Whether script execution should be disabled in the page.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['value'] = value
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setScriptExecutionDisabled',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_touch_emulation_enabled(
|
||||
enabled: bool,
|
||||
max_touch_points: typing.Optional[int] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables touch events on platforms that do not support them.
|
||||
|
||||
:param enabled: Whether the touch event emulation should be enabled.
|
||||
:param max_touch_points: *(Optional)* Maximum touch points supported. Defaults to one.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['enabled'] = enabled
|
||||
if max_touch_points is not None:
|
||||
params['maxTouchPoints'] = max_touch_points
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setTouchEmulationEnabled',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_virtual_time_policy(
|
||||
policy: VirtualTimePolicy,
|
||||
budget: typing.Optional[float] = None,
|
||||
max_virtual_time_task_starvation_count: typing.Optional[int] = None,
|
||||
initial_virtual_time: typing.Optional[network.TimeSinceEpoch] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,float]:
|
||||
'''
|
||||
Turns on virtual time for all frames (replacing real-time with a synthetic time source) and sets
|
||||
the current virtual time policy. Note this supersedes any previous time budget.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param policy:
|
||||
:param budget: *(Optional)* If set, after this many virtual milliseconds have elapsed virtual time will be paused and a virtualTimeBudgetExpired event is sent.
|
||||
:param max_virtual_time_task_starvation_count: *(Optional)* If set, this specifies the maximum number of tasks that can be run before virtual time is forced forwards to prevent deadlock.
|
||||
:param initial_virtual_time: *(Optional)* If set, base::Time::Now will be overridden to initially return this value.
|
||||
:returns: Absolute timestamp at which virtual time was first enabled (up time in milliseconds).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['policy'] = policy.to_json()
|
||||
if budget is not None:
|
||||
params['budget'] = budget
|
||||
if max_virtual_time_task_starvation_count is not None:
|
||||
params['maxVirtualTimeTaskStarvationCount'] = max_virtual_time_task_starvation_count
|
||||
if initial_virtual_time is not None:
|
||||
params['initialVirtualTime'] = initial_virtual_time.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setVirtualTimePolicy',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return float(json['virtualTimeTicksBase'])
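# Hedged usage sketch (not part of the generated module): pausing virtual time with
# a one-second budget and capturing the returned base timestamp. ``execute`` is a
# placeholder for the client's command executor.
def _pause_virtual_time(execute) -> float:
    return execute(set_virtual_time_policy(VirtualTimePolicy.PAUSE, budget=1000.0))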
|
||||
|
||||
|
||||
def set_locale_override(
|
||||
locale: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides default host system locale with the specified one.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param locale: *(Optional)* ICU style C locale (e.g. "en_US"). If not specified or empty, disables the override and restores default host system locale.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if locale is not None:
|
||||
params['locale'] = locale
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setLocaleOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_timezone_override(
|
||||
timezone_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides default host system timezone with the specified one.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param timezone_id: The timezone identifier. If empty, disables the override and restores default host system timezone.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['timezoneId'] = timezone_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setTimezoneOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_visible_size(
|
||||
width: int,
|
||||
height: int
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Resizes the frame/viewport of the page. Note that this does not affect the frame's container
|
||||
(e.g. browser window). Can be used to produce screenshots of the specified size. Not supported
|
||||
on Android.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param width: Frame width (DIP).
|
||||
:param height: Frame height (DIP).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['width'] = width
|
||||
params['height'] = height
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setVisibleSize',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_disabled_image_types(
|
||||
image_types: typing.List[DisabledImageType]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param image_types: Image types to disable.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['imageTypes'] = [i.to_json() for i in image_types]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setDisabledImageTypes',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_user_agent_override(
|
||||
user_agent: str,
|
||||
accept_language: typing.Optional[str] = None,
|
||||
platform: typing.Optional[str] = None,
|
||||
user_agent_metadata: typing.Optional[UserAgentMetadata] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Allows overriding user agent with the given string.
|
||||
|
||||
:param user_agent: User agent to use.
|
||||
:param accept_language: *(Optional)* Browser language to emulate.
|
||||
:param platform: *(Optional)* The platform navigator.platform should return.
|
||||
:param user_agent_metadata: **(EXPERIMENTAL)** *(Optional)* To be sent in Sec-CH-UA-* headers and returned in navigator.userAgentData
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['userAgent'] = user_agent
|
||||
if accept_language is not None:
|
||||
params['acceptLanguage'] = accept_language
|
||||
if platform is not None:
|
||||
params['platform'] = platform
|
||||
if user_agent_metadata is not None:
|
||||
params['userAgentMetadata'] = user_agent_metadata.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setUserAgentOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
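# Hedged usage sketch (not part of the generated module): overriding the user agent
# string together with structured client-hint metadata, using only types defined in
# this module. ``execute`` is a placeholder for the client's command executor.
def _spoof_user_agent(execute):
    metadata = UserAgentMetadata(
        platform='Linux',
        platform_version='6.1',
        architecture='x86_64',
        model='',
        mobile=False,
        brands=[UserAgentBrandVersion(brand='Chromium', version='100')],
    )
    execute(set_user_agent_override(
        user_agent='Mozilla/5.0 (X11; Linux x86_64) TestAgent/1.0',
        accept_language='en-US',
        user_agent_metadata=metadata,
    ))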
|
||||
|
||||
|
||||
@event_class('Emulation.virtualTimeBudgetExpired')
|
||||
@dataclass
|
||||
class VirtualTimeBudgetExpired:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
Notification sent after the virtual time budget for the current VirtualTimePolicy has run out.
|
||||
'''
|
||||
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> VirtualTimeBudgetExpired:
|
||||
return cls(
|
||||
|
||||
)
|
||||
@@ -0,0 +1,44 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: EventBreakpoints (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
def set_instrumentation_breakpoint(
|
||||
event_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular native event.
|
||||
|
||||
:param event_name: Instrumentation name to stop on.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'EventBreakpoints.setInstrumentationBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def remove_instrumentation_breakpoint(
|
||||
event_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes breakpoint on particular native event.
|
||||
|
||||
:param event_name: Instrumentation name to stop on.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'EventBreakpoints.removeInstrumentationBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
@@ -0,0 +1,490 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Fetch
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import io
|
||||
from . import network
|
||||
from . import page
|
||||
|
||||
|
||||
class RequestId(str):
|
||||
'''
|
||||
Unique request identifier.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> RequestId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'RequestId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class RequestStage(enum.Enum):
|
||||
'''
|
||||
Stages of the request to handle. Request will intercept before the request is
|
||||
sent. Response will intercept after the response is received (but before response
|
||||
body is received).
|
||||
'''
|
||||
REQUEST = "Request"
|
||||
RESPONSE = "Response"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class RequestPattern:
|
||||
#: Wildcards (``'*'`` -> zero or more, ``'?'`` -> exactly one) are allowed. Escape character is
|
||||
#: backslash. Omitting is equivalent to ``"*"``.
|
||||
url_pattern: typing.Optional[str] = None
|
||||
|
||||
#: If set, only requests for matching resource types will be intercepted.
|
||||
resource_type: typing.Optional[network.ResourceType] = None
|
||||
|
||||
#: Stage at which to begin intercepting requests. Default is Request.
|
||||
request_stage: typing.Optional[RequestStage] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
if self.url_pattern is not None:
|
||||
json['urlPattern'] = self.url_pattern
|
||||
if self.resource_type is not None:
|
||||
json['resourceType'] = self.resource_type.to_json()
|
||||
if self.request_stage is not None:
|
||||
json['requestStage'] = self.request_stage.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
url_pattern=str(json['urlPattern']) if 'urlPattern' in json else None,
|
||||
resource_type=network.ResourceType.from_json(json['resourceType']) if 'resourceType' in json else None,
|
||||
request_stage=RequestStage.from_json(json['requestStage']) if 'requestStage' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class HeaderEntry:
|
||||
'''
|
||||
Response HTTP header entry
|
||||
'''
|
||||
name: str
|
||||
|
||||
value: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
value=str(json['value']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AuthChallenge:
|
||||
'''
|
||||
Authorization challenge for HTTP status code 401 or 407.
|
||||
'''
|
||||
#: Origin of the challenger.
|
||||
origin: str
|
||||
|
||||
#: The authentication scheme used, such as basic or digest
|
||||
scheme: str
|
||||
|
||||
#: The realm of the challenge. May be empty.
|
||||
realm: str
|
||||
|
||||
#: Source of the authentication challenge.
|
||||
source: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['origin'] = self.origin
|
||||
json['scheme'] = self.scheme
|
||||
json['realm'] = self.realm
|
||||
if self.source is not None:
|
||||
json['source'] = self.source
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
origin=str(json['origin']),
|
||||
scheme=str(json['scheme']),
|
||||
realm=str(json['realm']),
|
||||
source=str(json['source']) if 'source' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AuthChallengeResponse:
|
||||
'''
|
||||
Response to an AuthChallenge.
|
||||
'''
|
||||
#: The decision on what to do in response to the authorization challenge. Default means
|
||||
#: deferring to the default behavior of the net stack, which will likely either cancel the
#: authentication or display a popup dialog box.
|
||||
response: str
|
||||
|
||||
#: The username to provide, possibly empty. Should only be set if response is
|
||||
#: ProvideCredentials.
|
||||
username: typing.Optional[str] = None
|
||||
|
||||
#: The password to provide, possibly empty. Should only be set if response is
|
||||
#: ProvideCredentials.
|
||||
password: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['response'] = self.response
|
||||
if self.username is not None:
|
||||
json['username'] = self.username
|
||||
if self.password is not None:
|
||||
json['password'] = self.password
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
response=str(json['response']),
|
||||
username=str(json['username']) if 'username' in json else None,
|
||||
password=str(json['password']) if 'password' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables the fetch domain.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Fetch.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable(
|
||||
patterns: typing.Optional[typing.List[RequestPattern]] = None,
|
||||
handle_auth_requests: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables issuing of requestPaused events. A request will be paused until client
|
||||
calls one of failRequest, fulfillRequest or continueRequest/continueWithAuth.
|
||||
|
||||
:param patterns: *(Optional)* If specified, only requests matching any of these patterns will produce fetchRequested event and will be paused until clients response. If not set, all requests will be affected.
|
||||
:param handle_auth_requests: *(Optional)* If true, authRequired events will be issued and requests will be paused expecting a call to continueWithAuth.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if patterns is not None:
|
||||
params['patterns'] = [i.to_json() for i in patterns]
|
||||
if handle_auth_requests is not None:
|
||||
params['handleAuthRequests'] = handle_auth_requests
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Fetch.enable',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
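

# --- Editor's note (illustrative sketch, not generated code): the pausing
# workflow described above, limited to API responses. ``execute`` is a
# hypothetical callable that drives one of these command generators against a
# live CDP session (see the runner sketch in the EventBreakpoints listing); it
# is an assumption, not part of this module. The function is defined but never
# called here.
def _example_enable_interception(execute):
    # Pause only matching requests, and only once the response headers arrive.
    patterns = [RequestPattern(url_pattern='*/api/*',
                               request_stage=RequestStage.RESPONSE)]
    execute(enable(patterns=patterns, handle_auth_requests=True))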
|
||||
|
||||
def fail_request(
|
||||
request_id: RequestId,
|
||||
error_reason: network.ErrorReason
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Causes the request to fail with specified reason.
|
||||
|
||||
:param request_id: An id the client received in requestPaused event.
|
||||
:param error_reason: Causes the request to fail with the given reason.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['requestId'] = request_id.to_json()
|
||||
params['errorReason'] = error_reason.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Fetch.failRequest',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def fulfill_request(
|
||||
request_id: RequestId,
|
||||
response_code: int,
|
||||
response_headers: typing.Optional[typing.List[HeaderEntry]] = None,
|
||||
binary_response_headers: typing.Optional[str] = None,
|
||||
body: typing.Optional[str] = None,
|
||||
response_phrase: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Provides response to the request.
|
||||
|
||||
:param request_id: An id the client received in requestPaused event.
|
||||
:param response_code: An HTTP response code.
|
||||
:param response_headers: *(Optional)* Response headers.
|
||||
:param binary_response_headers: *(Optional)* Alternative way of specifying response headers as a \0-separated series of name: value pairs. Prefer the above method unless you need to represent some non-UTF8 values that can't be transmitted over the protocol as text.
|
||||
:param body: *(Optional)* A response body. If absent, original response body will be used if the request is intercepted at the response stage and empty body will be used if the request is intercepted at the request stage.
|
||||
:param response_phrase: *(Optional)* A textual representation of responseCode. If absent, a standard phrase matching responseCode is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['requestId'] = request_id.to_json()
|
||||
params['responseCode'] = response_code
|
||||
if response_headers is not None:
|
||||
params['responseHeaders'] = [i.to_json() for i in response_headers]
|
||||
if binary_response_headers is not None:
|
||||
params['binaryResponseHeaders'] = binary_response_headers
|
||||
if body is not None:
|
||||
params['body'] = body
|
||||
if response_phrase is not None:
|
||||
params['responsePhrase'] = response_phrase
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Fetch.fulfillRequest',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def continue_request(
|
||||
request_id: RequestId,
|
||||
url: typing.Optional[str] = None,
|
||||
method: typing.Optional[str] = None,
|
||||
post_data: typing.Optional[str] = None,
|
||||
headers: typing.Optional[typing.List[HeaderEntry]] = None,
|
||||
intercept_response: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Continues the request, optionally modifying some of its parameters.
|
||||
|
||||
:param request_id: An id the client received in requestPaused event.
|
||||
:param url: *(Optional)* If set, the request url will be modified in a way that's not observable by the page.
|
||||
:param method: *(Optional)* If set, the request method is overridden.
|
||||
:param post_data: *(Optional)* If set, overrides the post data in the request.
|
||||
:param headers: *(Optional)* If set, overrides the request headers.
|
||||
:param intercept_response: **(EXPERIMENTAL)** *(Optional)* If set, overrides response interception behavior for this request.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['requestId'] = request_id.to_json()
|
||||
if url is not None:
|
||||
params['url'] = url
|
||||
if method is not None:
|
||||
params['method'] = method
|
||||
if post_data is not None:
|
||||
params['postData'] = post_data
|
||||
if headers is not None:
|
||||
params['headers'] = [i.to_json() for i in headers]
|
||||
if intercept_response is not None:
|
||||
params['interceptResponse'] = intercept_response
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Fetch.continueRequest',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def continue_with_auth(
|
||||
request_id: RequestId,
|
||||
auth_challenge_response: AuthChallengeResponse
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Continues a request supplying authChallengeResponse following authRequired event.
|
||||
|
||||
:param request_id: An id the client received in authRequired event.
|
||||
:param auth_challenge_response: Response to the authChallenge.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['requestId'] = request_id.to_json()
|
||||
params['authChallengeResponse'] = auth_challenge_response.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Fetch.continueWithAuth',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
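

# --- Editor's note (illustrative sketch, not generated code): answering an
# ``authRequired`` event with stored credentials. ``execute`` is the same
# hypothetical session executor as in the sketches above; the request id comes
# from the AuthRequired event defined later in this module.
def _example_answer_auth(execute, request_id: RequestId):
    challenge_response = AuthChallengeResponse(response='ProvideCredentials',
                                               username='user',
                                               password='secret')
    execute(continue_with_auth(request_id, challenge_response))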
|
||||
|
||||
def continue_response(
|
||||
request_id: RequestId,
|
||||
response_code: typing.Optional[int] = None,
|
||||
response_phrase: typing.Optional[str] = None,
|
||||
response_headers: typing.Optional[typing.List[HeaderEntry]] = None,
|
||||
binary_response_headers: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Continues loading of the paused response, optionally modifying the
|
||||
response headers. If either responseCode or headers are modified, all of them
|
||||
must be present.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param request_id: An id the client received in requestPaused event.
|
||||
:param response_code: *(Optional)* An HTTP response code. If absent, original response code will be used.
|
||||
:param response_phrase: *(Optional)* A textual representation of responseCode. If absent, a standard phrase matching responseCode is used.
|
||||
:param response_headers: *(Optional)* Response headers. If absent, original response headers will be used.
|
||||
:param binary_response_headers: *(Optional)* Alternative way of specifying response headers as a \0-separated series of name: value pairs. Prefer the above method unless you need to represent some non-UTF8 values that can't be transmitted over the protocol as text.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['requestId'] = request_id.to_json()
|
||||
if response_code is not None:
|
||||
params['responseCode'] = response_code
|
||||
if response_phrase is not None:
|
||||
params['responsePhrase'] = response_phrase
|
||||
if response_headers is not None:
|
||||
params['responseHeaders'] = [i.to_json() for i in response_headers]
|
||||
if binary_response_headers is not None:
|
||||
params['binaryResponseHeaders'] = binary_response_headers
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Fetch.continueResponse',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_response_body(
|
||||
request_id: RequestId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[str, bool]]:
|
||||
'''
|
||||
Causes the body of the response to be received from the server and
|
||||
returned as a single string. May only be issued for a request that
|
||||
is paused in the Response stage and is mutually exclusive with
|
||||
takeResponseBodyForInterceptionAsStream. Calling other methods that
|
||||
affect the request or disabling fetch domain before body is received
|
||||
results in an undefined behavior.
|
||||
|
||||
:param request_id: Identifier for the intercepted request to get body for.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **body** - Response body.
|
||||
1. **base64Encoded** - True, if content was sent as base64.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['requestId'] = request_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Fetch.getResponseBody',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
str(json['body']),
|
||||
bool(json['base64Encoded'])
|
||||
)
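

# --- Editor's note (illustrative sketch, not generated code): the tuple
# returned above may carry a base64-encoded body, so callers typically decode
# it before use. ``execute`` is the same hypothetical session executor used in
# the other sketches.
def _example_read_body(execute, request_id: RequestId) -> bytes:
    import base64
    body, base64_encoded = execute(get_response_body(request_id))
    return base64.b64decode(body) if base64_encoded else body.encode()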
|
||||
|
||||
def take_response_body_as_stream(
|
||||
request_id: RequestId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,io.StreamHandle]:
|
||||
'''
|
||||
Returns a handle to the stream representing the response body.
|
||||
The request must be paused in the HeadersReceived stage.
|
||||
Note that after this command the request can't be continued
|
||||
as is -- client either needs to cancel it or to provide the
|
||||
response body.
|
||||
The stream only supports sequential read, IO.read will fail if the position
|
||||
is specified.
|
||||
This method is mutually exclusive with getResponseBody.
|
||||
Calling other methods that affect the request or disabling fetch
|
||||
domain before body is received results in an undefined behavior.
|
||||
|
||||
:param request_id:
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['requestId'] = request_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Fetch.takeResponseBodyAsStream',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return io.StreamHandle.from_json(json['stream'])
|
||||
|
||||
|
||||
@event_class('Fetch.requestPaused')
|
||||
@dataclass
|
||||
class RequestPaused:
|
||||
'''
|
||||
Issued when the domain is enabled and the request URL matches the
|
||||
specified filter. The request is paused until the client responds
|
||||
with one of continueRequest, failRequest or fulfillRequest.
|
||||
The stage of the request can be determined by presence of responseErrorReason
|
||||
and responseStatusCode -- the request is at the response stage if either
|
||||
of these fields is present and in the request stage otherwise.
|
||||
'''
|
||||
#: Each request the page makes will have a unique id.
|
||||
request_id: RequestId
|
||||
#: The details of the request.
|
||||
request: network.Request
|
||||
#: The id of the frame that initiated the request.
|
||||
frame_id: page.FrameId
|
||||
#: How the requested resource will be used.
|
||||
resource_type: network.ResourceType
|
||||
#: Response error if intercepted at response stage.
|
||||
response_error_reason: typing.Optional[network.ErrorReason]
|
||||
#: Response code if intercepted at response stage.
|
||||
response_status_code: typing.Optional[int]
|
||||
#: Response status text if intercepted at response stage.
|
||||
response_status_text: typing.Optional[str]
|
||||
#: Response headers if intercepted at the response stage.
|
||||
response_headers: typing.Optional[typing.List[HeaderEntry]]
|
||||
#: If the intercepted request had a corresponding Network.requestWillBeSent event fired for it,
|
||||
#: then this networkId will be the same as the requestId present in the requestWillBeSent event.
|
||||
network_id: typing.Optional[RequestId]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> RequestPaused:
|
||||
return cls(
|
||||
request_id=RequestId.from_json(json['requestId']),
|
||||
request=network.Request.from_json(json['request']),
|
||||
frame_id=page.FrameId.from_json(json['frameId']),
|
||||
resource_type=network.ResourceType.from_json(json['resourceType']),
|
||||
response_error_reason=network.ErrorReason.from_json(json['responseErrorReason']) if 'responseErrorReason' in json else None,
|
||||
response_status_code=int(json['responseStatusCode']) if 'responseStatusCode' in json else None,
|
||||
response_status_text=str(json['responseStatusText']) if 'responseStatusText' in json else None,
|
||||
response_headers=[HeaderEntry.from_json(i) for i in json['responseHeaders']] if 'responseHeaders' in json else None,
|
||||
network_id=RequestId.from_json(json['networkId']) if 'networkId' in json else None
|
||||
)
|
||||
|
||||
|
||||
@event_class('Fetch.authRequired')
|
||||
@dataclass
|
||||
class AuthRequired:
|
||||
'''
|
||||
Issued when the domain is enabled with handleAuthRequests set to true.
|
||||
The request is paused until client responds with continueWithAuth.
|
||||
'''
|
||||
#: Each request the page makes will have a unique id.
|
||||
request_id: RequestId
|
||||
#: The details of the request.
|
||||
request: network.Request
|
||||
#: The id of the frame that initiated the request.
|
||||
frame_id: page.FrameId
|
||||
#: How the requested resource will be used.
|
||||
resource_type: network.ResourceType
|
||||
#: Details of the Authorization Challenge encountered.
|
||||
#: If this is set, the client should respond with a continueWithAuth call that
#: carries an AuthChallengeResponse.
|
||||
auth_challenge: AuthChallenge
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AuthRequired:
|
||||
return cls(
|
||||
request_id=RequestId.from_json(json['requestId']),
|
||||
request=network.Request.from_json(json['request']),
|
||||
frame_id=page.FrameId.from_json(json['frameId']),
|
||||
resource_type=network.ResourceType.from_json(json['resourceType']),
|
||||
auth_challenge=AuthChallenge.from_json(json['authChallenge'])
|
||||
)
|
||||
@@ -0,0 +1,116 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: HeadlessExperimental (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class ScreenshotParams:
|
||||
'''
|
||||
Encoding options for a screenshot.
|
||||
'''
|
||||
#: Image compression format (defaults to png).
|
||||
format_: typing.Optional[str] = None
|
||||
|
||||
#: Compression quality from range [0..100] (jpeg only).
|
||||
quality: typing.Optional[int] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
if self.format_ is not None:
|
||||
json['format'] = self.format_
|
||||
if self.quality is not None:
|
||||
json['quality'] = self.quality
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
format_=str(json['format']) if 'format' in json else None,
|
||||
quality=int(json['quality']) if 'quality' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def begin_frame(
|
||||
frame_time_ticks: typing.Optional[float] = None,
|
||||
interval: typing.Optional[float] = None,
|
||||
no_display_updates: typing.Optional[bool] = None,
|
||||
screenshot: typing.Optional[ScreenshotParams] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[bool, typing.Optional[str]]]:
|
||||
'''
|
||||
Sends a BeginFrame to the target and returns when the frame was completed. Optionally captures a
|
||||
screenshot from the resulting frame. Requires that the target was created with
BeginFrameControl enabled. Designed for use with --run-all-compositor-stages-before-draw, see also
|
||||
https://goo.gl/3zHXhB for more background.
|
||||
|
||||
:param frame_time_ticks: *(Optional)* Timestamp of this BeginFrame in Renderer TimeTicks (milliseconds of uptime). If not set, the current time will be used.
|
||||
:param interval: *(Optional)* The interval between BeginFrames that is reported to the compositor, in milliseconds. Defaults to a 60 frames/second interval, i.e. about 16.666 milliseconds.
|
||||
:param no_display_updates: *(Optional)* Whether updates should not be committed and drawn onto the display. False by default. If true, only side effects of the BeginFrame will be run, such as layout and animations, but any visual updates may not be visible on the display or in screenshots.
|
||||
:param screenshot: *(Optional)* If set, a screenshot of the frame will be captured and returned in the response. Otherwise, no screenshot will be captured. Note that capturing a screenshot can fail, for example, during renderer initialization. In such a case, no screenshot data will be returned.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **hasDamage** - Whether the BeginFrame resulted in damage and, thus, a new frame was committed to the display. Reported for diagnostic uses, may be removed in the future.
|
||||
1. **screenshotData** - *(Optional)* Base64-encoded image data of the screenshot, if one was requested and successfully taken.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if frame_time_ticks is not None:
|
||||
params['frameTimeTicks'] = frame_time_ticks
|
||||
if interval is not None:
|
||||
params['interval'] = interval
|
||||
if no_display_updates is not None:
|
||||
params['noDisplayUpdates'] = no_display_updates
|
||||
if screenshot is not None:
|
||||
params['screenshot'] = screenshot.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeadlessExperimental.beginFrame',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
bool(json['hasDamage']),
|
||||
str(json['screenshotData']) if 'screenshotData' in json else None
|
||||
)
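

# --- Editor's note (illustrative sketch, not generated code): driving a single
# frame and saving the optional screenshot. ``execute`` is the same hypothetical
# session executor used in the other sketches; the target is assumed to have
# been created with BeginFrameControl enabled, as the docstring above requires.
def _example_capture_frame(execute, path: str) -> bool:
    import base64
    has_damage, screenshot_data = execute(
        begin_frame(screenshot=ScreenshotParams(format_='png')))
    if screenshot_data is not None:
        with open(path, 'wb') as fh:
            fh.write(base64.b64decode(screenshot_data))
    return has_damage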
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables headless events for the target.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeadlessExperimental.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables headless events for the target.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeadlessExperimental.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('HeadlessExperimental.needsBeginFramesChanged')
|
||||
@dataclass
|
||||
class NeedsBeginFramesChanged:
|
||||
'''
|
||||
Issued when the target starts or stops needing BeginFrames.
|
||||
Deprecated. Issue beginFrame unconditionally instead and use result from
|
||||
beginFrame to detect whether the frames were suppressed.
|
||||
'''
|
||||
#: True if BeginFrames are needed, false otherwise.
|
||||
needs_begin_frames: bool
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> NeedsBeginFramesChanged:
|
||||
return cls(
|
||||
needs_begin_frames=bool(json['needsBeginFrames'])
|
||||
)
|
||||
@@ -0,0 +1,379 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: HeapProfiler (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import runtime
|
||||
|
||||
|
||||
class HeapSnapshotObjectId(str):
|
||||
'''
|
||||
Heap snapshot object id.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> HeapSnapshotObjectId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'HeapSnapshotObjectId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class SamplingHeapProfileNode:
|
||||
'''
|
||||
Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.
|
||||
'''
|
||||
#: Function location.
|
||||
call_frame: runtime.CallFrame
|
||||
|
||||
#: Allocations size in bytes for the node excluding children.
|
||||
self_size: float
|
||||
|
||||
#: Node id. Ids are unique across all profiles collected between startSampling and stopSampling.
|
||||
id_: int
|
||||
|
||||
#: Child nodes.
|
||||
children: typing.List[SamplingHeapProfileNode]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['callFrame'] = self.call_frame.to_json()
|
||||
json['selfSize'] = self.self_size
|
||||
json['id'] = self.id_
|
||||
json['children'] = [i.to_json() for i in self.children]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
call_frame=runtime.CallFrame.from_json(json['callFrame']),
|
||||
self_size=float(json['selfSize']),
|
||||
id_=int(json['id']),
|
||||
children=[SamplingHeapProfileNode.from_json(i) for i in json['children']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class SamplingHeapProfileSample:
|
||||
'''
|
||||
A single sample from a sampling profile.
|
||||
'''
|
||||
#: Allocation size in bytes attributed to the sample.
|
||||
size: float
|
||||
|
||||
#: Id of the corresponding profile tree node.
|
||||
node_id: int
|
||||
|
||||
#: Time-ordered sample ordinal number. It is unique across all profiles retrieved
|
||||
#: between startSampling and stopSampling.
|
||||
ordinal: float
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['size'] = self.size
|
||||
json['nodeId'] = self.node_id
|
||||
json['ordinal'] = self.ordinal
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
size=float(json['size']),
|
||||
node_id=int(json['nodeId']),
|
||||
ordinal=float(json['ordinal']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class SamplingHeapProfile:
|
||||
'''
|
||||
Sampling profile.
|
||||
'''
|
||||
head: SamplingHeapProfileNode
|
||||
|
||||
samples: typing.List[SamplingHeapProfileSample]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['head'] = self.head.to_json()
|
||||
json['samples'] = [i.to_json() for i in self.samples]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
head=SamplingHeapProfileNode.from_json(json['head']),
|
||||
samples=[SamplingHeapProfileSample.from_json(i) for i in json['samples']],
|
||||
)
|
||||
|
||||
|
||||
def add_inspected_heap_object(
|
||||
heap_object_id: HeapSnapshotObjectId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables console to refer to the node with given id via $x (see Command Line API for more details
on $x functions).
|
||||
|
||||
:param heap_object_id: Heap snapshot object id to be accessible by means of $x command line API.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['heapObjectId'] = heap_object_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.addInspectedHeapObject',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def collect_garbage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.collectGarbage',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_heap_object_id(
|
||||
object_id: runtime.RemoteObjectId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,HeapSnapshotObjectId]:
|
||||
'''
|
||||
:param object_id: Identifier of the object to get heap object id for.
|
||||
:returns: Id of the heap snapshot object corresponding to the passed remote object id.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['objectId'] = object_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.getHeapObjectId',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return HeapSnapshotObjectId.from_json(json['heapSnapshotObjectId'])
|
||||
|
||||
|
||||
def get_object_by_heap_object_id(
|
||||
object_id: HeapSnapshotObjectId,
|
||||
object_group: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,runtime.RemoteObject]:
|
||||
'''
|
||||
:param object_id:
|
||||
:param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects.
|
||||
:returns: Evaluation result.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['objectId'] = object_id.to_json()
|
||||
if object_group is not None:
|
||||
params['objectGroup'] = object_group
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.getObjectByHeapObjectId',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return runtime.RemoteObject.from_json(json['result'])
|
||||
|
||||
|
||||
def get_sampling_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SamplingHeapProfile]:
|
||||
'''
|
||||
|
||||
|
||||
:returns: The sampling profile being collected.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.getSamplingProfile',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return SamplingHeapProfile.from_json(json['profile'])
|
||||
|
||||
|
||||
def start_sampling(
|
||||
sampling_interval: typing.Optional[float] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param sampling_interval: *(Optional)* Average sample interval in bytes. Poisson distribution is used for the intervals. The default value is 32768 bytes.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if sampling_interval is not None:
|
||||
params['samplingInterval'] = sampling_interval
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.startSampling',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_tracking_heap_objects(
|
||||
track_allocations: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param track_allocations: *(Optional)*
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if track_allocations is not None:
|
||||
params['trackAllocations'] = track_allocations
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.startTrackingHeapObjects',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def stop_sampling() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SamplingHeapProfile]:
|
||||
'''
|
||||
|
||||
|
||||
:returns: Recorded sampling heap profile.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.stopSampling',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return SamplingHeapProfile.from_json(json['profile'])
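

# --- Editor's note (illustrative sketch, not generated code): a sampling
# session brackets the code under measurement between ``start_sampling`` and
# ``stop_sampling`` and then walks the returned tree. ``execute`` is the same
# hypothetical session executor as in the other sketches.
def _example_total_sampled_bytes(execute) -> float:
    execute(start_sampling(sampling_interval=16384.0))
    # ... exercise the page here ...
    profile = execute(stop_sampling())

    def _walk(node: SamplingHeapProfileNode) -> float:
        return node.self_size + sum(_walk(child) for child in node.children)

    return _walk(profile.head)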
|
||||
|
||||
def stop_tracking_heap_objects(
|
||||
report_progress: typing.Optional[bool] = None,
|
||||
treat_global_objects_as_roots: typing.Optional[bool] = None,
|
||||
capture_numeric_value: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param report_progress: *(Optional)* If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken when the tracking is stopped.
|
||||
:param treat_global_objects_as_roots: *(Optional)*
|
||||
:param capture_numeric_value: *(Optional)* If true, numerical values are included in the snapshot
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if report_progress is not None:
|
||||
params['reportProgress'] = report_progress
|
||||
if treat_global_objects_as_roots is not None:
|
||||
params['treatGlobalObjectsAsRoots'] = treat_global_objects_as_roots
|
||||
if capture_numeric_value is not None:
|
||||
params['captureNumericValue'] = capture_numeric_value
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.stopTrackingHeapObjects',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def take_heap_snapshot(
|
||||
report_progress: typing.Optional[bool] = None,
|
||||
treat_global_objects_as_roots: typing.Optional[bool] = None,
|
||||
capture_numeric_value: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param report_progress: *(Optional)* If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken.
|
||||
:param treat_global_objects_as_roots: *(Optional)* If true, a raw snapshot without artificial roots will be generated
|
||||
:param capture_numeric_value: *(Optional)* If true, numerical values are included in the snapshot
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if report_progress is not None:
|
||||
params['reportProgress'] = report_progress
|
||||
if treat_global_objects_as_roots is not None:
|
||||
params['treatGlobalObjectsAsRoots'] = treat_global_objects_as_roots
|
||||
if capture_numeric_value is not None:
|
||||
params['captureNumericValue'] = capture_numeric_value
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'HeapProfiler.takeHeapSnapshot',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('HeapProfiler.addHeapSnapshotChunk')
|
||||
@dataclass
|
||||
class AddHeapSnapshotChunk:
|
||||
chunk: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AddHeapSnapshotChunk:
|
||||
return cls(
|
||||
chunk=str(json['chunk'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('HeapProfiler.heapStatsUpdate')
|
||||
@dataclass
|
||||
class HeapStatsUpdate:
|
||||
'''
|
||||
If heap objects tracking has been started then backend may send update for one or more fragments
|
||||
'''
|
||||
#: An array of triplets. Each triplet describes a fragment. The first integer is the fragment
|
||||
#: index, the second integer is a total count of objects for the fragment, the third integer is
|
||||
#: a total size of the objects for the fragment.
|
||||
stats_update: typing.List[int]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> HeapStatsUpdate:
|
||||
return cls(
|
||||
stats_update=[int(i) for i in json['statsUpdate']]
|
||||
)
|
||||
|
||||
|
||||
@event_class('HeapProfiler.lastSeenObjectId')
|
||||
@dataclass
|
||||
class LastSeenObjectId:
|
||||
'''
|
||||
If heap objects tracking has been started then backend regularly sends a current value for last
|
||||
seen object id and corresponding timestamp. If there were changes in the heap since the last event
|
||||
then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.
|
||||
'''
|
||||
last_seen_object_id: int
|
||||
timestamp: float
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> LastSeenObjectId:
|
||||
return cls(
|
||||
last_seen_object_id=int(json['lastSeenObjectId']),
|
||||
timestamp=float(json['timestamp'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('HeapProfiler.reportHeapSnapshotProgress')
|
||||
@dataclass
|
||||
class ReportHeapSnapshotProgress:
|
||||
done: int
|
||||
total: int
|
||||
finished: typing.Optional[bool]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> ReportHeapSnapshotProgress:
|
||||
return cls(
|
||||
done=int(json['done']),
|
||||
total=int(json['total']),
|
||||
finished=bool(json['finished']) if 'finished' in json else None
|
||||
)
|
||||
|
||||
|
||||
@event_class('HeapProfiler.resetProfiles')
|
||||
@dataclass
|
||||
class ResetProfiles:
|
||||
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> ResetProfiles:
|
||||
return cls(
|
||||
|
||||
)
|
||||
@@ -0,0 +1,461 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: IndexedDB (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import runtime
|
||||
|
||||
|
||||
@dataclass
|
||||
class DatabaseWithObjectStores:
|
||||
'''
|
||||
Database with an array of object stores.
|
||||
'''
|
||||
#: Database name.
|
||||
name: str
|
||||
|
||||
#: Database version (type is not 'integer', as the standard
|
||||
#: requires the version number to be 'unsigned long long')
|
||||
version: float
|
||||
|
||||
#: Object stores in this database.
|
||||
object_stores: typing.List[ObjectStore]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['version'] = self.version
|
||||
json['objectStores'] = [i.to_json() for i in self.object_stores]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
version=float(json['version']),
|
||||
object_stores=[ObjectStore.from_json(i) for i in json['objectStores']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ObjectStore:
|
||||
'''
|
||||
Object store.
|
||||
'''
|
||||
#: Object store name.
|
||||
name: str
|
||||
|
||||
#: Object store key path.
|
||||
key_path: KeyPath
|
||||
|
||||
#: If true, object store has auto increment flag set.
|
||||
auto_increment: bool
|
||||
|
||||
#: Indexes in this object store.
|
||||
indexes: typing.List[ObjectStoreIndex]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['keyPath'] = self.key_path.to_json()
|
||||
json['autoIncrement'] = self.auto_increment
|
||||
json['indexes'] = [i.to_json() for i in self.indexes]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
key_path=KeyPath.from_json(json['keyPath']),
|
||||
auto_increment=bool(json['autoIncrement']),
|
||||
indexes=[ObjectStoreIndex.from_json(i) for i in json['indexes']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ObjectStoreIndex:
|
||||
'''
|
||||
Object store index.
|
||||
'''
|
||||
#: Index name.
|
||||
name: str
|
||||
|
||||
#: Index key path.
|
||||
key_path: KeyPath
|
||||
|
||||
#: If true, index is unique.
|
||||
unique: bool
|
||||
|
||||
#: If true, index allows multiple entries for a key.
|
||||
multi_entry: bool
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['keyPath'] = self.key_path.to_json()
|
||||
json['unique'] = self.unique
|
||||
json['multiEntry'] = self.multi_entry
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
key_path=KeyPath.from_json(json['keyPath']),
|
||||
unique=bool(json['unique']),
|
||||
multi_entry=bool(json['multiEntry']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Key:
|
||||
'''
|
||||
Key.
|
||||
'''
|
||||
#: Key type.
|
||||
type_: str
|
||||
|
||||
#: Number value.
|
||||
number: typing.Optional[float] = None
|
||||
|
||||
#: String value.
|
||||
string: typing.Optional[str] = None
|
||||
|
||||
#: Date value.
|
||||
date: typing.Optional[float] = None
|
||||
|
||||
#: Array value.
|
||||
array: typing.Optional[typing.List[Key]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_
|
||||
if self.number is not None:
|
||||
json['number'] = self.number
|
||||
if self.string is not None:
|
||||
json['string'] = self.string
|
||||
if self.date is not None:
|
||||
json['date'] = self.date
|
||||
if self.array is not None:
|
||||
json['array'] = [i.to_json() for i in self.array]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=str(json['type']),
|
||||
number=float(json['number']) if 'number' in json else None,
|
||||
string=str(json['string']) if 'string' in json else None,
|
||||
date=float(json['date']) if 'date' in json else None,
|
||||
array=[Key.from_json(i) for i in json['array']] if 'array' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class KeyRange:
|
||||
'''
|
||||
Key range.
|
||||
'''
|
||||
#: If true lower bound is open.
|
||||
lower_open: bool
|
||||
|
||||
#: If true upper bound is open.
|
||||
upper_open: bool
|
||||
|
||||
#: Lower bound.
|
||||
lower: typing.Optional[Key] = None
|
||||
|
||||
#: Upper bound.
|
||||
upper: typing.Optional[Key] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['lowerOpen'] = self.lower_open
|
||||
json['upperOpen'] = self.upper_open
|
||||
if self.lower is not None:
|
||||
json['lower'] = self.lower.to_json()
|
||||
if self.upper is not None:
|
||||
json['upper'] = self.upper.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
lower_open=bool(json['lowerOpen']),
|
||||
upper_open=bool(json['upperOpen']),
|
||||
lower=Key.from_json(json['lower']) if 'lower' in json else None,
|
||||
upper=Key.from_json(json['upper']) if 'upper' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class DataEntry:
|
||||
'''
|
||||
Data entry.
|
||||
'''
|
||||
#: Key object.
|
||||
key: runtime.RemoteObject
|
||||
|
||||
#: Primary key object.
|
||||
primary_key: runtime.RemoteObject
|
||||
|
||||
#: Value object.
|
||||
value: runtime.RemoteObject
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['key'] = self.key.to_json()
|
||||
json['primaryKey'] = self.primary_key.to_json()
|
||||
json['value'] = self.value.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
key=runtime.RemoteObject.from_json(json['key']),
|
||||
primary_key=runtime.RemoteObject.from_json(json['primaryKey']),
|
||||
value=runtime.RemoteObject.from_json(json['value']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class KeyPath:
|
||||
'''
|
||||
Key path.
|
||||
'''
|
||||
#: Key path type.
|
||||
type_: str
|
||||
|
||||
#: String value.
|
||||
string: typing.Optional[str] = None
|
||||
|
||||
#: Array value.
|
||||
array: typing.Optional[typing.List[str]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_
|
||||
if self.string is not None:
|
||||
json['string'] = self.string
|
||||
if self.array is not None:
|
||||
json['array'] = [i for i in self.array]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=str(json['type']),
|
||||
string=str(json['string']) if 'string' in json else None,
|
||||
array=[str(i) for i in json['array']] if 'array' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def clear_object_store(
|
||||
security_origin: str,
|
||||
database_name: str,
|
||||
object_store_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears all entries from an object store.
|
||||
|
||||
:param security_origin: Security origin.
|
||||
:param database_name: Database name.
|
||||
:param object_store_name: Object store name.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['securityOrigin'] = security_origin
|
||||
params['databaseName'] = database_name
|
||||
params['objectStoreName'] = object_store_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IndexedDB.clearObjectStore',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def delete_database(
|
||||
security_origin: str,
|
||||
database_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Deletes a database.
|
||||
|
||||
:param security_origin: Security origin.
|
||||
:param database_name: Database name.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['securityOrigin'] = security_origin
|
||||
params['databaseName'] = database_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IndexedDB.deleteDatabase',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def delete_object_store_entries(
|
||||
security_origin: str,
|
||||
database_name: str,
|
||||
object_store_name: str,
|
||||
key_range: KeyRange
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Deletes a range of entries from an object store.
|
||||
|
||||
:param security_origin:
|
||||
:param database_name:
|
||||
:param object_store_name:
|
||||
:param key_range: Range of entry keys to delete
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['securityOrigin'] = security_origin
|
||||
params['databaseName'] = database_name
|
||||
params['objectStoreName'] = object_store_name
|
||||
params['keyRange'] = key_range.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IndexedDB.deleteObjectStoreEntries',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables events from backend.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IndexedDB.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables events from backend.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IndexedDB.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def request_data(
|
||||
security_origin: str,
|
||||
database_name: str,
|
||||
object_store_name: str,
|
||||
index_name: str,
|
||||
skip_count: int,
|
||||
page_size: int,
|
||||
key_range: typing.Optional[KeyRange] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[DataEntry], bool]]:
|
||||
'''
|
||||
Requests data from object store or index.
|
||||
|
||||
:param security_origin: Security origin.
|
||||
:param database_name: Database name.
|
||||
:param object_store_name: Object store name.
|
||||
:param index_name: Index name, empty string for object store data requests.
|
||||
:param skip_count: Number of records to skip.
|
||||
:param page_size: Number of records to fetch.
|
||||
:param key_range: *(Optional)* Key range.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **objectStoreDataEntries** - Array of object store data entries.
|
||||
1. **hasMore** - If true, there are more entries to fetch in the given range.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['securityOrigin'] = security_origin
|
||||
params['databaseName'] = database_name
|
||||
params['objectStoreName'] = object_store_name
|
||||
params['indexName'] = index_name
|
||||
params['skipCount'] = skip_count
|
||||
params['pageSize'] = page_size
|
||||
if key_range is not None:
|
||||
params['keyRange'] = key_range.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IndexedDB.requestData',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[DataEntry.from_json(i) for i in json['objectStoreDataEntries']],
|
||||
bool(json['hasMore'])
|
||||
)
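

# --- Editor's note (illustrative sketch, not generated code): ``hasMore`` in
# the tuple above is the paging cursor, so callers usually loop until it turns
# false. ``execute`` is the same hypothetical session executor used in the other
# sketches; the function is defined but never called here.
def _example_iter_entries(execute, origin: str, db: str, store: str):
    skip = 0
    while True:
        entries, has_more = execute(request_data(
            security_origin=origin, database_name=db, object_store_name=store,
            index_name='', skip_count=skip, page_size=50))
        yield from entries
        if not has_more:
            break
        skip += len(entries)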
|
||||
|
||||
def get_metadata(
|
||||
security_origin: str,
|
||||
database_name: str,
|
||||
object_store_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[float, float]]:
|
||||
'''
|
||||
Gets metadata of an object store.
|
||||
|
||||
:param security_origin: Security origin.
|
||||
:param database_name: Database name.
|
||||
:param object_store_name: Object store name.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **entriesCount** - the entries count
|
||||
1. **keyGeneratorValue** - the current value of key generator, to become the next inserted key into the object store. Valid if objectStore.autoIncrement is true.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['securityOrigin'] = security_origin
|
||||
params['databaseName'] = database_name
|
||||
params['objectStoreName'] = object_store_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IndexedDB.getMetadata',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
float(json['entriesCount']),
|
||||
float(json['keyGeneratorValue'])
|
||||
)
|
||||
|
||||
|
||||
def request_database(
|
||||
security_origin: str,
|
||||
database_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,DatabaseWithObjectStores]:
|
||||
'''
|
||||
Requests database with given name in given frame.
|
||||
|
||||
:param security_origin: Security origin.
|
||||
:param database_name: Database name.
|
||||
:returns: Database with an array of object stores.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['securityOrigin'] = security_origin
|
||||
params['databaseName'] = database_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IndexedDB.requestDatabase',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return DatabaseWithObjectStores.from_json(json['databaseWithObjectStores'])
|
||||
|
||||
|
||||
def request_database_names(
|
||||
security_origin: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[str]]:
|
||||
'''
|
||||
Requests database names for given security origin.
|
||||
|
||||
:param security_origin: Security origin.
|
||||
:returns: Database names for origin.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['securityOrigin'] = security_origin
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IndexedDB.requestDatabaseNames',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [str(i) for i in json['databaseNames']]
|
||||
@@ -0,0 +1,689 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Input
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class TouchPoint:
|
||||
#: X coordinate of the event relative to the main frame's viewport in CSS pixels.
|
||||
x: float
|
||||
|
||||
#: Y coordinate of the event relative to the main frame's viewport in CSS pixels. 0 refers to
|
||||
#: the top of the viewport and Y increases as it proceeds towards the bottom of the viewport.
|
||||
y: float
|
||||
|
||||
#: X radius of the touch area (default: 1.0).
|
||||
radius_x: typing.Optional[float] = None
|
||||
|
||||
#: Y radius of the touch area (default: 1.0).
|
||||
radius_y: typing.Optional[float] = None
|
||||
|
||||
#: Rotation angle (default: 0.0).
|
||||
rotation_angle: typing.Optional[float] = None
|
||||
|
||||
#: Force (default: 1.0).
|
||||
force: typing.Optional[float] = None
|
||||
|
||||
#: The normalized tangential pressure, which has a range of [-1,1] (default: 0).
|
||||
tangential_pressure: typing.Optional[float] = None
|
||||
|
||||
#: The plane angle between the Y-Z plane and the plane containing both the stylus axis and the
#: Y axis, in degrees of the range [-90,90], a positive tiltX is to the right (default: 0)
|
||||
tilt_x: typing.Optional[int] = None
|
||||
|
||||
#: The plane angle between the X-Z plane and the plane containing both the stylus axis and the
#: X axis, in degrees of the range [-90,90], a positive tiltY is towards the user (default: 0).
|
||||
tilt_y: typing.Optional[int] = None
|
||||
|
||||
#: The clockwise rotation of a pen stylus around its own major axis, in degrees in the range [0,359] (default: 0).
|
||||
twist: typing.Optional[int] = None
|
||||
|
||||
#: Identifier used to track touch sources between events, must be unique within an event.
|
||||
id_: typing.Optional[float] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['x'] = self.x
|
||||
json['y'] = self.y
|
||||
if self.radius_x is not None:
|
||||
json['radiusX'] = self.radius_x
|
||||
if self.radius_y is not None:
|
||||
json['radiusY'] = self.radius_y
|
||||
if self.rotation_angle is not None:
|
||||
json['rotationAngle'] = self.rotation_angle
|
||||
if self.force is not None:
|
||||
json['force'] = self.force
|
||||
if self.tangential_pressure is not None:
|
||||
json['tangentialPressure'] = self.tangential_pressure
|
||||
if self.tilt_x is not None:
|
||||
json['tiltX'] = self.tilt_x
|
||||
if self.tilt_y is not None:
|
||||
json['tiltY'] = self.tilt_y
|
||||
if self.twist is not None:
|
||||
json['twist'] = self.twist
|
||||
if self.id_ is not None:
|
||||
json['id'] = self.id_
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
x=float(json['x']),
|
||||
y=float(json['y']),
|
||||
radius_x=float(json['radiusX']) if 'radiusX' in json else None,
|
||||
radius_y=float(json['radiusY']) if 'radiusY' in json else None,
|
||||
rotation_angle=float(json['rotationAngle']) if 'rotationAngle' in json else None,
|
||||
force=float(json['force']) if 'force' in json else None,
|
||||
tangential_pressure=float(json['tangentialPressure']) if 'tangentialPressure' in json else None,
|
||||
tilt_x=int(json['tiltX']) if 'tiltX' in json else None,
|
||||
tilt_y=int(json['tiltY']) if 'tiltY' in json else None,
|
||||
twist=int(json['twist']) if 'twist' in json else None,
|
||||
id_=float(json['id']) if 'id' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class GestureSourceType(enum.Enum):
|
||||
DEFAULT = "default"
|
||||
TOUCH = "touch"
|
||||
MOUSE = "mouse"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class MouseButton(enum.Enum):
|
||||
NONE = "none"
|
||||
LEFT = "left"
|
||||
MIDDLE = "middle"
|
||||
RIGHT = "right"
|
||||
BACK = "back"
|
||||
FORWARD = "forward"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class TimeSinceEpoch(float):
|
||||
'''
|
||||
UTC time in seconds, counted from January 1, 1970.
|
||||
'''
|
||||
def to_json(self) -> float:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: float) -> TimeSinceEpoch:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'TimeSinceEpoch({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class DragDataItem:
|
||||
#: Mime type of the dragged data.
|
||||
mime_type: str
|
||||
|
||||
#: Depending of the value of ``mimeType``, it contains the dragged link,
|
||||
#: text, HTML markup or any other data.
|
||||
data: str
|
||||
|
||||
#: Title associated with a link. Only valid when ``mimeType`` == "text/uri-list".
|
||||
title: typing.Optional[str] = None
|
||||
|
||||
#: Stores the base URL for the contained markup. Only valid when ``mimeType``
|
||||
#: == "text/html".
|
||||
base_url: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['mimeType'] = self.mime_type
|
||||
json['data'] = self.data
|
||||
if self.title is not None:
|
||||
json['title'] = self.title
|
||||
if self.base_url is not None:
|
||||
json['baseURL'] = self.base_url
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
mime_type=str(json['mimeType']),
|
||||
data=str(json['data']),
|
||||
title=str(json['title']) if 'title' in json else None,
|
||||
base_url=str(json['baseURL']) if 'baseURL' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class DragData:
|
||||
items: typing.List[DragDataItem]
|
||||
|
||||
#: Bit field representing allowed drag operations. Copy = 1, Link = 2, Move = 16
|
||||
drag_operations_mask: int
|
||||
|
||||
#: List of filenames that should be included when dropping
|
||||
files: typing.Optional[typing.List[str]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['items'] = [i.to_json() for i in self.items]
|
||||
json['dragOperationsMask'] = self.drag_operations_mask
|
||||
if self.files is not None:
|
||||
json['files'] = [i for i in self.files]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
items=[DragDataItem.from_json(i) for i in json['items']],
|
||||
drag_operations_mask=int(json['dragOperationsMask']),
|
||||
files=[str(i) for i in json['files']] if 'files' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def dispatch_drag_event(
|
||||
type_: str,
|
||||
x: float,
|
||||
y: float,
|
||||
data: DragData,
|
||||
modifiers: typing.Optional[int] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Dispatches a drag event into the page.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param type_: Type of the drag event.
|
||||
:param x: X coordinate of the event relative to the main frame's viewport in CSS pixels.
|
||||
:param y: Y coordinate of the event relative to the main frame's viewport in CSS pixels. 0 refers to the top of the viewport and Y increases as it proceeds towards the bottom of the viewport.
|
||||
:param data:
|
||||
:param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['type'] = type_
|
||||
params['x'] = x
|
||||
params['y'] = y
|
||||
params['data'] = data.to_json()
|
||||
if modifiers is not None:
|
||||
params['modifiers'] = modifiers
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.dispatchDragEvent',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
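# Editor's note (not generated from the CDP spec): every command in this
# module is a plain generator that yields one command dict and then expects
# the decoded browser response to be sent back into it. The sketch below
# shows one minimal way to drive such a generator; ``send_over_cdp`` is a
# placeholder for whatever transport (e.g. a websocket client) actually
# exchanges JSON with the browser, and is not defined in this module.
def _run_cdp_command(cmd_gen, send_over_cdp):
    request = next(cmd_gen)                    # {'method': ..., 'params': ...}
    try:
        cmd_gen.send(send_over_cdp(request))   # feed the browser's result back in
    except StopIteration as stop:
        return stop.value                      # parsed return value, if any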
|
||||
|
||||
|
||||
def dispatch_key_event(
|
||||
type_: str,
|
||||
modifiers: typing.Optional[int] = None,
|
||||
timestamp: typing.Optional[TimeSinceEpoch] = None,
|
||||
text: typing.Optional[str] = None,
|
||||
unmodified_text: typing.Optional[str] = None,
|
||||
key_identifier: typing.Optional[str] = None,
|
||||
code: typing.Optional[str] = None,
|
||||
key: typing.Optional[str] = None,
|
||||
windows_virtual_key_code: typing.Optional[int] = None,
|
||||
native_virtual_key_code: typing.Optional[int] = None,
|
||||
auto_repeat: typing.Optional[bool] = None,
|
||||
is_keypad: typing.Optional[bool] = None,
|
||||
is_system_key: typing.Optional[bool] = None,
|
||||
location: typing.Optional[int] = None,
|
||||
commands: typing.Optional[typing.List[str]] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Dispatches a key event to the page.
|
||||
|
||||
:param type_: Type of the key event.
|
||||
:param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
|
||||
:param timestamp: *(Optional)* Time at which the event occurred.
|
||||
:param text: *(Optional)* Text as generated by processing a virtual key code with a keyboard layout. Not needed for ``keyUp`` and ``rawKeyDown`` events (default: "").
|
||||
:param unmodified_text: *(Optional)* Text that would have been generated by the keyboard if no modifiers were pressed (except for shift). Useful for shortcut (accelerator) key handling (default: "").
|
||||
:param key_identifier: *(Optional)* Unique key identifier (e.g., 'U+0041') (default: "").
|
||||
:param code: *(Optional)* Unique DOM defined string value for each physical key (e.g., 'KeyA') (default: "").
|
||||
:param key: *(Optional)* Unique DOM defined string value describing the meaning of the key in the context of active modifiers, keyboard layout, etc (e.g., 'AltGr') (default: "").
|
||||
:param windows_virtual_key_code: *(Optional)* Windows virtual key code (default: 0).
|
||||
:param native_virtual_key_code: *(Optional)* Native virtual key code (default: 0).
|
||||
:param auto_repeat: *(Optional)* Whether the event was generated from auto repeat (default: false).
|
||||
:param is_keypad: *(Optional)* Whether the event was generated from the keypad (default: false).
|
||||
:param is_system_key: *(Optional)* Whether the event was a system key event (default: false).
|
||||
:param location: *(Optional)* Whether the event was from the left or right side of the keyboard. 1=Left, 2=Right (default: 0).
|
||||
:param commands: **(EXPERIMENTAL)** *(Optional)* Editing commands to send with the key event (e.g., 'selectAll') (default: []). These are related to but not equal to the command names used in ``document.execCommand`` and NSStandardKeyBindingResponding. See https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/core/editing/commands/editor_command_names.h for valid command names.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['type'] = type_
|
||||
if modifiers is not None:
|
||||
params['modifiers'] = modifiers
|
||||
if timestamp is not None:
|
||||
params['timestamp'] = timestamp.to_json()
|
||||
if text is not None:
|
||||
params['text'] = text
|
||||
if unmodified_text is not None:
|
||||
params['unmodifiedText'] = unmodified_text
|
||||
if key_identifier is not None:
|
||||
params['keyIdentifier'] = key_identifier
|
||||
if code is not None:
|
||||
params['code'] = code
|
||||
if key is not None:
|
||||
params['key'] = key
|
||||
if windows_virtual_key_code is not None:
|
||||
params['windowsVirtualKeyCode'] = windows_virtual_key_code
|
||||
if native_virtual_key_code is not None:
|
||||
params['nativeVirtualKeyCode'] = native_virtual_key_code
|
||||
if auto_repeat is not None:
|
||||
params['autoRepeat'] = auto_repeat
|
||||
if is_keypad is not None:
|
||||
params['isKeypad'] = is_keypad
|
||||
if is_system_key is not None:
|
||||
params['isSystemKey'] = is_system_key
|
||||
if location is not None:
|
||||
params['location'] = location
|
||||
if commands is not None:
|
||||
params['commands'] = [i for i in commands]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.dispatchKeyEvent',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
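# Editor's note: an illustrative sketch of typing the character 'a' as a
# key-down/key-up pair. ``execute`` stands in for a driver of these command
# generators (see the _run_cdp_command sketch above); the event type strings
# and key field values are assumptions based on the CDP Input domain, not
# taken from this module.
def _example_type_a(execute) -> None:
    execute(dispatch_key_event('keyDown', text='a', key='a', code='KeyA',
                               windows_virtual_key_code=65))
    execute(dispatch_key_event('keyUp', key='a', code='KeyA',
                               windows_virtual_key_code=65))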
|
||||
|
||||
|
||||
def insert_text(
|
||||
text: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
This method emulates inserting text that doesn't come from a key press,
|
||||
for example an emoji keyboard or an IME.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param text: The text to insert.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['text'] = text
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.insertText',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
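# Editor's note: insert_text() is the natural choice for characters that no
# single key press produces, such as emoji. Illustrative only; ``execute``
# again stands in for a driver of these command generators.
def _example_insert_emoji(execute) -> None:
    execute(insert_text("🙂"))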
|
||||
|
||||
|
||||
def ime_set_composition(
|
||||
text: str,
|
||||
selection_start: int,
|
||||
selection_end: int,
|
||||
replacement_start: typing.Optional[int] = None,
|
||||
replacement_end: typing.Optional[int] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
This method sets the current candidate text for ime.
|
||||
Use imeCommitComposition to commit the final text.
|
||||
Use imeSetComposition with empty string as text to cancel composition.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param text: The text to insert
|
||||
:param selection_start: selection start
|
||||
:param selection_end: selection end
|
||||
:param replacement_start: *(Optional)* replacement start
|
||||
:param replacement_end: *(Optional)* replacement end
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['text'] = text
|
||||
params['selectionStart'] = selection_start
|
||||
params['selectionEnd'] = selection_end
|
||||
if replacement_start is not None:
|
||||
params['replacementStart'] = replacement_start
|
||||
if replacement_end is not None:
|
||||
params['replacementEnd'] = replacement_end
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.imeSetComposition',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
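# Editor's note: an illustrative composition flow. Per the docstring above,
# sending an empty string cancels the current composition; the candidate
# text and caret positions below are made up for the example, and ``execute``
# stands in for a command driver.
def _example_ime_cancel(execute) -> None:
    execute(ime_set_composition('かん', selection_start=2, selection_end=2))  # candidate text, caret at the end
    execute(ime_set_composition('', selection_start=0, selection_end=0))      # cancel the composition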
|
||||
|
||||
|
||||
def dispatch_mouse_event(
|
||||
type_: str,
|
||||
x: float,
|
||||
y: float,
|
||||
modifiers: typing.Optional[int] = None,
|
||||
timestamp: typing.Optional[TimeSinceEpoch] = None,
|
||||
button: typing.Optional[MouseButton] = None,
|
||||
buttons: typing.Optional[int] = None,
|
||||
click_count: typing.Optional[int] = None,
|
||||
force: typing.Optional[float] = None,
|
||||
tangential_pressure: typing.Optional[float] = None,
|
||||
tilt_x: typing.Optional[int] = None,
|
||||
tilt_y: typing.Optional[int] = None,
|
||||
twist: typing.Optional[int] = None,
|
||||
delta_x: typing.Optional[float] = None,
|
||||
delta_y: typing.Optional[float] = None,
|
||||
pointer_type: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Dispatches a mouse event to the page.
|
||||
|
||||
:param type_: Type of the mouse event.
|
||||
:param x: X coordinate of the event relative to the main frame's viewport in CSS pixels.
|
||||
:param y: Y coordinate of the event relative to the main frame's viewport in CSS pixels. 0 refers to the top of the viewport and Y increases as it proceeds towards the bottom of the viewport.
|
||||
:param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
|
||||
:param timestamp: *(Optional)* Time at which the event occurred.
|
||||
:param button: *(Optional)* Mouse button (default: "none").
|
||||
:param buttons: *(Optional)* A number indicating which buttons are pressed on the mouse when a mouse event is triggered. Left=1, Right=2, Middle=4, Back=8, Forward=16, None=0.
|
||||
:param click_count: *(Optional)* Number of times the mouse button was clicked (default: 0).
|
||||
:param force: **(EXPERIMENTAL)** *(Optional)* The normalized pressure, which has a range of [0,1] (default: 0).
|
||||
:param tangential_pressure: **(EXPERIMENTAL)** *(Optional)* The normalized tangential pressure, which has a range of [-1,1] (default: 0).
|
||||
:param tilt_x: **(EXPERIMENTAL)** *(Optional)* The plane angle between the Y-Z plane and the plane containing both the stylus axis and the Y axis, in degrees of the range [-90,90], a positive tiltX is to the right (default: 0).
|
||||
:param tilt_y: **(EXPERIMENTAL)** *(Optional)* The plane angle between the X-Z plane and the plane containing both the stylus axis and the X axis, in degrees of the range [-90,90], a positive tiltY is towards the user (default: 0).
|
||||
:param twist: **(EXPERIMENTAL)** *(Optional)* The clockwise rotation of a pen stylus around its own major axis, in degrees in the range [0,359] (default: 0).
|
||||
:param delta_x: *(Optional)* X delta in CSS pixels for mouse wheel event (default: 0).
|
||||
:param delta_y: *(Optional)* Y delta in CSS pixels for mouse wheel event (default: 0).
|
||||
:param pointer_type: *(Optional)* Pointer type (default: "mouse").
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['type'] = type_
|
||||
params['x'] = x
|
||||
params['y'] = y
|
||||
if modifiers is not None:
|
||||
params['modifiers'] = modifiers
|
||||
if timestamp is not None:
|
||||
params['timestamp'] = timestamp.to_json()
|
||||
if button is not None:
|
||||
params['button'] = button.to_json()
|
||||
if buttons is not None:
|
||||
params['buttons'] = buttons
|
||||
if click_count is not None:
|
||||
params['clickCount'] = click_count
|
||||
if force is not None:
|
||||
params['force'] = force
|
||||
if tangential_pressure is not None:
|
||||
params['tangentialPressure'] = tangential_pressure
|
||||
if tilt_x is not None:
|
||||
params['tiltX'] = tilt_x
|
||||
if tilt_y is not None:
|
||||
params['tiltY'] = tilt_y
|
||||
if twist is not None:
|
||||
params['twist'] = twist
|
||||
if delta_x is not None:
|
||||
params['deltaX'] = delta_x
|
||||
if delta_y is not None:
|
||||
params['deltaY'] = delta_y
|
||||
if pointer_type is not None:
|
||||
params['pointerType'] = pointer_type
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.dispatchMouseEvent',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
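# Editor's note: a single left click is a mousePressed/mouseReleased pair at
# the same coordinates. Illustrative sketch; ``execute`` stands in for a
# command driver (see the _run_cdp_command sketch above) and the event type
# strings are assumptions based on the CDP Input domain.
def _example_left_click(execute, x: float, y: float) -> None:
    execute(dispatch_mouse_event('mousePressed', x, y,
                                 button=MouseButton.LEFT, click_count=1))
    execute(dispatch_mouse_event('mouseReleased', x, y,
                                 button=MouseButton.LEFT, click_count=1))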
|
||||
|
||||
|
||||
def dispatch_touch_event(
|
||||
type_: str,
|
||||
touch_points: typing.List[TouchPoint],
|
||||
modifiers: typing.Optional[int] = None,
|
||||
timestamp: typing.Optional[TimeSinceEpoch] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Dispatches a touch event to the page.
|
||||
|
||||
:param type_: Type of the touch event. TouchEnd and TouchCancel must not contain any touch points, while TouchStart and TouchMove must contain at least one.
|
||||
:param touch_points: Active touch points on the touch device. One event per any changed point (compared to previous touch event in a sequence) is generated, emulating pressing/moving/releasing points one by one.
|
||||
:param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
|
||||
:param timestamp: *(Optional)* Time at which the event occurred.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['type'] = type_
|
||||
params['touchPoints'] = [i.to_json() for i in touch_points]
|
||||
if modifiers is not None:
|
||||
params['modifiers'] = modifiers
|
||||
if timestamp is not None:
|
||||
params['timestamp'] = timestamp.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.dispatchTouchEvent',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
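# Editor's note: an illustrative single-finger tap, assuming TouchPoint
# (defined earlier in this module) accepts at least ``x`` and ``y`` keyword
# arguments; the event type strings are assumptions based on the CDP Input
# domain, and ``execute`` stands in for a command driver.
def _example_single_tap(execute, x: float, y: float) -> None:
    point = TouchPoint(x=x, y=y)
    execute(dispatch_touch_event('touchStart', [point]))
    execute(dispatch_touch_event('touchEnd', []))   # end events carry no touch points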
|
||||
|
||||
|
||||
def emulate_touch_from_mouse_event(
|
||||
type_: str,
|
||||
x: int,
|
||||
y: int,
|
||||
button: MouseButton,
|
||||
timestamp: typing.Optional[TimeSinceEpoch] = None,
|
||||
delta_x: typing.Optional[float] = None,
|
||||
delta_y: typing.Optional[float] = None,
|
||||
modifiers: typing.Optional[int] = None,
|
||||
click_count: typing.Optional[int] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Emulates touch event from the mouse event parameters.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param type_: Type of the mouse event.
|
||||
:param x: X coordinate of the mouse pointer in DIP.
|
||||
:param y: Y coordinate of the mouse pointer in DIP.
|
||||
:param button: Mouse button. Only "none", "left", "right" are supported.
|
||||
:param timestamp: *(Optional)* Time at which the event occurred (default: current time).
|
||||
:param delta_x: *(Optional)* X delta in DIP for mouse wheel event (default: 0).
|
||||
:param delta_y: *(Optional)* Y delta in DIP for mouse wheel event (default: 0).
|
||||
:param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
|
||||
:param click_count: *(Optional)* Number of times the mouse button was clicked (default: 0).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['type'] = type_
|
||||
params['x'] = x
|
||||
params['y'] = y
|
||||
params['button'] = button.to_json()
|
||||
if timestamp is not None:
|
||||
params['timestamp'] = timestamp.to_json()
|
||||
if delta_x is not None:
|
||||
params['deltaX'] = delta_x
|
||||
if delta_y is not None:
|
||||
params['deltaY'] = delta_y
|
||||
if modifiers is not None:
|
||||
params['modifiers'] = modifiers
|
||||
if click_count is not None:
|
||||
params['clickCount'] = click_count
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.emulateTouchFromMouseEvent',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_ignore_input_events(
|
||||
ignore: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Ignores input events (useful while auditing page).
|
||||
|
||||
:param ignore: Ignores input events processing when set to true.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['ignore'] = ignore
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.setIgnoreInputEvents',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_intercept_drags(
|
||||
enabled: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Prevents default drag and drop behavior and instead emits ``Input.dragIntercepted`` events.
|
||||
Drag and drop behavior can be directly controlled via ``Input.dispatchDragEvent``.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param enabled:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['enabled'] = enabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.setInterceptDrags',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def synthesize_pinch_gesture(
|
||||
x: float,
|
||||
y: float,
|
||||
scale_factor: float,
|
||||
relative_speed: typing.Optional[int] = None,
|
||||
gesture_source_type: typing.Optional[GestureSourceType] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Synthesizes a pinch gesture over a time period by issuing appropriate touch events.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param x: X coordinate of the start of the gesture in CSS pixels.
|
||||
:param y: Y coordinate of the start of the gesture in CSS pixels.
|
||||
:param scale_factor: Relative scale factor after zooming (>1.0 zooms in, <1.0 zooms out).
|
||||
:param relative_speed: *(Optional)* Relative pointer speed in pixels per second (default: 800).
|
||||
:param gesture_source_type: *(Optional)* Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['x'] = x
|
||||
params['y'] = y
|
||||
params['scaleFactor'] = scale_factor
|
||||
if relative_speed is not None:
|
||||
params['relativeSpeed'] = relative_speed
|
||||
if gesture_source_type is not None:
|
||||
params['gestureSourceType'] = gesture_source_type.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.synthesizePinchGesture',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def synthesize_scroll_gesture(
|
||||
x: float,
|
||||
y: float,
|
||||
x_distance: typing.Optional[float] = None,
|
||||
y_distance: typing.Optional[float] = None,
|
||||
x_overscroll: typing.Optional[float] = None,
|
||||
y_overscroll: typing.Optional[float] = None,
|
||||
prevent_fling: typing.Optional[bool] = None,
|
||||
speed: typing.Optional[int] = None,
|
||||
gesture_source_type: typing.Optional[GestureSourceType] = None,
|
||||
repeat_count: typing.Optional[int] = None,
|
||||
repeat_delay_ms: typing.Optional[int] = None,
|
||||
interaction_marker_name: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Synthesizes a scroll gesture over a time period by issuing appropriate touch events.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param x: X coordinate of the start of the gesture in CSS pixels.
|
||||
:param y: Y coordinate of the start of the gesture in CSS pixels.
|
||||
:param x_distance: *(Optional)* The distance to scroll along the X axis (positive to scroll left).
|
||||
:param y_distance: *(Optional)* The distance to scroll along the Y axis (positive to scroll up).
|
||||
:param x_overscroll: *(Optional)* The number of additional pixels to scroll back along the X axis, in addition to the given distance.
|
||||
:param y_overscroll: *(Optional)* The number of additional pixels to scroll back along the Y axis, in addition to the given distance.
|
||||
:param prevent_fling: *(Optional)* Prevent fling (default: true).
|
||||
:param speed: *(Optional)* Swipe speed in pixels per second (default: 800).
|
||||
:param gesture_source_type: *(Optional)* Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type).
|
||||
:param repeat_count: *(Optional)* The number of times to repeat the gesture (default: 0).
|
||||
:param repeat_delay_ms: *(Optional)* The number of milliseconds delay between each repeat. (default: 250).
|
||||
:param interaction_marker_name: *(Optional)* The name of the interaction markers to generate, if not empty (default: "").
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['x'] = x
|
||||
params['y'] = y
|
||||
if x_distance is not None:
|
||||
params['xDistance'] = x_distance
|
||||
if y_distance is not None:
|
||||
params['yDistance'] = y_distance
|
||||
if x_overscroll is not None:
|
||||
params['xOverscroll'] = x_overscroll
|
||||
if y_overscroll is not None:
|
||||
params['yOverscroll'] = y_overscroll
|
||||
if prevent_fling is not None:
|
||||
params['preventFling'] = prevent_fling
|
||||
if speed is not None:
|
||||
params['speed'] = speed
|
||||
if gesture_source_type is not None:
|
||||
params['gestureSourceType'] = gesture_source_type.to_json()
|
||||
if repeat_count is not None:
|
||||
params['repeatCount'] = repeat_count
|
||||
if repeat_delay_ms is not None:
|
||||
params['repeatDelayMs'] = repeat_delay_ms
|
||||
if interaction_marker_name is not None:
|
||||
params['interactionMarkerName'] = interaction_marker_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.synthesizeScrollGesture',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
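# Editor's note: per the parameter docs above, a positive yDistance scrolls
# up, so scrolling the page down by 800 CSS pixels uses a negative value.
# Illustrative sketch; ``execute`` stands in for a command driver and the
# coordinates and speed are arbitrary example values.
def _example_scroll_down(execute) -> None:
    execute(synthesize_scroll_gesture(x=400, y=300, y_distance=-800, speed=1600))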
|
||||
|
||||
|
||||
def synthesize_tap_gesture(
|
||||
x: float,
|
||||
y: float,
|
||||
duration: typing.Optional[int] = None,
|
||||
tap_count: typing.Optional[int] = None,
|
||||
gesture_source_type: typing.Optional[GestureSourceType] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Synthesizes a tap gesture over a time period by issuing appropriate touch events.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param x: X coordinate of the start of the gesture in CSS pixels.
|
||||
:param y: Y coordinate of the start of the gesture in CSS pixels.
|
||||
:param duration: *(Optional)* Duration between touchdown and touchup events in ms (default: 50).
|
||||
:param tap_count: *(Optional)* Number of times to perform the tap (e.g. 2 for double tap, default: 1).
|
||||
:param gesture_source_type: *(Optional)* Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['x'] = x
|
||||
params['y'] = y
|
||||
if duration is not None:
|
||||
params['duration'] = duration
|
||||
if tap_count is not None:
|
||||
params['tapCount'] = tap_count
|
||||
if gesture_source_type is not None:
|
||||
params['gestureSourceType'] = gesture_source_type.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Input.synthesizeTapGesture',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Input.dragIntercepted')
|
||||
@dataclass
|
||||
class DragIntercepted:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
Emitted only when ``Input.setInterceptDrags`` is enabled. Use this data with ``Input.dispatchDragEvent`` to
|
||||
restore normal drag and drop behavior.
|
||||
'''
|
||||
data: DragData
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DragIntercepted:
|
||||
return cls(
|
||||
data=DragData.from_json(json['data'])
|
||||
)
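# Editor's note: an illustrative way to hand an intercepted drag back to the
# page. Event plumbing is outside this module, so ``intercepted`` is assumed
# to be a DragIntercepted event already received from the browser and
# ``execute`` a command driver; the drag event type strings are assumptions
# based on the CDP Input domain.
def _example_replay_drag(execute, intercepted: DragIntercepted, x: float, y: float) -> None:
    execute(dispatch_drag_event('dragEnter', x, y, intercepted.data))
    execute(dispatch_drag_event('dragOver', x, y, intercepted.data))
    execute(dispatch_drag_event('drop', x, y, intercepted.data))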
|
||||
@@ -0,0 +1,76 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Inspector (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables inspector domain notifications.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Inspector.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables inspector domain notifications.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Inspector.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Inspector.detached')
|
||||
@dataclass
|
||||
class Detached:
|
||||
'''
|
||||
Fired when remote debugging connection is about to be terminated. Contains detach reason.
|
||||
'''
|
||||
#: The reason why connection has been terminated.
|
||||
reason: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> Detached:
|
||||
return cls(
|
||||
reason=str(json['reason'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Inspector.targetCrashed')
|
||||
@dataclass
|
||||
class TargetCrashed:
|
||||
'''
|
||||
Fired when the debugging target has crashed.
|
||||
'''
|
||||
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> TargetCrashed:
|
||||
return cls(
|
||||
|
||||
)
|
||||
|
||||
|
||||
@event_class('Inspector.targetReloadedAfterCrash')
|
||||
@dataclass
|
||||
class TargetReloadedAfterCrash:
|
||||
'''
|
||||
Fired when the debugging target has reloaded after a crash.
|
||||
'''
|
||||
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> TargetReloadedAfterCrash:
|
||||
return cls(
|
||||
|
||||
)
|
||||
@@ -0,0 +1,99 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: IO
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import runtime
|
||||
|
||||
|
||||
class StreamHandle(str):
|
||||
'''
|
||||
This is either obtained from another method or specified as ``blob:<uuid>`` where
|
||||
``<uuid>`` is a UUID of a Blob.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> StreamHandle:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'StreamHandle({})'.format(super().__repr__())
|
||||
|
||||
|
||||
def close(
|
||||
handle: StreamHandle
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Close the stream, discard any temporary backing storage.
|
||||
|
||||
:param handle: Handle of the stream to close.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['handle'] = handle.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IO.close',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def read(
|
||||
handle: StreamHandle,
|
||||
offset: typing.Optional[int] = None,
|
||||
size: typing.Optional[int] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.Optional[bool], str, bool]]:
|
||||
'''
|
||||
Read a chunk of the stream
|
||||
|
||||
:param handle: Handle of the stream to read.
|
||||
:param offset: *(Optional)* Seek to the specified offset before reading (if not specified, proceed with offset following the last read). Some types of streams may only support sequential reads.
|
||||
:param size: *(Optional)* Maximum number of bytes to read (left upon the agent discretion if not specified).
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **base64Encoded** - *(Optional)* Set if the data is base64-encoded
|
||||
1. **data** - Data that were read.
|
||||
2. **eof** - Set if the end-of-file condition occurred while reading.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['handle'] = handle.to_json()
|
||||
if offset is not None:
|
||||
params['offset'] = offset
|
||||
if size is not None:
|
||||
params['size'] = size
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IO.read',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
bool(json['base64Encoded']) if 'base64Encoded' in json else None,
|
||||
str(json['data']),
|
||||
bool(json['eof'])
|
||||
)
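# Editor's note: an illustrative loop that drains a stream handle to bytes,
# decoding chunks only when the browser flags them as base64-encoded.
# ``execute`` stands in for a driver of these command generators (any CDP
# session wrapper); it is not defined in this module.
def _example_drain_stream(execute, handle: StreamHandle) -> bytes:
    import base64
    buf = bytearray()
    while True:
        base64_encoded, data, eof = execute(read(handle))
        buf.extend(base64.b64decode(data) if base64_encoded else data.encode('utf-8'))
        if eof:
            break
    execute(close(handle))   # discard the temporary backing storage
    return bytes(buf)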
|
||||
|
||||
|
||||
def resolve_blob(
|
||||
object_id: runtime.RemoteObjectId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,str]:
|
||||
'''
|
||||
Return UUID of Blob object specified by a remote object id.
|
||||
|
||||
:param object_id: Object id of a Blob object wrapper.
|
||||
:returns: UUID of the specified Blob.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['objectId'] = object_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'IO.resolveBlob',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return str(json['uuid'])
|
||||
@@ -0,0 +1,462 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: LayerTree (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
|
||||
|
||||
class LayerId(str):
|
||||
'''
|
||||
Unique Layer identifier.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> LayerId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'LayerId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class SnapshotId(str):
|
||||
'''
|
||||
Unique snapshot identifier.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> SnapshotId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'SnapshotId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class ScrollRect:
|
||||
'''
|
||||
Rectangle where scrolling happens on the main thread.
|
||||
'''
|
||||
#: Rectangle itself.
|
||||
rect: dom.Rect
|
||||
|
||||
#: Reason for rectangle to force scrolling on the main thread
|
||||
type_: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['rect'] = self.rect.to_json()
|
||||
json['type'] = self.type_
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
rect=dom.Rect.from_json(json['rect']),
|
||||
type_=str(json['type']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class StickyPositionConstraint:
|
||||
'''
|
||||
Sticky position constraints.
|
||||
'''
|
||||
#: Layout rectangle of the sticky element before being shifted
|
||||
sticky_box_rect: dom.Rect
|
||||
|
||||
#: Layout rectangle of the containing block of the sticky element
|
||||
containing_block_rect: dom.Rect
|
||||
|
||||
#: The nearest sticky layer that shifts the sticky box
|
||||
nearest_layer_shifting_sticky_box: typing.Optional[LayerId] = None
|
||||
|
||||
#: The nearest sticky layer that shifts the containing block
|
||||
nearest_layer_shifting_containing_block: typing.Optional[LayerId] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['stickyBoxRect'] = self.sticky_box_rect.to_json()
|
||||
json['containingBlockRect'] = self.containing_block_rect.to_json()
|
||||
if self.nearest_layer_shifting_sticky_box is not None:
|
||||
json['nearestLayerShiftingStickyBox'] = self.nearest_layer_shifting_sticky_box.to_json()
|
||||
if self.nearest_layer_shifting_containing_block is not None:
|
||||
json['nearestLayerShiftingContainingBlock'] = self.nearest_layer_shifting_containing_block.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
sticky_box_rect=dom.Rect.from_json(json['stickyBoxRect']),
|
||||
containing_block_rect=dom.Rect.from_json(json['containingBlockRect']),
|
||||
nearest_layer_shifting_sticky_box=LayerId.from_json(json['nearestLayerShiftingStickyBox']) if 'nearestLayerShiftingStickyBox' in json else None,
|
||||
nearest_layer_shifting_containing_block=LayerId.from_json(json['nearestLayerShiftingContainingBlock']) if 'nearestLayerShiftingContainingBlock' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PictureTile:
|
||||
'''
|
||||
Serialized fragment of layer picture along with its offset within the layer.
|
||||
'''
|
||||
#: Offset from owning layer left boundary
|
||||
x: float
|
||||
|
||||
#: Offset from owning layer top boundary
|
||||
y: float
|
||||
|
||||
#: Base64-encoded snapshot data.
|
||||
picture: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['x'] = self.x
|
||||
json['y'] = self.y
|
||||
json['picture'] = self.picture
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
x=float(json['x']),
|
||||
y=float(json['y']),
|
||||
picture=str(json['picture']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Layer:
|
||||
'''
|
||||
Information about a compositing layer.
|
||||
'''
|
||||
#: The unique id for this layer.
|
||||
layer_id: LayerId
|
||||
|
||||
#: Offset from parent layer, X coordinate.
|
||||
offset_x: float
|
||||
|
||||
#: Offset from parent layer, Y coordinate.
|
||||
offset_y: float
|
||||
|
||||
#: Layer width.
|
||||
width: float
|
||||
|
||||
#: Layer height.
|
||||
height: float
|
||||
|
||||
#: Indicates how many times this layer has painted.
|
||||
paint_count: int
|
||||
|
||||
#: Indicates whether this layer hosts any content, rather than being used for
|
||||
#: transform/scrolling purposes only.
|
||||
draws_content: bool
|
||||
|
||||
#: The id of parent (not present for root).
|
||||
parent_layer_id: typing.Optional[LayerId] = None
|
||||
|
||||
#: The backend id for the node associated with this layer.
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None
|
||||
|
||||
#: Transformation matrix for layer, default is identity matrix
|
||||
transform: typing.Optional[typing.List[float]] = None
|
||||
|
||||
#: Transform anchor point X, absent if no transform specified
|
||||
anchor_x: typing.Optional[float] = None
|
||||
|
||||
#: Transform anchor point Y, absent if no transform specified
|
||||
anchor_y: typing.Optional[float] = None
|
||||
|
||||
#: Transform anchor point Z, absent if no transform specified
|
||||
anchor_z: typing.Optional[float] = None
|
||||
|
||||
#: Set if layer is not visible.
|
||||
invisible: typing.Optional[bool] = None
|
||||
|
||||
#: Rectangles scrolling on main thread only.
|
||||
scroll_rects: typing.Optional[typing.List[ScrollRect]] = None
|
||||
|
||||
#: Sticky position constraint information
|
||||
sticky_position_constraint: typing.Optional[StickyPositionConstraint] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['layerId'] = self.layer_id.to_json()
|
||||
json['offsetX'] = self.offset_x
|
||||
json['offsetY'] = self.offset_y
|
||||
json['width'] = self.width
|
||||
json['height'] = self.height
|
||||
json['paintCount'] = self.paint_count
|
||||
json['drawsContent'] = self.draws_content
|
||||
if self.parent_layer_id is not None:
|
||||
json['parentLayerId'] = self.parent_layer_id.to_json()
|
||||
if self.backend_node_id is not None:
|
||||
json['backendNodeId'] = self.backend_node_id.to_json()
|
||||
if self.transform is not None:
|
||||
json['transform'] = [i for i in self.transform]
|
||||
if self.anchor_x is not None:
|
||||
json['anchorX'] = self.anchor_x
|
||||
if self.anchor_y is not None:
|
||||
json['anchorY'] = self.anchor_y
|
||||
if self.anchor_z is not None:
|
||||
json['anchorZ'] = self.anchor_z
|
||||
if self.invisible is not None:
|
||||
json['invisible'] = self.invisible
|
||||
if self.scroll_rects is not None:
|
||||
json['scrollRects'] = [i.to_json() for i in self.scroll_rects]
|
||||
if self.sticky_position_constraint is not None:
|
||||
json['stickyPositionConstraint'] = self.sticky_position_constraint.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
layer_id=LayerId.from_json(json['layerId']),
|
||||
offset_x=float(json['offsetX']),
|
||||
offset_y=float(json['offsetY']),
|
||||
width=float(json['width']),
|
||||
height=float(json['height']),
|
||||
paint_count=int(json['paintCount']),
|
||||
draws_content=bool(json['drawsContent']),
|
||||
parent_layer_id=LayerId.from_json(json['parentLayerId']) if 'parentLayerId' in json else None,
|
||||
backend_node_id=dom.BackendNodeId.from_json(json['backendNodeId']) if 'backendNodeId' in json else None,
|
||||
transform=[float(i) for i in json['transform']] if 'transform' in json else None,
|
||||
anchor_x=float(json['anchorX']) if 'anchorX' in json else None,
|
||||
anchor_y=float(json['anchorY']) if 'anchorY' in json else None,
|
||||
anchor_z=float(json['anchorZ']) if 'anchorZ' in json else None,
|
||||
invisible=bool(json['invisible']) if 'invisible' in json else None,
|
||||
scroll_rects=[ScrollRect.from_json(i) for i in json['scrollRects']] if 'scrollRects' in json else None,
|
||||
sticky_position_constraint=StickyPositionConstraint.from_json(json['stickyPositionConstraint']) if 'stickyPositionConstraint' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class PaintProfile(list):
|
||||
'''
|
||||
Array of timings, one per paint step.
|
||||
'''
|
||||
def to_json(self) -> typing.List[float]:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: typing.List[float]) -> PaintProfile:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'PaintProfile({})'.format(super().__repr__())
|
||||
|
||||
|
||||
def compositing_reasons(
|
||||
layer_id: LayerId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[str], typing.List[str]]]:
|
||||
'''
|
||||
Provides the reasons why the given layer was composited.
|
||||
|
||||
:param layer_id: The id of the layer for which we want to get the reasons it was composited.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **compositingReasons** - A list of strings specifying reasons for the given layer to become composited.
|
||||
1. **compositingReasonIds** - A list of strings specifying reason IDs for the given layer to become composited.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['layerId'] = layer_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'LayerTree.compositingReasons',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[str(i) for i in json['compositingReasons']],
|
||||
[str(i) for i in json['compositingReasonIds']]
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables compositing tree inspection.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'LayerTree.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables compositing tree inspection.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'LayerTree.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def load_snapshot(
|
||||
tiles: typing.List[PictureTile]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SnapshotId]:
|
||||
'''
|
||||
Returns the snapshot identifier.
|
||||
|
||||
:param tiles: An array of tiles composing the snapshot.
|
||||
:returns: The id of the snapshot.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['tiles'] = [i.to_json() for i in tiles]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'LayerTree.loadSnapshot',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return SnapshotId.from_json(json['snapshotId'])
|
||||
|
||||
|
||||
def make_snapshot(
|
||||
layer_id: LayerId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SnapshotId]:
|
||||
'''
|
||||
Returns the layer snapshot identifier.
|
||||
|
||||
:param layer_id: The id of the layer.
|
||||
:returns: The id of the layer snapshot.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['layerId'] = layer_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'LayerTree.makeSnapshot',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return SnapshotId.from_json(json['snapshotId'])
|
||||
|
||||
|
||||
def profile_snapshot(
|
||||
snapshot_id: SnapshotId,
|
||||
min_repeat_count: typing.Optional[int] = None,
|
||||
min_duration: typing.Optional[float] = None,
|
||||
clip_rect: typing.Optional[dom.Rect] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[PaintProfile]]:
|
||||
'''
|
||||
:param snapshot_id: The id of the layer snapshot.
|
||||
:param min_repeat_count: *(Optional)* The maximum number of times to replay the snapshot (1, if not specified).
|
||||
:param min_duration: *(Optional)* The minimum duration (in seconds) to replay the snapshot.
|
||||
:param clip_rect: *(Optional)* The clip rectangle to apply when replaying the snapshot.
|
||||
:returns: The array of paint profiles, one per run.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['snapshotId'] = snapshot_id.to_json()
|
||||
if min_repeat_count is not None:
|
||||
params['minRepeatCount'] = min_repeat_count
|
||||
if min_duration is not None:
|
||||
params['minDuration'] = min_duration
|
||||
if clip_rect is not None:
|
||||
params['clipRect'] = clip_rect.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'LayerTree.profileSnapshot',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [PaintProfile.from_json(i) for i in json['timings']]
|
||||
|
||||
|
||||
def release_snapshot(
|
||||
snapshot_id: SnapshotId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Releases layer snapshot captured by the back-end.
|
||||
|
||||
:param snapshot_id: The id of the layer snapshot.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['snapshotId'] = snapshot_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'LayerTree.releaseSnapshot',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def replay_snapshot(
|
||||
snapshot_id: SnapshotId,
|
||||
from_step: typing.Optional[int] = None,
|
||||
to_step: typing.Optional[int] = None,
|
||||
scale: typing.Optional[float] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,str]:
|
||||
'''
|
||||
Replays the layer snapshot and returns the resulting bitmap.
|
||||
|
||||
:param snapshot_id: The id of the layer snapshot.
|
||||
:param from_step: *(Optional)* The first step to replay from (replay from the very start if not specified).
|
||||
:param to_step: *(Optional)* The last step to replay to (replay till the end if not specified).
|
||||
:param scale: *(Optional)* The scale to apply while replaying (defaults to 1).
|
||||
:returns: A data: URL for resulting image.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['snapshotId'] = snapshot_id.to_json()
|
||||
if from_step is not None:
|
||||
params['fromStep'] = from_step
|
||||
if to_step is not None:
|
||||
params['toStep'] = to_step
|
||||
if scale is not None:
|
||||
params['scale'] = scale
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'LayerTree.replaySnapshot',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return str(json['dataURL'])
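# Editor's note: an illustrative workflow tying the snapshot commands above
# together: capture a layer snapshot, profile its paint steps, replay it to a
# data: URL, then release it. ``execute`` stands in for a command driver (any
# CDP session wrapper); the repeat count and scale are arbitrary example
# values.
def _example_profile_layer(execute, layer_id: LayerId) -> str:
    snapshot = execute(make_snapshot(layer_id))
    _profiles = execute(profile_snapshot(snapshot, min_repeat_count=5))  # paint timings, one profile per replay
    data_url = execute(replay_snapshot(snapshot, scale=0.5))
    execute(release_snapshot(snapshot))
    return data_url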
|
||||
|
||||
|
||||
def snapshot_command_log(
|
||||
snapshot_id: SnapshotId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[dict]]:
|
||||
'''
|
||||
Replays the layer snapshot and returns the canvas log.
|
||||
|
||||
:param snapshot_id: The id of the layer snapshot.
|
||||
:returns: The array of canvas function calls.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['snapshotId'] = snapshot_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'LayerTree.snapshotCommandLog',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [dict(i) for i in json['commandLog']]
|
||||
|
||||
|
||||
@event_class('LayerTree.layerPainted')
|
||||
@dataclass
|
||||
class LayerPainted:
|
||||
#: The id of the painted layer.
|
||||
layer_id: LayerId
|
||||
#: Clip rectangle.
|
||||
clip: dom.Rect
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> LayerPainted:
|
||||
return cls(
|
||||
layer_id=LayerId.from_json(json['layerId']),
|
||||
clip=dom.Rect.from_json(json['clip'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('LayerTree.layerTreeDidChange')
|
||||
@dataclass
|
||||
class LayerTreeDidChange:
|
||||
#: Layer tree, absent if not in compositing mode.
|
||||
layers: typing.Optional[typing.List[Layer]]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> LayerTreeDidChange:
|
||||
return cls(
|
||||
layers=[Layer.from_json(i) for i in json['layers']] if 'layers' in json else None
|
||||
)
|
||||
@@ -0,0 +1,188 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Log
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import network
|
||||
from . import runtime
|
||||
|
||||
|
||||
@dataclass
|
||||
class LogEntry:
|
||||
'''
|
||||
Log entry.
|
||||
'''
|
||||
#: Log entry source.
|
||||
source: str
|
||||
|
||||
#: Log entry severity.
|
||||
level: str
|
||||
|
||||
#: Logged text.
|
||||
text: str
|
||||
|
||||
#: Timestamp when this entry was added.
|
||||
timestamp: runtime.Timestamp
|
||||
|
||||
category: typing.Optional[str] = None
|
||||
|
||||
#: URL of the resource if known.
|
||||
url: typing.Optional[str] = None
|
||||
|
||||
#: Line number in the resource.
|
||||
line_number: typing.Optional[int] = None
|
||||
|
||||
#: JavaScript stack trace.
|
||||
stack_trace: typing.Optional[runtime.StackTrace] = None
|
||||
|
||||
#: Identifier of the network request associated with this entry.
|
||||
network_request_id: typing.Optional[network.RequestId] = None
|
||||
|
||||
#: Identifier of the worker associated with this entry.
|
||||
worker_id: typing.Optional[str] = None
|
||||
|
||||
#: Call arguments.
|
||||
args: typing.Optional[typing.List[runtime.RemoteObject]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['source'] = self.source
|
||||
json['level'] = self.level
|
||||
json['text'] = self.text
|
||||
json['timestamp'] = self.timestamp.to_json()
|
||||
if self.category is not None:
|
||||
json['category'] = self.category
|
||||
if self.url is not None:
|
||||
json['url'] = self.url
|
||||
if self.line_number is not None:
|
||||
json['lineNumber'] = self.line_number
|
||||
if self.stack_trace is not None:
|
||||
json['stackTrace'] = self.stack_trace.to_json()
|
||||
if self.network_request_id is not None:
|
||||
json['networkRequestId'] = self.network_request_id.to_json()
|
||||
if self.worker_id is not None:
|
||||
json['workerId'] = self.worker_id
|
||||
if self.args is not None:
|
||||
json['args'] = [i.to_json() for i in self.args]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
source=str(json['source']),
|
||||
level=str(json['level']),
|
||||
text=str(json['text']),
|
||||
timestamp=runtime.Timestamp.from_json(json['timestamp']),
|
||||
category=str(json['category']) if 'category' in json else None,
|
||||
url=str(json['url']) if 'url' in json else None,
|
||||
line_number=int(json['lineNumber']) if 'lineNumber' in json else None,
|
||||
stack_trace=runtime.StackTrace.from_json(json['stackTrace']) if 'stackTrace' in json else None,
|
||||
network_request_id=network.RequestId.from_json(json['networkRequestId']) if 'networkRequestId' in json else None,
|
||||
worker_id=str(json['workerId']) if 'workerId' in json else None,
|
||||
args=[runtime.RemoteObject.from_json(i) for i in json['args']] if 'args' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ViolationSetting:
|
||||
'''
|
||||
Violation configuration setting.
|
||||
'''
|
||||
#: Violation type.
|
||||
name: str
|
||||
|
||||
#: Time threshold to trigger upon.
|
||||
threshold: float
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['threshold'] = self.threshold
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
threshold=float(json['threshold']),
|
||||
)
|
||||
|
||||
|
||||
def clear() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears the log.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Log.clear',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables log domain, prevents further log entries from being reported to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Log.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables log domain, sends the entries collected so far to the client by means of the
|
||||
``entryAdded`` notification.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Log.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_violations_report(
|
||||
config: typing.List[ViolationSetting]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Start violation reporting.
|
||||
|
||||
:param config: Configuration for violations.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['config'] = [i.to_json() for i in config]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Log.startViolationsReport',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
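# Editor's note: an illustrative configuration that reports tasks blocking
# the main thread for more than 200 ms. The 'longTask' violation name is an
# assumption based on the CDP Log domain, and ``execute`` stands in for a
# command driver (any CDP session wrapper).
def _example_report_long_tasks(execute) -> None:
    execute(enable())
    execute(start_violations_report([ViolationSetting(name='longTask', threshold=200.0)]))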
|
||||
|
||||
|
||||
def stop_violations_report() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Stop violation reporting.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Log.stopViolationsReport',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Log.entryAdded')
|
||||
@dataclass
|
||||
class EntryAdded:
|
||||
'''
|
||||
Issued when new message was logged.
|
||||
'''
|
||||
#: The entry.
|
||||
entry: LogEntry
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> EntryAdded:
|
||||
return cls(
|
||||
entry=LogEntry.from_json(json['entry'])
|
||||
)
|
||||
@@ -0,0 +1,251 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Media (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class PlayerId(str):
|
||||
'''
|
||||
Players will get an ID that is unique within the agent context.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> PlayerId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'PlayerId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class Timestamp(float):
|
||||
def to_json(self) -> float:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: float) -> Timestamp:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'Timestamp({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class PlayerMessage:
|
||||
'''
|
||||
Have one type per entry in MediaLogRecord::Type
|
||||
Corresponds to kMessage
|
||||
'''
|
||||
#: Keep in sync with MediaLogMessageLevel
|
||||
#: We are currently keeping the message level 'error' separate from the
|
||||
#: PlayerError type because right now they represent different things,
|
||||
#: this one being a DVLOG(ERROR) style log message that gets printed
|
||||
#: based on what log level is selected in the UI, and the other is a
|
||||
#: representation of a media::PipelineStatus object. Soon however we're
|
||||
#: going to be moving away from using PipelineStatus for errors and
|
||||
#: introducing a new error type which should hopefully let us integrate
|
||||
#: the error log level into the PlayerError type.
|
||||
level: str
|
||||
|
||||
message: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['level'] = self.level
|
||||
json['message'] = self.message
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
level=str(json['level']),
|
||||
message=str(json['message']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PlayerProperty:
|
||||
'''
|
||||
Corresponds to kMediaPropertyChange
|
||||
'''
|
||||
name: str
|
||||
|
||||
value: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
value=str(json['value']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PlayerEvent:
|
||||
'''
|
||||
Corresponds to kMediaEventTriggered
|
||||
'''
|
||||
timestamp: Timestamp
|
||||
|
||||
value: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['timestamp'] = self.timestamp.to_json()
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
timestamp=Timestamp.from_json(json['timestamp']),
|
||||
value=str(json['value']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PlayerError:
|
||||
'''
|
||||
Corresponds to kMediaError
|
||||
'''
|
||||
type_: str
|
||||
|
||||
#: When this switches to using media::Status instead of PipelineStatus
|
||||
#: we can remove "errorCode" and replace it with the fields from
|
||||
#: a Status instance. This also seems like a duplicate of the error
|
||||
#: level enum - there is a todo bug to have that level removed and
|
||||
#: use this instead. (crbug.com/1068454)
|
||||
error_code: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_
|
||||
json['errorCode'] = self.error_code
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=str(json['type']),
|
||||
error_code=str(json['errorCode']),
|
||||
)
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables the Media domain
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Media.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables the Media domain.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Media.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Media.playerPropertiesChanged')
|
||||
@dataclass
|
||||
class PlayerPropertiesChanged:
|
||||
'''
|
||||
This can be called multiple times, and can be used to set / override /
|
||||
remove player properties. A null propValue indicates removal.
|
||||
'''
|
||||
player_id: PlayerId
|
||||
properties: typing.List[PlayerProperty]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> PlayerPropertiesChanged:
|
||||
return cls(
|
||||
player_id=PlayerId.from_json(json['playerId']),
|
||||
properties=[PlayerProperty.from_json(i) for i in json['properties']]
|
||||
)
|
||||
|
||||
|
||||
@event_class('Media.playerEventsAdded')
|
||||
@dataclass
|
||||
class PlayerEventsAdded:
|
||||
'''
|
||||
Send events as a list, allowing them to be batched on the browser for less
|
||||
congestion. If batched, events must ALWAYS be in chronological order.
|
||||
'''
|
||||
player_id: PlayerId
|
||||
events: typing.List[PlayerEvent]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> PlayerEventsAdded:
|
||||
return cls(
|
||||
player_id=PlayerId.from_json(json['playerId']),
|
||||
events=[PlayerEvent.from_json(i) for i in json['events']]
|
||||
)
|
||||
|
||||
|
||||
@event_class('Media.playerMessagesLogged')
|
||||
@dataclass
|
||||
class PlayerMessagesLogged:
|
||||
'''
|
||||
Send a list of any messages that need to be delivered.
|
||||
'''
|
||||
player_id: PlayerId
|
||||
messages: typing.List[PlayerMessage]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> PlayerMessagesLogged:
|
||||
return cls(
|
||||
player_id=PlayerId.from_json(json['playerId']),
|
||||
messages=[PlayerMessage.from_json(i) for i in json['messages']]
|
||||
)
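# Editor's note: an illustrative decode of a raw 'Media.playerMessagesLogged'
# payload as it might arrive over a CDP websocket; the payload values below
# are made up for the example.
def _example_decode_messages() -> PlayerMessagesLogged:
    raw = {'playerId': 'player-1',
           'messages': [{'level': 'info', 'message': 'pipeline started'}]}
    return PlayerMessagesLogged.from_json(raw)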
|
||||
|
||||
|
||||
@event_class('Media.playerErrorsRaised')
|
||||
@dataclass
|
||||
class PlayerErrorsRaised:
|
||||
'''
|
||||
Send a list of any errors that need to be delivered.
|
||||
'''
|
||||
player_id: PlayerId
|
||||
errors: typing.List[PlayerError]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> PlayerErrorsRaised:
|
||||
return cls(
|
||||
player_id=PlayerId.from_json(json['playerId']),
|
||||
errors=[PlayerError.from_json(i) for i in json['errors']]
|
||||
)
|
||||
|
||||
|
||||
@event_class('Media.playersCreated')
|
||||
@dataclass
|
||||
class PlayersCreated:
|
||||
'''
|
||||
Called whenever a player is created, or when a new agent joins and receives
|
||||
a list of active players. If an agent is restored, it will receive the full
|
||||
list of player ids and all events again.
|
||||
'''
|
||||
players: typing.List[PlayerId]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> PlayersCreated:
|
||||
return cls(
|
||||
players=[PlayerId.from_json(i) for i in json['players']]
|
||||
)
|
||||
@@ -0,0 +1,261 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Memory (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class PressureLevel(enum.Enum):
|
||||
'''
|
||||
Memory pressure level.
|
||||
'''
|
||||
MODERATE = "moderate"
|
||||
CRITICAL = "critical"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class SamplingProfileNode:
|
||||
'''
|
||||
Heap profile sample.
|
||||
'''
|
||||
#: Size of the sampled allocation.
|
||||
size: float
|
||||
|
||||
#: Total bytes attributed to this sample.
|
||||
total: float
|
||||
|
||||
#: Execution stack at the point of allocation.
|
||||
stack: typing.List[str]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['size'] = self.size
|
||||
json['total'] = self.total
|
||||
json['stack'] = [i for i in self.stack]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
size=float(json['size']),
|
||||
total=float(json['total']),
|
||||
stack=[str(i) for i in json['stack']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class SamplingProfile:
|
||||
'''
|
||||
Array of heap profile samples.
|
||||
'''
|
||||
samples: typing.List[SamplingProfileNode]
|
||||
|
||||
modules: typing.List[Module]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['samples'] = [i.to_json() for i in self.samples]
|
||||
json['modules'] = [i.to_json() for i in self.modules]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
samples=[SamplingProfileNode.from_json(i) for i in json['samples']],
|
||||
modules=[Module.from_json(i) for i in json['modules']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Module:
|
||||
'''
|
||||
Executable module information
|
||||
'''
|
||||
#: Name of the module.
|
||||
name: str
|
||||
|
||||
#: UUID of the module.
|
||||
uuid: str
|
||||
|
||||
#: Base address where the module is loaded into memory. Encoded as a decimal
|
||||
#: or hexadecimal (0x prefixed) string.
|
||||
base_address: str
|
||||
|
||||
#: Size of the module in bytes.
|
||||
size: float
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['uuid'] = self.uuid
|
||||
json['baseAddress'] = self.base_address
|
||||
json['size'] = self.size
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
uuid=str(json['uuid']),
|
||||
base_address=str(json['baseAddress']),
|
||||
size=float(json['size']),
|
||||
)
|
||||
|
||||
|
||||
def get_dom_counters() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[int, int, int]]:
|
||||
'''
|
||||
|
||||
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **documents** -
|
||||
1. **nodes** -
|
||||
2. **jsEventListeners** -
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Memory.getDOMCounters',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
int(json['documents']),
|
||||
int(json['nodes']),
|
||||
int(json['jsEventListeners'])
|
||||
)
|
||||
|
||||
|
||||
def prepare_for_leak_detection() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Memory.prepareForLeakDetection',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def forcibly_purge_java_script_memory() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Simulate OomIntervention by purging V8 memory.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Memory.forciblyPurgeJavaScriptMemory',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_pressure_notifications_suppressed(
|
||||
suppressed: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enable/disable suppressing memory pressure notifications in all processes.
|
||||
|
||||
:param suppressed: If true, memory pressure notifications will be suppressed.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['suppressed'] = suppressed
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Memory.setPressureNotificationsSuppressed',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def simulate_pressure_notification(
|
||||
level: PressureLevel
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Simulate a memory pressure notification in all processes.
|
||||
|
||||
:param level: Memory pressure level of the notification.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['level'] = level.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Memory.simulatePressureNotification',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_sampling(
|
||||
sampling_interval: typing.Optional[int] = None,
|
||||
suppress_randomness: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Start collecting native memory profile.
|
||||
|
||||
:param sampling_interval: *(Optional)* Average number of bytes between samples.
|
||||
:param suppress_randomness: *(Optional)* Do not randomize intervals between samples.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if sampling_interval is not None:
|
||||
params['samplingInterval'] = sampling_interval
|
||||
if suppress_randomness is not None:
|
||||
params['suppressRandomness'] = suppress_randomness
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Memory.startSampling',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def stop_sampling() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Stop collecting native memory profile.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Memory.stopSampling',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_all_time_sampling_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SamplingProfile]:
|
||||
'''
|
||||
Retrieve native memory allocations profile
|
||||
collected since renderer process startup.
|
||||
|
||||
:returns:
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Memory.getAllTimeSamplingProfile',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return SamplingProfile.from_json(json['profile'])
|
||||
|
||||
|
||||
def get_browser_sampling_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SamplingProfile]:
|
||||
'''
|
||||
Retrieve native memory allocations profile
|
||||
collected since browser process startup.
|
||||
|
||||
:returns:
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Memory.getBrowserSamplingProfile',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return SamplingProfile.from_json(json['profile'])
|
||||
|
||||
|
||||
def get_sampling_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SamplingProfile]:
|
||||
'''
|
||||
Retrieve native memory allocations profile collected since last
|
||||
``startSampling`` call.
|
||||
|
||||
:returns:
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Memory.getSamplingProfile',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return SamplingProfile.from_json(json['profile'])
|
||||
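To make the tuple-returning commands concrete, the sketch below drives get_dom_counters() with the hypothetical run_cdp_command helper sketched after the Media domain above, using a canned transport; the counter values are made up for illustration.

def fake_transport(request):
    # Stand-in for a real DevTools round trip (assumption for this sketch).
    assert request == {'method': 'Memory.getDOMCounters'}
    return {'documents': 2, 'nodes': 1500, 'jsEventListeners': 7}

documents, nodes, js_event_listeners = run_cdp_command(get_dom_counters(), fake_transport)
print(documents, nodes, js_event_listeners)  # -> 2 1500 7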
@@ -0,0 +1,116 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Performance
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class Metric:
|
||||
'''
|
||||
Run-time execution metric.
|
||||
'''
|
||||
#: Metric name.
|
||||
name: str
|
||||
|
||||
#: Metric value.
|
||||
value: float
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
value=float(json['value']),
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disable collecting and reporting metrics.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Performance.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable(
|
||||
time_domain: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enable collecting and reporting metrics.
|
||||
|
||||
:param time_domain: *(Optional)* Time domain to use for collecting and reporting duration metrics.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if time_domain is not None:
|
||||
params['timeDomain'] = time_domain
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Performance.enable',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_time_domain(
|
||||
time_domain: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets time domain to use for collecting and reporting duration metrics.
|
||||
Note that this must be called before enabling metrics collection. Calling
|
||||
this method while metrics collection is enabled returns an error.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param time_domain: Time domain
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['timeDomain'] = time_domain
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Performance.setTimeDomain',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_metrics() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Metric]]:
|
||||
'''
|
||||
Retrieve current values of run-time metrics.
|
||||
|
||||
:returns: Current values for run-time metrics.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Performance.getMetrics',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [Metric.from_json(i) for i in json['metrics']]
|
||||
|
||||
|
||||
@event_class('Performance.metrics')
|
||||
@dataclass
|
||||
class Metrics:
|
||||
'''
|
||||
Current values of the metrics.
|
||||
'''
|
||||
#: Current values of the metrics.
|
||||
metrics: typing.List[Metric]
|
||||
#: Timestamp title.
|
||||
title: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> Metrics:
|
||||
return cls(
|
||||
metrics=[Metric.from_json(i) for i in json['metrics']],
|
||||
title=str(json['title'])
|
||||
)
|
||||
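In Selenium 4 these wrappers are normally executed through the trio-based CDP session exposed by driver.bidi_connection() rather than driven by hand. The sketch below assumes that helper and its session.execute() method (attribute names recalled from the Selenium 4.x API and worth double-checking against your version); it enables metrics collection and prints the current values.

import trio
from selenium import webdriver

async def dump_runtime_metrics():
    driver = webdriver.Chrome()
    try:
        async with driver.bidi_connection() as connection:
            session, devtools = connection.session, connection.devtools
            # Performance.enable, then Performance.getMetrics.
            await session.execute(devtools.performance.enable())
            for metric in await session.execute(devtools.performance.get_metrics()):
                print(metric.name, metric.value)
    finally:
        driver.quit()

trio.run(dump_runtime_metrics)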
@@ -0,0 +1,198 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: PerformanceTimeline (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import network
|
||||
from . import page
|
||||
|
||||
|
||||
@dataclass
|
||||
class LargestContentfulPaint:
|
||||
'''
|
||||
See https://github.com/WICG/LargestContentfulPaint and largest_contentful_paint.idl
|
||||
'''
|
||||
render_time: network.TimeSinceEpoch
|
||||
|
||||
load_time: network.TimeSinceEpoch
|
||||
|
||||
#: The number of pixels being painted.
|
||||
size: float
|
||||
|
||||
#: The id attribute of the element, if available.
|
||||
element_id: typing.Optional[str] = None
|
||||
|
||||
#: The URL of the image (may be trimmed).
|
||||
url: typing.Optional[str] = None
|
||||
|
||||
node_id: typing.Optional[dom.BackendNodeId] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['renderTime'] = self.render_time.to_json()
|
||||
json['loadTime'] = self.load_time.to_json()
|
||||
json['size'] = self.size
|
||||
if self.element_id is not None:
|
||||
json['elementId'] = self.element_id
|
||||
if self.url is not None:
|
||||
json['url'] = self.url
|
||||
if self.node_id is not None:
|
||||
json['nodeId'] = self.node_id.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
render_time=network.TimeSinceEpoch.from_json(json['renderTime']),
|
||||
load_time=network.TimeSinceEpoch.from_json(json['loadTime']),
|
||||
size=float(json['size']),
|
||||
element_id=str(json['elementId']) if 'elementId' in json else None,
|
||||
url=str(json['url']) if 'url' in json else None,
|
||||
node_id=dom.BackendNodeId.from_json(json['nodeId']) if 'nodeId' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class LayoutShiftAttribution:
|
||||
previous_rect: dom.Rect
|
||||
|
||||
current_rect: dom.Rect
|
||||
|
||||
node_id: typing.Optional[dom.BackendNodeId] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['previousRect'] = self.previous_rect.to_json()
|
||||
json['currentRect'] = self.current_rect.to_json()
|
||||
if self.node_id is not None:
|
||||
json['nodeId'] = self.node_id.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
previous_rect=dom.Rect.from_json(json['previousRect']),
|
||||
current_rect=dom.Rect.from_json(json['currentRect']),
|
||||
node_id=dom.BackendNodeId.from_json(json['nodeId']) if 'nodeId' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class LayoutShift:
|
||||
'''
|
||||
See https://wicg.github.io/layout-instability/#sec-layout-shift and layout_shift.idl
|
||||
'''
|
||||
#: Score increment produced by this event.
|
||||
value: float
|
||||
|
||||
had_recent_input: bool
|
||||
|
||||
last_input_time: network.TimeSinceEpoch
|
||||
|
||||
sources: typing.List[LayoutShiftAttribution]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['value'] = self.value
|
||||
json['hadRecentInput'] = self.had_recent_input
|
||||
json['lastInputTime'] = self.last_input_time.to_json()
|
||||
json['sources'] = [i.to_json() for i in self.sources]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
value=float(json['value']),
|
||||
had_recent_input=bool(json['hadRecentInput']),
|
||||
last_input_time=network.TimeSinceEpoch.from_json(json['lastInputTime']),
|
||||
sources=[LayoutShiftAttribution.from_json(i) for i in json['sources']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class TimelineEvent:
|
||||
#: Identifies the frame that this event is related to. Empty for non-frame targets.
|
||||
frame_id: page.FrameId
|
||||
|
||||
#: The event type, as specified in https://w3c.github.io/performance-timeline/#dom-performanceentry-entrytype
|
||||
#: This determines which of the optional "details" fields is present.
|
||||
type_: str
|
||||
|
||||
#: Name may be empty depending on the type.
|
||||
name: str
|
||||
|
||||
#: Time in seconds since Epoch, monotonically increasing within document lifetime.
|
||||
time: network.TimeSinceEpoch
|
||||
|
||||
#: Event duration, if applicable.
|
||||
duration: typing.Optional[float] = None
|
||||
|
||||
lcp_details: typing.Optional[LargestContentfulPaint] = None
|
||||
|
||||
layout_shift_details: typing.Optional[LayoutShift] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['frameId'] = self.frame_id.to_json()
|
||||
json['type'] = self.type_
|
||||
json['name'] = self.name
|
||||
json['time'] = self.time.to_json()
|
||||
if self.duration is not None:
|
||||
json['duration'] = self.duration
|
||||
if self.lcp_details is not None:
|
||||
json['lcpDetails'] = self.lcp_details.to_json()
|
||||
if self.layout_shift_details is not None:
|
||||
json['layoutShiftDetails'] = self.layout_shift_details.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
frame_id=page.FrameId.from_json(json['frameId']),
|
||||
type_=str(json['type']),
|
||||
name=str(json['name']),
|
||||
time=network.TimeSinceEpoch.from_json(json['time']),
|
||||
duration=float(json['duration']) if 'duration' in json else None,
|
||||
lcp_details=LargestContentfulPaint.from_json(json['lcpDetails']) if 'lcpDetails' in json else None,
|
||||
layout_shift_details=LayoutShift.from_json(json['layoutShiftDetails']) if 'layoutShiftDetails' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def enable(
|
||||
event_types: typing.List[str]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Previously buffered events are reported before the method returns.
|
||||
See also: timelineEventAdded
|
||||
|
||||
:param event_types: The types of event to report, as specified in https://w3c.github.io/performance-timeline/#dom-performanceentry-entrytype. The specified filter overrides any previous filters; passing an empty filter disables recording. Note that not all types exposed to the web platform are currently supported.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventTypes'] = [i for i in event_types]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'PerformanceTimeline.enable',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('PerformanceTimeline.timelineEventAdded')
|
||||
@dataclass
|
||||
class TimelineEventAdded:
|
||||
'''
|
||||
Sent when a performance timeline event is added. See reportPerformanceTimeline method.
|
||||
'''
|
||||
event: TimelineEvent
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> TimelineEventAdded:
|
||||
return cls(
|
||||
event=TimelineEvent.from_json(json['event'])
|
||||
)
|
||||
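Event payloads arriving from the browser are turned into these dataclasses via from_json. The sketch below builds a TimelineEventAdded from a hand-written payload (all identifiers and numbers are made up) to show how the optional lcpDetails block surfaces as lcp_details.

raw = {
    'event': {
        'frameId': 'ABCDEF0123456789',      # made-up frame id
        'type': 'largest-contentful-paint',
        'name': '',
        'time': 1651234567.123,
        'lcpDetails': {
            'renderTime': 1651234567.456,
            'loadTime': 1651234567.321,
            'size': 128000.0,
        },
    },
}

added = TimelineEventAdded.from_json(raw)
print(added.event.type_, added.event.lcp_details.size)  # -> largest-contentful-paint 128000.0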
@@ -0,0 +1,532 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Profiler
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import debugger
|
||||
from . import runtime
|
||||
|
||||
|
||||
@dataclass
|
||||
class ProfileNode:
|
||||
'''
|
||||
Profile node. Holds callsite information, execution statistics and child nodes.
|
||||
'''
|
||||
#: Unique id of the node.
|
||||
id_: int
|
||||
|
||||
#: Function location.
|
||||
call_frame: runtime.CallFrame
|
||||
|
||||
#: Number of samples where this node was on top of the call stack.
|
||||
hit_count: typing.Optional[int] = None
|
||||
|
||||
#: Child node ids.
|
||||
children: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: The reason the function is not optimized. The function may be deoptimized or marked as don't
#: optimize.
|
||||
deopt_reason: typing.Optional[str] = None
|
||||
|
||||
#: An array of source position ticks.
|
||||
position_ticks: typing.Optional[typing.List[PositionTickInfo]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['id'] = self.id_
|
||||
json['callFrame'] = self.call_frame.to_json()
|
||||
if self.hit_count is not None:
|
||||
json['hitCount'] = self.hit_count
|
||||
if self.children is not None:
|
||||
json['children'] = [i for i in self.children]
|
||||
if self.deopt_reason is not None:
|
||||
json['deoptReason'] = self.deopt_reason
|
||||
if self.position_ticks is not None:
|
||||
json['positionTicks'] = [i.to_json() for i in self.position_ticks]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
id_=int(json['id']),
|
||||
call_frame=runtime.CallFrame.from_json(json['callFrame']),
|
||||
hit_count=int(json['hitCount']) if 'hitCount' in json else None,
|
||||
children=[int(i) for i in json['children']] if 'children' in json else None,
|
||||
deopt_reason=str(json['deoptReason']) if 'deoptReason' in json else None,
|
||||
position_ticks=[PositionTickInfo.from_json(i) for i in json['positionTicks']] if 'positionTicks' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Profile:
|
||||
'''
|
||||
Profile.
|
||||
'''
|
||||
#: The list of profile nodes. First item is the root node.
|
||||
nodes: typing.List[ProfileNode]
|
||||
|
||||
#: Profiling start timestamp in microseconds.
|
||||
start_time: float
|
||||
|
||||
#: Profiling end timestamp in microseconds.
|
||||
end_time: float
|
||||
|
||||
#: Ids of the samples' top nodes.
|
||||
samples: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: Time intervals between adjacent samples in microseconds. The first delta is relative to the
|
||||
#: profile startTime.
|
||||
time_deltas: typing.Optional[typing.List[int]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['nodes'] = [i.to_json() for i in self.nodes]
|
||||
json['startTime'] = self.start_time
|
||||
json['endTime'] = self.end_time
|
||||
if self.samples is not None:
|
||||
json['samples'] = [i for i in self.samples]
|
||||
if self.time_deltas is not None:
|
||||
json['timeDeltas'] = [i for i in self.time_deltas]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
nodes=[ProfileNode.from_json(i) for i in json['nodes']],
|
||||
start_time=float(json['startTime']),
|
||||
end_time=float(json['endTime']),
|
||||
samples=[int(i) for i in json['samples']] if 'samples' in json else None,
|
||||
time_deltas=[int(i) for i in json['timeDeltas']] if 'timeDeltas' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PositionTickInfo:
|
||||
'''
|
||||
Specifies a number of samples attributed to a certain source position.
|
||||
'''
|
||||
#: Source line number (1-based).
|
||||
line: int
|
||||
|
||||
#: Number of samples attributed to the source line.
|
||||
ticks: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['line'] = self.line
|
||||
json['ticks'] = self.ticks
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
line=int(json['line']),
|
||||
ticks=int(json['ticks']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class CoverageRange:
|
||||
'''
|
||||
Coverage data for a source range.
|
||||
'''
|
||||
#: JavaScript script source offset for the range start.
|
||||
start_offset: int
|
||||
|
||||
#: JavaScript script source offset for the range end.
|
||||
end_offset: int
|
||||
|
||||
#: Collected execution count of the source range.
|
||||
count: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['startOffset'] = self.start_offset
|
||||
json['endOffset'] = self.end_offset
|
||||
json['count'] = self.count
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
start_offset=int(json['startOffset']),
|
||||
end_offset=int(json['endOffset']),
|
||||
count=int(json['count']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class FunctionCoverage:
|
||||
'''
|
||||
Coverage data for a JavaScript function.
|
||||
'''
|
||||
#: JavaScript function name.
|
||||
function_name: str
|
||||
|
||||
#: Source ranges inside the function with coverage data.
|
||||
ranges: typing.List[CoverageRange]
|
||||
|
||||
#: Whether coverage data for this function has block granularity.
|
||||
is_block_coverage: bool
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['functionName'] = self.function_name
|
||||
json['ranges'] = [i.to_json() for i in self.ranges]
|
||||
json['isBlockCoverage'] = self.is_block_coverage
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
function_name=str(json['functionName']),
|
||||
ranges=[CoverageRange.from_json(i) for i in json['ranges']],
|
||||
is_block_coverage=bool(json['isBlockCoverage']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ScriptCoverage:
|
||||
'''
|
||||
Coverage data for a JavaScript script.
|
||||
'''
|
||||
#: JavaScript script id.
|
||||
script_id: runtime.ScriptId
|
||||
|
||||
#: JavaScript script name or url.
|
||||
url: str
|
||||
|
||||
#: Functions contained in the script that has coverage data.
|
||||
functions: typing.List[FunctionCoverage]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['scriptId'] = self.script_id.to_json()
|
||||
json['url'] = self.url
|
||||
json['functions'] = [i.to_json() for i in self.functions]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
script_id=runtime.ScriptId.from_json(json['scriptId']),
|
||||
url=str(json['url']),
|
||||
functions=[FunctionCoverage.from_json(i) for i in json['functions']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class TypeObject:
|
||||
'''
|
||||
Describes a type collected during runtime.
|
||||
'''
|
||||
#: Name of a type collected with type profiling.
|
||||
name: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class TypeProfileEntry:
|
||||
'''
|
||||
Source offset and types for a parameter or return value.
|
||||
'''
|
||||
#: Source offset of the parameter or end of function for return values.
|
||||
offset: int
|
||||
|
||||
#: The types for this parameter or return value.
|
||||
types: typing.List[TypeObject]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['offset'] = self.offset
|
||||
json['types'] = [i.to_json() for i in self.types]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
offset=int(json['offset']),
|
||||
types=[TypeObject.from_json(i) for i in json['types']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ScriptTypeProfile:
|
||||
'''
|
||||
Type profile data collected during runtime for a JavaScript script.
|
||||
'''
|
||||
#: JavaScript script id.
|
||||
script_id: runtime.ScriptId
|
||||
|
||||
#: JavaScript script name or url.
|
||||
url: str
|
||||
|
||||
#: Type profile entries for parameters and return values of the functions in the script.
|
||||
entries: typing.List[TypeProfileEntry]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['scriptId'] = self.script_id.to_json()
|
||||
json['url'] = self.url
|
||||
json['entries'] = [i.to_json() for i in self.entries]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
script_id=runtime.ScriptId.from_json(json['scriptId']),
|
||||
url=str(json['url']),
|
||||
entries=[TypeProfileEntry.from_json(i) for i in json['entries']],
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_best_effort_coverage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[ScriptCoverage]]:
|
||||
'''
|
||||
Collect coverage data for the current isolate. The coverage data may be incomplete due to
|
||||
garbage collection.
|
||||
|
||||
:returns: Coverage data for the current isolate.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.getBestEffortCoverage',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [ScriptCoverage.from_json(i) for i in json['result']]
|
||||
|
||||
|
||||
def set_sampling_interval(
|
||||
interval: int
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Changes the CPU profiler sampling interval. Must be called before CPU profile recording is started.
|
||||
|
||||
:param interval: New sampling interval in microseconds.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['interval'] = interval
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.setSamplingInterval',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.start',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_precise_coverage(
|
||||
call_count: typing.Optional[bool] = None,
|
||||
detailed: typing.Optional[bool] = None,
|
||||
allow_triggered_updates: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,float]:
|
||||
'''
|
||||
Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code
|
||||
coverage may be incomplete. Enabling prevents running optimized code and resets execution
|
||||
counters.
|
||||
|
||||
:param call_count: *(Optional)* Collect accurate call counts beyond simple 'covered' or 'not covered'.
|
||||
:param detailed: *(Optional)* Collect block-based coverage.
|
||||
:param allow_triggered_updates: *(Optional)* Allow the backend to send updates on its own initiative
|
||||
:returns: Monotonically increasing time (in seconds) when the coverage update was taken in the backend.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if call_count is not None:
|
||||
params['callCount'] = call_count
|
||||
if detailed is not None:
|
||||
params['detailed'] = detailed
|
||||
if allow_triggered_updates is not None:
|
||||
params['allowTriggeredUpdates'] = allow_triggered_updates
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.startPreciseCoverage',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return float(json['timestamp'])
|
||||
|
||||
|
||||
def start_type_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enable type profile.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.startTypeProfile',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def stop() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Profile]:
|
||||
'''
|
||||
|
||||
|
||||
:returns: Recorded profile.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.stop',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return Profile.from_json(json['profile'])
|
||||
|
||||
|
||||
def stop_precise_coverage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disable precise code coverage. Disabling releases unnecessary execution count records and allows
|
||||
executing optimized code.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.stopPreciseCoverage',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def stop_type_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disable type profile. Disabling releases type profile data collected so far.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.stopTypeProfile',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def take_precise_coverage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[ScriptCoverage], float]]:
|
||||
'''
|
||||
Collect coverage data for the current isolate, and resets execution counters. Precise code
|
||||
coverage needs to have started.
|
||||
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **result** - Coverage data for the current isolate.
|
||||
1. **timestamp** - Monotonically increasing time (in seconds) when the coverage update was taken in the backend.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.takePreciseCoverage',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[ScriptCoverage.from_json(i) for i in json['result']],
|
||||
float(json['timestamp'])
|
||||
)
|
||||
|
||||
|
||||
def take_type_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[ScriptTypeProfile]]:
|
||||
'''
|
||||
Collect type profile.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:returns: Type profile for all scripts since startTypeProfile() was turned on.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.takeTypeProfile',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [ScriptTypeProfile.from_json(i) for i in json['result']]
|
||||
|
||||
|
||||
@event_class('Profiler.consoleProfileFinished')
|
||||
@dataclass
|
||||
class ConsoleProfileFinished:
|
||||
id_: str
|
||||
#: Location of console.profileEnd().
|
||||
location: debugger.Location
|
||||
profile: Profile
|
||||
#: Profile title passed as an argument to console.profile().
|
||||
title: typing.Optional[str]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> ConsoleProfileFinished:
|
||||
return cls(
|
||||
id_=str(json['id']),
|
||||
location=debugger.Location.from_json(json['location']),
|
||||
profile=Profile.from_json(json['profile']),
|
||||
title=str(json['title']) if 'title' in json else None
|
||||
)
|
||||
|
||||
|
||||
@event_class('Profiler.consoleProfileStarted')
|
||||
@dataclass
|
||||
class ConsoleProfileStarted:
|
||||
'''
|
||||
Sent when new profile recording is started using console.profile() call.
|
||||
'''
|
||||
id_: str
|
||||
#: Location of console.profile().
|
||||
location: debugger.Location
|
||||
#: Profile title passed as an argument to console.profile().
|
||||
title: typing.Optional[str]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> ConsoleProfileStarted:
|
||||
return cls(
|
||||
id_=str(json['id']),
|
||||
location=debugger.Location.from_json(json['location']),
|
||||
title=str(json['title']) if 'title' in json else None
|
||||
)
|
||||
|
||||
|
||||
@event_class('Profiler.preciseCoverageDeltaUpdate')
|
||||
@dataclass
|
||||
class PreciseCoverageDeltaUpdate:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
Reports coverage delta since the last poll (either from an event like this, or from
|
||||
``takePreciseCoverage``) for the current isolate. May only be sent if precise code
coverage has been started. This event can be triggered by the embedder to, for example,
|
||||
trigger collection of coverage data immediately at a certain point in time.
|
||||
'''
|
||||
#: Monotonically increasing time (in seconds) when the coverage update was taken in the backend.
|
||||
timestamp: float
|
||||
#: Identifier for distinguishing coverage events.
|
||||
occasion: str
|
||||
#: Coverage data for the current isolate.
|
||||
result: typing.List[ScriptCoverage]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> PreciseCoverageDeltaUpdate:
|
||||
return cls(
|
||||
timestamp=float(json['timestamp']),
|
||||
occasion=str(json['occasion']),
|
||||
result=[ScriptCoverage.from_json(i) for i in json['result']]
|
||||
)
|
||||
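The coverage commands show how nested result objects are rebuilt from the wire format. The sketch below again leans on the hypothetical run_cdp_command helper and a canned response (script id, URL and counts are invented) to drive take_precise_coverage and unpack its (result, timestamp) tuple.

canned = {
    'result': [{
        'scriptId': '42',
        'url': 'https://example.com/app.js',
        'functions': [{
            'functionName': 'main',
            'ranges': [{'startOffset': 0, 'endOffset': 120, 'count': 1}],
            'isBlockCoverage': True,
        }],
    }],
    'timestamp': 12345.678,
}

coverage, timestamp = run_cdp_command(take_precise_coverage(), lambda request: canned)
script = coverage[0]
print(script.url, script.functions[0].ranges[0].count, timestamp)
# -> https://example.com/app.js 1 12345.678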
@@ -0,0 +1,48 @@
# DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: Schema
from __future__ import annotations
from .util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing

@dataclass
class Domain:
    '''
    Description of the protocol domain.
    '''
    #: Domain name.
    name: str

    #: Domain version.
    version: str

    def to_json(self):
        json = dict()
        json['name'] = self.name
        json['version'] = self.version
        return json

    @classmethod
    def from_json(cls, json):
        return cls(
            name=str(json['name']),
            version=str(json['version']),
        )


def get_domains() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Domain]]:
    '''
    Returns supported domains.

    :returns: List of supported domains.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'Schema.getDomains',
    }
    json = yield cmd_dict
    return [Domain.from_json(i) for i in json['domains']]
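Schema.getDomains is the simplest possible command and makes a handy smoke test. The sketch below uses the hypothetical run_cdp_command helper from earlier with a canned response; the domain list is invented.

canned = {'domains': [{'name': 'Media', 'version': '1.3'},
                      {'name': 'Memory', 'version': '1.3'}]}

domains = run_cdp_command(get_domains(), lambda request: canned)
print([(d.name, d.version) for d in domains])  # -> [('Media', '1.3'), ('Memory', '1.3')]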
@@ -0,0 +1,509 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Security
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import network
|
||||
|
||||
|
||||
class CertificateId(int):
|
||||
'''
|
||||
An internal certificate ID value.
|
||||
'''
|
||||
def to_json(self) -> int:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: int) -> CertificateId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'CertificateId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class MixedContentType(enum.Enum):
|
||||
'''
|
||||
A description of mixed content (HTTP resources on HTTPS pages), as defined by
|
||||
https://www.w3.org/TR/mixed-content/#categories
|
||||
'''
|
||||
BLOCKABLE = "blockable"
|
||||
OPTIONALLY_BLOCKABLE = "optionally-blockable"
|
||||
NONE = "none"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class SecurityState(enum.Enum):
|
||||
'''
|
||||
The security level of a page or resource.
|
||||
'''
|
||||
UNKNOWN = "unknown"
|
||||
NEUTRAL = "neutral"
|
||||
INSECURE = "insecure"
|
||||
SECURE = "secure"
|
||||
INFO = "info"
|
||||
INSECURE_BROKEN = "insecure-broken"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class CertificateSecurityState:
|
||||
'''
|
||||
Details about the security state of the page certificate.
|
||||
'''
|
||||
#: Protocol name (e.g. "TLS 1.2" or "QUIC").
|
||||
protocol: str
|
||||
|
||||
#: Key Exchange used by the connection, or the empty string if not applicable.
|
||||
key_exchange: str
|
||||
|
||||
#: Cipher name.
|
||||
cipher: str
|
||||
|
||||
#: Page certificate.
|
||||
certificate: typing.List[str]
|
||||
|
||||
#: Certificate subject name.
|
||||
subject_name: str
|
||||
|
||||
#: Name of the issuing CA.
|
||||
issuer: str
|
||||
|
||||
#: Certificate valid from date.
|
||||
valid_from: network.TimeSinceEpoch
|
||||
|
||||
#: Certificate valid to (expiration) date
|
||||
valid_to: network.TimeSinceEpoch
|
||||
|
||||
#: True if the certificate uses a weak signature algorithm.
|
||||
certificate_has_weak_signature: bool
|
||||
|
||||
#: True if the certificate has a SHA1 signature in the chain.
|
||||
certificate_has_sha1_signature: bool
|
||||
|
||||
#: True if modern SSL
|
||||
modern_ssl: bool
|
||||
|
||||
#: True if the connection is using an obsolete SSL protocol.
|
||||
obsolete_ssl_protocol: bool
|
||||
|
||||
#: True if the connection is using an obsolete SSL key exchange.
|
||||
obsolete_ssl_key_exchange: bool
|
||||
|
||||
#: True if the connection is using an obsolete SSL cipher.
|
||||
obsolete_ssl_cipher: bool
|
||||
|
||||
#: True if the connection is using an obsolete SSL signature.
|
||||
obsolete_ssl_signature: bool
|
||||
|
||||
#: (EC)DH group used by the connection, if applicable.
|
||||
key_exchange_group: typing.Optional[str] = None
|
||||
|
||||
#: TLS MAC. Note that AEAD ciphers do not have separate MACs.
|
||||
mac: typing.Optional[str] = None
|
||||
|
||||
#: The highest priority network error code, if the certificate has an error.
|
||||
certificate_network_error: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['protocol'] = self.protocol
|
||||
json['keyExchange'] = self.key_exchange
|
||||
json['cipher'] = self.cipher
|
||||
json['certificate'] = [i for i in self.certificate]
|
||||
json['subjectName'] = self.subject_name
|
||||
json['issuer'] = self.issuer
|
||||
json['validFrom'] = self.valid_from.to_json()
|
||||
json['validTo'] = self.valid_to.to_json()
|
||||
json['certificateHasWeakSignature'] = self.certificate_has_weak_signature
|
||||
json['certificateHasSha1Signature'] = self.certificate_has_sha1_signature
|
||||
json['modernSSL'] = self.modern_ssl
|
||||
json['obsoleteSslProtocol'] = self.obsolete_ssl_protocol
|
||||
json['obsoleteSslKeyExchange'] = self.obsolete_ssl_key_exchange
|
||||
json['obsoleteSslCipher'] = self.obsolete_ssl_cipher
|
||||
json['obsoleteSslSignature'] = self.obsolete_ssl_signature
|
||||
if self.key_exchange_group is not None:
|
||||
json['keyExchangeGroup'] = self.key_exchange_group
|
||||
if self.mac is not None:
|
||||
json['mac'] = self.mac
|
||||
if self.certificate_network_error is not None:
|
||||
json['certificateNetworkError'] = self.certificate_network_error
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
protocol=str(json['protocol']),
|
||||
key_exchange=str(json['keyExchange']),
|
||||
cipher=str(json['cipher']),
|
||||
certificate=[str(i) for i in json['certificate']],
|
||||
subject_name=str(json['subjectName']),
|
||||
issuer=str(json['issuer']),
|
||||
valid_from=network.TimeSinceEpoch.from_json(json['validFrom']),
|
||||
valid_to=network.TimeSinceEpoch.from_json(json['validTo']),
|
||||
certificate_has_weak_signature=bool(json['certificateHasWeakSignature']),
|
||||
certificate_has_sha1_signature=bool(json['certificateHasSha1Signature']),
|
||||
modern_ssl=bool(json['modernSSL']),
|
||||
obsolete_ssl_protocol=bool(json['obsoleteSslProtocol']),
|
||||
obsolete_ssl_key_exchange=bool(json['obsoleteSslKeyExchange']),
|
||||
obsolete_ssl_cipher=bool(json['obsoleteSslCipher']),
|
||||
obsolete_ssl_signature=bool(json['obsoleteSslSignature']),
|
||||
key_exchange_group=str(json['keyExchangeGroup']) if 'keyExchangeGroup' in json else None,
|
||||
mac=str(json['mac']) if 'mac' in json else None,
|
||||
certificate_network_error=str(json['certificateNetworkError']) if 'certificateNetworkError' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class SafetyTipStatus(enum.Enum):
|
||||
BAD_REPUTATION = "badReputation"
|
||||
LOOKALIKE = "lookalike"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class SafetyTipInfo:
|
||||
#: Describes whether the page triggers any safety tips or reputation warnings. Default is unknown.
|
||||
safety_tip_status: SafetyTipStatus
|
||||
|
||||
#: The URL the safety tip suggested ("Did you mean?"). Only filled in for lookalike matches.
|
||||
safe_url: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['safetyTipStatus'] = self.safety_tip_status.to_json()
|
||||
if self.safe_url is not None:
|
||||
json['safeUrl'] = self.safe_url
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
safety_tip_status=SafetyTipStatus.from_json(json['safetyTipStatus']),
|
||||
safe_url=str(json['safeUrl']) if 'safeUrl' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class VisibleSecurityState:
|
||||
'''
|
||||
Security state information about the page.
|
||||
'''
|
||||
#: The security level of the page.
|
||||
security_state: SecurityState
|
||||
|
||||
#: Array of security state issues ids.
|
||||
security_state_issue_ids: typing.List[str]
|
||||
|
||||
#: Security state details about the page certificate.
|
||||
certificate_security_state: typing.Optional[CertificateSecurityState] = None
|
||||
|
||||
#: The type of Safety Tip triggered on the page. Note that this field will be set even if the Safety Tip UI was not actually shown.
|
||||
safety_tip_info: typing.Optional[SafetyTipInfo] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['securityState'] = self.security_state.to_json()
|
||||
json['securityStateIssueIds'] = [i for i in self.security_state_issue_ids]
|
||||
if self.certificate_security_state is not None:
|
||||
json['certificateSecurityState'] = self.certificate_security_state.to_json()
|
||||
if self.safety_tip_info is not None:
|
||||
json['safetyTipInfo'] = self.safety_tip_info.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
security_state=SecurityState.from_json(json['securityState']),
|
||||
security_state_issue_ids=[str(i) for i in json['securityStateIssueIds']],
|
||||
certificate_security_state=CertificateSecurityState.from_json(json['certificateSecurityState']) if 'certificateSecurityState' in json else None,
|
||||
safety_tip_info=SafetyTipInfo.from_json(json['safetyTipInfo']) if 'safetyTipInfo' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class SecurityStateExplanation:
|
||||
'''
|
||||
An explanation of a factor contributing to the security state.
|
||||
'''
|
||||
#: Security state representing the severity of the factor being explained.
|
||||
security_state: SecurityState
|
||||
|
||||
#: Title describing the type of factor.
|
||||
title: str
|
||||
|
||||
#: Short phrase describing the type of factor.
|
||||
summary: str
|
||||
|
||||
#: Full text explanation of the factor.
|
||||
description: str
|
||||
|
||||
#: The type of mixed content described by the explanation.
|
||||
mixed_content_type: MixedContentType
|
||||
|
||||
#: Page certificate.
|
||||
certificate: typing.List[str]
|
||||
|
||||
#: Recommendations to fix any issues.
|
||||
recommendations: typing.Optional[typing.List[str]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['securityState'] = self.security_state.to_json()
|
||||
json['title'] = self.title
|
||||
json['summary'] = self.summary
|
||||
json['description'] = self.description
|
||||
json['mixedContentType'] = self.mixed_content_type.to_json()
|
||||
json['certificate'] = [i for i in self.certificate]
|
||||
if self.recommendations is not None:
|
||||
json['recommendations'] = [i for i in self.recommendations]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
security_state=SecurityState.from_json(json['securityState']),
|
||||
title=str(json['title']),
|
||||
summary=str(json['summary']),
|
||||
description=str(json['description']),
|
||||
mixed_content_type=MixedContentType.from_json(json['mixedContentType']),
|
||||
certificate=[str(i) for i in json['certificate']],
|
||||
recommendations=[str(i) for i in json['recommendations']] if 'recommendations' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class InsecureContentStatus:
|
||||
'''
|
||||
Information about insecure content on the page.
|
||||
'''
|
||||
#: Always false.
|
||||
ran_mixed_content: bool
|
||||
|
||||
#: Always false.
|
||||
displayed_mixed_content: bool
|
||||
|
||||
#: Always false.
|
||||
contained_mixed_form: bool
|
||||
|
||||
#: Always false.
|
||||
ran_content_with_cert_errors: bool
|
||||
|
||||
#: Always false.
|
||||
displayed_content_with_cert_errors: bool
|
||||
|
||||
#: Always set to unknown.
|
||||
ran_insecure_content_style: SecurityState
|
||||
|
||||
#: Always set to unknown.
|
||||
displayed_insecure_content_style: SecurityState
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['ranMixedContent'] = self.ran_mixed_content
|
||||
json['displayedMixedContent'] = self.displayed_mixed_content
|
||||
json['containedMixedForm'] = self.contained_mixed_form
|
||||
json['ranContentWithCertErrors'] = self.ran_content_with_cert_errors
|
||||
json['displayedContentWithCertErrors'] = self.displayed_content_with_cert_errors
|
||||
json['ranInsecureContentStyle'] = self.ran_insecure_content_style.to_json()
|
||||
json['displayedInsecureContentStyle'] = self.displayed_insecure_content_style.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
ran_mixed_content=bool(json['ranMixedContent']),
|
||||
displayed_mixed_content=bool(json['displayedMixedContent']),
|
||||
contained_mixed_form=bool(json['containedMixedForm']),
|
||||
ran_content_with_cert_errors=bool(json['ranContentWithCertErrors']),
|
||||
displayed_content_with_cert_errors=bool(json['displayedContentWithCertErrors']),
|
||||
ran_insecure_content_style=SecurityState.from_json(json['ranInsecureContentStyle']),
|
||||
displayed_insecure_content_style=SecurityState.from_json(json['displayedInsecureContentStyle']),
|
||||
)
|
||||
|
||||
|
||||
class CertificateErrorAction(enum.Enum):
|
||||
'''
|
||||
The action to take when a certificate error occurs. continue will continue processing the
|
||||
request and cancel will cancel the request.
|
||||
'''
|
||||
CONTINUE = "continue"
|
||||
CANCEL = "cancel"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables tracking security state changes.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Security.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables tracking security state changes.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Security.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_ignore_certificate_errors(
|
||||
ignore: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enable/disable whether all certificate errors should be ignored.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param ignore: If true, all certificate errors will be ignored.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['ignore'] = ignore
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Security.setIgnoreCertificateErrors',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def handle_certificate_error(
|
||||
event_id: int,
|
||||
action: CertificateErrorAction
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Handles a certificate error that fired a certificateError event.
|
||||
|
||||
:param event_id: The ID of the event.
|
||||
:param action: The action to take on the certificate error.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventId'] = event_id
|
||||
params['action'] = action.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Security.handleCertificateError',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_override_certificate_errors(
|
||||
override: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enable/disable overriding certificate errors. If enabled, all certificate error events need to
|
||||
be handled by the DevTools client and should be answered with ``handleCertificateError`` commands.
|
||||
|
||||
:param override: If true, certificate errors will be overridden.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['override'] = override
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Security.setOverrideCertificateErrors',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Security.certificateError')
|
||||
@dataclass
|
||||
class CertificateError:
|
||||
'''
|
||||
There is a certificate error. If overriding certificate errors is enabled, then it should be
|
||||
handled with the ``handleCertificateError`` command. Note: this event does not fire if the
|
||||
certificate error has been allowed internally. Only one client per target should override
|
||||
certificate errors at the same time.
|
||||
'''
|
||||
#: The ID of the event.
|
||||
event_id: int
|
||||
#: The type of the error.
|
||||
error_type: str
|
||||
#: The url that was requested.
|
||||
request_url: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> CertificateError:
|
||||
return cls(
|
||||
event_id=int(json['eventId']),
|
||||
error_type=str(json['errorType']),
|
||||
request_url=str(json['requestURL'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Security.visibleSecurityStateChanged')
|
||||
@dataclass
|
||||
class VisibleSecurityStateChanged:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
The security state of the page changed.
|
||||
'''
|
||||
#: Security state information about the page.
|
||||
visible_security_state: VisibleSecurityState
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> VisibleSecurityStateChanged:
|
||||
return cls(
|
||||
visible_security_state=VisibleSecurityState.from_json(json['visibleSecurityState'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Security.securityStateChanged')
|
||||
@dataclass
|
||||
class SecurityStateChanged:
|
||||
'''
|
||||
The security state of the page changed. No longer being sent.
|
||||
'''
|
||||
#: Security state.
|
||||
security_state: SecurityState
|
||||
#: True if the page was loaded over cryptographic transport such as HTTPS.
|
||||
scheme_is_cryptographic: bool
|
||||
#: Previously a list of explanations for the security state. Now always
|
||||
#: empty.
|
||||
explanations: typing.List[SecurityStateExplanation]
|
||||
#: Information about insecure content on the page.
|
||||
insecure_content_status: InsecureContentStatus
|
||||
#: Overrides user-visible description of the state. Always omitted.
|
||||
summary: typing.Optional[str]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> SecurityStateChanged:
|
||||
return cls(
|
||||
security_state=SecurityState.from_json(json['securityState']),
|
||||
scheme_is_cryptographic=bool(json['schemeIsCryptographic']),
|
||||
explanations=[SecurityStateExplanation.from_json(i) for i in json['explanations']],
|
||||
insecure_content_status=InsecureContentStatus.from_json(json['insecureContentStatus']),
|
||||
summary=str(json['summary']) if 'summary' in json else None
|
||||
)
|
||||
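For commands that take typed parameters, the generator's single yield already contains the fully serialized wire message, so inspecting it is an easy way to see how enum values such as CertificateErrorAction are encoded. The event id below is made up.

cmd_gen = handle_certificate_error(event_id=7, action=CertificateErrorAction.CONTINUE)
print(next(cmd_gen))
# -> {'method': 'Security.handleCertificateError',
#     'params': {'eventId': 7, 'action': 'continue'}}
# The generator still expects the (empty) response dict to be sent back in,
# e.g. via a helper like the run_cdp_command sketch earlier.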
@@ -0,0 +1,409 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: ServiceWorker (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import target
|
||||
|
||||
|
||||
class RegistrationID(str):
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> RegistrationID:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'RegistrationID({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class ServiceWorkerRegistration:
|
||||
'''
|
||||
ServiceWorker registration.
|
||||
'''
|
||||
registration_id: RegistrationID
|
||||
|
||||
scope_url: str
|
||||
|
||||
is_deleted: bool
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['registrationId'] = self.registration_id.to_json()
|
||||
json['scopeURL'] = self.scope_url
|
||||
json['isDeleted'] = self.is_deleted
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
registration_id=RegistrationID.from_json(json['registrationId']),
|
||||
scope_url=str(json['scopeURL']),
|
||||
is_deleted=bool(json['isDeleted']),
|
||||
)
|
||||
|
||||
|
||||
class ServiceWorkerVersionRunningStatus(enum.Enum):
|
||||
STOPPED = "stopped"
|
||||
STARTING = "starting"
|
||||
RUNNING = "running"
|
||||
STOPPING = "stopping"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class ServiceWorkerVersionStatus(enum.Enum):
|
||||
NEW = "new"
|
||||
INSTALLING = "installing"
|
||||
INSTALLED = "installed"
|
||||
ACTIVATING = "activating"
|
||||
ACTIVATED = "activated"
|
||||
REDUNDANT = "redundant"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ServiceWorkerVersion:
|
||||
'''
|
||||
ServiceWorker version.
|
||||
'''
|
||||
version_id: str
|
||||
|
||||
registration_id: RegistrationID
|
||||
|
||||
script_url: str
|
||||
|
||||
running_status: ServiceWorkerVersionRunningStatus
|
||||
|
||||
status: ServiceWorkerVersionStatus
|
||||
|
||||
#: The Last-Modified header value of the main script.
|
||||
script_last_modified: typing.Optional[float] = None
|
||||
|
||||
#: The time at which the response headers of the main script were received from the server.
|
||||
#: For cached script it is the last time the cache entry was validated.
|
||||
script_response_time: typing.Optional[float] = None
|
||||
|
||||
controlled_clients: typing.Optional[typing.List[target.TargetID]] = None
|
||||
|
||||
target_id: typing.Optional[target.TargetID] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['versionId'] = self.version_id
|
||||
json['registrationId'] = self.registration_id.to_json()
|
||||
json['scriptURL'] = self.script_url
|
||||
json['runningStatus'] = self.running_status.to_json()
|
||||
json['status'] = self.status.to_json()
|
||||
if self.script_last_modified is not None:
|
||||
json['scriptLastModified'] = self.script_last_modified
|
||||
if self.script_response_time is not None:
|
||||
json['scriptResponseTime'] = self.script_response_time
|
||||
if self.controlled_clients is not None:
|
||||
json['controlledClients'] = [i.to_json() for i in self.controlled_clients]
|
||||
if self.target_id is not None:
|
||||
json['targetId'] = self.target_id.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
version_id=str(json['versionId']),
|
||||
registration_id=RegistrationID.from_json(json['registrationId']),
|
||||
script_url=str(json['scriptURL']),
|
||||
running_status=ServiceWorkerVersionRunningStatus.from_json(json['runningStatus']),
|
||||
status=ServiceWorkerVersionStatus.from_json(json['status']),
|
||||
script_last_modified=float(json['scriptLastModified']) if 'scriptLastModified' in json else None,
|
||||
script_response_time=float(json['scriptResponseTime']) if 'scriptResponseTime' in json else None,
|
||||
controlled_clients=[target.TargetID.from_json(i) for i in json['controlledClients']] if 'controlledClients' in json else None,
|
||||
target_id=target.TargetID.from_json(json['targetId']) if 'targetId' in json else None,
|
||||
)
|
||||
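The dataclasses in these generated modules all share the same to_json/from_json pair, and optional CDP fields become keyword arguments defaulting to None. A minimal sketch of that behaviour for ServiceWorkerVersion, using only the keys defined above; the literal values are made up for illustration.

# Illustrative only: a ServiceWorkerVersion built from a minimal CDP payload.
# Optional keys (scriptLastModified, controlledClients, ...) are simply absent
# and come back as None on the dataclass.
payload = {
    'versionId': '1',
    'registrationId': '42',
    'scriptURL': 'https://example.com/sw.js',
    'runningStatus': 'running',
    'status': 'activated',
}
version = ServiceWorkerVersion.from_json(payload)
assert version.running_status is ServiceWorkerVersionRunningStatus.RUNNING
assert version.script_last_modified is None
assert version.to_json() == payload  # optional fields are omitted on the way back out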
|
||||
|
||||
@dataclass
|
||||
class ServiceWorkerErrorMessage:
|
||||
'''
|
||||
ServiceWorker error message.
|
||||
'''
|
||||
error_message: str
|
||||
|
||||
registration_id: RegistrationID
|
||||
|
||||
version_id: str
|
||||
|
||||
source_url: str
|
||||
|
||||
line_number: int
|
||||
|
||||
column_number: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['errorMessage'] = self.error_message
|
||||
json['registrationId'] = self.registration_id.to_json()
|
||||
json['versionId'] = self.version_id
|
||||
json['sourceURL'] = self.source_url
|
||||
json['lineNumber'] = self.line_number
|
||||
json['columnNumber'] = self.column_number
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
error_message=str(json['errorMessage']),
|
||||
registration_id=RegistrationID.from_json(json['registrationId']),
|
||||
version_id=str(json['versionId']),
|
||||
source_url=str(json['sourceURL']),
|
||||
line_number=int(json['lineNumber']),
|
||||
column_number=int(json['columnNumber']),
|
||||
)
|
||||
|
||||
|
||||
def deliver_push_message(
|
||||
origin: str,
|
||||
registration_id: RegistrationID,
|
||||
data: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param origin:
|
||||
:param registration_id:
|
||||
:param data:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['origin'] = origin
|
||||
params['registrationId'] = registration_id.to_json()
|
||||
params['data'] = data
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.deliverPushMessage',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
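Every command function in these generated modules follows the same shape: it builds a params dict, yields a single cmd_dict describing the CDP request, and, for commands that have results, parses the raw response the caller sends back in. The generated code ships no transport, so the sketch below assumes a hypothetical send_over_cdp(cmd) callable you provide (for example a WebSocket client that speaks the DevTools protocol and returns the 'result' object of the response); it is not part of this package.

# A minimal, hypothetical driver for the generator-based commands in these
# modules. `send_over_cdp(cmd: dict) -> dict` is an assumed transport that
# sends {'method': ..., 'params': ...} to the browser and returns the result.
def run_command(command, send_over_cdp):
    request = next(command)            # the single cmd_dict the command yields
    response = send_over_cdp(request)  # raw JSON result from the browser
    try:
        command.send(response)         # let the command parse the response
    except StopIteration as exc:
        return exc.value               # typed return value (None for void commands)
    return None

For example, run_command(deliver_push_message('https://example.com', RegistrationID('42'), 'hello'), send_over_cdp) would issue a single ServiceWorker.deliverPushMessage request. Later sketches in this commit reuse the same hypothetical helpers.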
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def dispatch_sync_event(
|
||||
origin: str,
|
||||
registration_id: RegistrationID,
|
||||
tag: str,
|
||||
last_chance: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param origin:
|
||||
:param registration_id:
|
||||
:param tag:
|
||||
:param last_chance:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['origin'] = origin
|
||||
params['registrationId'] = registration_id.to_json()
|
||||
params['tag'] = tag
|
||||
params['lastChance'] = last_chance
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.dispatchSyncEvent',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def dispatch_periodic_sync_event(
|
||||
origin: str,
|
||||
registration_id: RegistrationID,
|
||||
tag: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param origin:
|
||||
:param registration_id:
|
||||
:param tag:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['origin'] = origin
|
||||
params['registrationId'] = registration_id.to_json()
|
||||
params['tag'] = tag
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.dispatchPeriodicSyncEvent',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def inspect_worker(
|
||||
version_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param version_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['versionId'] = version_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.inspectWorker',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_force_update_on_page_load(
|
||||
force_update_on_page_load: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param force_update_on_page_load:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['forceUpdateOnPageLoad'] = force_update_on_page_load
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.setForceUpdateOnPageLoad',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def skip_waiting(
|
||||
scope_url: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param scope_url:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['scopeURL'] = scope_url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.skipWaiting',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_worker(
|
||||
scope_url: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param scope_url:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['scopeURL'] = scope_url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.startWorker',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def stop_all_workers() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.stopAllWorkers',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def stop_worker(
|
||||
version_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param version_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['versionId'] = version_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.stopWorker',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def unregister(
|
||||
scope_url: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param scope_url:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['scopeURL'] = scope_url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.unregister',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def update_registration(
|
||||
scope_url: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param scope_url:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['scopeURL'] = scope_url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'ServiceWorker.updateRegistration',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('ServiceWorker.workerErrorReported')
|
||||
@dataclass
|
||||
class WorkerErrorReported:
|
||||
error_message: ServiceWorkerErrorMessage
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> WorkerErrorReported:
|
||||
return cls(
|
||||
error_message=ServiceWorkerErrorMessage.from_json(json['errorMessage'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('ServiceWorker.workerRegistrationUpdated')
|
||||
@dataclass
|
||||
class WorkerRegistrationUpdated:
|
||||
registrations: typing.List[ServiceWorkerRegistration]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> WorkerRegistrationUpdated:
|
||||
return cls(
|
||||
registrations=[ServiceWorkerRegistration.from_json(i) for i in json['registrations']]
|
||||
)
|
||||
|
||||
|
||||
@event_class('ServiceWorker.workerVersionUpdated')
|
||||
@dataclass
|
||||
class WorkerVersionUpdated:
|
||||
versions: typing.List[ServiceWorkerVersion]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> WorkerVersionUpdated:
|
||||
return cls(
|
||||
versions=[ServiceWorkerVersion.from_json(i) for i in json['versions']]
|
||||
)
|
||||
@@ -0,0 +1,571 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Storage (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import browser
|
||||
from . import network
|
||||
|
||||
|
||||
class StorageType(enum.Enum):
|
||||
'''
|
||||
Enum of possible storage types.
|
||||
'''
|
||||
APPCACHE = "appcache"
|
||||
COOKIES = "cookies"
|
||||
FILE_SYSTEMS = "file_systems"
|
||||
INDEXEDDB = "indexeddb"
|
||||
LOCAL_STORAGE = "local_storage"
|
||||
SHADER_CACHE = "shader_cache"
|
||||
WEBSQL = "websql"
|
||||
SERVICE_WORKERS = "service_workers"
|
||||
CACHE_STORAGE = "cache_storage"
|
||||
INTEREST_GROUPS = "interest_groups"
|
||||
ALL_ = "all"
|
||||
OTHER = "other"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class UsageForType:
|
||||
'''
|
||||
Usage for a storage type.
|
||||
'''
|
||||
#: Name of storage type.
|
||||
storage_type: StorageType
|
||||
|
||||
#: Storage usage (bytes).
|
||||
usage: float
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['storageType'] = self.storage_type.to_json()
|
||||
json['usage'] = self.usage
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
storage_type=StorageType.from_json(json['storageType']),
|
||||
usage=float(json['usage']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class TrustTokens:
|
||||
'''
|
||||
Pair of issuer origin and number of available (signed, but not used) Trust
|
||||
Tokens from that issuer.
|
||||
'''
|
||||
issuer_origin: str
|
||||
|
||||
count: float
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['issuerOrigin'] = self.issuer_origin
|
||||
json['count'] = self.count
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
issuer_origin=str(json['issuerOrigin']),
|
||||
count=float(json['count']),
|
||||
)
|
||||
|
||||
|
||||
class InterestGroupAccessType(enum.Enum):
|
||||
'''
|
||||
Enum of interest group access types.
|
||||
'''
|
||||
JOIN = "join"
|
||||
LEAVE = "leave"
|
||||
UPDATE = "update"
|
||||
BID = "bid"
|
||||
WIN = "win"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class InterestGroupAd:
|
||||
'''
|
||||
An ad (advertising element) inside an interest group.
|
||||
'''
|
||||
render_url: str
|
||||
|
||||
metadata: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['renderUrl'] = self.render_url
|
||||
if self.metadata is not None:
|
||||
json['metadata'] = self.metadata
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
render_url=str(json['renderUrl']),
|
||||
metadata=str(json['metadata']) if 'metadata' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class InterestGroupDetails:
|
||||
'''
|
||||
The full details of an interest group.
|
||||
'''
|
||||
owner_origin: str
|
||||
|
||||
name: str
|
||||
|
||||
expiration_time: network.TimeSinceEpoch
|
||||
|
||||
joining_origin: str
|
||||
|
||||
trusted_bidding_signals_keys: typing.List[str]
|
||||
|
||||
ads: typing.List[InterestGroupAd]
|
||||
|
||||
ad_components: typing.List[InterestGroupAd]
|
||||
|
||||
bidding_url: typing.Optional[str] = None
|
||||
|
||||
bidding_wasm_helper_url: typing.Optional[str] = None
|
||||
|
||||
update_url: typing.Optional[str] = None
|
||||
|
||||
trusted_bidding_signals_url: typing.Optional[str] = None
|
||||
|
||||
user_bidding_signals: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['ownerOrigin'] = self.owner_origin
|
||||
json['name'] = self.name
|
||||
json['expirationTime'] = self.expiration_time.to_json()
|
||||
json['joiningOrigin'] = self.joining_origin
|
||||
json['trustedBiddingSignalsKeys'] = [i for i in self.trusted_bidding_signals_keys]
|
||||
json['ads'] = [i.to_json() for i in self.ads]
|
||||
json['adComponents'] = [i.to_json() for i in self.ad_components]
|
||||
if self.bidding_url is not None:
|
||||
json['biddingUrl'] = self.bidding_url
|
||||
if self.bidding_wasm_helper_url is not None:
|
||||
json['biddingWasmHelperUrl'] = self.bidding_wasm_helper_url
|
||||
if self.update_url is not None:
|
||||
json['updateUrl'] = self.update_url
|
||||
if self.trusted_bidding_signals_url is not None:
|
||||
json['trustedBiddingSignalsUrl'] = self.trusted_bidding_signals_url
|
||||
if self.user_bidding_signals is not None:
|
||||
json['userBiddingSignals'] = self.user_bidding_signals
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
owner_origin=str(json['ownerOrigin']),
|
||||
name=str(json['name']),
|
||||
expiration_time=network.TimeSinceEpoch.from_json(json['expirationTime']),
|
||||
joining_origin=str(json['joiningOrigin']),
|
||||
trusted_bidding_signals_keys=[str(i) for i in json['trustedBiddingSignalsKeys']],
|
||||
ads=[InterestGroupAd.from_json(i) for i in json['ads']],
|
||||
ad_components=[InterestGroupAd.from_json(i) for i in json['adComponents']],
|
||||
bidding_url=str(json['biddingUrl']) if 'biddingUrl' in json else None,
|
||||
bidding_wasm_helper_url=str(json['biddingWasmHelperUrl']) if 'biddingWasmHelperUrl' in json else None,
|
||||
update_url=str(json['updateUrl']) if 'updateUrl' in json else None,
|
||||
trusted_bidding_signals_url=str(json['trustedBiddingSignalsUrl']) if 'trustedBiddingSignalsUrl' in json else None,
|
||||
user_bidding_signals=str(json['userBiddingSignals']) if 'userBiddingSignals' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def clear_data_for_origin(
|
||||
origin: str,
|
||||
storage_types: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears storage for origin.
|
||||
|
||||
:param origin: Security origin.
|
||||
:param storage_types: Comma separated list of StorageType to clear.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['origin'] = origin
|
||||
params['storageTypes'] = storage_types
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.clearDataForOrigin',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
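Note that clear_data_for_origin takes storage_types as a comma-separated string rather than a list, so the StorageType enum values have to be joined by hand (or StorageType.ALL_ used to wipe everything). A short sketch, reusing the hypothetical run_command / send_over_cdp helpers described earlier:

# Build the comma separated list expected by Storage.clearDataForOrigin.
types = ','.join(t.value for t in (StorageType.COOKIES, StorageType.LOCAL_STORAGE))
run_command(clear_data_for_origin('https://example.com', types), send_over_cdp)

# Or clear every storage type for the origin:
run_command(clear_data_for_origin('https://example.com', StorageType.ALL_.value), send_over_cdp)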
|
||||
|
||||
def get_cookies(
|
||||
browser_context_id: typing.Optional[browser.BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[network.Cookie]]:
|
||||
'''
|
||||
Returns all browser cookies.
|
||||
|
||||
:param browser_context_id: *(Optional)* Browser context to use when called on the browser endpoint.
|
||||
:returns: Array of cookie objects.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.getCookies',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [network.Cookie.from_json(i) for i in json['cookies']]
|
||||
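get_cookies is one of the commands with a parsed return value, so the driver already hands back a list of network.Cookie objects. A hedged sketch with the same assumed helpers:

# Fetch all cookies for the default browser context (no browserContextId passed)
# and print the parsed dataclasses.
cookies = run_command(get_cookies(), send_over_cdp)
for cookie in cookies:
    print(cookie)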
|
||||
|
||||
def set_cookies(
|
||||
cookies: typing.List[network.CookieParam],
|
||||
browser_context_id: typing.Optional[browser.BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets given cookies.
|
||||
|
||||
:param cookies: Cookies to be set.
|
||||
:param browser_context_id: *(Optional)* Browser context to use when called on the browser endpoint.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cookies'] = [i.to_json() for i in cookies]
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.setCookies',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def clear_cookies(
|
||||
browser_context_id: typing.Optional[browser.BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears cookies.
|
||||
|
||||
:param browser_context_id: *(Optional)* Browser context to use when called on the browser endpoint.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.clearCookies',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_usage_and_quota(
|
||||
origin: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[float, float, bool, typing.List[UsageForType]]]:
|
||||
'''
|
||||
Returns usage and quota in bytes.
|
||||
|
||||
:param origin: Security origin.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **usage** - Storage usage (bytes).
|
||||
1. **quota** - Storage quota (bytes).
|
||||
2. **overrideActive** - Whether or not the origin has an active storage quota override
|
||||
3. **usageBreakdown** - Storage usage per type (bytes).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['origin'] = origin
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.getUsageAndQuota',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
float(json['usage']),
|
||||
float(json['quota']),
|
||||
bool(json['overrideActive']),
|
||||
[UsageForType.from_json(i) for i in json['usageBreakdown']]
|
||||
)
|
||||
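Commands that return several values hand back a plain tuple in the documented order, so the result of get_usage_and_quota unpacks directly. Sketch (same assumed helpers):

usage, quota, override_active, breakdown = run_command(
    get_usage_and_quota('https://example.com'), send_over_cdp)
print(f'{usage:.0f} of {quota:.0f} bytes used (override active: {override_active})')
for entry in breakdown:
    print(entry.storage_type.value, entry.usage)  # per-type usage in bytes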
|
||||
|
||||
def override_quota_for_origin(
|
||||
origin: str,
|
||||
quota_size: typing.Optional[float] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Override quota for the specified origin
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param origin: Security origin.
|
||||
:param quota_size: *(Optional)* The quota size (in bytes) to override the original quota with. If this is called multiple times, the overridden quota will be equal to the quotaSize provided in the final call. If this is called without specifying a quotaSize, the quota will be reset to the default value for the specified origin. If this is called multiple times with different origins, the override will be maintained for each origin until it is disabled (called without a quotaSize).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['origin'] = origin
|
||||
if quota_size is not None:
|
||||
params['quotaSize'] = quota_size
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.overrideQuotaForOrigin',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
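As the quota_size description above notes, passing a size installs an override for that origin and calling again without one removes the override. A short sketch with the same assumed helpers:

# Cap the origin at ~10 MB, then later restore the default quota.
run_command(override_quota_for_origin('https://example.com', quota_size=10_000_000), send_over_cdp)
...
run_command(override_quota_for_origin('https://example.com'), send_over_cdp)  # reset to default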
|
||||
|
||||
def track_cache_storage_for_origin(
|
||||
origin: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Registers origin to be notified when an update occurs to its cache storage list.
|
||||
|
||||
:param origin: Security origin.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['origin'] = origin
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.trackCacheStorageForOrigin',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def track_indexed_db_for_origin(
|
||||
origin: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Registers origin to be notified when an update occurs to its IndexedDB.
|
||||
|
||||
:param origin: Security origin.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['origin'] = origin
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.trackIndexedDBForOrigin',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def untrack_cache_storage_for_origin(
|
||||
origin: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Unregisters origin from receiving notifications for cache storage.
|
||||
|
||||
:param origin: Security origin.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['origin'] = origin
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.untrackCacheStorageForOrigin',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def untrack_indexed_db_for_origin(
|
||||
origin: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Unregisters origin from receiving notifications for IndexedDB.
|
||||
|
||||
:param origin: Security origin.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['origin'] = origin
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.untrackIndexedDBForOrigin',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_trust_tokens() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[TrustTokens]]:
|
||||
'''
|
||||
Returns the number of stored Trust Tokens per issuer for the
|
||||
current browsing context.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:returns:
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.getTrustTokens',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [TrustTokens.from_json(i) for i in json['tokens']]
|
||||
|
||||
|
||||
def clear_trust_tokens(
|
||||
issuer_origin: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,bool]:
|
||||
'''
|
||||
Removes all Trust Tokens issued by the provided issuerOrigin.
|
||||
Leaves other stored data, including the issuer's Redemption Records, intact.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param issuer_origin:
|
||||
:returns: True if any tokens were deleted, false otherwise.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['issuerOrigin'] = issuer_origin
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.clearTrustTokens',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return bool(json['didDeleteTokens'])
|
||||
|
||||
|
||||
def get_interest_group_details(
|
||||
owner_origin: str,
|
||||
name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,InterestGroupDetails]:
|
||||
'''
|
||||
Gets details for a named interest group.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param owner_origin:
|
||||
:param name:
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['ownerOrigin'] = owner_origin
|
||||
params['name'] = name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.getInterestGroupDetails',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return InterestGroupDetails.from_json(json['details'])
|
||||
|
||||
|
||||
def set_interest_group_tracking(
|
||||
enable: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables/Disables issuing of interestGroupAccessed events.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param enable:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['enable'] = enable
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Storage.setInterestGroupTracking',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Storage.cacheStorageContentUpdated')
|
||||
@dataclass
|
||||
class CacheStorageContentUpdated:
|
||||
'''
|
||||
A cache's contents have been modified.
|
||||
'''
|
||||
#: Origin to update.
|
||||
origin: str
|
||||
#: Name of cache in origin.
|
||||
cache_name: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> CacheStorageContentUpdated:
|
||||
return cls(
|
||||
origin=str(json['origin']),
|
||||
cache_name=str(json['cacheName'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Storage.cacheStorageListUpdated')
|
||||
@dataclass
|
||||
class CacheStorageListUpdated:
|
||||
'''
|
||||
A cache has been added/deleted.
|
||||
'''
|
||||
#: Origin to update.
|
||||
origin: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> CacheStorageListUpdated:
|
||||
return cls(
|
||||
origin=str(json['origin'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Storage.indexedDBContentUpdated')
|
||||
@dataclass
|
||||
class IndexedDBContentUpdated:
|
||||
'''
|
||||
The origin's IndexedDB object store has been modified.
|
||||
'''
|
||||
#: Origin to update.
|
||||
origin: str
|
||||
#: Database to update.
|
||||
database_name: str
|
||||
#: ObjectStore to update.
|
||||
object_store_name: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> IndexedDBContentUpdated:
|
||||
return cls(
|
||||
origin=str(json['origin']),
|
||||
database_name=str(json['databaseName']),
|
||||
object_store_name=str(json['objectStoreName'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Storage.indexedDBListUpdated')
|
||||
@dataclass
|
||||
class IndexedDBListUpdated:
|
||||
'''
|
||||
The origin's IndexedDB database list has been modified.
|
||||
'''
|
||||
#: Origin to update.
|
||||
origin: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> IndexedDBListUpdated:
|
||||
return cls(
|
||||
origin=str(json['origin'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Storage.interestGroupAccessed')
|
||||
@dataclass
|
||||
class InterestGroupAccessed:
|
||||
'''
|
||||
One of the interest groups was accessed by the associated page.
|
||||
'''
|
||||
access_time: network.TimeSinceEpoch
|
||||
type_: InterestGroupAccessType
|
||||
owner_origin: str
|
||||
name: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> InterestGroupAccessed:
|
||||
return cls(
|
||||
access_time=network.TimeSinceEpoch.from_json(json['accessTime']),
|
||||
type_=InterestGroupAccessType.from_json(json['type']),
|
||||
owner_origin=str(json['ownerOrigin']),
|
||||
name=str(json['name'])
|
||||
)
|
||||
@@ -0,0 +1,347 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: SystemInfo (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class GPUDevice:
|
||||
'''
|
||||
Describes a single graphics processor (GPU).
|
||||
'''
|
||||
#: PCI ID of the GPU vendor, if available; 0 otherwise.
|
||||
vendor_id: float
|
||||
|
||||
#: PCI ID of the GPU device, if available; 0 otherwise.
|
||||
device_id: float
|
||||
|
||||
#: String description of the GPU vendor, if the PCI ID is not available.
|
||||
vendor_string: str
|
||||
|
||||
#: String description of the GPU device, if the PCI ID is not available.
|
||||
device_string: str
|
||||
|
||||
#: String description of the GPU driver vendor.
|
||||
driver_vendor: str
|
||||
|
||||
#: String description of the GPU driver version.
|
||||
driver_version: str
|
||||
|
||||
#: Sub sys ID of the GPU, only available on Windows.
|
||||
sub_sys_id: typing.Optional[float] = None
|
||||
|
||||
#: Revision of the GPU, only available on Windows.
|
||||
revision: typing.Optional[float] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['vendorId'] = self.vendor_id
|
||||
json['deviceId'] = self.device_id
|
||||
json['vendorString'] = self.vendor_string
|
||||
json['deviceString'] = self.device_string
|
||||
json['driverVendor'] = self.driver_vendor
|
||||
json['driverVersion'] = self.driver_version
|
||||
if self.sub_sys_id is not None:
|
||||
json['subSysId'] = self.sub_sys_id
|
||||
if self.revision is not None:
|
||||
json['revision'] = self.revision
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
vendor_id=float(json['vendorId']),
|
||||
device_id=float(json['deviceId']),
|
||||
vendor_string=str(json['vendorString']),
|
||||
device_string=str(json['deviceString']),
|
||||
driver_vendor=str(json['driverVendor']),
|
||||
driver_version=str(json['driverVersion']),
|
||||
sub_sys_id=float(json['subSysId']) if 'subSysId' in json else None,
|
||||
revision=float(json['revision']) if 'revision' in json else None,
|
||||
)
|
||||
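As with the other generated dataclasses, the Windows-only fields of GPUDevice are optional: when the keys are absent they deserialize to None and are left out of to_json(). Illustrative values only:

# Illustrative GPUDevice round trip; subSysId / revision are Windows-only and
# omitted here, so they come back as None and are skipped on serialization.
device = GPUDevice.from_json({
    'vendorId': 0x10de, 'deviceId': 0x1c82,
    'vendorString': 'NVIDIA', 'deviceString': 'GTX 1050 Ti',
    'driverVendor': 'NVIDIA', 'driverVersion': '535.0',
})
assert device.sub_sys_id is None
assert 'subSysId' not in device.to_json()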
|
||||
|
||||
@dataclass
|
||||
class Size:
|
||||
'''
|
||||
Describes the width and height dimensions of an entity.
|
||||
'''
|
||||
#: Width in pixels.
|
||||
width: int
|
||||
|
||||
#: Height in pixels.
|
||||
height: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['width'] = self.width
|
||||
json['height'] = self.height
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
width=int(json['width']),
|
||||
height=int(json['height']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class VideoDecodeAcceleratorCapability:
|
||||
'''
|
||||
Describes a supported video decoding profile with its associated minimum and
|
||||
maximum resolutions.
|
||||
'''
|
||||
#: Video codec profile that is supported, e.g. VP9 Profile 2.
|
||||
profile: str
|
||||
|
||||
#: Maximum video dimensions in pixels supported for this ``profile``.
|
||||
max_resolution: Size
|
||||
|
||||
#: Minimum video dimensions in pixels supported for this ``profile``.
|
||||
min_resolution: Size
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['profile'] = self.profile
|
||||
json['maxResolution'] = self.max_resolution.to_json()
|
||||
json['minResolution'] = self.min_resolution.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
profile=str(json['profile']),
|
||||
max_resolution=Size.from_json(json['maxResolution']),
|
||||
min_resolution=Size.from_json(json['minResolution']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class VideoEncodeAcceleratorCapability:
|
||||
'''
|
||||
Describes a supported video encoding profile with its associated maximum
|
||||
resolution and maximum framerate.
|
||||
'''
|
||||
#: Video codec profile that is supported, e.g. H264 Main.
|
||||
profile: str
|
||||
|
||||
#: Maximum video dimensions in pixels supported for this ``profile``.
|
||||
max_resolution: Size
|
||||
|
||||
#: Maximum encoding framerate in frames per second supported for this
|
||||
#: ``profile``, as fraction's numerator and denominator, e.g. 24/1 fps,
|
||||
#: 24000/1001 fps, etc.
|
||||
max_framerate_numerator: int
|
||||
|
||||
max_framerate_denominator: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['profile'] = self.profile
|
||||
json['maxResolution'] = self.max_resolution.to_json()
|
||||
json['maxFramerateNumerator'] = self.max_framerate_numerator
|
||||
json['maxFramerateDenominator'] = self.max_framerate_denominator
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
profile=str(json['profile']),
|
||||
max_resolution=Size.from_json(json['maxResolution']),
|
||||
max_framerate_numerator=int(json['maxFramerateNumerator']),
|
||||
max_framerate_denominator=int(json['maxFramerateDenominator']),
|
||||
)
|
||||
|
||||
|
||||
class SubsamplingFormat(enum.Enum):
|
||||
'''
|
||||
YUV subsampling type of the pixels of a given image.
|
||||
'''
|
||||
YUV420 = "yuv420"
|
||||
YUV422 = "yuv422"
|
||||
YUV444 = "yuv444"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class ImageType(enum.Enum):
|
||||
'''
|
||||
Image format of a given image.
|
||||
'''
|
||||
JPEG = "jpeg"
|
||||
WEBP = "webp"
|
||||
UNKNOWN = "unknown"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ImageDecodeAcceleratorCapability:
|
||||
'''
|
||||
Describes a supported image decoding profile with its associated minimum and
|
||||
maximum resolutions and subsampling.
|
||||
'''
|
||||
#: Image codec, e.g. Jpeg.
|
||||
image_type: ImageType
|
||||
|
||||
#: Maximum supported dimensions of the image in pixels.
|
||||
max_dimensions: Size
|
||||
|
||||
#: Minimum supported dimensions of the image in pixels.
|
||||
min_dimensions: Size
|
||||
|
||||
#: Optional array of supported subsampling formats, e.g. 4:2:0, if known.
|
||||
subsamplings: typing.List[SubsamplingFormat]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['imageType'] = self.image_type.to_json()
|
||||
json['maxDimensions'] = self.max_dimensions.to_json()
|
||||
json['minDimensions'] = self.min_dimensions.to_json()
|
||||
json['subsamplings'] = [i.to_json() for i in self.subsamplings]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
image_type=ImageType.from_json(json['imageType']),
|
||||
max_dimensions=Size.from_json(json['maxDimensions']),
|
||||
min_dimensions=Size.from_json(json['minDimensions']),
|
||||
subsamplings=[SubsamplingFormat.from_json(i) for i in json['subsamplings']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class GPUInfo:
|
||||
'''
|
||||
Provides information about the GPU(s) on the system.
|
||||
'''
|
||||
#: The graphics devices on the system. Element 0 is the primary GPU.
|
||||
devices: typing.List[GPUDevice]
|
||||
|
||||
#: An optional array of GPU driver bug workarounds.
|
||||
driver_bug_workarounds: typing.List[str]
|
||||
|
||||
#: Supported accelerated video decoding capabilities.
|
||||
video_decoding: typing.List[VideoDecodeAcceleratorCapability]
|
||||
|
||||
#: Supported accelerated video encoding capabilities.
|
||||
video_encoding: typing.List[VideoEncodeAcceleratorCapability]
|
||||
|
||||
#: Supported accelerated image decoding capabilities.
|
||||
image_decoding: typing.List[ImageDecodeAcceleratorCapability]
|
||||
|
||||
#: An optional dictionary of additional GPU related attributes.
|
||||
aux_attributes: typing.Optional[dict] = None
|
||||
|
||||
#: An optional dictionary of graphics features and their status.
|
||||
feature_status: typing.Optional[dict] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['devices'] = [i.to_json() for i in self.devices]
|
||||
json['driverBugWorkarounds'] = [i for i in self.driver_bug_workarounds]
|
||||
json['videoDecoding'] = [i.to_json() for i in self.video_decoding]
|
||||
json['videoEncoding'] = [i.to_json() for i in self.video_encoding]
|
||||
json['imageDecoding'] = [i.to_json() for i in self.image_decoding]
|
||||
if self.aux_attributes is not None:
|
||||
json['auxAttributes'] = self.aux_attributes
|
||||
if self.feature_status is not None:
|
||||
json['featureStatus'] = self.feature_status
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
devices=[GPUDevice.from_json(i) for i in json['devices']],
|
||||
driver_bug_workarounds=[str(i) for i in json['driverBugWorkarounds']],
|
||||
video_decoding=[VideoDecodeAcceleratorCapability.from_json(i) for i in json['videoDecoding']],
|
||||
video_encoding=[VideoEncodeAcceleratorCapability.from_json(i) for i in json['videoEncoding']],
|
||||
image_decoding=[ImageDecodeAcceleratorCapability.from_json(i) for i in json['imageDecoding']],
|
||||
aux_attributes=dict(json['auxAttributes']) if 'auxAttributes' in json else None,
|
||||
feature_status=dict(json['featureStatus']) if 'featureStatus' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ProcessInfo:
|
||||
'''
|
||||
Represents process info.
|
||||
'''
|
||||
#: Specifies process type.
|
||||
type_: str
|
||||
|
||||
#: Specifies process id.
|
||||
id_: int
|
||||
|
||||
#: Specifies cumulative CPU usage in seconds across all threads of the
|
||||
#: process since the process start.
|
||||
cpu_time: float
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_
|
||||
json['id'] = self.id_
|
||||
json['cpuTime'] = self.cpu_time
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=str(json['type']),
|
||||
id_=int(json['id']),
|
||||
cpu_time=float(json['cpuTime']),
|
||||
)
|
||||
|
||||
|
||||
def get_info() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[GPUInfo, str, str, str]]:
|
||||
'''
|
||||
Returns information about the system.
|
||||
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **gpu** - Information about the GPUs on the system.
|
||||
1. **modelName** - A platform-dependent description of the model of the machine. On Mac OS, this is, for example, 'MacBookPro'. Will be the empty string if not supported.
|
||||
2. **modelVersion** - A platform-dependent description of the version of the machine. On Mac OS, this is, for example, '10.1'. Will be the empty string if not supported.
|
||||
3. **commandLine** - The command line string used to launch the browser. Will be the empty string if not supported.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'SystemInfo.getInfo',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
GPUInfo.from_json(json['gpu']),
|
||||
str(json['modelName']),
|
||||
str(json['modelVersion']),
|
||||
str(json['commandLine'])
|
||||
)
|
||||
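SystemInfo.getInfo likewise returns its four documented values as a tuple. Sketch, reusing the hypothetical run_command / send_over_cdp helpers from earlier:

gpu, model_name, model_version, command_line = run_command(get_info(), send_over_cdp)
print('primary GPU:', gpu.devices[0].device_string if gpu.devices else 'unknown')
print('launched with:', command_line or '<not reported>')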
|
||||
|
||||
def get_process_info() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[ProcessInfo]]:
|
||||
'''
|
||||
Returns information about all running processes.
|
||||
|
||||
:returns: An array of process info blocks.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'SystemInfo.getProcessInfo',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [ProcessInfo.from_json(i) for i in json['processInfo']]
|
||||
@@ -0,0 +1,643 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Target
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import browser
|
||||
from . import page
|
||||
|
||||
|
||||
class TargetID(str):
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> TargetID:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'TargetID({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class SessionID(str):
|
||||
'''
|
||||
Unique identifier of attached debugging session.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> SessionID:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'SessionID({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class TargetInfo:
|
||||
target_id: TargetID
|
||||
|
||||
type_: str
|
||||
|
||||
title: str
|
||||
|
||||
url: str
|
||||
|
||||
#: Whether the target has an attached client.
|
||||
attached: bool
|
||||
|
||||
#: Whether the target has access to the originating window.
|
||||
can_access_opener: bool
|
||||
|
||||
#: Opener target Id
|
||||
opener_id: typing.Optional[TargetID] = None
|
||||
|
||||
#: Frame id of originating window (is only set if target has an opener).
|
||||
opener_frame_id: typing.Optional[page.FrameId] = None
|
||||
|
||||
browser_context_id: typing.Optional[browser.BrowserContextID] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['targetId'] = self.target_id.to_json()
|
||||
json['type'] = self.type_
|
||||
json['title'] = self.title
|
||||
json['url'] = self.url
|
||||
json['attached'] = self.attached
|
||||
json['canAccessOpener'] = self.can_access_opener
|
||||
if self.opener_id is not None:
|
||||
json['openerId'] = self.opener_id.to_json()
|
||||
if self.opener_frame_id is not None:
|
||||
json['openerFrameId'] = self.opener_frame_id.to_json()
|
||||
if self.browser_context_id is not None:
|
||||
json['browserContextId'] = self.browser_context_id.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
target_id=TargetID.from_json(json['targetId']),
|
||||
type_=str(json['type']),
|
||||
title=str(json['title']),
|
||||
url=str(json['url']),
|
||||
attached=bool(json['attached']),
|
||||
can_access_opener=bool(json['canAccessOpener']),
|
||||
opener_id=TargetID.from_json(json['openerId']) if 'openerId' in json else None,
|
||||
opener_frame_id=page.FrameId.from_json(json['openerFrameId']) if 'openerFrameId' in json else None,
|
||||
browser_context_id=browser.BrowserContextID.from_json(json['browserContextId']) if 'browserContextId' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class RemoteLocation:
|
||||
host: str
|
||||
|
||||
port: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['host'] = self.host
|
||||
json['port'] = self.port
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
host=str(json['host']),
|
||||
port=int(json['port']),
|
||||
)
|
||||
|
||||
|
||||
def activate_target(
|
||||
target_id: TargetID
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Activates (focuses) the target.
|
||||
|
||||
:param target_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['targetId'] = target_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.activateTarget',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def attach_to_target(
|
||||
target_id: TargetID,
|
||||
flatten: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SessionID]:
|
||||
'''
|
||||
Attaches to the target with given id.
|
||||
|
||||
:param target_id:
|
||||
:param flatten: *(Optional)* Enables "flat" access to the session via specifying sessionId attribute in the commands. We plan to make this the default, deprecate non-flattened mode, and eventually retire it. See crbug.com/991325.
|
||||
:returns: Id assigned to the session.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['targetId'] = target_id.to_json()
|
||||
if flatten is not None:
|
||||
params['flatten'] = flatten
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.attachToTarget',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return SessionID.from_json(json['sessionId'])
|
||||
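attach_to_target returns the SessionID used to address the new session in later commands, and the flatten parameter documented above enables the "flat" mode the protocol recommends going forward. A sketch with the same assumed helpers (how the session id is attached to subsequent messages depends on your transport):

# Pick the first page target and attach to it in flat mode.
targets = run_command(get_targets(), send_over_cdp)
page = next(t for t in targets if t.type_ == 'page')  # raises StopIteration if no page target exists
session_id = run_command(attach_to_target(page.target_id, flatten=True), send_over_cdp)
print('attached, session:', session_id)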
|
||||
|
||||
def attach_to_browser_target() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SessionID]:
|
||||
'''
|
||||
Attaches to the browser target, only uses flat sessionId mode.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:returns: Id assigned to the session.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.attachToBrowserTarget',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return SessionID.from_json(json['sessionId'])
|
||||
|
||||
|
||||
def close_target(
|
||||
target_id: TargetID
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,bool]:
|
||||
'''
|
||||
Closes the target. If the target is a page, that page gets closed too.
|
||||
|
||||
:param target_id:
|
||||
:returns: Always set to true. If an error occurs, the response indicates protocol error.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['targetId'] = target_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.closeTarget',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return bool(json['success'])
|
||||
|
||||
|
||||
def expose_dev_tools_protocol(
|
||||
target_id: TargetID,
|
||||
binding_name: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Injects an object into the target's main frame that provides a communication
|
||||
channel with the browser target.
|
||||
|
||||
Injected object will be available as ``window[bindingName]``.
|
||||
|
||||
The object has the following API:
|
||||
- ``binding.send(json)`` - a method to send messages over the remote debugging protocol
|
||||
- ``binding.onmessage = json => handleMessage(json)`` - a callback that will be called for the protocol notifications and command responses.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param target_id:
|
||||
:param binding_name: *(Optional)* Binding name, 'cdp' if not specified.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['targetId'] = target_id.to_json()
|
||||
if binding_name is not None:
|
||||
params['bindingName'] = binding_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.exposeDevToolsProtocol',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
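From Python, expose_dev_tools_protocol is just another void command; the interesting part happens in the page, where scripts reach the protocol through the injected binding described above (window.cdp by default). Sketch with the same assumed helpers; target_id is a TargetID obtained elsewhere (e.g. from get_targets or create_target):

# Expose the protocol to the page under the default binding name ('cdp').
# Page scripts can then call window.cdp.send(JSON.stringify({...})) and
# receive replies via window.cdp.onmessage, as documented above.
run_command(expose_dev_tools_protocol(target_id), send_over_cdp)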
|
||||
|
||||
def create_browser_context(
|
||||
dispose_on_detach: typing.Optional[bool] = None,
|
||||
proxy_server: typing.Optional[str] = None,
|
||||
proxy_bypass_list: typing.Optional[str] = None,
|
||||
origins_with_universal_network_access: typing.Optional[typing.List[str]] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,browser.BrowserContextID]:
|
||||
'''
|
||||
Creates a new empty BrowserContext. Similar to an incognito profile but you can have more than
|
||||
one.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param dispose_on_detach: *(Optional)* If specified, disposes this context when debugging session disconnects.
|
||||
:param proxy_server: *(Optional)* Proxy server, similar to the one passed to --proxy-server
|
||||
:param proxy_bypass_list: *(Optional)* Proxy bypass list, similar to the one passed to --proxy-bypass-list
|
||||
:param origins_with_universal_network_access: *(Optional)* An optional list of origins to grant unlimited cross-origin access to. Parts of the URL other than those constituting origin are ignored.
|
||||
:returns: The id of the context created.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if dispose_on_detach is not None:
|
||||
params['disposeOnDetach'] = dispose_on_detach
|
||||
if proxy_server is not None:
|
||||
params['proxyServer'] = proxy_server
|
||||
if proxy_bypass_list is not None:
|
||||
params['proxyBypassList'] = proxy_bypass_list
|
||||
if origins_with_universal_network_access is not None:
|
||||
params['originsWithUniversalNetworkAccess'] = [i for i in origins_with_universal_network_access]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.createBrowserContext',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return browser.BrowserContextID.from_json(json['browserContextId'])
|
||||
|
||||
|
||||
def get_browser_contexts() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[browser.BrowserContextID]]:
|
||||
'''
|
||||
Returns all browser contexts created with the ``Target.createBrowserContext`` method.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:returns: An array of browser context ids.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.getBrowserContexts',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [browser.BrowserContextID.from_json(i) for i in json['browserContextIds']]
|
||||
|
||||
|
||||
def create_target(
|
||||
url: str,
|
||||
width: typing.Optional[int] = None,
|
||||
height: typing.Optional[int] = None,
|
||||
browser_context_id: typing.Optional[browser.BrowserContextID] = None,
|
||||
enable_begin_frame_control: typing.Optional[bool] = None,
|
||||
new_window: typing.Optional[bool] = None,
|
||||
background: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,TargetID]:
|
||||
'''
|
||||
Creates a new page.
|
||||
|
||||
:param url: The initial URL the page will be navigated to. An empty string indicates about:blank.
|
||||
:param width: *(Optional)* Frame width in DIP (headless chrome only).
|
||||
:param height: *(Optional)* Frame height in DIP (headless chrome only).
|
||||
:param browser_context_id: **(EXPERIMENTAL)** *(Optional)* The browser context to create the page in.
|
||||
:param enable_begin_frame_control: **(EXPERIMENTAL)** *(Optional)* Whether BeginFrames for this target will be controlled via DevTools (headless chrome only, not supported on MacOS yet, false by default).
|
||||
:param new_window: *(Optional)* Whether to create a new Window or Tab (chrome-only, false by default).
|
||||
:param background: *(Optional)* Whether to create the target in background or foreground (chrome-only, false by default).
|
||||
:returns: The id of the page opened.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['url'] = url
|
||||
if width is not None:
|
||||
params['width'] = width
|
||||
if height is not None:
|
||||
params['height'] = height
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
if enable_begin_frame_control is not None:
|
||||
params['enableBeginFrameControl'] = enable_begin_frame_control
|
||||
if new_window is not None:
|
||||
params['newWindow'] = new_window
|
||||
if background is not None:
|
||||
params['background'] = background
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.createTarget',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return TargetID.from_json(json['targetId'])
|
||||
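create_browser_context and create_target combine naturally to open a page in an isolated, incognito-like profile. A sketch with the same assumed helpers:

# New isolated context that is disposed when the debugging session ends,
# then a page created inside it.
context_id = run_command(create_browser_context(dispose_on_detach=True), send_over_cdp)
page_id = run_command(
    create_target('https://example.com', browser_context_id=context_id),
    send_over_cdp)
print('opened page', page_id, 'in context', context_id)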
|
||||
|
||||
def detach_from_target(
|
||||
session_id: typing.Optional[SessionID] = None,
|
||||
target_id: typing.Optional[TargetID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Detaches session with given id.
|
||||
|
||||
:param session_id: *(Optional)* Session to detach.
|
||||
:param target_id: *(Optional)* Deprecated.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if session_id is not None:
|
||||
params['sessionId'] = session_id.to_json()
|
||||
if target_id is not None:
|
||||
params['targetId'] = target_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.detachFromTarget',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def dispose_browser_context(
|
||||
browser_context_id: browser.BrowserContextID
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Deletes a BrowserContext. All pages belonging to it will be closed without calling their
|
||||
beforeunload hooks.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param browser_context_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.disposeBrowserContext',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_target_info(
|
||||
target_id: typing.Optional[TargetID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,TargetInfo]:
|
||||
'''
|
||||
Returns information about a target.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param target_id: *(Optional)*
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if target_id is not None:
|
||||
params['targetId'] = target_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.getTargetInfo',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return TargetInfo.from_json(json['targetInfo'])
|
||||
|
||||
|
||||
def get_targets() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[TargetInfo]]:
|
||||
'''
|
||||
Retrieves a list of available targets.
|
||||
|
||||
:returns: The list of targets.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.getTargets',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [TargetInfo.from_json(i) for i in json['targetInfos']]
|
||||
|
||||
|
||||
def send_message_to_target(
|
||||
message: str,
|
||||
session_id: typing.Optional[SessionID] = None,
|
||||
target_id: typing.Optional[TargetID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sends protocol message over session with given id.
|
||||
Consider using flat mode instead; see commands attachToTarget, setAutoAttach,
|
||||
and crbug.com/991325.
|
||||
|
||||
:param message:
|
||||
:param session_id: *(Optional)* Identifier of the session.
|
||||
:param target_id: *(Optional)* Deprecated.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['message'] = message
|
||||
if session_id is not None:
|
||||
params['sessionId'] = session_id.to_json()
|
||||
if target_id is not None:
|
||||
params['targetId'] = target_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.sendMessageToTarget',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_auto_attach(
|
||||
auto_attach: bool,
|
||||
wait_for_debugger_on_start: bool,
|
||||
flatten: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Controls whether to automatically attach to new targets which are considered to be related to
|
||||
this one. When turned on, attaches to all existing related targets as well. When turned off,
|
||||
automatically detaches from all currently attached targets.
|
||||
This also clears all targets added by ``autoAttachRelated`` from the list of targets to watch
|
||||
for creation of related targets.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param auto_attach: Whether to auto-attach to related targets.
|
||||
:param wait_for_debugger_on_start: Whether to pause new targets when attaching to them. Use ```Runtime.runIfWaitingForDebugger``` to run paused targets.
|
||||
:param flatten: *(Optional)* Enables "flat" access to the session via specifying sessionId attribute in the commands. We plan to make this the default, deprecate non-flattened mode, and eventually retire it. See crbug.com/991325.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['autoAttach'] = auto_attach
|
||||
params['waitForDebuggerOnStart'] = wait_for_debugger_on_start
|
||||
if flatten is not None:
|
||||
params['flatten'] = flatten
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Target.setAutoAttach',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def auto_attach_related(
        target_id: TargetID,
        wait_for_debugger_on_start: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Adds the specified target to the list of targets that will be monitored for any related target
    creation (such as child frames, child workers and new versions of service worker) and reported
    through ``attachedToTarget``. The specified target is also auto-attached.
    This cancels the effect of any previous ``setAutoAttach`` and is also cancelled by subsequent
    ``setAutoAttach``. Only available at the Browser target.

    **EXPERIMENTAL**

    :param target_id:
    :param wait_for_debugger_on_start: Whether to pause new targets when attaching to them. Use ``Runtime.runIfWaitingForDebugger`` to run paused targets.
    '''
    params: T_JSON_DICT = dict()
    params['targetId'] = target_id.to_json()
    params['waitForDebuggerOnStart'] = wait_for_debugger_on_start
    cmd_dict: T_JSON_DICT = {
        'method': 'Target.autoAttachRelated',
        'params': params,
    }
    json = yield cmd_dict

def set_discover_targets(
        discover: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Controls whether to discover available targets and notify via
    ``targetCreated/targetInfoChanged/targetDestroyed`` events.

    :param discover: Whether to discover available targets.
    '''
    params: T_JSON_DICT = dict()
    params['discover'] = discover
    cmd_dict: T_JSON_DICT = {
        'method': 'Target.setDiscoverTargets',
        'params': params,
    }
    json = yield cmd_dict

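
# Illustrative usage sketch (not part of the generated module): the payloads
# below toggle discovery on and off. While discovery is on, the browser sends
# targetCreated / targetInfoChanged / targetDestroyed events, which the event
# dataclasses later in this module turn into typed objects.
def _example_discovery_payloads() -> typing.List[T_JSON_DICT]:
    ''' Payloads to enable and then disable target discovery. '''
    return [next(set_discover_targets(True)), next(set_discover_targets(False))]
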
def set_remote_locations(
        locations: typing.List[RemoteLocation]
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables target discovery for the specified locations, when ``setDiscoverTargets`` was set to
    ``true``.

    **EXPERIMENTAL**

    :param locations: List of remote locations.
    '''
    params: T_JSON_DICT = dict()
    params['locations'] = [i.to_json() for i in locations]
    cmd_dict: T_JSON_DICT = {
        'method': 'Target.setRemoteLocations',
        'params': params,
    }
    json = yield cmd_dict

@event_class('Target.attachedToTarget')
@dataclass
class AttachedToTarget:
    '''
    **EXPERIMENTAL**

    Issued when attached to target because of auto-attach or ``attachToTarget`` command.
    '''
    #: Identifier assigned to the session used to send/receive messages.
    session_id: SessionID
    target_info: TargetInfo
    waiting_for_debugger: bool

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AttachedToTarget:
        return cls(
            session_id=SessionID.from_json(json['sessionId']),
            target_info=TargetInfo.from_json(json['targetInfo']),
            waiting_for_debugger=bool(json['waitingForDebugger'])
        )


@event_class('Target.detachedFromTarget')
@dataclass
class DetachedFromTarget:
    '''
    **EXPERIMENTAL**

    Issued when detached from target for any reason (including ``detachFromTarget`` command). Can be
    issued multiple times per target if multiple sessions have been attached to it.
    '''
    #: Detached session identifier.
    session_id: SessionID
    #: Deprecated.
    target_id: typing.Optional[TargetID]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> DetachedFromTarget:
        return cls(
            session_id=SessionID.from_json(json['sessionId']),
            target_id=TargetID.from_json(json['targetId']) if 'targetId' in json else None
        )


@event_class('Target.receivedMessageFromTarget')
@dataclass
class ReceivedMessageFromTarget:
    '''
    Notifies about a new protocol message received from the session (as reported in
    ``attachedToTarget`` event).
    '''
    #: Identifier of a session which sends a message.
    session_id: SessionID
    message: str
    #: Deprecated.
    target_id: typing.Optional[TargetID]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> ReceivedMessageFromTarget:
        return cls(
            session_id=SessionID.from_json(json['sessionId']),
            message=str(json['message']),
            target_id=TargetID.from_json(json['targetId']) if 'targetId' in json else None
        )


@event_class('Target.targetCreated')
@dataclass
class TargetCreated:
    '''
    Issued when a possible inspection target is created.
    '''
    target_info: TargetInfo

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TargetCreated:
        return cls(
            target_info=TargetInfo.from_json(json['targetInfo'])
        )


@event_class('Target.targetDestroyed')
@dataclass
class TargetDestroyed:
    '''
    Issued when a target is destroyed.
    '''
    target_id: TargetID

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TargetDestroyed:
        return cls(
            target_id=TargetID.from_json(json['targetId'])
        )


@event_class('Target.targetCrashed')
@dataclass
class TargetCrashed:
    '''
    Issued when a target has crashed.
    '''
    target_id: TargetID
    #: Termination status type.
    status: str
    #: Termination error code.
    error_code: int

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TargetCrashed:
        return cls(
            target_id=TargetID.from_json(json['targetId']),
            status=str(json['status']),
            error_code=int(json['errorCode'])
        )


@event_class('Target.targetInfoChanged')
@dataclass
class TargetInfoChanged:
    '''
    Issued when some information about a target has changed. This only happens between
    ``targetCreated`` and ``targetDestroyed``.
    '''
    target_info: TargetInfo

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TargetInfoChanged:
        return cls(
            target_info=TargetInfo.from_json(json['targetInfo'])
        )
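
# Illustrative usage sketch (not part of the generated module): with "flat"
# session mode (e.g. set_auto_attach(..., flatten=True)), every AttachedToTarget
# event carries the SessionID that subsequent commands for that target must be
# tagged with. A real client would store it and, when waiting_for_debugger is
# True, resume the target with Runtime.runIfWaitingForDebugger on that session.
def _example_on_attached(event: AttachedToTarget) -> SessionID:
    ''' Pull the session identifier out of an attachedToTarget event. '''
    if event.waiting_for_debugger:
        # the new target is paused until the client explicitly resumes it
        pass
    return event.session_id
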
@@ -0,0 +1,63 @@
# DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: Tethering (experimental)
from __future__ import annotations
from .util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing


def bind(
        port: int
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Request browser port binding.

    :param port: Port number to bind.
    '''
    params: T_JSON_DICT = dict()
    params['port'] = port
    cmd_dict: T_JSON_DICT = {
        'method': 'Tethering.bind',
        'params': params,
    }
    json = yield cmd_dict


def unbind(
        port: int
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Request browser port unbinding.

    :param port: Port number to unbind.
    '''
    params: T_JSON_DICT = dict()
    params['port'] = port
    cmd_dict: T_JSON_DICT = {
        'method': 'Tethering.unbind',
        'params': params,
    }
    json = yield cmd_dict


@event_class('Tethering.accepted')
@dataclass
class Accepted:
    '''
    Informs that port was successfully bound and got a specified connection id.
    '''
    #: Port number that was successfully bound.
    port: int
    #: Connection id to be used.
    connection_id: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> Accepted:
        return cls(
            port=int(json['port']),
            connection_id=str(json['connectionId'])
        )
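
# Illustrative usage sketch (not part of the generated module): binding and
# unbinding a port only requires stepping the generators to obtain the
# payloads; once bound, each new connection is reported by a Tethering.accepted
# event, which Accepted.from_json turns into a typed object. Port 9222 is just
# an example value.
def _example_tethering_payloads(port: int = 9222) -> typing.List[T_JSON_DICT]:
    ''' Build the Tethering.bind and Tethering.unbind payloads for one port. '''
    return [next(bind(port)), next(unbind(port))]
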
@@ -0,0 +1,340 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Tracing (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import io
|
||||
|
||||
|
||||
class MemoryDumpConfig(dict):
|
||||
'''
|
||||
Configuration for memory dump. Used only when "memory-infra" category is enabled.
|
||||
'''
|
||||
def to_json(self) -> dict:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: dict) -> MemoryDumpConfig:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'MemoryDumpConfig({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class TraceConfig:
|
||||
#: Controls how the trace buffer stores data.
|
||||
record_mode: typing.Optional[str] = None
|
||||
|
||||
#: Turns on JavaScript stack sampling.
|
||||
enable_sampling: typing.Optional[bool] = None
|
||||
|
||||
#: Turns on system tracing.
|
||||
enable_systrace: typing.Optional[bool] = None
|
||||
|
||||
#: Turns on argument filter.
|
||||
enable_argument_filter: typing.Optional[bool] = None
|
||||
|
||||
#: Included category filters.
|
||||
included_categories: typing.Optional[typing.List[str]] = None
|
||||
|
||||
#: Excluded category filters.
|
||||
excluded_categories: typing.Optional[typing.List[str]] = None
|
||||
|
||||
#: Configuration to synthesize the delays in tracing.
|
||||
synthetic_delays: typing.Optional[typing.List[str]] = None
|
||||
|
||||
#: Configuration for memory dump triggers. Used only when "memory-infra" category is enabled.
|
||||
memory_dump_config: typing.Optional[MemoryDumpConfig] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
if self.record_mode is not None:
|
||||
json['recordMode'] = self.record_mode
|
||||
if self.enable_sampling is not None:
|
||||
json['enableSampling'] = self.enable_sampling
|
||||
if self.enable_systrace is not None:
|
||||
json['enableSystrace'] = self.enable_systrace
|
||||
if self.enable_argument_filter is not None:
|
||||
json['enableArgumentFilter'] = self.enable_argument_filter
|
||||
if self.included_categories is not None:
|
||||
json['includedCategories'] = [i for i in self.included_categories]
|
||||
if self.excluded_categories is not None:
|
||||
json['excludedCategories'] = [i for i in self.excluded_categories]
|
||||
if self.synthetic_delays is not None:
|
||||
json['syntheticDelays'] = [i for i in self.synthetic_delays]
|
||||
if self.memory_dump_config is not None:
|
||||
json['memoryDumpConfig'] = self.memory_dump_config.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
record_mode=str(json['recordMode']) if 'recordMode' in json else None,
|
||||
enable_sampling=bool(json['enableSampling']) if 'enableSampling' in json else None,
|
||||
enable_systrace=bool(json['enableSystrace']) if 'enableSystrace' in json else None,
|
||||
enable_argument_filter=bool(json['enableArgumentFilter']) if 'enableArgumentFilter' in json else None,
|
||||
included_categories=[str(i) for i in json['includedCategories']] if 'includedCategories' in json else None,
|
||||
excluded_categories=[str(i) for i in json['excludedCategories']] if 'excludedCategories' in json else None,
|
||||
synthetic_delays=[str(i) for i in json['syntheticDelays']] if 'syntheticDelays' in json else None,
|
||||
memory_dump_config=MemoryDumpConfig.from_json(json['memoryDumpConfig']) if 'memoryDumpConfig' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class StreamFormat(enum.Enum):
|
||||
'''
|
||||
Data format of a trace. Can be either the legacy JSON format or the
|
||||
protocol buffer format. Note that the JSON format will be deprecated soon.
|
||||
'''
|
||||
JSON = "json"
|
||||
PROTO = "proto"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class StreamCompression(enum.Enum):
|
||||
'''
|
||||
Compression type to use for traces returned via streams.
|
||||
'''
|
||||
NONE = "none"
|
||||
GZIP = "gzip"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class MemoryDumpLevelOfDetail(enum.Enum):
|
||||
'''
|
||||
Details exposed when memory request explicitly declared.
|
||||
Keep consistent with memory_dump_request_args.h and
|
||||
memory_instrumentation.mojom
|
||||
'''
|
||||
BACKGROUND = "background"
|
||||
LIGHT = "light"
|
||||
DETAILED = "detailed"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class TracingBackend(enum.Enum):
|
||||
'''
|
||||
Backend type to use for tracing. ``chrome`` uses the Chrome-integrated
|
||||
tracing service and is supported on all platforms. ``system`` is only
|
||||
supported on Chrome OS and uses the Perfetto system tracing service.
|
||||
``auto`` chooses ``system`` when the perfettoConfig provided to Tracing.start
|
||||
specifies at least one non-Chrome data source; otherwise uses ``chrome``.
|
||||
'''
|
||||
AUTO = "auto"
|
||||
CHROME = "chrome"
|
||||
SYSTEM = "system"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
def end() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Stop trace events collection.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Tracing.end',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_categories() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[str]]:
|
||||
'''
|
||||
Gets supported tracing categories.
|
||||
|
||||
:returns: A list of supported tracing categories.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Tracing.getCategories',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [str(i) for i in json['categories']]
|
||||
|
||||
|
||||
def record_clock_sync_marker(
|
||||
sync_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Record a clock sync marker in the trace.
|
||||
|
||||
:param sync_id: The ID of this clock sync marker
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['syncId'] = sync_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Tracing.recordClockSyncMarker',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def request_memory_dump(
|
||||
deterministic: typing.Optional[bool] = None,
|
||||
level_of_detail: typing.Optional[MemoryDumpLevelOfDetail] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[str, bool]]:
|
||||
'''
|
||||
Request a global memory dump.
|
||||
|
||||
:param deterministic: *(Optional)* Enables more deterministic results by forcing garbage collection
|
||||
:param level_of_detail: *(Optional)* Specifies level of details in memory dump. Defaults to "detailed".
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **dumpGuid** - GUID of the resulting global memory dump.
|
||||
1. **success** - True iff the global memory dump succeeded.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if deterministic is not None:
|
||||
params['deterministic'] = deterministic
|
||||
if level_of_detail is not None:
|
||||
params['levelOfDetail'] = level_of_detail.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Tracing.requestMemoryDump',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
str(json['dumpGuid']),
|
||||
bool(json['success'])
|
||||
)
|
||||
|
||||
|
||||
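
# Illustrative usage sketch (not part of the generated module): driving
# request_memory_dump by hand shows how a command's return value comes back out
# of the generator via StopIteration. The fake_result below is a hand-written
# stand-in for a real browser response, not real data.
def _example_memory_dump_result() -> typing.Tuple[str, bool]:
    ''' Build a Tracing.requestMemoryDump payload and unpack a (fake) reply. '''
    cmd = request_memory_dump(deterministic=True, level_of_detail=MemoryDumpLevelOfDetail.LIGHT)
    next(cmd)  # {'method': 'Tracing.requestMemoryDump', 'params': {...}}
    fake_result = {'dumpGuid': 'example-dump', 'success': True}
    try:
        cmd.send(fake_result)
    except StopIteration as exit_:
        return exit_.value  # ('example-dump', True)
    raise RuntimeError('command generator did not finish')
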
def start(
|
||||
categories: typing.Optional[str] = None,
|
||||
options: typing.Optional[str] = None,
|
||||
buffer_usage_reporting_interval: typing.Optional[float] = None,
|
||||
transfer_mode: typing.Optional[str] = None,
|
||||
stream_format: typing.Optional[StreamFormat] = None,
|
||||
stream_compression: typing.Optional[StreamCompression] = None,
|
||||
trace_config: typing.Optional[TraceConfig] = None,
|
||||
perfetto_config: typing.Optional[str] = None,
|
||||
tracing_backend: typing.Optional[TracingBackend] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Start trace events collection.
|
||||
|
||||
:param categories: *(Optional)* Category/tag filter
|
||||
:param options: *(Optional)* Tracing options
|
||||
:param buffer_usage_reporting_interval: *(Optional)* If set, the agent will issue bufferUsage events at this interval, specified in milliseconds
|
||||
:param transfer_mode: *(Optional)* Whether to report trace events as series of dataCollected events or to save trace to a stream (defaults to ``ReportEvents``).
:param stream_format: *(Optional)* Trace data format to use. This only applies when using ``ReturnAsStream`` transfer mode (defaults to ``json``).
:param stream_compression: *(Optional)* Compression format to use. This only applies when using ``ReturnAsStream`` transfer mode (defaults to ``none``)
:param trace_config: *(Optional)*
:param perfetto_config: *(Optional)* Base64-encoded serialized perfetto.protos.TraceConfig protobuf message When specified, the parameters ``categories``, ``options``, ``traceConfig`` are ignored.
:param tracing_backend: *(Optional)* Backend type (defaults to ``auto``)
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if categories is not None:
|
||||
params['categories'] = categories
|
||||
if options is not None:
|
||||
params['options'] = options
|
||||
if buffer_usage_reporting_interval is not None:
|
||||
params['bufferUsageReportingInterval'] = buffer_usage_reporting_interval
|
||||
if transfer_mode is not None:
|
||||
params['transferMode'] = transfer_mode
|
||||
if stream_format is not None:
|
||||
params['streamFormat'] = stream_format.to_json()
|
||||
if stream_compression is not None:
|
||||
params['streamCompression'] = stream_compression.to_json()
|
||||
if trace_config is not None:
|
||||
params['traceConfig'] = trace_config.to_json()
|
||||
if perfetto_config is not None:
|
||||
params['perfettoConfig'] = perfetto_config
|
||||
if tracing_backend is not None:
|
||||
params['tracingBackend'] = tracing_backend.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Tracing.start',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Tracing.bufferUsage')
|
||||
@dataclass
|
||||
class BufferUsage:
|
||||
#: A number in range [0..1] that indicates the used size of event buffer as a fraction of its
|
||||
#: total size.
|
||||
percent_full: typing.Optional[float]
|
||||
#: An approximate number of events in the trace log.
|
||||
event_count: typing.Optional[float]
|
||||
#: A number in range [0..1] that indicates the used size of event buffer as a fraction of its
|
||||
#: total size.
|
||||
value: typing.Optional[float]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> BufferUsage:
|
||||
return cls(
|
||||
percent_full=float(json['percentFull']) if 'percentFull' in json else None,
|
||||
event_count=float(json['eventCount']) if 'eventCount' in json else None,
|
||||
value=float(json['value']) if 'value' in json else None
|
||||
)
|
||||
|
||||
|
||||
@event_class('Tracing.dataCollected')
|
||||
@dataclass
|
||||
class DataCollected:
|
||||
'''
|
||||
Contains a bucket of collected trace events. When tracing is stopped, collected events will be
sent as a sequence of dataCollected events followed by a tracingComplete event.
|
||||
'''
|
||||
value: typing.List[dict]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DataCollected:
|
||||
return cls(
|
||||
value=[dict(i) for i in json['value']]
|
||||
)
|
||||
|
||||
|
||||
@event_class('Tracing.tracingComplete')
|
||||
@dataclass
|
||||
class TracingComplete:
|
||||
'''
|
||||
Signals that tracing is stopped and there are no trace buffers pending flush; all data were
delivered via dataCollected events.
|
||||
'''
|
||||
#: Indicates whether some trace data is known to have been lost, e.g. because the trace ring
|
||||
#: buffer wrapped around.
|
||||
data_loss_occurred: bool
|
||||
#: A handle of the stream that holds resulting trace data.
|
||||
stream: typing.Optional[io.StreamHandle]
|
||||
#: Trace data format of returned stream.
|
||||
trace_format: typing.Optional[StreamFormat]
|
||||
#: Compression format of returned stream.
|
||||
stream_compression: typing.Optional[StreamCompression]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> TracingComplete:
|
||||
return cls(
|
||||
data_loss_occurred=bool(json['dataLossOccurred']),
|
||||
stream=io.StreamHandle.from_json(json['stream']) if 'stream' in json else None,
|
||||
trace_format=StreamFormat.from_json(json['traceFormat']) if 'traceFormat' in json else None,
|
||||
stream_compression=StreamCompression.from_json(json['streamCompression']) if 'streamCompression' in json else None
|
||||
)
|
||||
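
# Illustrative usage sketch (not part of the generated module): a typical
# capture builds a TraceConfig, sends the Tracing.start payload, later sends
# Tracing.end, and then consumes DataCollected / TracingComplete events.
# 'recordUntilFull' and the category name are example values only.
def _example_tracing_payloads() -> typing.List[T_JSON_DICT]:
    ''' Build the start/end payloads for a simple JSON-format trace. '''
    config = TraceConfig(
        record_mode='recordUntilFull',
        included_categories=['devtools.timeline'],
    )
    return [
        next(start(trace_config=config, stream_format=StreamFormat.JSON)),
        next(end()),
    ]
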
@@ -0,0 +1,19 @@

import typing


T_JSON_DICT = typing.Dict[str, typing.Any]
_event_parsers = dict()


def event_class(method):
    ''' A decorator that registers a class as an event class. '''
    def decorate(cls):
        _event_parsers[method] = cls
        return cls
    return decorate


def parse_json_event(json: T_JSON_DICT) -> typing.Any:
    ''' Parse a JSON dictionary into a CDP event. '''
    return _event_parsers[json['method']].from_json(json['params'])
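
# Illustrative usage sketch (not part of the generated module): once a domain
# module has been imported, its @event_class decorators have populated
# _event_parsers, so raw CDP messages can be promoted to typed event objects.
# Messages whose method is unknown (or which are command replies) are left as-is.
def _example_dispatch(message: T_JSON_DICT) -> typing.Any:
    ''' Parse one incoming CDP message into a typed event when possible. '''
    if message.get('method') in _event_parsers:
        return parse_json_event(message)
    return message
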
@@ -0,0 +1,603 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: WebAudio (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class GraphObjectId(str):
|
||||
'''
|
||||
A unique ID for a graph object (AudioContext, AudioNode, AudioParam) in the Web Audio API
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> GraphObjectId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'GraphObjectId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class ContextType(enum.Enum):
|
||||
'''
|
||||
Enum of BaseAudioContext types
|
||||
'''
|
||||
REALTIME = "realtime"
|
||||
OFFLINE = "offline"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class ContextState(enum.Enum):
|
||||
'''
|
||||
Enum of AudioContextState from the spec
|
||||
'''
|
||||
SUSPENDED = "suspended"
|
||||
RUNNING = "running"
|
||||
CLOSED = "closed"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class NodeType(str):
|
||||
'''
|
||||
Enum of AudioNode types
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> NodeType:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'NodeType({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class ChannelCountMode(enum.Enum):
|
||||
'''
|
||||
Enum of AudioNode::ChannelCountMode from the spec
|
||||
'''
|
||||
CLAMPED_MAX = "clamped-max"
|
||||
EXPLICIT = "explicit"
|
||||
MAX_ = "max"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class ChannelInterpretation(enum.Enum):
|
||||
'''
|
||||
Enum of AudioNode::ChannelInterpretation from the spec
|
||||
'''
|
||||
DISCRETE = "discrete"
|
||||
SPEAKERS = "speakers"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class ParamType(str):
|
||||
'''
|
||||
Enum of AudioParam types
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> ParamType:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'ParamType({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class AutomationRate(enum.Enum):
|
||||
'''
|
||||
Enum of AudioParam::AutomationRate from the spec
|
||||
'''
|
||||
A_RATE = "a-rate"
|
||||
K_RATE = "k-rate"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ContextRealtimeData:
|
||||
'''
|
||||
Fields in AudioContext that change in real-time.
|
||||
'''
|
||||
#: The current context time in second in BaseAudioContext.
|
||||
current_time: float
|
||||
|
||||
#: The time spent on rendering graph divided by render quantum duration,
|
||||
#: and multiplied by 100. 100 means the audio renderer reached the full
|
||||
#: capacity and glitch may occur.
|
||||
render_capacity: float
|
||||
|
||||
#: A running mean of callback interval.
|
||||
callback_interval_mean: float
|
||||
|
||||
#: A running variance of callback interval.
|
||||
callback_interval_variance: float
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['currentTime'] = self.current_time
|
||||
json['renderCapacity'] = self.render_capacity
|
||||
json['callbackIntervalMean'] = self.callback_interval_mean
|
||||
json['callbackIntervalVariance'] = self.callback_interval_variance
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
current_time=float(json['currentTime']),
|
||||
render_capacity=float(json['renderCapacity']),
|
||||
callback_interval_mean=float(json['callbackIntervalMean']),
|
||||
callback_interval_variance=float(json['callbackIntervalVariance']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class BaseAudioContext:
|
||||
'''
|
||||
Protocol object for BaseAudioContext
|
||||
'''
|
||||
context_id: GraphObjectId
|
||||
|
||||
context_type: ContextType
|
||||
|
||||
context_state: ContextState
|
||||
|
||||
#: Platform-dependent callback buffer size.
|
||||
callback_buffer_size: float
|
||||
|
||||
#: Number of output channels supported by audio hardware in use.
|
||||
max_output_channel_count: float
|
||||
|
||||
#: Context sample rate.
|
||||
sample_rate: float
|
||||
|
||||
realtime_data: typing.Optional[ContextRealtimeData] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['contextId'] = self.context_id.to_json()
|
||||
json['contextType'] = self.context_type.to_json()
|
||||
json['contextState'] = self.context_state.to_json()
|
||||
json['callbackBufferSize'] = self.callback_buffer_size
|
||||
json['maxOutputChannelCount'] = self.max_output_channel_count
|
||||
json['sampleRate'] = self.sample_rate
|
||||
if self.realtime_data is not None:
|
||||
json['realtimeData'] = self.realtime_data.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
context_id=GraphObjectId.from_json(json['contextId']),
|
||||
context_type=ContextType.from_json(json['contextType']),
|
||||
context_state=ContextState.from_json(json['contextState']),
|
||||
callback_buffer_size=float(json['callbackBufferSize']),
|
||||
max_output_channel_count=float(json['maxOutputChannelCount']),
|
||||
sample_rate=float(json['sampleRate']),
|
||||
realtime_data=ContextRealtimeData.from_json(json['realtimeData']) if 'realtimeData' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AudioListener:
|
||||
'''
|
||||
Protocol object for AudioListener
|
||||
'''
|
||||
listener_id: GraphObjectId
|
||||
|
||||
context_id: GraphObjectId
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['listenerId'] = self.listener_id.to_json()
|
||||
json['contextId'] = self.context_id.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
listener_id=GraphObjectId.from_json(json['listenerId']),
|
||||
context_id=GraphObjectId.from_json(json['contextId']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AudioNode:
|
||||
'''
|
||||
Protocol object for AudioNode
|
||||
'''
|
||||
node_id: GraphObjectId
|
||||
|
||||
context_id: GraphObjectId
|
||||
|
||||
node_type: NodeType
|
||||
|
||||
number_of_inputs: float
|
||||
|
||||
number_of_outputs: float
|
||||
|
||||
channel_count: float
|
||||
|
||||
channel_count_mode: ChannelCountMode
|
||||
|
||||
channel_interpretation: ChannelInterpretation
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['nodeId'] = self.node_id.to_json()
|
||||
json['contextId'] = self.context_id.to_json()
|
||||
json['nodeType'] = self.node_type.to_json()
|
||||
json['numberOfInputs'] = self.number_of_inputs
|
||||
json['numberOfOutputs'] = self.number_of_outputs
|
||||
json['channelCount'] = self.channel_count
|
||||
json['channelCountMode'] = self.channel_count_mode.to_json()
|
||||
json['channelInterpretation'] = self.channel_interpretation.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
node_id=GraphObjectId.from_json(json['nodeId']),
|
||||
context_id=GraphObjectId.from_json(json['contextId']),
|
||||
node_type=NodeType.from_json(json['nodeType']),
|
||||
number_of_inputs=float(json['numberOfInputs']),
|
||||
number_of_outputs=float(json['numberOfOutputs']),
|
||||
channel_count=float(json['channelCount']),
|
||||
channel_count_mode=ChannelCountMode.from_json(json['channelCountMode']),
|
||||
channel_interpretation=ChannelInterpretation.from_json(json['channelInterpretation']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AudioParam:
|
||||
'''
|
||||
Protocol object for AudioParam
|
||||
'''
|
||||
param_id: GraphObjectId
|
||||
|
||||
node_id: GraphObjectId
|
||||
|
||||
context_id: GraphObjectId
|
||||
|
||||
param_type: ParamType
|
||||
|
||||
rate: AutomationRate
|
||||
|
||||
default_value: float
|
||||
|
||||
min_value: float
|
||||
|
||||
max_value: float
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['paramId'] = self.param_id.to_json()
|
||||
json['nodeId'] = self.node_id.to_json()
|
||||
json['contextId'] = self.context_id.to_json()
|
||||
json['paramType'] = self.param_type.to_json()
|
||||
json['rate'] = self.rate.to_json()
|
||||
json['defaultValue'] = self.default_value
|
||||
json['minValue'] = self.min_value
|
||||
json['maxValue'] = self.max_value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
param_id=GraphObjectId.from_json(json['paramId']),
|
||||
node_id=GraphObjectId.from_json(json['nodeId']),
|
||||
context_id=GraphObjectId.from_json(json['contextId']),
|
||||
param_type=ParamType.from_json(json['paramType']),
|
||||
rate=AutomationRate.from_json(json['rate']),
|
||||
default_value=float(json['defaultValue']),
|
||||
min_value=float(json['minValue']),
|
||||
max_value=float(json['maxValue']),
|
||||
)
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables the WebAudio domain and starts sending context lifetime events.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAudio.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables the WebAudio domain.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAudio.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_realtime_data(
|
||||
context_id: GraphObjectId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,ContextRealtimeData]:
|
||||
'''
|
||||
Fetch the realtime data from the registered contexts.
|
||||
|
||||
:param context_id:
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['contextId'] = context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAudio.getRealtimeData',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return ContextRealtimeData.from_json(json['realtimeData'])
|
||||
|
||||
|
||||
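
# Illustrative usage sketch (not part of the generated module): enabling the
# domain starts the context lifetime events, and get_realtime_data polls one
# context. Once the reply is sent back into the generator, the caller receives
# a typed ContextRealtimeData object. The context id would normally come from a
# contextCreated event in a real session.
def _example_webaudio_payloads(context_id: GraphObjectId) -> typing.List[T_JSON_DICT]:
    ''' Payloads to enable WebAudio and poll realtime data for one context. '''
    return [next(enable()), next(get_realtime_data(context_id))]
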
@event_class('WebAudio.contextCreated')
|
||||
@dataclass
|
||||
class ContextCreated:
|
||||
'''
|
||||
Notifies that a new BaseAudioContext has been created.
|
||||
'''
|
||||
context: BaseAudioContext
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> ContextCreated:
|
||||
return cls(
|
||||
context=BaseAudioContext.from_json(json['context'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.contextWillBeDestroyed')
|
||||
@dataclass
|
||||
class ContextWillBeDestroyed:
|
||||
'''
|
||||
Notifies that an existing BaseAudioContext will be destroyed.
|
||||
'''
|
||||
context_id: GraphObjectId
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> ContextWillBeDestroyed:
|
||||
return cls(
|
||||
context_id=GraphObjectId.from_json(json['contextId'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.contextChanged')
|
||||
@dataclass
|
||||
class ContextChanged:
|
||||
'''
|
||||
Notifies that an existing BaseAudioContext has changed some properties (id stays the same).
|
||||
'''
|
||||
context: BaseAudioContext
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> ContextChanged:
|
||||
return cls(
|
||||
context=BaseAudioContext.from_json(json['context'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.audioListenerCreated')
|
||||
@dataclass
|
||||
class AudioListenerCreated:
|
||||
'''
|
||||
Notifies that the construction of an AudioListener has finished.
|
||||
'''
|
||||
listener: AudioListener
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AudioListenerCreated:
|
||||
return cls(
|
||||
listener=AudioListener.from_json(json['listener'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.audioListenerWillBeDestroyed')
|
||||
@dataclass
|
||||
class AudioListenerWillBeDestroyed:
|
||||
'''
|
||||
Notifies that an existing AudioListener will be destroyed.
|
||||
'''
|
||||
context_id: GraphObjectId
|
||||
listener_id: GraphObjectId
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AudioListenerWillBeDestroyed:
|
||||
return cls(
|
||||
context_id=GraphObjectId.from_json(json['contextId']),
|
||||
listener_id=GraphObjectId.from_json(json['listenerId'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.audioNodeCreated')
|
||||
@dataclass
|
||||
class AudioNodeCreated:
|
||||
'''
|
||||
Notifies that a new AudioNode has been created.
|
||||
'''
|
||||
node: AudioNode
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AudioNodeCreated:
|
||||
return cls(
|
||||
node=AudioNode.from_json(json['node'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.audioNodeWillBeDestroyed')
|
||||
@dataclass
|
||||
class AudioNodeWillBeDestroyed:
|
||||
'''
|
||||
Notifies that an existing AudioNode has been destroyed.
|
||||
'''
|
||||
context_id: GraphObjectId
|
||||
node_id: GraphObjectId
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AudioNodeWillBeDestroyed:
|
||||
return cls(
|
||||
context_id=GraphObjectId.from_json(json['contextId']),
|
||||
node_id=GraphObjectId.from_json(json['nodeId'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.audioParamCreated')
|
||||
@dataclass
|
||||
class AudioParamCreated:
|
||||
'''
|
||||
Notifies that a new AudioParam has been created.
|
||||
'''
|
||||
param: AudioParam
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AudioParamCreated:
|
||||
return cls(
|
||||
param=AudioParam.from_json(json['param'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.audioParamWillBeDestroyed')
|
||||
@dataclass
|
||||
class AudioParamWillBeDestroyed:
|
||||
'''
|
||||
Notifies that an existing AudioParam has been destroyed.
|
||||
'''
|
||||
context_id: GraphObjectId
|
||||
node_id: GraphObjectId
|
||||
param_id: GraphObjectId
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AudioParamWillBeDestroyed:
|
||||
return cls(
|
||||
context_id=GraphObjectId.from_json(json['contextId']),
|
||||
node_id=GraphObjectId.from_json(json['nodeId']),
|
||||
param_id=GraphObjectId.from_json(json['paramId'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.nodesConnected')
|
||||
@dataclass
|
||||
class NodesConnected:
|
||||
'''
|
||||
Notifies that two AudioNodes are connected.
|
||||
'''
|
||||
context_id: GraphObjectId
|
||||
source_id: GraphObjectId
|
||||
destination_id: GraphObjectId
|
||||
source_output_index: typing.Optional[float]
|
||||
destination_input_index: typing.Optional[float]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> NodesConnected:
|
||||
return cls(
|
||||
context_id=GraphObjectId.from_json(json['contextId']),
|
||||
source_id=GraphObjectId.from_json(json['sourceId']),
|
||||
destination_id=GraphObjectId.from_json(json['destinationId']),
|
||||
source_output_index=float(json['sourceOutputIndex']) if 'sourceOutputIndex' in json else None,
|
||||
destination_input_index=float(json['destinationInputIndex']) if 'destinationInputIndex' in json else None
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.nodesDisconnected')
|
||||
@dataclass
|
||||
class NodesDisconnected:
|
||||
'''
|
||||
Notifies that AudioNodes are disconnected. The destination can be null, and it means all the outgoing connections from the source are disconnected.
|
||||
'''
|
||||
context_id: GraphObjectId
|
||||
source_id: GraphObjectId
|
||||
destination_id: GraphObjectId
|
||||
source_output_index: typing.Optional[float]
|
||||
destination_input_index: typing.Optional[float]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> NodesDisconnected:
|
||||
return cls(
|
||||
context_id=GraphObjectId.from_json(json['contextId']),
|
||||
source_id=GraphObjectId.from_json(json['sourceId']),
|
||||
destination_id=GraphObjectId.from_json(json['destinationId']),
|
||||
source_output_index=float(json['sourceOutputIndex']) if 'sourceOutputIndex' in json else None,
|
||||
destination_input_index=float(json['destinationInputIndex']) if 'destinationInputIndex' in json else None
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.nodeParamConnected')
|
||||
@dataclass
|
||||
class NodeParamConnected:
|
||||
'''
|
||||
Notifies that an AudioNode is connected to an AudioParam.
|
||||
'''
|
||||
context_id: GraphObjectId
|
||||
source_id: GraphObjectId
|
||||
destination_id: GraphObjectId
|
||||
source_output_index: typing.Optional[float]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> NodeParamConnected:
|
||||
return cls(
|
||||
context_id=GraphObjectId.from_json(json['contextId']),
|
||||
source_id=GraphObjectId.from_json(json['sourceId']),
|
||||
destination_id=GraphObjectId.from_json(json['destinationId']),
|
||||
source_output_index=float(json['sourceOutputIndex']) if 'sourceOutputIndex' in json else None
|
||||
)
|
||||
|
||||
|
||||
@event_class('WebAudio.nodeParamDisconnected')
|
||||
@dataclass
|
||||
class NodeParamDisconnected:
|
||||
'''
|
||||
Notifies that an AudioNode is disconnected from an AudioParam.
|
||||
'''
|
||||
context_id: GraphObjectId
|
||||
source_id: GraphObjectId
|
||||
destination_id: GraphObjectId
|
||||
source_output_index: typing.Optional[float]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> NodeParamDisconnected:
|
||||
return cls(
|
||||
context_id=GraphObjectId.from_json(json['contextId']),
|
||||
source_id=GraphObjectId.from_json(json['sourceId']),
|
||||
destination_id=GraphObjectId.from_json(json['destinationId']),
|
||||
source_output_index=float(json['sourceOutputIndex']) if 'sourceOutputIndex' in json else None
|
||||
)
|
||||
@@ -0,0 +1,388 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: WebAuthn (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class AuthenticatorId(str):
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> AuthenticatorId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'AuthenticatorId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class AuthenticatorProtocol(enum.Enum):
|
||||
U2F = "u2f"
|
||||
CTAP2 = "ctap2"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class Ctap2Version(enum.Enum):
|
||||
CTAP2_0 = "ctap2_0"
|
||||
CTAP2_1 = "ctap2_1"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class AuthenticatorTransport(enum.Enum):
|
||||
USB = "usb"
|
||||
NFC = "nfc"
|
||||
BLE = "ble"
|
||||
CABLE = "cable"
|
||||
INTERNAL = "internal"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class VirtualAuthenticatorOptions:
|
||||
protocol: AuthenticatorProtocol
|
||||
|
||||
transport: AuthenticatorTransport
|
||||
|
||||
#: Defaults to ctap2_0. Ignored if ``protocol`` == u2f.
|
||||
ctap2_version: typing.Optional[Ctap2Version] = None
|
||||
|
||||
#: Defaults to false.
|
||||
has_resident_key: typing.Optional[bool] = None
|
||||
|
||||
#: Defaults to false.
|
||||
has_user_verification: typing.Optional[bool] = None
|
||||
|
||||
#: If set to true, the authenticator will support the largeBlob extension.
|
||||
#: https://w3c.github.io/webauthn#largeBlob
|
||||
#: Defaults to false.
|
||||
has_large_blob: typing.Optional[bool] = None
|
||||
|
||||
#: If set to true, the authenticator will support the credBlob extension.
|
||||
#: https://fidoalliance.org/specs/fido-v2.1-rd-20201208/fido-client-to-authenticator-protocol-v2.1-rd-20201208.html#sctn-credBlob-extension
|
||||
#: Defaults to false.
|
||||
has_cred_blob: typing.Optional[bool] = None
|
||||
|
||||
#: If set to true, the authenticator will support the minPinLength extension.
|
||||
#: https://fidoalliance.org/specs/fido-v2.1-ps-20210615/fido-client-to-authenticator-protocol-v2.1-ps-20210615.html#sctn-minpinlength-extension
|
||||
#: Defaults to false.
|
||||
has_min_pin_length: typing.Optional[bool] = None
|
||||
|
||||
#: If set to true, tests of user presence will succeed immediately.
|
||||
#: Otherwise, they will not be resolved. Defaults to true.
|
||||
automatic_presence_simulation: typing.Optional[bool] = None
|
||||
|
||||
#: Sets whether User Verification succeeds or fails for an authenticator.
|
||||
#: Defaults to false.
|
||||
is_user_verified: typing.Optional[bool] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['protocol'] = self.protocol.to_json()
|
||||
json['transport'] = self.transport.to_json()
|
||||
if self.ctap2_version is not None:
|
||||
json['ctap2Version'] = self.ctap2_version.to_json()
|
||||
if self.has_resident_key is not None:
|
||||
json['hasResidentKey'] = self.has_resident_key
|
||||
if self.has_user_verification is not None:
|
||||
json['hasUserVerification'] = self.has_user_verification
|
||||
if self.has_large_blob is not None:
|
||||
json['hasLargeBlob'] = self.has_large_blob
|
||||
if self.has_cred_blob is not None:
|
||||
json['hasCredBlob'] = self.has_cred_blob
|
||||
if self.has_min_pin_length is not None:
|
||||
json['hasMinPinLength'] = self.has_min_pin_length
|
||||
if self.automatic_presence_simulation is not None:
|
||||
json['automaticPresenceSimulation'] = self.automatic_presence_simulation
|
||||
if self.is_user_verified is not None:
|
||||
json['isUserVerified'] = self.is_user_verified
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
protocol=AuthenticatorProtocol.from_json(json['protocol']),
|
||||
transport=AuthenticatorTransport.from_json(json['transport']),
|
||||
ctap2_version=Ctap2Version.from_json(json['ctap2Version']) if 'ctap2Version' in json else None,
|
||||
has_resident_key=bool(json['hasResidentKey']) if 'hasResidentKey' in json else None,
|
||||
has_user_verification=bool(json['hasUserVerification']) if 'hasUserVerification' in json else None,
|
||||
has_large_blob=bool(json['hasLargeBlob']) if 'hasLargeBlob' in json else None,
|
||||
has_cred_blob=bool(json['hasCredBlob']) if 'hasCredBlob' in json else None,
|
||||
has_min_pin_length=bool(json['hasMinPinLength']) if 'hasMinPinLength' in json else None,
|
||||
automatic_presence_simulation=bool(json['automaticPresenceSimulation']) if 'automaticPresenceSimulation' in json else None,
|
||||
is_user_verified=bool(json['isUserVerified']) if 'isUserVerified' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Credential:
|
||||
credential_id: str
|
||||
|
||||
is_resident_credential: bool
|
||||
|
||||
#: The ECDSA P-256 private key in PKCS#8 format.
|
||||
private_key: str
|
||||
|
||||
#: Signature counter. This is incremented by one for each successful
|
||||
#: assertion.
|
||||
#: See https://w3c.github.io/webauthn/#signature-counter
|
||||
sign_count: int
|
||||
|
||||
#: Relying Party ID the credential is scoped to. Must be set when adding a
|
||||
#: credential.
|
||||
rp_id: typing.Optional[str] = None
|
||||
|
||||
#: An opaque byte sequence with a maximum size of 64 bytes mapping the
|
||||
#: credential to a specific user.
|
||||
user_handle: typing.Optional[str] = None
|
||||
|
||||
#: The large blob associated with the credential.
|
||||
#: See https://w3c.github.io/webauthn/#sctn-large-blob-extension
|
||||
large_blob: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['credentialId'] = self.credential_id
|
||||
json['isResidentCredential'] = self.is_resident_credential
|
||||
json['privateKey'] = self.private_key
|
||||
json['signCount'] = self.sign_count
|
||||
if self.rp_id is not None:
|
||||
json['rpId'] = self.rp_id
|
||||
if self.user_handle is not None:
|
||||
json['userHandle'] = self.user_handle
|
||||
if self.large_blob is not None:
|
||||
json['largeBlob'] = self.large_blob
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
credential_id=str(json['credentialId']),
|
||||
is_resident_credential=bool(json['isResidentCredential']),
|
||||
private_key=str(json['privateKey']),
|
||||
sign_count=int(json['signCount']),
|
||||
rp_id=str(json['rpId']) if 'rpId' in json else None,
|
||||
user_handle=str(json['userHandle']) if 'userHandle' in json else None,
|
||||
large_blob=str(json['largeBlob']) if 'largeBlob' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enable the WebAuthn domain and start intercepting credential storage and
|
||||
retrieval with a virtual authenticator.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAuthn.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disable the WebAuthn domain.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAuthn.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def add_virtual_authenticator(
|
||||
options: VirtualAuthenticatorOptions
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,AuthenticatorId]:
|
||||
'''
|
||||
Creates and adds a virtual authenticator.
|
||||
|
||||
:param options:
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['options'] = options.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAuthn.addVirtualAuthenticator',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return AuthenticatorId.from_json(json['authenticatorId'])
|
||||
|
||||
|
||||
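
# Illustrative usage sketch (not part of the generated module): a CTAP2
# authenticator on the "internal" transport with resident-key and user
# verification support is a common test setup. The reply to this payload
# yields the AuthenticatorId needed by add_credential / get_credentials.
# All option values here are example choices, not requirements.
def _example_virtual_authenticator_payload() -> T_JSON_DICT:
    ''' Build a WebAuthn.addVirtualAuthenticator payload for a test authenticator. '''
    options = VirtualAuthenticatorOptions(
        protocol=AuthenticatorProtocol.CTAP2,
        transport=AuthenticatorTransport.INTERNAL,
        has_resident_key=True,
        has_user_verification=True,
        is_user_verified=True,
    )
    return next(add_virtual_authenticator(options))
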
def remove_virtual_authenticator(
|
||||
authenticator_id: AuthenticatorId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes the given authenticator.
|
||||
|
||||
:param authenticator_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['authenticatorId'] = authenticator_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAuthn.removeVirtualAuthenticator',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def add_credential(
|
||||
authenticator_id: AuthenticatorId,
|
||||
credential: Credential
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Adds the credential to the specified authenticator.
|
||||
|
||||
:param authenticator_id:
|
||||
:param credential:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['authenticatorId'] = authenticator_id.to_json()
|
||||
params['credential'] = credential.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAuthn.addCredential',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_credential(
|
||||
authenticator_id: AuthenticatorId,
|
||||
credential_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Credential]:
|
||||
'''
|
||||
Returns a single credential stored in the given virtual authenticator that
|
||||
matches the credential ID.
|
||||
|
||||
:param authenticator_id:
|
||||
:param credential_id:
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['authenticatorId'] = authenticator_id.to_json()
|
||||
params['credentialId'] = credential_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAuthn.getCredential',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return Credential.from_json(json['credential'])
|
||||
|
||||
|
||||
def get_credentials(
|
||||
authenticator_id: AuthenticatorId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Credential]]:
|
||||
'''
|
||||
Returns all the credentials stored in the given virtual authenticator.
|
||||
|
||||
:param authenticator_id:
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['authenticatorId'] = authenticator_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAuthn.getCredentials',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [Credential.from_json(i) for i in json['credentials']]
|
||||
|
||||
|
||||
def remove_credential(
|
||||
authenticator_id: AuthenticatorId,
|
||||
credential_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes a credential from the authenticator.
|
||||
|
||||
:param authenticator_id:
|
||||
:param credential_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['authenticatorId'] = authenticator_id.to_json()
|
||||
params['credentialId'] = credential_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAuthn.removeCredential',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def clear_credentials(
|
||||
authenticator_id: AuthenticatorId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears all the credentials from the specified device.
|
||||
|
||||
:param authenticator_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['authenticatorId'] = authenticator_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAuthn.clearCredentials',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_user_verified(
|
||||
authenticator_id: AuthenticatorId,
|
||||
is_user_verified: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets whether User Verification succeeds or fails for an authenticator.
|
||||
The default is true.
|
||||
|
||||
:param authenticator_id:
|
||||
:param is_user_verified:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['authenticatorId'] = authenticator_id.to_json()
|
||||
params['isUserVerified'] = is_user_verified
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAuthn.setUserVerified',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_automatic_presence_simulation(
|
||||
authenticator_id: AuthenticatorId,
|
||||
enabled: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets whether tests of user presence will succeed immediately (if true) or fail to resolve (if false) for an authenticator.
|
||||
The default is true.
|
||||
|
||||
:param authenticator_id:
|
||||
:param enabled:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['authenticatorId'] = authenticator_id.to_json()
|
||||
params['enabled'] = enabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'WebAuthn.setAutomaticPresenceSimulation',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
@@ -0,0 +1,52 @@
# DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
from . import accessibility
from . import animation
from . import audits
from . import background_service
from . import browser
from . import css
from . import cache_storage
from . import cast
from . import console
from . import dom
from . import dom_debugger
from . import dom_snapshot
from . import dom_storage
from . import database
from . import debugger
from . import device_orientation
from . import emulation
from . import event_breakpoints
from . import fetch
from . import headless_experimental
from . import heap_profiler
from . import io
from . import indexed_db
from . import input_
from . import inspector
from . import layer_tree
from . import log
from . import media
from . import memory
from . import network
from . import overlay
from . import page
from . import performance
from . import performance_timeline
from . import profiler
from . import runtime
from . import schema
from . import security
from . import service_worker
from . import storage
from . import system_info
from . import target
from . import tethering
from . import tracing
from . import web_audio
from . import web_authn
from . import util
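
# Illustrative usage sketch (not part of the generated package): after this
# package is imported, every domain is reachable as an attribute, so protocol
# payloads can be built without further imports. Port 9222 is an example value.
def _example_payloads():
    ''' A few command payloads built through the re-exported domain modules. '''
    return [
        next(target.set_discover_targets(True)),
        next(tracing.get_categories()),
        next(tethering.bind(9222)),
    ]
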
@@ -0,0 +1,640 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Accessibility (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import page
|
||||
from . import runtime
|
||||
|
||||
|
||||
class AXNodeId(str):
|
||||
'''
|
||||
Unique accessibility node identifier.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> AXNodeId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'AXNodeId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class AXValueType(enum.Enum):
|
||||
'''
|
||||
Enum of possible property types.
|
||||
'''
|
||||
BOOLEAN = "boolean"
|
||||
TRISTATE = "tristate"
|
||||
BOOLEAN_OR_UNDEFINED = "booleanOrUndefined"
|
||||
IDREF = "idref"
|
||||
IDREF_LIST = "idrefList"
|
||||
INTEGER = "integer"
|
||||
NODE = "node"
|
||||
NODE_LIST = "nodeList"
|
||||
NUMBER = "number"
|
||||
STRING = "string"
|
||||
COMPUTED_STRING = "computedString"
|
||||
TOKEN = "token"
|
||||
TOKEN_LIST = "tokenList"
|
||||
DOM_RELATION = "domRelation"
|
||||
ROLE = "role"
|
||||
INTERNAL_ROLE = "internalRole"
|
||||
VALUE_UNDEFINED = "valueUndefined"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class AXValueSourceType(enum.Enum):
|
||||
'''
|
||||
Enum of possible property sources.
|
||||
'''
|
||||
ATTRIBUTE = "attribute"
|
||||
IMPLICIT = "implicit"
|
||||
STYLE = "style"
|
||||
CONTENTS = "contents"
|
||||
PLACEHOLDER = "placeholder"
|
||||
RELATED_ELEMENT = "relatedElement"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class AXValueNativeSourceType(enum.Enum):
|
||||
'''
|
||||
Enum of possible native property sources (as a subtype of a particular AXValueSourceType).
|
||||
'''
|
||||
DESCRIPTION = "description"
|
||||
FIGCAPTION = "figcaption"
|
||||
LABEL = "label"
|
||||
LABELFOR = "labelfor"
|
||||
LABELWRAPPED = "labelwrapped"
|
||||
LEGEND = "legend"
|
||||
RUBYANNOTATION = "rubyannotation"
|
||||
TABLECAPTION = "tablecaption"
|
||||
TITLE = "title"
|
||||
OTHER = "other"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AXValueSource:
|
||||
'''
|
||||
A single source for a computed AX property.
|
||||
'''
|
||||
#: What type of source this is.
|
||||
type_: AXValueSourceType
|
||||
|
||||
#: The value of this property source.
|
||||
value: typing.Optional[AXValue] = None
|
||||
|
||||
#: The name of the relevant attribute, if any.
|
||||
attribute: typing.Optional[str] = None
|
||||
|
||||
#: The value of the relevant attribute, if any.
|
||||
attribute_value: typing.Optional[AXValue] = None
|
||||
|
||||
#: Whether this source is superseded by a higher priority source.
|
||||
superseded: typing.Optional[bool] = None
|
||||
|
||||
#: The native markup source for this value, e.g. a <label> element.
|
||||
native_source: typing.Optional[AXValueNativeSourceType] = None
|
||||
|
||||
#: The value, such as a node or node list, of the native source.
|
||||
native_source_value: typing.Optional[AXValue] = None
|
||||
|
||||
#: Whether the value for this property is invalid.
|
||||
invalid: typing.Optional[bool] = None
|
||||
|
||||
#: Reason for the value being invalid, if it is.
|
||||
invalid_reason: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_.to_json()
|
||||
if self.value is not None:
|
||||
json['value'] = self.value.to_json()
|
||||
if self.attribute is not None:
|
||||
json['attribute'] = self.attribute
|
||||
if self.attribute_value is not None:
|
||||
json['attributeValue'] = self.attribute_value.to_json()
|
||||
if self.superseded is not None:
|
||||
json['superseded'] = self.superseded
|
||||
if self.native_source is not None:
|
||||
json['nativeSource'] = self.native_source.to_json()
|
||||
if self.native_source_value is not None:
|
||||
json['nativeSourceValue'] = self.native_source_value.to_json()
|
||||
if self.invalid is not None:
|
||||
json['invalid'] = self.invalid
|
||||
if self.invalid_reason is not None:
|
||||
json['invalidReason'] = self.invalid_reason
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=AXValueSourceType.from_json(json['type']),
|
||||
value=AXValue.from_json(json['value']) if 'value' in json else None,
|
||||
attribute=str(json['attribute']) if 'attribute' in json else None,
|
||||
attribute_value=AXValue.from_json(json['attributeValue']) if 'attributeValue' in json else None,
|
||||
superseded=bool(json['superseded']) if 'superseded' in json else None,
|
||||
native_source=AXValueNativeSourceType.from_json(json['nativeSource']) if 'nativeSource' in json else None,
|
||||
native_source_value=AXValue.from_json(json['nativeSourceValue']) if 'nativeSourceValue' in json else None,
|
||||
invalid=bool(json['invalid']) if 'invalid' in json else None,
|
||||
invalid_reason=str(json['invalidReason']) if 'invalidReason' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AXRelatedNode:
|
||||
#: The BackendNodeId of the related DOM node.
|
||||
backend_dom_node_id: dom.BackendNodeId
|
||||
|
||||
#: The IDRef value provided, if any.
|
||||
idref: typing.Optional[str] = None
|
||||
|
||||
#: The text alternative of this node in the current context.
|
||||
text: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['backendDOMNodeId'] = self.backend_dom_node_id.to_json()
|
||||
if self.idref is not None:
|
||||
json['idref'] = self.idref
|
||||
if self.text is not None:
|
||||
json['text'] = self.text
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
backend_dom_node_id=dom.BackendNodeId.from_json(json['backendDOMNodeId']),
|
||||
idref=str(json['idref']) if 'idref' in json else None,
|
||||
text=str(json['text']) if 'text' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AXProperty:
|
||||
#: The name of this property.
|
||||
name: AXPropertyName
|
||||
|
||||
#: The value of this property.
|
||||
value: AXValue
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name.to_json()
|
||||
json['value'] = self.value.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=AXPropertyName.from_json(json['name']),
|
||||
value=AXValue.from_json(json['value']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AXValue:
|
||||
'''
|
||||
A single computed AX property.
|
||||
'''
|
||||
#: The type of this value.
|
||||
type_: AXValueType
|
||||
|
||||
#: The computed value of this property.
|
||||
value: typing.Optional[typing.Any] = None
|
||||
|
||||
#: One or more related nodes, if applicable.
|
||||
related_nodes: typing.Optional[typing.List[AXRelatedNode]] = None
|
||||
|
||||
#: The sources which contributed to the computation of this property.
|
||||
sources: typing.Optional[typing.List[AXValueSource]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_.to_json()
|
||||
if self.value is not None:
|
||||
json['value'] = self.value
|
||||
if self.related_nodes is not None:
|
||||
json['relatedNodes'] = [i.to_json() for i in self.related_nodes]
|
||||
if self.sources is not None:
|
||||
json['sources'] = [i.to_json() for i in self.sources]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=AXValueType.from_json(json['type']),
|
||||
value=json['value'] if 'value' in json else None,
|
||||
related_nodes=[AXRelatedNode.from_json(i) for i in json['relatedNodes']] if 'relatedNodes' in json else None,
|
||||
sources=[AXValueSource.from_json(i) for i in json['sources']] if 'sources' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class AXPropertyName(enum.Enum):
|
||||
'''
|
||||
Values of AXProperty name:
|
||||
- from 'busy' to 'roledescription': states which apply to every AX node
|
||||
- from 'live' to 'root': attributes which apply to nodes in live regions
|
||||
- from 'autocomplete' to 'valuetext': attributes which apply to widgets
|
||||
- from 'checked' to 'selected': states which apply to widgets
|
||||
- from 'activedescendant' to 'owns' - relationships between elements other than parent/child/sibling.
|
||||
'''
|
||||
BUSY = "busy"
|
||||
DISABLED = "disabled"
|
||||
EDITABLE = "editable"
|
||||
FOCUSABLE = "focusable"
|
||||
FOCUSED = "focused"
|
||||
HIDDEN = "hidden"
|
||||
HIDDEN_ROOT = "hiddenRoot"
|
||||
INVALID = "invalid"
|
||||
KEYSHORTCUTS = "keyshortcuts"
|
||||
SETTABLE = "settable"
|
||||
ROLEDESCRIPTION = "roledescription"
|
||||
LIVE = "live"
|
||||
ATOMIC = "atomic"
|
||||
RELEVANT = "relevant"
|
||||
ROOT = "root"
|
||||
AUTOCOMPLETE = "autocomplete"
|
||||
HAS_POPUP = "hasPopup"
|
||||
LEVEL = "level"
|
||||
MULTISELECTABLE = "multiselectable"
|
||||
ORIENTATION = "orientation"
|
||||
MULTILINE = "multiline"
|
||||
READONLY = "readonly"
|
||||
REQUIRED = "required"
|
||||
VALUEMIN = "valuemin"
|
||||
VALUEMAX = "valuemax"
|
||||
VALUETEXT = "valuetext"
|
||||
CHECKED = "checked"
|
||||
EXPANDED = "expanded"
|
||||
MODAL = "modal"
|
||||
PRESSED = "pressed"
|
||||
SELECTED = "selected"
|
||||
ACTIVEDESCENDANT = "activedescendant"
|
||||
CONTROLS = "controls"
|
||||
DESCRIBEDBY = "describedby"
|
||||
DETAILS = "details"
|
||||
ERRORMESSAGE = "errormessage"
|
||||
FLOWTO = "flowto"
|
||||
LABELLEDBY = "labelledby"
|
||||
OWNS = "owns"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AXNode:
|
||||
'''
|
||||
A node in the accessibility tree.
|
||||
'''
|
||||
#: Unique identifier for this node.
|
||||
node_id: AXNodeId
|
||||
|
||||
#: Whether this node is ignored for accessibility
|
||||
ignored: bool
|
||||
|
||||
#: Collection of reasons why this node is hidden.
|
||||
ignored_reasons: typing.Optional[typing.List[AXProperty]] = None
|
||||
|
||||
#: This ``Node``'s role, whether explicit or implicit.
|
||||
role: typing.Optional[AXValue] = None
|
||||
|
||||
#: The accessible name for this ``Node``.
|
||||
name: typing.Optional[AXValue] = None
|
||||
|
||||
#: The accessible description for this ``Node``.
|
||||
description: typing.Optional[AXValue] = None
|
||||
|
||||
#: The value for this ``Node``.
|
||||
value: typing.Optional[AXValue] = None
|
||||
|
||||
#: All other properties
|
||||
properties: typing.Optional[typing.List[AXProperty]] = None
|
||||
|
||||
#: ID for this node's parent.
|
||||
parent_id: typing.Optional[AXNodeId] = None
|
||||
|
||||
#: IDs for each of this node's child nodes.
|
||||
child_ids: typing.Optional[typing.List[AXNodeId]] = None
|
||||
|
||||
#: The backend ID for the associated DOM node, if any.
|
||||
backend_dom_node_id: typing.Optional[dom.BackendNodeId] = None
|
||||
|
||||
#: The frame ID for the frame associated with this node's document.
|
||||
frame_id: typing.Optional[page.FrameId] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['nodeId'] = self.node_id.to_json()
|
||||
json['ignored'] = self.ignored
|
||||
if self.ignored_reasons is not None:
|
||||
json['ignoredReasons'] = [i.to_json() for i in self.ignored_reasons]
|
||||
if self.role is not None:
|
||||
json['role'] = self.role.to_json()
|
||||
if self.name is not None:
|
||||
json['name'] = self.name.to_json()
|
||||
if self.description is not None:
|
||||
json['description'] = self.description.to_json()
|
||||
if self.value is not None:
|
||||
json['value'] = self.value.to_json()
|
||||
if self.properties is not None:
|
||||
json['properties'] = [i.to_json() for i in self.properties]
|
||||
if self.parent_id is not None:
|
||||
json['parentId'] = self.parent_id.to_json()
|
||||
if self.child_ids is not None:
|
||||
json['childIds'] = [i.to_json() for i in self.child_ids]
|
||||
if self.backend_dom_node_id is not None:
|
||||
json['backendDOMNodeId'] = self.backend_dom_node_id.to_json()
|
||||
if self.frame_id is not None:
|
||||
json['frameId'] = self.frame_id.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
node_id=AXNodeId.from_json(json['nodeId']),
|
||||
ignored=bool(json['ignored']),
|
||||
ignored_reasons=[AXProperty.from_json(i) for i in json['ignoredReasons']] if 'ignoredReasons' in json else None,
|
||||
role=AXValue.from_json(json['role']) if 'role' in json else None,
|
||||
name=AXValue.from_json(json['name']) if 'name' in json else None,
|
||||
description=AXValue.from_json(json['description']) if 'description' in json else None,
|
||||
value=AXValue.from_json(json['value']) if 'value' in json else None,
|
||||
properties=[AXProperty.from_json(i) for i in json['properties']] if 'properties' in json else None,
|
||||
parent_id=AXNodeId.from_json(json['parentId']) if 'parentId' in json else None,
|
||||
child_ids=[AXNodeId.from_json(i) for i in json['childIds']] if 'childIds' in json else None,
|
||||
backend_dom_node_id=dom.BackendNodeId.from_json(json['backendDOMNodeId']) if 'backendDOMNodeId' in json else None,
|
||||
frame_id=page.FrameId.from_json(json['frameId']) if 'frameId' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables the accessibility domain.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables the accessibility domain which causes ``AXNodeId``s to remain consistent between method calls.
|
||||
This turns on accessibility for the page, which can impact performance until accessibility is disabled.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.enable',
|
||||
}
|
||||
json = yield cmd_dict
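
# --- Editor's usage sketch (not part of the generated module) ---------------
# Every command in this module is a generator: it yields exactly one JSON
# command dict and expects the raw JSON response to be sent back in, after
# which the decoded return value comes out via StopIteration.  The helper
# below is a hypothetical, minimal driver for a blocking transport; ``send``
# is an assumed callable that performs the actual CDP round trip and is not
# provided by this package.
def _drive_command(cmd, send):
    request = next(cmd)              # e.g. {'method': 'Accessibility.enable'}
    try:
        cmd.send(send(request))      # feed the JSON response back into the generator
    except StopIteration as exc:
        return exc.value             # decoded result (None for enable/disable)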
|
||||
|
||||
|
||||
def get_partial_ax_tree(
|
||||
node_id: typing.Optional[dom.NodeId] = None,
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None,
|
||||
object_id: typing.Optional[runtime.RemoteObjectId] = None,
|
||||
fetch_relatives: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
|
||||
'''
|
||||
Fetches the accessibility node and partial accessibility tree for this DOM node, if it exists.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param node_id: *(Optional)* Identifier of the node to get the partial accessibility tree for.
|
||||
:param backend_node_id: *(Optional)* Identifier of the backend node to get the partial accessibility tree for.
|
||||
:param object_id: *(Optional)* JavaScript object id of the node wrapper to get the partial accessibility tree for.
|
||||
:param fetch_relatives: *(Optional)* Whether to fetch this node's ancestors, siblings and children. Defaults to true.
|
||||
:returns: The ``Accessibility.AXNode`` for this DOM node, if it exists, plus its ancestors, siblings and children, if requested.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if node_id is not None:
|
||||
params['nodeId'] = node_id.to_json()
|
||||
if backend_node_id is not None:
|
||||
params['backendNodeId'] = backend_node_id.to_json()
|
||||
if object_id is not None:
|
||||
params['objectId'] = object_id.to_json()
|
||||
if fetch_relatives is not None:
|
||||
params['fetchRelatives'] = fetch_relatives
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.getPartialAXTree',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [AXNode.from_json(i) for i in json['nodes']]
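
# Hypothetical example (editor's sketch): fetch the partial AX tree for a DOM
# node and print role/name pairs.  ``execute`` stands in for whatever drives
# these command generators against a live CDP connection (see the driver
# sketch above); it is not defined in this module.
def _print_partial_tree(execute, backend_node_id):
    nodes = execute(get_partial_ax_tree(backend_node_id=backend_node_id,
                                        fetch_relatives=False))
    for node in nodes:
        role = node.role.value if node.role is not None else None
        name = node.name.value if node.name is not None else None
        print(node.node_id, role, name)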
|
||||
|
||||
|
||||
def get_full_ax_tree(
|
||||
depth: typing.Optional[int] = None,
|
||||
frame_id: typing.Optional[page.FrameId] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
|
||||
'''
|
||||
Fetches the entire accessibility tree for the root Document
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param depth: *(Optional)* The maximum depth at which descendants of the root node should be retrieved. If omitted, the full tree is returned.
|
||||
:param frame_id: *(Optional)* The frame for whose document the AX tree should be retrieved. If omitted, the root frame is used.
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if depth is not None:
|
||||
params['depth'] = depth
|
||||
if frame_id is not None:
|
||||
params['frameId'] = frame_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.getFullAXTree',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [AXNode.from_json(i) for i in json['nodes']]
|
||||
|
||||
|
||||
def get_root_ax_node(
|
||||
frame_id: typing.Optional[page.FrameId] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,AXNode]:
|
||||
'''
|
||||
Fetches the root node.
|
||||
Requires ``enable()`` to have been called previously.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param frame_id: *(Optional)* The frame in whose document the node resides. If omitted, the root frame is used.
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if frame_id is not None:
|
||||
params['frameId'] = frame_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.getRootAXNode',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return AXNode.from_json(json['node'])
|
||||
|
||||
|
||||
def get_ax_node_and_ancestors(
|
||||
node_id: typing.Optional[dom.NodeId] = None,
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None,
|
||||
object_id: typing.Optional[runtime.RemoteObjectId] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
|
||||
'''
|
||||
Fetches a node and all ancestors up to and including the root.
|
||||
Requires ``enable()`` to have been called previously.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param node_id: *(Optional)* Identifier of the node to get.
|
||||
:param backend_node_id: *(Optional)* Identifier of the backend node to get.
|
||||
:param object_id: *(Optional)* JavaScript object id of the node wrapper to get.
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if node_id is not None:
|
||||
params['nodeId'] = node_id.to_json()
|
||||
if backend_node_id is not None:
|
||||
params['backendNodeId'] = backend_node_id.to_json()
|
||||
if object_id is not None:
|
||||
params['objectId'] = object_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.getAXNodeAndAncestors',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [AXNode.from_json(i) for i in json['nodes']]
|
||||
|
||||
|
||||
def get_child_ax_nodes(
|
||||
id_: AXNodeId,
|
||||
frame_id: typing.Optional[page.FrameId] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
|
||||
'''
|
||||
Fetches a particular accessibility node by AXNodeId.
|
||||
Requires ``enable()`` to have been called previously.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param id_:
|
||||
:param frame_id: *(Optional)* The frame in whose document the node resides. If omitted, the root frame is used.
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['id'] = id_.to_json()
|
||||
if frame_id is not None:
|
||||
params['frameId'] = frame_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.getChildAXNodes',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [AXNode.from_json(i) for i in json['nodes']]
|
||||
|
||||
|
||||
def query_ax_tree(
|
||||
node_id: typing.Optional[dom.NodeId] = None,
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None,
|
||||
object_id: typing.Optional[runtime.RemoteObjectId] = None,
|
||||
accessible_name: typing.Optional[str] = None,
|
||||
role: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
|
||||
'''
|
||||
Query a DOM node's accessibility subtree for accessible name and role.
|
||||
This command computes the name and role for all nodes in the subtree, including those that are
|
||||
ignored for accessibility, and returns those that match the specified name and role. If no DOM
node is specified, or the DOM node does not exist, the command returns an error. If neither
``accessibleName`` nor ``role`` is specified, it returns all the accessibility nodes in the subtree.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param node_id: *(Optional)* Identifier of the node for the root to query.
|
||||
:param backend_node_id: *(Optional)* Identifier of the backend node for the root to query.
|
||||
:param object_id: *(Optional)* JavaScript object id of the node wrapper for the root to query.
|
||||
:param accessible_name: *(Optional)* Find nodes with this computed name.
|
||||
:param role: *(Optional)* Find nodes with this computed role.
|
||||
:returns: A list of ``Accessibility.AXNode`` matching the specified attributes, including nodes that are ignored for accessibility.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if node_id is not None:
|
||||
params['nodeId'] = node_id.to_json()
|
||||
if backend_node_id is not None:
|
||||
params['backendNodeId'] = backend_node_id.to_json()
|
||||
if object_id is not None:
|
||||
params['objectId'] = object_id.to_json()
|
||||
if accessible_name is not None:
|
||||
params['accessibleName'] = accessible_name
|
||||
if role is not None:
|
||||
params['role'] = role
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Accessibility.queryAXTree',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [AXNode.from_json(i) for i in json['nodes']]
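
# Hypothetical example (editor's sketch): find every button named "Submit"
# under a given root node.  ``execute`` is an assumed driver for these command
# generators, not something this module provides.
def _find_submit_buttons(execute, root_node_id):
    return execute(query_ax_tree(node_id=root_node_id,
                                 accessible_name="Submit",
                                 role="button"))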
|
||||
|
||||
|
||||
@event_class('Accessibility.loadComplete')
|
||||
@dataclass
|
||||
class LoadComplete:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
The loadComplete event mirrors the load complete event sent by the browser to assistive
|
||||
technology when the web page has finished loading.
|
||||
'''
|
||||
#: New document root node.
|
||||
root: AXNode
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> LoadComplete:
|
||||
return cls(
|
||||
root=AXNode.from_json(json['root'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Accessibility.nodesUpdated')
|
||||
@dataclass
|
||||
class NodesUpdated:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
The nodesUpdated event is sent every time a previously requested node has changed in the tree.
|
||||
'''
|
||||
#: Updated node data.
|
||||
nodes: typing.List[AXNode]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> NodesUpdated:
|
||||
return cls(
|
||||
nodes=[AXNode.from_json(i) for i in json['nodes']]
|
||||
)
|
||||
@@ -0,0 +1,415 @@
# DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: Animation (experimental)
from __future__ import annotations
from .util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing
from . import dom
from . import runtime


@dataclass
|
||||
class Animation:
|
||||
'''
|
||||
Animation instance.
|
||||
'''
|
||||
#: ``Animation``'s id.
|
||||
id_: str
|
||||
|
||||
#: ``Animation``'s name.
|
||||
name: str
|
||||
|
||||
#: ``Animation``'s internal paused state.
|
||||
paused_state: bool
|
||||
|
||||
#: ``Animation``'s play state.
|
||||
play_state: str
|
||||
|
||||
#: ``Animation``'s playback rate.
|
||||
playback_rate: float
|
||||
|
||||
#: ``Animation``'s start time.
|
||||
start_time: float
|
||||
|
||||
#: ``Animation``'s current time.
|
||||
current_time: float
|
||||
|
||||
#: Animation type of ``Animation``.
|
||||
type_: str
|
||||
|
||||
#: ``Animation``'s source animation node.
|
||||
source: typing.Optional[AnimationEffect] = None
|
||||
|
||||
#: A unique ID for ``Animation`` representing the sources that triggered this CSS
|
||||
#: animation/transition.
|
||||
css_id: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['id'] = self.id_
|
||||
json['name'] = self.name
|
||||
json['pausedState'] = self.paused_state
|
||||
json['playState'] = self.play_state
|
||||
json['playbackRate'] = self.playback_rate
|
||||
json['startTime'] = self.start_time
|
||||
json['currentTime'] = self.current_time
|
||||
json['type'] = self.type_
|
||||
if self.source is not None:
|
||||
json['source'] = self.source.to_json()
|
||||
if self.css_id is not None:
|
||||
json['cssId'] = self.css_id
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
id_=str(json['id']),
|
||||
name=str(json['name']),
|
||||
paused_state=bool(json['pausedState']),
|
||||
play_state=str(json['playState']),
|
||||
playback_rate=float(json['playbackRate']),
|
||||
start_time=float(json['startTime']),
|
||||
current_time=float(json['currentTime']),
|
||||
type_=str(json['type']),
|
||||
source=AnimationEffect.from_json(json['source']) if 'source' in json else None,
|
||||
css_id=str(json['cssId']) if 'cssId' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AnimationEffect:
|
||||
'''
|
||||
AnimationEffect instance
|
||||
'''
|
||||
#: ``AnimationEffect``'s delay.
|
||||
delay: float
|
||||
|
||||
#: ``AnimationEffect``'s end delay.
|
||||
end_delay: float
|
||||
|
||||
#: ``AnimationEffect``'s iteration start.
|
||||
iteration_start: float
|
||||
|
||||
#: ``AnimationEffect``'s iterations.
|
||||
iterations: float
|
||||
|
||||
#: ``AnimationEffect``'s iteration duration.
|
||||
duration: float
|
||||
|
||||
#: ``AnimationEffect``'s playback direction.
|
||||
direction: str
|
||||
|
||||
#: ``AnimationEffect``'s fill mode.
|
||||
fill: str
|
||||
|
||||
#: ``AnimationEffect``'s timing function.
|
||||
easing: str
|
||||
|
||||
#: ``AnimationEffect``'s target node.
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None
|
||||
|
||||
#: ``AnimationEffect``'s keyframes.
|
||||
keyframes_rule: typing.Optional[KeyframesRule] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['delay'] = self.delay
|
||||
json['endDelay'] = self.end_delay
|
||||
json['iterationStart'] = self.iteration_start
|
||||
json['iterations'] = self.iterations
|
||||
json['duration'] = self.duration
|
||||
json['direction'] = self.direction
|
||||
json['fill'] = self.fill
|
||||
json['easing'] = self.easing
|
||||
if self.backend_node_id is not None:
|
||||
json['backendNodeId'] = self.backend_node_id.to_json()
|
||||
if self.keyframes_rule is not None:
|
||||
json['keyframesRule'] = self.keyframes_rule.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
delay=float(json['delay']),
|
||||
end_delay=float(json['endDelay']),
|
||||
iteration_start=float(json['iterationStart']),
|
||||
iterations=float(json['iterations']),
|
||||
duration=float(json['duration']),
|
||||
direction=str(json['direction']),
|
||||
fill=str(json['fill']),
|
||||
easing=str(json['easing']),
|
||||
backend_node_id=dom.BackendNodeId.from_json(json['backendNodeId']) if 'backendNodeId' in json else None,
|
||||
keyframes_rule=KeyframesRule.from_json(json['keyframesRule']) if 'keyframesRule' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class KeyframesRule:
|
||||
'''
|
||||
Keyframes Rule
|
||||
'''
|
||||
#: List of animation keyframes.
|
||||
keyframes: typing.List[KeyframeStyle]
|
||||
|
||||
#: CSS keyframed animation's name.
|
||||
name: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['keyframes'] = [i.to_json() for i in self.keyframes]
|
||||
if self.name is not None:
|
||||
json['name'] = self.name
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
keyframes=[KeyframeStyle.from_json(i) for i in json['keyframes']],
|
||||
name=str(json['name']) if 'name' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class KeyframeStyle:
|
||||
'''
|
||||
Keyframe Style
|
||||
'''
|
||||
#: Keyframe's time offset.
|
||||
offset: str
|
||||
|
||||
#: ``AnimationEffect``'s timing function.
|
||||
easing: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['offset'] = self.offset
|
||||
json['easing'] = self.easing
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
offset=str(json['offset']),
|
||||
easing=str(json['easing']),
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables animation domain notifications.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables animation domain notifications.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_current_time(
|
||||
id_: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,float]:
|
||||
'''
|
||||
Returns the current time of an animation.
|
||||
|
||||
:param id_: Id of animation.
|
||||
:returns: Current time of the page.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['id'] = id_
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.getCurrentTime',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return float(json['currentTime'])
|
||||
|
||||
|
||||
def get_playback_rate() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,float]:
|
||||
'''
|
||||
Gets the playback rate of the document timeline.
|
||||
|
||||
:returns: Playback rate for animations on page.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.getPlaybackRate',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return float(json['playbackRate'])
|
||||
|
||||
|
||||
def release_animations(
|
||||
animations: typing.List[str]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Releases a set of animations to no longer be manipulated.
|
||||
|
||||
:param animations: List of animation ids to release.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['animations'] = [i for i in animations]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.releaseAnimations',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def resolve_animation(
|
||||
animation_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,runtime.RemoteObject]:
|
||||
'''
|
||||
Gets the remote object of the Animation.
|
||||
|
||||
:param animation_id: Animation id.
|
||||
:returns: Corresponding remote object.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['animationId'] = animation_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.resolveAnimation',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return runtime.RemoteObject.from_json(json['remoteObject'])
|
||||
|
||||
|
||||
def seek_animations(
|
||||
animations: typing.List[str],
|
||||
current_time: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Seek a set of animations to a particular time within each animation.
|
||||
|
||||
:param animations: List of animation ids to seek.
|
||||
:param current_time: Set the current time of each animation.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['animations'] = [i for i in animations]
|
||||
params['currentTime'] = current_time
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.seekAnimations',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_paused(
|
||||
animations: typing.List[str],
|
||||
paused: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets the paused state of a set of animations.
|
||||
|
||||
:param animations: Animations to set the pause state of.
|
||||
:param paused: Paused state to set to.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['animations'] = [i for i in animations]
|
||||
params['paused'] = paused
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.setPaused',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
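
# Hypothetical example (editor's sketch): pause a set of animations and jump
# them to a fixed current time (presumably milliseconds, per the Animation
# timing fields above).  ``execute`` is an assumed driver for these command
# generators; animation ids would normally be collected from
# ``AnimationCreated``/``AnimationStarted`` events.
def _freeze_animations(execute, animation_ids):
    execute(set_paused(animation_ids, paused=True))
    execute(seek_animations(animation_ids, current_time=1000.0))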
|
||||
|
||||
|
||||
def set_playback_rate(
|
||||
playback_rate: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets the playback rate of the document timeline.
|
||||
|
||||
:param playback_rate: Playback rate for animations on page
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['playbackRate'] = playback_rate
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.setPlaybackRate',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_timing(
|
||||
animation_id: str,
|
||||
duration: float,
|
||||
delay: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets the timing of an animation node.
|
||||
|
||||
:param animation_id: Animation id.
|
||||
:param duration: Duration of the animation.
|
||||
:param delay: Delay of the animation.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['animationId'] = animation_id
|
||||
params['duration'] = duration
|
||||
params['delay'] = delay
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Animation.setTiming',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Animation.animationCanceled')
|
||||
@dataclass
|
||||
class AnimationCanceled:
|
||||
'''
|
||||
Event for when an animation has been cancelled.
|
||||
'''
|
||||
#: Id of the animation that was cancelled.
|
||||
id_: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AnimationCanceled:
|
||||
return cls(
|
||||
id_=str(json['id'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Animation.animationCreated')
|
||||
@dataclass
|
||||
class AnimationCreated:
|
||||
'''
|
||||
Event for each animation that has been created.
|
||||
'''
|
||||
#: Id of the animation that was created.
|
||||
id_: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AnimationCreated:
|
||||
return cls(
|
||||
id_=str(json['id'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Animation.animationStarted')
|
||||
@dataclass
|
||||
class AnimationStarted:
|
||||
'''
|
||||
Event for animation that has been started.
|
||||
'''
|
||||
#: Animation that was started.
|
||||
animation: Animation
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AnimationStarted:
|
||||
return cls(
|
||||
animation=Animation.from_json(json['animation'])
|
||||
)
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,208 @@
# DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: BackgroundService (experimental)
from __future__ import annotations
from .util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing
from . import network
from . import service_worker


class ServiceName(enum.Enum):
|
||||
'''
|
||||
The Background Service that will be associated with the commands/events.
|
||||
Every Background Service operates independently, but they share the same
|
||||
API.
|
||||
'''
|
||||
BACKGROUND_FETCH = "backgroundFetch"
|
||||
BACKGROUND_SYNC = "backgroundSync"
|
||||
PUSH_MESSAGING = "pushMessaging"
|
||||
NOTIFICATIONS = "notifications"
|
||||
PAYMENT_HANDLER = "paymentHandler"
|
||||
PERIODIC_BACKGROUND_SYNC = "periodicBackgroundSync"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class EventMetadata:
|
||||
'''
|
||||
A key-value pair for additional event information to pass along.
|
||||
'''
|
||||
key: str
|
||||
|
||||
value: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['key'] = self.key
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
key=str(json['key']),
|
||||
value=str(json['value']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class BackgroundServiceEvent:
|
||||
#: Timestamp of the event (in seconds).
|
||||
timestamp: network.TimeSinceEpoch
|
||||
|
||||
#: The origin this event belongs to.
|
||||
origin: str
|
||||
|
||||
#: The Service Worker ID that initiated the event.
|
||||
service_worker_registration_id: service_worker.RegistrationID
|
||||
|
||||
#: The Background Service this event belongs to.
|
||||
service: ServiceName
|
||||
|
||||
#: A description of the event.
|
||||
event_name: str
|
||||
|
||||
#: An identifier that groups related events together.
|
||||
instance_id: str
|
||||
|
||||
#: A list of event-specific information.
|
||||
event_metadata: typing.List[EventMetadata]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['timestamp'] = self.timestamp.to_json()
|
||||
json['origin'] = self.origin
|
||||
json['serviceWorkerRegistrationId'] = self.service_worker_registration_id.to_json()
|
||||
json['service'] = self.service.to_json()
|
||||
json['eventName'] = self.event_name
|
||||
json['instanceId'] = self.instance_id
|
||||
json['eventMetadata'] = [i.to_json() for i in self.event_metadata]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
timestamp=network.TimeSinceEpoch.from_json(json['timestamp']),
|
||||
origin=str(json['origin']),
|
||||
service_worker_registration_id=service_worker.RegistrationID.from_json(json['serviceWorkerRegistrationId']),
|
||||
service=ServiceName.from_json(json['service']),
|
||||
event_name=str(json['eventName']),
|
||||
instance_id=str(json['instanceId']),
|
||||
event_metadata=[EventMetadata.from_json(i) for i in json['eventMetadata']],
|
||||
)
|
||||
|
||||
|
||||
def start_observing(
|
||||
service: ServiceName
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables event updates for the service.
|
||||
|
||||
:param service:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['service'] = service.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'BackgroundService.startObserving',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def stop_observing(
|
||||
service: ServiceName
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables event updates for the service.
|
||||
|
||||
:param service:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['service'] = service.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'BackgroundService.stopObserving',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_recording(
|
||||
should_record: bool,
|
||||
service: ServiceName
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set the recording state for the service.
|
||||
|
||||
:param should_record:
|
||||
:param service:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['shouldRecord'] = should_record
|
||||
params['service'] = service.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'BackgroundService.setRecording',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def clear_events(
|
||||
service: ServiceName
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears all stored data for the service.
|
||||
|
||||
:param service:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['service'] = service.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'BackgroundService.clearEvents',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('BackgroundService.recordingStateChanged')
|
||||
@dataclass
|
||||
class RecordingStateChanged:
|
||||
'''
|
||||
Called when the recording state for the service has been updated.
|
||||
'''
|
||||
is_recording: bool
|
||||
service: ServiceName
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> RecordingStateChanged:
|
||||
return cls(
|
||||
is_recording=bool(json['isRecording']),
|
||||
service=ServiceName.from_json(json['service'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('BackgroundService.backgroundServiceEventReceived')
|
||||
@dataclass
|
||||
class BackgroundServiceEventReceived:
|
||||
'''
|
||||
Called with all existing backgroundServiceEvents when enabled, and all new
|
||||
events afterwards if enabled and recording.
|
||||
'''
|
||||
background_service_event: BackgroundServiceEvent
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> BackgroundServiceEventReceived:
|
||||
return cls(
|
||||
background_service_event=BackgroundServiceEvent.from_json(json['backgroundServiceEvent'])
|
||||
)
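
# Hypothetical example (editor's sketch): start observing Background Fetch and
# turn on recording so that ``BackgroundServiceEventReceived`` events start
# arriving.  ``execute`` is an assumed driver for these command generators;
# actually receiving the events requires an event listener that this module
# does not provide.
def _observe_background_fetch(execute):
    execute(start_observing(ServiceName.BACKGROUND_FETCH))
    execute(set_recording(True, ServiceName.BACKGROUND_FETCH))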
|
||||
@@ -0,0 +1,697 @@
# DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: Browser
from __future__ import annotations
from .util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing
from . import page
from . import target


class BrowserContextID(str):
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> BrowserContextID:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'BrowserContextID({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class WindowID(int):
|
||||
def to_json(self) -> int:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: int) -> WindowID:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'WindowID({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class WindowState(enum.Enum):
|
||||
'''
|
||||
The state of the browser window.
|
||||
'''
|
||||
NORMAL = "normal"
|
||||
MINIMIZED = "minimized"
|
||||
MAXIMIZED = "maximized"
|
||||
FULLSCREEN = "fullscreen"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Bounds:
|
||||
'''
|
||||
Browser window bounds information
|
||||
'''
|
||||
#: The offset from the left edge of the screen to the window in pixels.
|
||||
left: typing.Optional[int] = None
|
||||
|
||||
#: The offset from the top edge of the screen to the window in pixels.
|
||||
top: typing.Optional[int] = None
|
||||
|
||||
#: The window width in pixels.
|
||||
width: typing.Optional[int] = None
|
||||
|
||||
#: The window height in pixels.
|
||||
height: typing.Optional[int] = None
|
||||
|
||||
#: The window state. Defaults to normal.
|
||||
window_state: typing.Optional[WindowState] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
if self.left is not None:
|
||||
json['left'] = self.left
|
||||
if self.top is not None:
|
||||
json['top'] = self.top
|
||||
if self.width is not None:
|
||||
json['width'] = self.width
|
||||
if self.height is not None:
|
||||
json['height'] = self.height
|
||||
if self.window_state is not None:
|
||||
json['windowState'] = self.window_state.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
left=int(json['left']) if 'left' in json else None,
|
||||
top=int(json['top']) if 'top' in json else None,
|
||||
width=int(json['width']) if 'width' in json else None,
|
||||
height=int(json['height']) if 'height' in json else None,
|
||||
window_state=WindowState.from_json(json['windowState']) if 'windowState' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class PermissionType(enum.Enum):
|
||||
ACCESSIBILITY_EVENTS = "accessibilityEvents"
|
||||
AUDIO_CAPTURE = "audioCapture"
|
||||
BACKGROUND_SYNC = "backgroundSync"
|
||||
BACKGROUND_FETCH = "backgroundFetch"
|
||||
CLIPBOARD_READ_WRITE = "clipboardReadWrite"
|
||||
CLIPBOARD_SANITIZED_WRITE = "clipboardSanitizedWrite"
|
||||
DISPLAY_CAPTURE = "displayCapture"
|
||||
DURABLE_STORAGE = "durableStorage"
|
||||
FLASH = "flash"
|
||||
GEOLOCATION = "geolocation"
|
||||
MIDI = "midi"
|
||||
MIDI_SYSEX = "midiSysex"
|
||||
NFC = "nfc"
|
||||
NOTIFICATIONS = "notifications"
|
||||
PAYMENT_HANDLER = "paymentHandler"
|
||||
PERIODIC_BACKGROUND_SYNC = "periodicBackgroundSync"
|
||||
PROTECTED_MEDIA_IDENTIFIER = "protectedMediaIdentifier"
|
||||
SENSORS = "sensors"
|
||||
VIDEO_CAPTURE = "videoCapture"
|
||||
VIDEO_CAPTURE_PAN_TILT_ZOOM = "videoCapturePanTiltZoom"
|
||||
IDLE_DETECTION = "idleDetection"
|
||||
WAKE_LOCK_SCREEN = "wakeLockScreen"
|
||||
WAKE_LOCK_SYSTEM = "wakeLockSystem"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class PermissionSetting(enum.Enum):
|
||||
GRANTED = "granted"
|
||||
DENIED = "denied"
|
||||
PROMPT = "prompt"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PermissionDescriptor:
|
||||
'''
|
||||
Definition of PermissionDescriptor defined in the Permissions API:
|
||||
https://w3c.github.io/permissions/#dictdef-permissiondescriptor.
|
||||
'''
|
||||
#: Name of permission.
|
||||
#: See https://cs.chromium.org/chromium/src/third_party/blink/renderer/modules/permissions/permission_descriptor.idl for valid permission names.
|
||||
name: str
|
||||
|
||||
#: For "midi" permission, may also specify sysex control.
|
||||
sysex: typing.Optional[bool] = None
|
||||
|
||||
#: For "push" permission, may specify userVisibleOnly.
|
||||
#: Note that userVisibleOnly = true is the only currently supported type.
|
||||
user_visible_only: typing.Optional[bool] = None
|
||||
|
||||
#: For "clipboard" permission, may specify allowWithoutSanitization.
|
||||
allow_without_sanitization: typing.Optional[bool] = None
|
||||
|
||||
#: For "camera" permission, may specify panTiltZoom.
|
||||
pan_tilt_zoom: typing.Optional[bool] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
if self.sysex is not None:
|
||||
json['sysex'] = self.sysex
|
||||
if self.user_visible_only is not None:
|
||||
json['userVisibleOnly'] = self.user_visible_only
|
||||
if self.allow_without_sanitization is not None:
|
||||
json['allowWithoutSanitization'] = self.allow_without_sanitization
|
||||
if self.pan_tilt_zoom is not None:
|
||||
json['panTiltZoom'] = self.pan_tilt_zoom
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
sysex=bool(json['sysex']) if 'sysex' in json else None,
|
||||
user_visible_only=bool(json['userVisibleOnly']) if 'userVisibleOnly' in json else None,
|
||||
allow_without_sanitization=bool(json['allowWithoutSanitization']) if 'allowWithoutSanitization' in json else None,
|
||||
pan_tilt_zoom=bool(json['panTiltZoom']) if 'panTiltZoom' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class BrowserCommandId(enum.Enum):
|
||||
'''
|
||||
Browser command ids used by executeBrowserCommand.
|
||||
'''
|
||||
OPEN_TAB_SEARCH = "openTabSearch"
|
||||
CLOSE_TAB_SEARCH = "closeTabSearch"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Bucket:
|
||||
'''
|
||||
Chrome histogram bucket.
|
||||
'''
|
||||
#: Minimum value (inclusive).
|
||||
low: int
|
||||
|
||||
#: Maximum value (exclusive).
|
||||
high: int
|
||||
|
||||
#: Number of samples.
|
||||
count: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['low'] = self.low
|
||||
json['high'] = self.high
|
||||
json['count'] = self.count
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
low=int(json['low']),
|
||||
high=int(json['high']),
|
||||
count=int(json['count']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Histogram:
|
||||
'''
|
||||
Chrome histogram.
|
||||
'''
|
||||
#: Name.
|
||||
name: str
|
||||
|
||||
#: Sum of sample values.
|
||||
sum_: int
|
||||
|
||||
#: Total number of samples.
|
||||
count: int
|
||||
|
||||
#: Buckets.
|
||||
buckets: typing.List[Bucket]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['sum'] = self.sum_
|
||||
json['count'] = self.count
|
||||
json['buckets'] = [i.to_json() for i in self.buckets]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
sum_=int(json['sum']),
|
||||
count=int(json['count']),
|
||||
buckets=[Bucket.from_json(i) for i in json['buckets']],
|
||||
)
|
||||
|
||||
|
||||
def set_permission(
|
||||
permission: PermissionDescriptor,
|
||||
setting: PermissionSetting,
|
||||
origin: typing.Optional[str] = None,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set permission settings for given origin.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param permission: Descriptor of permission to override.
|
||||
:param setting: Setting of the permission.
|
||||
:param origin: *(Optional)* Origin the permission applies to, all origins if not specified.
|
||||
:param browser_context_id: *(Optional)* Context to override. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['permission'] = permission.to_json()
|
||||
params['setting'] = setting.to_json()
|
||||
if origin is not None:
|
||||
params['origin'] = origin
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setPermission',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def grant_permissions(
|
||||
permissions: typing.List[PermissionType],
|
||||
origin: typing.Optional[str] = None,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Grant specific permissions to the given origin and reject all others.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param permissions:
|
||||
:param origin: *(Optional)* Origin the permission applies to, all origins if not specified.
|
||||
:param browser_context_id: *(Optional)* BrowserContext to override permissions. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['permissions'] = [i.to_json() for i in permissions]
|
||||
if origin is not None:
|
||||
params['origin'] = origin
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.grantPermissions',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
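
# Hypothetical example (editor's sketch): grant geolocation and notification
# permissions to a single origin (all other permissions are rejected, per the
# docstring above).  ``execute`` is an assumed driver for these command
# generators.
def _grant_basic_permissions(execute, origin):
    execute(grant_permissions(
        [PermissionType.GEOLOCATION, PermissionType.NOTIFICATIONS],
        origin=origin,
    ))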
|
||||
|
||||
|
||||
def reset_permissions(
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Reset all permission management for all origins.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param browser_context_id: *(Optional)* BrowserContext to reset permissions. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.resetPermissions',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_download_behavior(
|
||||
behavior: str,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None,
|
||||
download_path: typing.Optional[str] = None,
|
||||
events_enabled: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set the behavior when downloading a file.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param behavior: Whether to allow all or deny all download requests, or use default Chrome behavior if available (otherwise deny). ``allowAndName`` allows download and names files according to their download guids.
|
||||
:param browser_context_id: *(Optional)* BrowserContext to set download behavior. When omitted, default browser context is used.
|
||||
:param download_path: *(Optional)* The default path to save downloaded files to. This is required if behavior is set to 'allow' or 'allowAndName'.
|
||||
:param events_enabled: *(Optional)* Whether to emit download events (defaults to false).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['behavior'] = behavior
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
if download_path is not None:
|
||||
params['downloadPath'] = download_path
|
||||
if events_enabled is not None:
|
||||
params['eventsEnabled'] = events_enabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setDownloadBehavior',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def cancel_download(
|
||||
guid: str,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Cancel a download if in progress
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param guid: Global unique identifier of the download.
|
||||
:param browser_context_id: *(Optional)* BrowserContext to perform the action in. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['guid'] = guid
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.cancelDownload',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def close() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Close browser gracefully.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.close',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def crash() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Crashes browser on the main thread.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.crash',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def crash_gpu_process() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Crashes GPU process.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.crashGpuProcess',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_version() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[str, str, str, str, str]]:
|
||||
'''
|
||||
Returns version information.
|
||||
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **protocolVersion** - Protocol version.
|
||||
1. **product** - Product name.
|
||||
2. **revision** - Product revision.
|
||||
3. **userAgent** - User-Agent.
|
||||
4. **jsVersion** - V8 version.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getVersion',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
str(json['protocolVersion']),
|
||||
str(json['product']),
|
||||
str(json['revision']),
|
||||
str(json['userAgent']),
|
||||
str(json['jsVersion'])
|
||||
)
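
# Hypothetical example (editor's sketch): ``get_version`` decodes the response
# into a plain 5-tuple, so callers unpack it positionally.  ``execute`` is an
# assumed driver for these command generators.
def _describe_browser(execute):
    protocol, product, revision, user_agent, js_version = execute(get_version())
    return "{} (CDP {}, V8 {})".format(product, protocol, js_version)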
|
||||
|
||||
|
||||
def get_browser_command_line() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[str]]:
|
||||
'''
|
||||
Returns the command line switches for the browser process if, and only if
|
||||
--enable-automation is on the commandline.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:returns: Commandline parameters
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getBrowserCommandLine',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [str(i) for i in json['arguments']]
|
||||
|
||||
|
||||
def get_histograms(
|
||||
query: typing.Optional[str] = None,
|
||||
delta: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Histogram]]:
|
||||
'''
|
||||
Get Chrome histograms.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param query: *(Optional)* Requested substring in name. Only histograms which have query as a substring in their name are extracted. An empty or absent query returns all histograms.
|
||||
:param delta: *(Optional)* If true, retrieve delta since last call.
|
||||
:returns: Histograms.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if query is not None:
|
||||
params['query'] = query
|
||||
if delta is not None:
|
||||
params['delta'] = delta
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getHistograms',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [Histogram.from_json(i) for i in json['histograms']]
|
||||
|
||||
|
||||
def get_histogram(
|
||||
name: str,
|
||||
delta: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Histogram]:
|
||||
'''
|
||||
Get a Chrome histogram by name.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param name: Requested histogram name.
|
||||
:param delta: *(Optional)* If true, retrieve delta since last call.
|
||||
:returns: Histogram.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['name'] = name
|
||||
if delta is not None:
|
||||
params['delta'] = delta
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getHistogram',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return Histogram.from_json(json['histogram'])
|
||||
|
||||
|
||||
def get_window_bounds(
|
||||
window_id: WindowID
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Bounds]:
|
||||
'''
|
||||
Get position and size of the browser window.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param window_id: Browser window id.
|
||||
:returns: Bounds information of the window. When window state is 'minimized', the restored window position and size are returned.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['windowId'] = window_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getWindowBounds',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return Bounds.from_json(json['bounds'])
|
||||
|
||||
|
||||
def get_window_for_target(
|
||||
target_id: typing.Optional[target.TargetID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[WindowID, Bounds]]:
|
||||
'''
|
||||
Get the browser window that contains the devtools target.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param target_id: *(Optional)* Devtools agent host id. If called as a part of the session, associated targetId is used.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **windowId** - Browser window id.
|
||||
1. **bounds** - Bounds information of the window. When window state is 'minimized', the restored window position and size are returned.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if target_id is not None:
|
||||
params['targetId'] = target_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getWindowForTarget',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
WindowID.from_json(json['windowId']),
|
||||
Bounds.from_json(json['bounds'])
|
||||
)
|
||||
|
||||
|
||||
def set_window_bounds(
|
||||
window_id: WindowID,
|
||||
bounds: Bounds
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set position and/or size of the browser window.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param window_id: Browser window id.
|
||||
:param bounds: New window bounds. The 'minimized', 'maximized' and 'fullscreen' states cannot be combined with 'left', 'top', 'width' or 'height'. Leaves unspecified fields unchanged.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['windowId'] = window_id.to_json()
|
||||
params['bounds'] = bounds.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setWindowBounds',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_dock_tile(
|
||||
badge_label: typing.Optional[str] = None,
|
||||
image: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set dock tile details, platform-specific.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param badge_label: *(Optional)*
|
||||
:param image: *(Optional)* Png encoded image.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if badge_label is not None:
|
||||
params['badgeLabel'] = badge_label
|
||||
if image is not None:
|
||||
params['image'] = image
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setDockTile',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def execute_browser_command(
|
||||
command_id: BrowserCommandId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Invoke custom browser commands used by telemetry.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param command_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['commandId'] = command_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.executeBrowserCommand',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Browser.downloadWillBegin')
|
||||
@dataclass
|
||||
class DownloadWillBegin:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
Fired when page is about to start a download.
|
||||
'''
|
||||
#: Id of the frame that caused the download to begin.
|
||||
frame_id: page.FrameId
|
||||
#: Global unique identifier of the download.
|
||||
guid: str
|
||||
#: URL of the resource being downloaded.
|
||||
url: str
|
||||
#: Suggested file name of the resource (the actual name of the file saved on disk may differ).
|
||||
suggested_filename: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DownloadWillBegin:
|
||||
return cls(
|
||||
frame_id=page.FrameId.from_json(json['frameId']),
|
||||
guid=str(json['guid']),
|
||||
url=str(json['url']),
|
||||
suggested_filename=str(json['suggestedFilename'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Browser.downloadProgress')
|
||||
@dataclass
|
||||
class DownloadProgress:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
Fired when download makes progress. Last call has ``done`` == true.
|
||||
'''
|
||||
#: Global unique identifier of the download.
|
||||
guid: str
|
||||
#: Total expected bytes to download.
|
||||
total_bytes: float
|
||||
#: Total bytes received.
|
||||
received_bytes: float
|
||||
#: Download status.
|
||||
state: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DownloadProgress:
|
||||
return cls(
|
||||
guid=str(json['guid']),
|
||||
total_bytes=float(json['totalBytes']),
|
||||
received_bytes=float(json['receivedBytes']),
|
||||
state=str(json['state'])
|
||||
)
|
||||
@@ -0,0 +1,287 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: CacheStorage (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class CacheId(str):
|
||||
'''
|
||||
Unique identifier of the Cache object.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> CacheId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'CacheId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class CachedResponseType(enum.Enum):
|
||||
'''
|
||||
type of HTTP response cached
|
||||
'''
|
||||
BASIC = "basic"
|
||||
CORS = "cors"
|
||||
DEFAULT = "default"
|
||||
ERROR = "error"
|
||||
OPAQUE_RESPONSE = "opaqueResponse"
|
||||
OPAQUE_REDIRECT = "opaqueRedirect"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class DataEntry:
|
||||
'''
|
||||
Data entry.
|
||||
'''
|
||||
#: Request URL.
|
||||
request_url: str
|
||||
|
||||
#: Request method.
|
||||
request_method: str
|
||||
|
||||
#: Request headers
|
||||
request_headers: typing.List[Header]
|
||||
|
||||
#: Number of seconds since epoch.
|
||||
response_time: float
|
||||
|
||||
#: HTTP response status code.
|
||||
response_status: int
|
||||
|
||||
#: HTTP response status text.
|
||||
response_status_text: str
|
||||
|
||||
#: HTTP response type
|
||||
response_type: CachedResponseType
|
||||
|
||||
#: Response headers
|
||||
response_headers: typing.List[Header]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['requestURL'] = self.request_url
|
||||
json['requestMethod'] = self.request_method
|
||||
json['requestHeaders'] = [i.to_json() for i in self.request_headers]
|
||||
json['responseTime'] = self.response_time
|
||||
json['responseStatus'] = self.response_status
|
||||
json['responseStatusText'] = self.response_status_text
|
||||
json['responseType'] = self.response_type.to_json()
|
||||
json['responseHeaders'] = [i.to_json() for i in self.response_headers]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
request_url=str(json['requestURL']),
|
||||
request_method=str(json['requestMethod']),
|
||||
request_headers=[Header.from_json(i) for i in json['requestHeaders']],
|
||||
response_time=float(json['responseTime']),
|
||||
response_status=int(json['responseStatus']),
|
||||
response_status_text=str(json['responseStatusText']),
|
||||
response_type=CachedResponseType.from_json(json['responseType']),
|
||||
response_headers=[Header.from_json(i) for i in json['responseHeaders']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Cache:
|
||||
'''
|
||||
Cache identifier.
|
||||
'''
|
||||
#: An opaque unique id of the cache.
|
||||
cache_id: CacheId
|
||||
|
||||
#: Security origin of the cache.
|
||||
security_origin: str
|
||||
|
||||
#: The name of the cache.
|
||||
cache_name: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['cacheId'] = self.cache_id.to_json()
|
||||
json['securityOrigin'] = self.security_origin
|
||||
json['cacheName'] = self.cache_name
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
cache_id=CacheId.from_json(json['cacheId']),
|
||||
security_origin=str(json['securityOrigin']),
|
||||
cache_name=str(json['cacheName']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Header:
|
||||
name: str
|
||||
|
||||
value: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
value=str(json['value']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class CachedResponse:
|
||||
'''
|
||||
Cached response
|
||||
'''
|
||||
#: Entry content, base64-encoded.
|
||||
body: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['body'] = self.body
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
body=str(json['body']),
|
||||
)
|
||||
|
||||
|
||||
def delete_cache(
|
||||
cache_id: CacheId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Deletes a cache.
|
||||
|
||||
:param cache_id: Id of cache for deletion.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.deleteCache',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def delete_entry(
|
||||
cache_id: CacheId,
|
||||
request: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Deletes a cache entry.
|
||||
|
||||
:param cache_id: Id of cache where the entry will be deleted.
|
||||
:param request: URL spec of the request.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
params['request'] = request
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.deleteEntry',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def request_cache_names(
|
||||
security_origin: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Cache]]:
|
||||
'''
|
||||
Requests cache names.
|
||||
|
||||
:param security_origin: Security origin.
|
||||
:returns: Caches for the security origin.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['securityOrigin'] = security_origin
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.requestCacheNames',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [Cache.from_json(i) for i in json['caches']]
|
||||
|
||||
|
||||
def request_cached_response(
|
||||
cache_id: CacheId,
|
||||
request_url: str,
|
||||
request_headers: typing.List[Header]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,CachedResponse]:
|
||||
'''
|
||||
Fetches cache entry.
|
||||
|
||||
:param cache_id: Id of cache that contains the entry.
|
||||
:param request_url: URL spec of the request.
|
||||
:param request_headers: headers of the request.
|
||||
:returns: Response read from the cache.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
params['requestURL'] = request_url
|
||||
params['requestHeaders'] = [i.to_json() for i in request_headers]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.requestCachedResponse',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return CachedResponse.from_json(json['response'])
|
||||
|
||||
|
||||
def request_entries(
|
||||
cache_id: CacheId,
|
||||
skip_count: typing.Optional[int] = None,
|
||||
page_size: typing.Optional[int] = None,
|
||||
path_filter: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[DataEntry], float]]:
|
||||
'''
|
||||
Requests data from cache.
|
||||
|
||||
:param cache_id: ID of cache to get entries from.
|
||||
:param skip_count: *(Optional)* Number of records to skip.
|
||||
:param page_size: *(Optional)* Number of records to fetch.
|
||||
:param path_filter: *(Optional)* If present, only return the entries containing this substring in the path
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **cacheDataEntries** - Array of object store data entries.
|
||||
1. **returnCount** - Count of returned entries from this storage. If pathFilter is empty, it is the count of all entries from this storage.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
if skip_count is not None:
|
||||
params['skipCount'] = skip_count
|
||||
if page_size is not None:
|
||||
params['pageSize'] = page_size
|
||||
if path_filter is not None:
|
||||
params['pathFilter'] = path_filter
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.requestEntries',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[DataEntry.from_json(i) for i in json['cacheDataEntries']],
|
||||
float(json['returnCount'])
|
||||
)
|
||||
@@ -0,0 +1,170 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Cast (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class Sink:
|
||||
name: str
|
||||
|
||||
id_: str
|
||||
|
||||
#: Text describing the current session. Present only if there is an active
|
||||
#: session on the sink.
|
||||
session: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['id'] = self.id_
|
||||
if self.session is not None:
|
||||
json['session'] = self.session
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
id_=str(json['id']),
|
||||
session=str(json['session']) if 'session' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def enable(
|
||||
presentation_url: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Starts observing for sinks that can be used for tab mirroring, and if set,
|
||||
sinks compatible with ``presentationUrl`` as well. When sinks are found, a
|
||||
``sinksUpdated`` event is fired.
|
||||
Also starts observing for issue messages. When an issue is added or removed,
|
||||
an ``issueUpdated`` event is fired.
|
||||
|
||||
:param presentation_url: *(Optional)*
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if presentation_url is not None:
|
||||
params['presentationUrl'] = presentation_url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.enable',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Stops observing for sinks and issues.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_sink_to_use(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets a sink to be used when the web page requests the browser to choose a
|
||||
sink via Presentation API, Remote Playback API, or Cast SDK.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.setSinkToUse',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_desktop_mirroring(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Starts mirroring the desktop to the sink.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.startDesktopMirroring',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_tab_mirroring(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Starts mirroring the tab to the sink.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.startTabMirroring',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def stop_casting(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Stops the active Cast session on the sink.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.stopCasting',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Cast.sinksUpdated')
|
||||
@dataclass
|
||||
class SinksUpdated:
|
||||
'''
|
||||
This is fired whenever the list of available sinks changes. A sink is a
|
||||
device or a software surface that you can cast to.
|
||||
'''
|
||||
sinks: typing.List[Sink]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> SinksUpdated:
|
||||
return cls(
|
||||
sinks=[Sink.from_json(i) for i in json['sinks']]
|
||||
)
|
||||
|
||||
|
||||
@event_class('Cast.issueUpdated')
|
||||
@dataclass
|
||||
class IssueUpdated:
|
||||
'''
|
||||
This is fired whenever the outstanding issue/error message changes.
|
||||
``issueMessage`` is empty if there is no issue.
|
||||
'''
|
||||
issue_message: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> IssueUpdated:
|
||||
return cls(
|
||||
issue_message=str(json['issueMessage'])
|
||||
)
|
||||
@@ -0,0 +1,105 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Console
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class ConsoleMessage:
|
||||
'''
|
||||
Console message.
|
||||
'''
|
||||
#: Message source.
|
||||
source: str
|
||||
|
||||
#: Message severity.
|
||||
level: str
|
||||
|
||||
#: Message text.
|
||||
text: str
|
||||
|
||||
#: URL of the message origin.
|
||||
url: typing.Optional[str] = None
|
||||
|
||||
#: Line number in the resource that generated this message (1-based).
|
||||
line: typing.Optional[int] = None
|
||||
|
||||
#: Column number in the resource that generated this message (1-based).
|
||||
column: typing.Optional[int] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['source'] = self.source
|
||||
json['level'] = self.level
|
||||
json['text'] = self.text
|
||||
if self.url is not None:
|
||||
json['url'] = self.url
|
||||
if self.line is not None:
|
||||
json['line'] = self.line
|
||||
if self.column is not None:
|
||||
json['column'] = self.column
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
source=str(json['source']),
|
||||
level=str(json['level']),
|
||||
text=str(json['text']),
|
||||
url=str(json['url']) if 'url' in json else None,
|
||||
line=int(json['line']) if 'line' in json else None,
|
||||
column=int(json['column']) if 'column' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def clear_messages() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Does nothing.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Console.clearMessages',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables console domain, prevents further console messages from being reported to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Console.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables console domain, sends the messages collected so far to the client by means of the
|
||||
``messageAdded`` notification.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Console.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Console.messageAdded')
|
||||
@dataclass
|
||||
class MessageAdded:
|
||||
'''
|
||||
Issued when new console message is added.
|
||||
'''
|
||||
#: Console message that has been added.
|
||||
message: ConsoleMessage
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> MessageAdded:
|
||||
return cls(
|
||||
message=ConsoleMessage.from_json(json['message'])
|
||||
)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,162 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Database (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class DatabaseId(str):
|
||||
'''
|
||||
Unique identifier of Database object.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> DatabaseId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'DatabaseId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class Database:
|
||||
'''
|
||||
Database object.
|
||||
'''
|
||||
#: Database ID.
|
||||
id_: DatabaseId
|
||||
|
||||
#: Database domain.
|
||||
domain: str
|
||||
|
||||
#: Database name.
|
||||
name: str
|
||||
|
||||
#: Database version.
|
||||
version: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['id'] = self.id_.to_json()
|
||||
json['domain'] = self.domain
|
||||
json['name'] = self.name
|
||||
json['version'] = self.version
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
id_=DatabaseId.from_json(json['id']),
|
||||
domain=str(json['domain']),
|
||||
name=str(json['name']),
|
||||
version=str(json['version']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Error:
|
||||
'''
|
||||
Database error.
|
||||
'''
|
||||
#: Error message.
|
||||
message: str
|
||||
|
||||
#: Error code.
|
||||
code: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['message'] = self.message
|
||||
json['code'] = self.code
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
message=str(json['message']),
|
||||
code=int(json['code']),
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables database tracking, prevents database events from being sent to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables database tracking, database events will now be delivered to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def execute_sql(
|
||||
database_id: DatabaseId,
|
||||
query: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.Optional[typing.List[str]], typing.Optional[typing.List[typing.Any]], typing.Optional[Error]]]:
|
||||
'''
|
||||
:param database_id:
|
||||
:param query:
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **columnNames** -
|
||||
1. **values** -
|
||||
2. **sqlError** -
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['databaseId'] = database_id.to_json()
|
||||
params['query'] = query
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.executeSQL',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[str(i) for i in json['columnNames']] if 'columnNames' in json else None,
|
||||
[i for i in json['values']] if 'values' in json else None,
|
||||
Error.from_json(json['sqlError']) if 'sqlError' in json else None
|
||||
)
|
||||
|
||||
|
||||
def get_database_table_names(
|
||||
database_id: DatabaseId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[str]]:
|
||||
'''
|
||||
:param database_id:
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['databaseId'] = database_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.getDatabaseTableNames',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [str(i) for i in json['tableNames']]
|
||||
|
||||
|
||||
@event_class('Database.addDatabase')
|
||||
@dataclass
|
||||
class AddDatabase:
|
||||
database: Database
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AddDatabase:
|
||||
return cls(
|
||||
database=Database.from_json(json['database'])
|
||||
)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,43 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DeviceOrientation (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
def clear_device_orientation_override() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears the overridden Device Orientation.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DeviceOrientation.clearDeviceOrientationOverride',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_device_orientation_override(
|
||||
alpha: float,
|
||||
beta: float,
|
||||
gamma: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides the Device Orientation.
|
||||
|
||||
:param alpha: Mock alpha
|
||||
:param beta: Mock beta
|
||||
:param gamma: Mock gamma
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['alpha'] = alpha
|
||||
params['beta'] = beta
|
||||
params['gamma'] = gamma
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DeviceOrientation.setDeviceOrientationOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,312 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DOMDebugger
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import runtime
|
||||
|
||||
|
||||
class DOMBreakpointType(enum.Enum):
|
||||
'''
|
||||
DOM breakpoint type.
|
||||
'''
|
||||
SUBTREE_MODIFIED = "subtree-modified"
|
||||
ATTRIBUTE_MODIFIED = "attribute-modified"
|
||||
NODE_REMOVED = "node-removed"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class CSPViolationType(enum.Enum):
|
||||
'''
|
||||
CSP Violation type.
|
||||
'''
|
||||
TRUSTEDTYPE_SINK_VIOLATION = "trustedtype-sink-violation"
|
||||
TRUSTEDTYPE_POLICY_VIOLATION = "trustedtype-policy-violation"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class EventListener:
|
||||
'''
|
||||
Object event listener.
|
||||
'''
|
||||
#: ``EventListener``'s type.
|
||||
type_: str
|
||||
|
||||
#: ``EventListener``'s useCapture.
|
||||
use_capture: bool
|
||||
|
||||
#: ``EventListener``'s passive flag.
|
||||
passive: bool
|
||||
|
||||
#: ``EventListener``'s once flag.
|
||||
once: bool
|
||||
|
||||
#: Script id of the handler code.
|
||||
script_id: runtime.ScriptId
|
||||
|
||||
#: Line number in the script (0-based).
|
||||
line_number: int
|
||||
|
||||
#: Column number in the script (0-based).
|
||||
column_number: int
|
||||
|
||||
#: Event handler function value.
|
||||
handler: typing.Optional[runtime.RemoteObject] = None
|
||||
|
||||
#: Event original handler function value.
|
||||
original_handler: typing.Optional[runtime.RemoteObject] = None
|
||||
|
||||
#: Node the listener is added to (if any).
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_
|
||||
json['useCapture'] = self.use_capture
|
||||
json['passive'] = self.passive
|
||||
json['once'] = self.once
|
||||
json['scriptId'] = self.script_id.to_json()
|
||||
json['lineNumber'] = self.line_number
|
||||
json['columnNumber'] = self.column_number
|
||||
if self.handler is not None:
|
||||
json['handler'] = self.handler.to_json()
|
||||
if self.original_handler is not None:
|
||||
json['originalHandler'] = self.original_handler.to_json()
|
||||
if self.backend_node_id is not None:
|
||||
json['backendNodeId'] = self.backend_node_id.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=str(json['type']),
|
||||
use_capture=bool(json['useCapture']),
|
||||
passive=bool(json['passive']),
|
||||
once=bool(json['once']),
|
||||
script_id=runtime.ScriptId.from_json(json['scriptId']),
|
||||
line_number=int(json['lineNumber']),
|
||||
column_number=int(json['columnNumber']),
|
||||
handler=runtime.RemoteObject.from_json(json['handler']) if 'handler' in json else None,
|
||||
original_handler=runtime.RemoteObject.from_json(json['originalHandler']) if 'originalHandler' in json else None,
|
||||
backend_node_id=dom.BackendNodeId.from_json(json['backendNodeId']) if 'backendNodeId' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def get_event_listeners(
|
||||
object_id: runtime.RemoteObjectId,
|
||||
depth: typing.Optional[int] = None,
|
||||
pierce: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[EventListener]]:
|
||||
'''
|
||||
Returns event listeners of the given object.
|
||||
|
||||
:param object_id: Identifier of the object to return listeners for.
|
||||
:param depth: *(Optional)* The maximum depth at which Node children should be retrieved, defaults to 1. Use -1 for the entire subtree or provide an integer larger than 0.
|
||||
:param pierce: *(Optional)* Whether or not iframes and shadow roots should be traversed when returning the subtree (default is false). Reports listeners for all contexts if pierce is enabled.
|
||||
:returns: Array of relevant listeners.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['objectId'] = object_id.to_json()
|
||||
if depth is not None:
|
||||
params['depth'] = depth
|
||||
if pierce is not None:
|
||||
params['pierce'] = pierce
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.getEventListeners',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [EventListener.from_json(i) for i in json['listeners']]
|
||||
|
||||
|
||||
def remove_dom_breakpoint(
|
||||
node_id: dom.NodeId,
|
||||
type_: DOMBreakpointType
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes DOM breakpoint that was set using ``setDOMBreakpoint``.
|
||||
|
||||
:param node_id: Identifier of the node to remove breakpoint from.
|
||||
:param type_: Type of the breakpoint to remove.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['nodeId'] = node_id.to_json()
|
||||
params['type'] = type_.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeDOMBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def remove_event_listener_breakpoint(
|
||||
event_name: str,
|
||||
target_name: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes breakpoint on particular DOM event.
|
||||
|
||||
:param event_name: Event name.
|
||||
:param target_name: **(EXPERIMENTAL)** *(Optional)* EventTarget interface name.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
if target_name is not None:
|
||||
params['targetName'] = target_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeEventListenerBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def remove_instrumentation_breakpoint(
|
||||
event_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes breakpoint on particular native event.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param event_name: Instrumentation name to stop on.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeInstrumentationBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def remove_xhr_breakpoint(
|
||||
url: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes breakpoint from XMLHttpRequest.
|
||||
|
||||
:param url: Resource URL substring.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['url'] = url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeXHRBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_break_on_csp_violation(
|
||||
violation_types: typing.List[CSPViolationType]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular CSP violations.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param violation_types: CSP Violations to stop upon.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['violationTypes'] = [i.to_json() for i in violation_types]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setBreakOnCSPViolation',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_dom_breakpoint(
|
||||
node_id: dom.NodeId,
|
||||
type_: DOMBreakpointType
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular operation with DOM.
|
||||
|
||||
:param node_id: Identifier of the node to set breakpoint on.
|
||||
:param type_: Type of the operation to stop upon.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['nodeId'] = node_id.to_json()
|
||||
params['type'] = type_.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setDOMBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_event_listener_breakpoint(
|
||||
event_name: str,
|
||||
target_name: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular DOM event.
|
||||
|
||||
:param event_name: DOM Event name to stop on (any DOM event will do).
|
||||
:param target_name: **(EXPERIMENTAL)** *(Optional)* EventTarget interface name to stop on. If equal to ```"*"``` or not provided, will stop on any EventTarget.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
if target_name is not None:
|
||||
params['targetName'] = target_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setEventListenerBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_instrumentation_breakpoint(
|
||||
event_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular native event.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param event_name: Instrumentation name to stop on.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setInstrumentationBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_xhr_breakpoint(
|
||||
url: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on XMLHttpRequest.
|
||||
|
||||
:param url: Resource URL substring. All XHRs having this substring in the URL will get stopped upon.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['url'] = url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setXHRBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
@@ -0,0 +1,863 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DOMSnapshot (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import dom_debugger
|
||||
from . import page
|
||||
|
||||
|
||||
@dataclass
|
||||
class DOMNode:
|
||||
'''
|
||||
A Node in the DOM tree.
|
||||
'''
|
||||
#: ``Node``'s nodeType.
|
||||
node_type: int
|
||||
|
||||
#: ``Node``'s nodeName.
|
||||
node_name: str
|
||||
|
||||
#: ``Node``'s nodeValue.
|
||||
node_value: str
|
||||
|
||||
#: ``Node``'s id, corresponds to DOM.Node.backendNodeId.
|
||||
backend_node_id: dom.BackendNodeId
|
||||
|
||||
#: Only set for textarea elements, contains the text value.
|
||||
text_value: typing.Optional[str] = None
|
||||
|
||||
#: Only set for input elements, contains the input's associated text value.
|
||||
input_value: typing.Optional[str] = None
|
||||
|
||||
#: Only set for radio and checkbox input elements, indicates if the element has been checked
|
||||
input_checked: typing.Optional[bool] = None
|
||||
|
||||
#: Only set for option elements, indicates if the element has been selected
|
||||
option_selected: typing.Optional[bool] = None
|
||||
|
||||
#: The indexes of the node's child nodes in the ``domNodes`` array returned by ``getSnapshot``, if
|
||||
#: any.
|
||||
child_node_indexes: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: Attributes of an ``Element`` node.
|
||||
attributes: typing.Optional[typing.List[NameValue]] = None
|
||||
|
||||
#: Indexes of pseudo elements associated with this node in the ``domNodes`` array returned by
|
||||
#: ``getSnapshot``, if any.
|
||||
pseudo_element_indexes: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: The index of the node's related layout tree node in the ``layoutTreeNodes`` array returned by
|
||||
#: ``getSnapshot``, if any.
|
||||
layout_node_index: typing.Optional[int] = None
|
||||
|
||||
#: Document URL that ``Document`` or ``FrameOwner`` node points to.
|
||||
document_url: typing.Optional[str] = None
|
||||
|
||||
#: Base URL that ``Document`` or ``FrameOwner`` node uses for URL completion.
|
||||
base_url: typing.Optional[str] = None
|
||||
|
||||
#: Only set for documents, contains the document's content language.
|
||||
content_language: typing.Optional[str] = None
|
||||
|
||||
#: Only set for documents, contains the document's character set encoding.
|
||||
document_encoding: typing.Optional[str] = None
|
||||
|
||||
#: ``DocumentType`` node's publicId.
|
||||
public_id: typing.Optional[str] = None
|
||||
|
||||
#: ``DocumentType`` node's systemId.
|
||||
system_id: typing.Optional[str] = None
|
||||
|
||||
#: Frame ID for frame owner elements and also for the document node.
|
||||
frame_id: typing.Optional[page.FrameId] = None
|
||||
|
||||
#: The index of a frame owner element's content document in the ``domNodes`` array returned by
|
||||
#: ``getSnapshot``, if any.
|
||||
content_document_index: typing.Optional[int] = None
|
||||
|
||||
#: Type of a pseudo element node.
|
||||
pseudo_type: typing.Optional[dom.PseudoType] = None
|
||||
|
||||
#: Shadow root type.
|
||||
shadow_root_type: typing.Optional[dom.ShadowRootType] = None
|
||||
|
||||
#: Whether this DOM node responds to mouse clicks. This includes nodes that have had click
|
||||
#: event listeners attached via JavaScript as well as anchor tags that naturally navigate when
|
||||
#: clicked.
|
||||
is_clickable: typing.Optional[bool] = None
|
||||
|
||||
#: Details of the node's event listeners, if any.
|
||||
event_listeners: typing.Optional[typing.List[dom_debugger.EventListener]] = None
|
||||
|
||||
#: The selected url for nodes with a srcset attribute.
|
||||
current_source_url: typing.Optional[str] = None
|
||||
|
||||
#: The url of the script (if any) that generates this node.
|
||||
origin_url: typing.Optional[str] = None
|
||||
|
||||
#: Scroll offsets, set when this node is a Document.
|
||||
scroll_offset_x: typing.Optional[float] = None
|
||||
|
||||
scroll_offset_y: typing.Optional[float] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['nodeType'] = self.node_type
|
||||
json['nodeName'] = self.node_name
|
||||
json['nodeValue'] = self.node_value
|
||||
json['backendNodeId'] = self.backend_node_id.to_json()
|
||||
if self.text_value is not None:
|
||||
json['textValue'] = self.text_value
|
||||
if self.input_value is not None:
|
||||
json['inputValue'] = self.input_value
|
||||
if self.input_checked is not None:
|
||||
json['inputChecked'] = self.input_checked
|
||||
if self.option_selected is not None:
|
||||
json['optionSelected'] = self.option_selected
|
||||
if self.child_node_indexes is not None:
|
||||
json['childNodeIndexes'] = [i for i in self.child_node_indexes]
|
||||
if self.attributes is not None:
|
||||
json['attributes'] = [i.to_json() for i in self.attributes]
|
||||
if self.pseudo_element_indexes is not None:
|
||||
json['pseudoElementIndexes'] = [i for i in self.pseudo_element_indexes]
|
||||
if self.layout_node_index is not None:
|
||||
json['layoutNodeIndex'] = self.layout_node_index
|
||||
if self.document_url is not None:
|
||||
json['documentURL'] = self.document_url
|
||||
if self.base_url is not None:
|
||||
json['baseURL'] = self.base_url
|
||||
if self.content_language is not None:
|
||||
json['contentLanguage'] = self.content_language
|
||||
if self.document_encoding is not None:
|
||||
json['documentEncoding'] = self.document_encoding
|
||||
if self.public_id is not None:
|
||||
json['publicId'] = self.public_id
|
||||
if self.system_id is not None:
|
||||
json['systemId'] = self.system_id
|
||||
if self.frame_id is not None:
|
||||
json['frameId'] = self.frame_id.to_json()
|
||||
if self.content_document_index is not None:
|
||||
json['contentDocumentIndex'] = self.content_document_index
|
||||
if self.pseudo_type is not None:
|
||||
json['pseudoType'] = self.pseudo_type.to_json()
|
||||
if self.shadow_root_type is not None:
|
||||
json['shadowRootType'] = self.shadow_root_type.to_json()
|
||||
if self.is_clickable is not None:
|
||||
json['isClickable'] = self.is_clickable
|
||||
if self.event_listeners is not None:
|
||||
json['eventListeners'] = [i.to_json() for i in self.event_listeners]
|
||||
if self.current_source_url is not None:
|
||||
json['currentSourceURL'] = self.current_source_url
|
||||
if self.origin_url is not None:
|
||||
json['originURL'] = self.origin_url
|
||||
if self.scroll_offset_x is not None:
|
||||
json['scrollOffsetX'] = self.scroll_offset_x
|
||||
if self.scroll_offset_y is not None:
|
||||
json['scrollOffsetY'] = self.scroll_offset_y
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
node_type=int(json['nodeType']),
|
||||
node_name=str(json['nodeName']),
|
||||
node_value=str(json['nodeValue']),
|
||||
backend_node_id=dom.BackendNodeId.from_json(json['backendNodeId']),
|
||||
text_value=str(json['textValue']) if 'textValue' in json else None,
|
||||
input_value=str(json['inputValue']) if 'inputValue' in json else None,
|
||||
input_checked=bool(json['inputChecked']) if 'inputChecked' in json else None,
|
||||
option_selected=bool(json['optionSelected']) if 'optionSelected' in json else None,
|
||||
child_node_indexes=[int(i) for i in json['childNodeIndexes']] if 'childNodeIndexes' in json else None,
|
||||
attributes=[NameValue.from_json(i) for i in json['attributes']] if 'attributes' in json else None,
|
||||
pseudo_element_indexes=[int(i) for i in json['pseudoElementIndexes']] if 'pseudoElementIndexes' in json else None,
|
||||
layout_node_index=int(json['layoutNodeIndex']) if 'layoutNodeIndex' in json else None,
|
||||
document_url=str(json['documentURL']) if 'documentURL' in json else None,
|
||||
base_url=str(json['baseURL']) if 'baseURL' in json else None,
|
||||
content_language=str(json['contentLanguage']) if 'contentLanguage' in json else None,
|
||||
document_encoding=str(json['documentEncoding']) if 'documentEncoding' in json else None,
|
||||
public_id=str(json['publicId']) if 'publicId' in json else None,
|
||||
system_id=str(json['systemId']) if 'systemId' in json else None,
|
||||
frame_id=page.FrameId.from_json(json['frameId']) if 'frameId' in json else None,
|
||||
content_document_index=int(json['contentDocumentIndex']) if 'contentDocumentIndex' in json else None,
|
||||
pseudo_type=dom.PseudoType.from_json(json['pseudoType']) if 'pseudoType' in json else None,
|
||||
shadow_root_type=dom.ShadowRootType.from_json(json['shadowRootType']) if 'shadowRootType' in json else None,
|
||||
is_clickable=bool(json['isClickable']) if 'isClickable' in json else None,
|
||||
event_listeners=[dom_debugger.EventListener.from_json(i) for i in json['eventListeners']] if 'eventListeners' in json else None,
|
||||
current_source_url=str(json['currentSourceURL']) if 'currentSourceURL' in json else None,
|
||||
origin_url=str(json['originURL']) if 'originURL' in json else None,
|
||||
scroll_offset_x=float(json['scrollOffsetX']) if 'scrollOffsetX' in json else None,
|
||||
scroll_offset_y=float(json['scrollOffsetY']) if 'scrollOffsetY' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class InlineTextBox:
|
||||
'''
|
||||
Details of post layout rendered text positions. The exact layout should not be regarded as
|
||||
stable and may change between versions.
|
||||
'''
|
||||
#: The bounding box in document coordinates. Note that scroll offset of the document is ignored.
|
||||
bounding_box: dom.Rect
|
||||
|
||||
#: The starting index in characters, for this post layout textbox substring. Characters that
|
||||
#: would be represented as a surrogate pair in UTF-16 have length 2.
|
||||
start_character_index: int
|
||||
|
||||
#: The number of characters in this post layout textbox substring. Characters that would be
|
||||
#: represented as a surrogate pair in UTF-16 have length 2.
|
||||
num_characters: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['boundingBox'] = self.bounding_box.to_json()
|
||||
json['startCharacterIndex'] = self.start_character_index
|
||||
json['numCharacters'] = self.num_characters
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
bounding_box=dom.Rect.from_json(json['boundingBox']),
|
||||
start_character_index=int(json['startCharacterIndex']),
|
||||
num_characters=int(json['numCharacters']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class LayoutTreeNode:
|
||||
'''
|
||||
Details of an element in the DOM tree with a LayoutObject.
|
||||
'''
|
||||
#: The index of the related DOM node in the ``domNodes`` array returned by ``getSnapshot``.
|
||||
dom_node_index: int
|
||||
|
||||
#: The bounding box in document coordinates. Note that scroll offset of the document is ignored.
|
||||
bounding_box: dom.Rect
|
||||
|
||||
#: Contents of the LayoutText, if any.
|
||||
layout_text: typing.Optional[str] = None
|
||||
|
||||
#: The post-layout inline text nodes, if any.
|
||||
inline_text_nodes: typing.Optional[typing.List[InlineTextBox]] = None
|
||||
|
||||
#: Index into the ``computedStyles`` array returned by ``getSnapshot``.
|
||||
style_index: typing.Optional[int] = None
|
||||
|
||||
#: Global paint order index, which is determined by the stacking order of the nodes. Nodes
|
||||
#: that are painted together will have the same index. Only provided if includePaintOrder in
|
||||
#: getSnapshot was true.
|
||||
paint_order: typing.Optional[int] = None
|
||||
|
||||
#: Set to true to indicate the element begins a new stacking context.
|
||||
is_stacking_context: typing.Optional[bool] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['domNodeIndex'] = self.dom_node_index
|
||||
json['boundingBox'] = self.bounding_box.to_json()
|
||||
if self.layout_text is not None:
|
||||
json['layoutText'] = self.layout_text
|
||||
if self.inline_text_nodes is not None:
|
||||
json['inlineTextNodes'] = [i.to_json() for i in self.inline_text_nodes]
|
||||
if self.style_index is not None:
|
||||
json['styleIndex'] = self.style_index
|
||||
if self.paint_order is not None:
|
||||
json['paintOrder'] = self.paint_order
|
||||
if self.is_stacking_context is not None:
|
||||
json['isStackingContext'] = self.is_stacking_context
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
dom_node_index=int(json['domNodeIndex']),
|
||||
bounding_box=dom.Rect.from_json(json['boundingBox']),
|
||||
layout_text=str(json['layoutText']) if 'layoutText' in json else None,
|
||||
inline_text_nodes=[InlineTextBox.from_json(i) for i in json['inlineTextNodes']] if 'inlineTextNodes' in json else None,
|
||||
style_index=int(json['styleIndex']) if 'styleIndex' in json else None,
|
||||
paint_order=int(json['paintOrder']) if 'paintOrder' in json else None,
|
||||
is_stacking_context=bool(json['isStackingContext']) if 'isStackingContext' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ComputedStyle:
|
||||
'''
|
||||
A subset of the full ComputedStyle as defined by the request whitelist.
|
||||
'''
|
||||
#: Name/value pairs of computed style properties.
|
||||
properties: typing.List[NameValue]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['properties'] = [i.to_json() for i in self.properties]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
properties=[NameValue.from_json(i) for i in json['properties']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class NameValue:
|
||||
'''
|
||||
A name/value pair.
|
||||
'''
|
||||
#: Attribute/property name.
|
||||
name: str
|
||||
|
||||
#: Attribute/property value.
|
||||
value: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
value=str(json['value']),
|
||||
)
|
||||
|
||||
|
||||
class StringIndex(int):
|
||||
'''
|
||||
Index of the string in the strings table.
|
||||
'''
|
||||
def to_json(self) -> int:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: int) -> StringIndex:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'StringIndex({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class ArrayOfStrings(list):
|
||||
'''
|
||||
Index of the string in the strings table.
|
||||
'''
|
||||
def to_json(self) -> typing.List[StringIndex]:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: typing.List[StringIndex]) -> ArrayOfStrings:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'ArrayOfStrings({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class RareStringData:
|
||||
'''
|
||||
Data that is only present on rare nodes.
|
||||
'''
|
||||
index: typing.List[int]
|
||||
|
||||
value: typing.List[StringIndex]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['index'] = [i for i in self.index]
|
||||
json['value'] = [i.to_json() for i in self.value]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
index=[int(i) for i in json['index']],
|
||||
value=[StringIndex.from_json(i) for i in json['value']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class RareBooleanData:
|
||||
index: typing.List[int]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['index'] = [i for i in self.index]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
index=[int(i) for i in json['index']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class RareIntegerData:
|
||||
index: typing.List[int]
|
||||
|
||||
value: typing.List[int]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['index'] = [i for i in self.index]
|
||||
json['value'] = [i for i in self.value]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
index=[int(i) for i in json['index']],
|
||||
value=[int(i) for i in json['value']],
|
||||
)
|
||||
|
||||
|
||||
class Rectangle(list):
|
||||
def to_json(self) -> typing.List[float]:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: typing.List[float]) -> Rectangle:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'Rectangle({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class DocumentSnapshot:
|
||||
'''
|
||||
Document snapshot.
|
||||
'''
|
||||
#: Document URL that ``Document`` or ``FrameOwner`` node points to.
|
||||
document_url: StringIndex
|
||||
|
||||
#: Document title.
|
||||
title: StringIndex
|
||||
|
||||
#: Base URL that ``Document`` or ``FrameOwner`` node uses for URL completion.
|
||||
base_url: StringIndex
|
||||
|
||||
#: Contains the document's content language.
|
||||
content_language: StringIndex
|
||||
|
||||
#: Contains the document's character set encoding.
|
||||
encoding_name: StringIndex
|
||||
|
||||
#: ``DocumentType`` node's publicId.
|
||||
public_id: StringIndex
|
||||
|
||||
#: ``DocumentType`` node's systemId.
|
||||
system_id: StringIndex
|
||||
|
||||
#: Frame ID for frame owner elements and also for the document node.
|
||||
frame_id: StringIndex
|
||||
|
||||
#: A table with dom nodes.
|
||||
nodes: NodeTreeSnapshot
|
||||
|
||||
#: The nodes in the layout tree.
|
||||
layout: LayoutTreeSnapshot
|
||||
|
||||
#: The post-layout inline text nodes.
|
||||
text_boxes: TextBoxSnapshot
|
||||
|
||||
#: Horizontal scroll offset.
|
||||
scroll_offset_x: typing.Optional[float] = None
|
||||
|
||||
#: Vertical scroll offset.
|
||||
scroll_offset_y: typing.Optional[float] = None
|
||||
|
||||
#: Document content width.
|
||||
content_width: typing.Optional[float] = None
|
||||
|
||||
#: Document content height.
|
||||
content_height: typing.Optional[float] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['documentURL'] = self.document_url.to_json()
|
||||
json['title'] = self.title.to_json()
|
||||
json['baseURL'] = self.base_url.to_json()
|
||||
json['contentLanguage'] = self.content_language.to_json()
|
||||
json['encodingName'] = self.encoding_name.to_json()
|
||||
json['publicId'] = self.public_id.to_json()
|
||||
json['systemId'] = self.system_id.to_json()
|
||||
json['frameId'] = self.frame_id.to_json()
|
||||
json['nodes'] = self.nodes.to_json()
|
||||
json['layout'] = self.layout.to_json()
|
||||
json['textBoxes'] = self.text_boxes.to_json()
|
||||
if self.scroll_offset_x is not None:
|
||||
json['scrollOffsetX'] = self.scroll_offset_x
|
||||
if self.scroll_offset_y is not None:
|
||||
json['scrollOffsetY'] = self.scroll_offset_y
|
||||
if self.content_width is not None:
|
||||
json['contentWidth'] = self.content_width
|
||||
if self.content_height is not None:
|
||||
json['contentHeight'] = self.content_height
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
document_url=StringIndex.from_json(json['documentURL']),
|
||||
title=StringIndex.from_json(json['title']),
|
||||
base_url=StringIndex.from_json(json['baseURL']),
|
||||
content_language=StringIndex.from_json(json['contentLanguage']),
|
||||
encoding_name=StringIndex.from_json(json['encodingName']),
|
||||
public_id=StringIndex.from_json(json['publicId']),
|
||||
system_id=StringIndex.from_json(json['systemId']),
|
||||
frame_id=StringIndex.from_json(json['frameId']),
|
||||
nodes=NodeTreeSnapshot.from_json(json['nodes']),
|
||||
layout=LayoutTreeSnapshot.from_json(json['layout']),
|
||||
text_boxes=TextBoxSnapshot.from_json(json['textBoxes']),
|
||||
scroll_offset_x=float(json['scrollOffsetX']) if 'scrollOffsetX' in json else None,
|
||||
scroll_offset_y=float(json['scrollOffsetY']) if 'scrollOffsetY' in json else None,
|
||||
content_width=float(json['contentWidth']) if 'contentWidth' in json else None,
|
||||
content_height=float(json['contentHeight']) if 'contentHeight' in json else None,
|
||||
)
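

# Illustrative sketch, not part of the generated module: every ``StringIndex``
# field above is an index into the shared ``strings`` table returned by
# ``capture_snapshot``. Assuming ``StringIndex`` behaves as an integer index
# (negative values conventionally mean "no string" in CDP), a field can be
# resolved like this.
def _resolve_string(index: StringIndex, strings: typing.List[str]) -> typing.Optional[str]:
    return strings[index] if 0 <= index < len(strings) else None


def _document_title(document: DocumentSnapshot, strings: typing.List[str]) -> typing.Optional[str]:
    return _resolve_string(document.title, strings)
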
|
||||
|
||||
|
||||
@dataclass
|
||||
class NodeTreeSnapshot:
|
||||
'''
|
||||
Table containing nodes.
|
||||
'''
|
||||
#: Parent node index.
|
||||
parent_index: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: ``Node``'s nodeType.
|
||||
node_type: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: Type of the shadow root the ``Node`` is in. String values are equal to the ``ShadowRootType`` enum.
|
||||
shadow_root_type: typing.Optional[RareStringData] = None
|
||||
|
||||
#: ``Node``'s nodeName.
|
||||
node_name: typing.Optional[typing.List[StringIndex]] = None
|
||||
|
||||
#: ``Node``'s nodeValue.
|
||||
node_value: typing.Optional[typing.List[StringIndex]] = None
|
||||
|
||||
#: ``Node``'s id, corresponds to DOM.Node.backendNodeId.
|
||||
backend_node_id: typing.Optional[typing.List[dom.BackendNodeId]] = None
|
||||
|
||||
#: Attributes of an ``Element`` node. Flattened name, value pairs.
|
||||
attributes: typing.Optional[typing.List[ArrayOfStrings]] = None
|
||||
|
||||
#: Only set for textarea elements, contains the text value.
|
||||
text_value: typing.Optional[RareStringData] = None
|
||||
|
||||
#: Only set for input elements, contains the input's associated text value.
|
||||
input_value: typing.Optional[RareStringData] = None
|
||||
|
||||
#: Only set for radio and checkbox input elements, indicates if the element has been checked
|
||||
input_checked: typing.Optional[RareBooleanData] = None
|
||||
|
||||
#: Only set for option elements, indicates if the element has been selected
|
||||
option_selected: typing.Optional[RareBooleanData] = None
|
||||
|
||||
#: The index of the document in the list of the snapshot documents.
|
||||
content_document_index: typing.Optional[RareIntegerData] = None
|
||||
|
||||
#: Type of a pseudo element node.
|
||||
pseudo_type: typing.Optional[RareStringData] = None
|
||||
|
||||
#: Whether this DOM node responds to mouse clicks. This includes nodes that have had click
|
||||
#: event listeners attached via JavaScript as well as anchor tags that naturally navigate when
|
||||
#: clicked.
|
||||
is_clickable: typing.Optional[RareBooleanData] = None
|
||||
|
||||
#: The selected url for nodes with a srcset attribute.
|
||||
current_source_url: typing.Optional[RareStringData] = None
|
||||
|
||||
#: The url of the script (if any) that generates this node.
|
||||
origin_url: typing.Optional[RareStringData] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
if self.parent_index is not None:
|
||||
json['parentIndex'] = [i for i in self.parent_index]
|
||||
if self.node_type is not None:
|
||||
json['nodeType'] = [i for i in self.node_type]
|
||||
if self.shadow_root_type is not None:
|
||||
json['shadowRootType'] = self.shadow_root_type.to_json()
|
||||
if self.node_name is not None:
|
||||
json['nodeName'] = [i.to_json() for i in self.node_name]
|
||||
if self.node_value is not None:
|
||||
json['nodeValue'] = [i.to_json() for i in self.node_value]
|
||||
if self.backend_node_id is not None:
|
||||
json['backendNodeId'] = [i.to_json() for i in self.backend_node_id]
|
||||
if self.attributes is not None:
|
||||
json['attributes'] = [i.to_json() for i in self.attributes]
|
||||
if self.text_value is not None:
|
||||
json['textValue'] = self.text_value.to_json()
|
||||
if self.input_value is not None:
|
||||
json['inputValue'] = self.input_value.to_json()
|
||||
if self.input_checked is not None:
|
||||
json['inputChecked'] = self.input_checked.to_json()
|
||||
if self.option_selected is not None:
|
||||
json['optionSelected'] = self.option_selected.to_json()
|
||||
if self.content_document_index is not None:
|
||||
json['contentDocumentIndex'] = self.content_document_index.to_json()
|
||||
if self.pseudo_type is not None:
|
||||
json['pseudoType'] = self.pseudo_type.to_json()
|
||||
if self.is_clickable is not None:
|
||||
json['isClickable'] = self.is_clickable.to_json()
|
||||
if self.current_source_url is not None:
|
||||
json['currentSourceURL'] = self.current_source_url.to_json()
|
||||
if self.origin_url is not None:
|
||||
json['originURL'] = self.origin_url.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
parent_index=[int(i) for i in json['parentIndex']] if 'parentIndex' in json else None,
|
||||
node_type=[int(i) for i in json['nodeType']] if 'nodeType' in json else None,
|
||||
shadow_root_type=RareStringData.from_json(json['shadowRootType']) if 'shadowRootType' in json else None,
|
||||
node_name=[StringIndex.from_json(i) for i in json['nodeName']] if 'nodeName' in json else None,
|
||||
node_value=[StringIndex.from_json(i) for i in json['nodeValue']] if 'nodeValue' in json else None,
|
||||
backend_node_id=[dom.BackendNodeId.from_json(i) for i in json['backendNodeId']] if 'backendNodeId' in json else None,
|
||||
attributes=[ArrayOfStrings.from_json(i) for i in json['attributes']] if 'attributes' in json else None,
|
||||
text_value=RareStringData.from_json(json['textValue']) if 'textValue' in json else None,
|
||||
input_value=RareStringData.from_json(json['inputValue']) if 'inputValue' in json else None,
|
||||
input_checked=RareBooleanData.from_json(json['inputChecked']) if 'inputChecked' in json else None,
|
||||
option_selected=RareBooleanData.from_json(json['optionSelected']) if 'optionSelected' in json else None,
|
||||
content_document_index=RareIntegerData.from_json(json['contentDocumentIndex']) if 'contentDocumentIndex' in json else None,
|
||||
pseudo_type=RareStringData.from_json(json['pseudoType']) if 'pseudoType' in json else None,
|
||||
is_clickable=RareBooleanData.from_json(json['isClickable']) if 'isClickable' in json else None,
|
||||
current_source_url=RareStringData.from_json(json['currentSourceURL']) if 'currentSourceURL' in json else None,
|
||||
origin_url=RareStringData.from_json(json['originURL']) if 'originURL' in json else None,
|
||||
)
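

# Illustrative sketch, not part of the generated module: ``parent_index`` is a
# parallel column where entry ``i`` holds the parent row of node ``i`` (a
# negative value conventionally marks the root). Grouping by parent rebuilds
# the tree shape from the flattened table.
def _children_by_parent(nodes: NodeTreeSnapshot) -> typing.Dict[int, typing.List[int]]:
    children: typing.Dict[int, typing.List[int]] = {}
    for child, parent in enumerate(nodes.parent_index or []):
        children.setdefault(parent, []).append(child)
    return children
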
|
||||
|
||||
|
||||
@dataclass
|
||||
class LayoutTreeSnapshot:
|
||||
'''
|
||||
Table of details of an element in the DOM tree with a LayoutObject.
|
||||
'''
|
||||
#: Index of the corresponding node in the ``NodeTreeSnapshot`` array returned by ``captureSnapshot``.
|
||||
node_index: typing.List[int]
|
||||
|
||||
#: Array of indexes specifying computed style strings, filtered according to the ``computedStyles`` parameter passed to ``captureSnapshot``.
|
||||
styles: typing.List[ArrayOfStrings]
|
||||
|
||||
#: The absolute position bounding box.
|
||||
bounds: typing.List[Rectangle]
|
||||
|
||||
#: Contents of the LayoutText, if any.
|
||||
text: typing.List[StringIndex]
|
||||
|
||||
#: Stacking context information.
|
||||
stacking_contexts: RareBooleanData
|
||||
|
||||
#: Global paint order index, which is determined by the stacking order of the nodes. Nodes
|
||||
#: that are painted together will have the same index. Only provided if includePaintOrder in
|
||||
#: captureSnapshot was true.
|
||||
paint_orders: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: The offset rect of nodes. Only available when includeDOMRects is set to true
|
||||
offset_rects: typing.Optional[typing.List[Rectangle]] = None
|
||||
|
||||
#: The scroll rect of nodes. Only available when includeDOMRects is set to true
|
||||
scroll_rects: typing.Optional[typing.List[Rectangle]] = None
|
||||
|
||||
#: The client rect of nodes. Only available when includeDOMRects is set to true
|
||||
client_rects: typing.Optional[typing.List[Rectangle]] = None
|
||||
|
||||
#: The list of background colors that are blended with colors of overlapping elements.
|
||||
blended_background_colors: typing.Optional[typing.List[StringIndex]] = None
|
||||
|
||||
#: The list of computed text opacities.
|
||||
text_color_opacities: typing.Optional[typing.List[float]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['nodeIndex'] = [i for i in self.node_index]
|
||||
json['styles'] = [i.to_json() for i in self.styles]
|
||||
json['bounds'] = [i.to_json() for i in self.bounds]
|
||||
json['text'] = [i.to_json() for i in self.text]
|
||||
json['stackingContexts'] = self.stacking_contexts.to_json()
|
||||
if self.paint_orders is not None:
|
||||
json['paintOrders'] = [i for i in self.paint_orders]
|
||||
if self.offset_rects is not None:
|
||||
json['offsetRects'] = [i.to_json() for i in self.offset_rects]
|
||||
if self.scroll_rects is not None:
|
||||
json['scrollRects'] = [i.to_json() for i in self.scroll_rects]
|
||||
if self.client_rects is not None:
|
||||
json['clientRects'] = [i.to_json() for i in self.client_rects]
|
||||
if self.blended_background_colors is not None:
|
||||
json['blendedBackgroundColors'] = [i.to_json() for i in self.blended_background_colors]
|
||||
if self.text_color_opacities is not None:
|
||||
json['textColorOpacities'] = [i for i in self.text_color_opacities]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
node_index=[int(i) for i in json['nodeIndex']],
|
||||
styles=[ArrayOfStrings.from_json(i) for i in json['styles']],
|
||||
bounds=[Rectangle.from_json(i) for i in json['bounds']],
|
||||
text=[StringIndex.from_json(i) for i in json['text']],
|
||||
stacking_contexts=RareBooleanData.from_json(json['stackingContexts']),
|
||||
paint_orders=[int(i) for i in json['paintOrders']] if 'paintOrders' in json else None,
|
||||
offset_rects=[Rectangle.from_json(i) for i in json['offsetRects']] if 'offsetRects' in json else None,
|
||||
scroll_rects=[Rectangle.from_json(i) for i in json['scrollRects']] if 'scrollRects' in json else None,
|
||||
client_rects=[Rectangle.from_json(i) for i in json['clientRects']] if 'clientRects' in json else None,
|
||||
blended_background_colors=[StringIndex.from_json(i) for i in json['blendedBackgroundColors']] if 'blendedBackgroundColors' in json else None,
|
||||
text_color_opacities=[float(i) for i in json['textColorOpacities']] if 'textColorOpacities' in json else None,
|
||||
)
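

# Illustrative sketch, not part of the generated module: ``node_index`` and
# ``bounds`` are parallel columns, so zipping them maps each DOM node row to
# its absolute bounding box (a ``Rectangle`` is a list of floats; CDP
# conventionally orders it as [x, y, width, height]).
def _bounds_by_node(layout: LayoutTreeSnapshot) -> typing.Dict[int, Rectangle]:
    return dict(zip(layout.node_index, layout.bounds))
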
|
||||
|
||||
|
||||
@dataclass
|
||||
class TextBoxSnapshot:
|
||||
'''
|
||||
Table of details of the post layout rendered text positions. The exact layout should not be regarded as
|
||||
stable and may change between versions.
|
||||
'''
|
||||
#: Index of the layout tree node that owns this box collection.
|
||||
layout_index: typing.List[int]
|
||||
|
||||
#: The absolute position bounding box.
|
||||
bounds: typing.List[Rectangle]
|
||||
|
||||
#: The starting index in characters, for this post layout textbox substring. Characters that
|
||||
#: would be represented as a surrogate pair in UTF-16 have length 2.
|
||||
start: typing.List[int]
|
||||
|
||||
#: The number of characters in this post layout textbox substring. Characters that would be
|
||||
#: represented as a surrogate pair in UTF-16 have length 2.
|
||||
length: typing.List[int]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['layoutIndex'] = [i for i in self.layout_index]
|
||||
json['bounds'] = [i.to_json() for i in self.bounds]
|
||||
json['start'] = [i for i in self.start]
|
||||
json['length'] = [i for i in self.length]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
layout_index=[int(i) for i in json['layoutIndex']],
|
||||
bounds=[Rectangle.from_json(i) for i in json['bounds']],
|
||||
start=[int(i) for i in json['start']],
|
||||
length=[int(i) for i in json['length']],
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables DOM snapshot agent for the given page.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMSnapshot.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables DOM snapshot agent for the given page.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMSnapshot.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_snapshot(
|
||||
computed_style_whitelist: typing.List[str],
|
||||
include_event_listeners: typing.Optional[bool] = None,
|
||||
include_paint_order: typing.Optional[bool] = None,
|
||||
include_user_agent_shadow_tree: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[DOMNode], typing.List[LayoutTreeNode], typing.List[ComputedStyle]]]:
|
||||
'''
|
||||
Returns a document snapshot, including the full DOM tree of the root node (including iframes,
|
||||
template contents, and imported documents) in a flattened array, as well as layout and
|
||||
white-listed computed style information for the nodes. Shadow DOM in the returned DOM tree is
|
||||
flattened.
|
||||
|
||||
:param computed_style_whitelist: Whitelist of computed styles to return.
|
||||
:param include_event_listeners: *(Optional)* Whether or not to retrieve details of DOM listeners (default false).
|
||||
:param include_paint_order: *(Optional)* Whether to determine and include the paint order index of LayoutTreeNodes (default false).
|
||||
:param include_user_agent_shadow_tree: *(Optional)* Whether to include UA shadow tree in the snapshot (default false).
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **domNodes** - The nodes in the DOM tree. The DOMNode at index 0 corresponds to the root document.
|
||||
1. **layoutTreeNodes** - The nodes in the layout tree.
|
||||
2. **computedStyles** - Whitelisted ComputedStyle properties for each node in the layout tree.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['computedStyleWhitelist'] = [i for i in computed_style_whitelist]
|
||||
if include_event_listeners is not None:
|
||||
params['includeEventListeners'] = include_event_listeners
|
||||
if include_paint_order is not None:
|
||||
params['includePaintOrder'] = include_paint_order
|
||||
if include_user_agent_shadow_tree is not None:
|
||||
params['includeUserAgentShadowTree'] = include_user_agent_shadow_tree
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMSnapshot.getSnapshot',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[DOMNode.from_json(i) for i in json['domNodes']],
|
||||
[LayoutTreeNode.from_json(i) for i in json['layoutTreeNodes']],
|
||||
[ComputedStyle.from_json(i) for i in json['computedStyles']]
|
||||
)
|
||||
|
||||
|
||||
def capture_snapshot(
|
||||
computed_styles: typing.List[str],
|
||||
include_paint_order: typing.Optional[bool] = None,
|
||||
include_dom_rects: typing.Optional[bool] = None,
|
||||
include_blended_background_colors: typing.Optional[bool] = None,
|
||||
include_text_color_opacities: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[DocumentSnapshot], typing.List[str]]]:
|
||||
'''
|
||||
Returns a document snapshot, including the full DOM tree of the root node (including iframes,
|
||||
template contents, and imported documents) in a flattened array, as well as layout and
|
||||
white-listed computed style information for the nodes. Shadow DOM in the returned DOM tree is
|
||||
flattened.
|
||||
|
||||
:param computed_styles: Whitelist of computed styles to return.
|
||||
:param include_paint_order: *(Optional)* Whether to include layout object paint orders into the snapshot.
|
||||
:param include_dom_rects: *(Optional)* Whether to include DOM rectangles (offsetRects, clientRects, scrollRects) into the snapshot
|
||||
:param include_blended_background_colors: **(EXPERIMENTAL)** *(Optional)* Whether to include blended background colors in the snapshot (default: false). Blended background color is achieved by blending background colors of all elements that overlap with the current element.
|
||||
:param include_text_color_opacities: **(EXPERIMENTAL)** *(Optional)* Whether to include text color opacity in the snapshot (default: false). An element might have the opacity property set that affects the text color of the element. The final text color opacity is computed based on the opacity of all overlapping elements.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **documents** - The nodes in the DOM tree. The DOMNode at index 0 corresponds to the root document.
|
||||
1. **strings** - Shared string table that all string properties refer to with indexes.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['computedStyles'] = [i for i in computed_styles]
|
||||
if include_paint_order is not None:
|
||||
params['includePaintOrder'] = include_paint_order
|
||||
if include_dom_rects is not None:
|
||||
params['includeDOMRects'] = include_dom_rects
|
||||
if include_blended_background_colors is not None:
|
||||
params['includeBlendedBackgroundColors'] = include_blended_background_colors
|
||||
if include_text_color_opacities is not None:
|
||||
params['includeTextColorOpacities'] = include_text_color_opacities
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMSnapshot.captureSnapshot',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[DocumentSnapshot.from_json(i) for i in json['documents']],
|
||||
[str(i) for i in json['strings']]
|
||||
)
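

# Illustrative sketch, not part of the generated module: the command functions
# in this file are generators that yield a single CDP request payload and then
# expect the raw response to be sent back in. ``send_over_websocket`` is a
# hypothetical transport standing in for whatever client performs the round
# trip; it is assumed to return the 'result' dict of the CDP reply.
def _run_capture_snapshot(send_over_websocket) -> typing.Tuple[typing.List[DocumentSnapshot], typing.List[str]]:
    gen = capture_snapshot(computed_styles=['display', 'color'])
    request = next(gen)                      # {'method': 'DOMSnapshot.captureSnapshot', 'params': {...}}
    response = send_over_websocket(request)  # hypothetical transport call
    try:
        gen.send(response)
    except StopIteration as exc:
        return exc.value                     # (documents, strings)
    raise RuntimeError('generator did not finish after one response')
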
|
||||
@@ -0,0 +1,201 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DOMStorage (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class StorageId:
|
||||
'''
|
||||
DOM Storage identifier.
|
||||
'''
|
||||
#: Security origin for the storage.
|
||||
security_origin: str
|
||||
|
||||
#: Whether the storage is local storage (not session storage).
|
||||
is_local_storage: bool
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['securityOrigin'] = self.security_origin
|
||||
json['isLocalStorage'] = self.is_local_storage
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
security_origin=str(json['securityOrigin']),
|
||||
is_local_storage=bool(json['isLocalStorage']),
|
||||
)
|
||||
|
||||
|
||||
class Item(list):
|
||||
'''
|
||||
DOM Storage item.
|
||||
'''
|
||||
def to_json(self) -> typing.List[str]:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: typing.List[str]) -> Item:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'Item({})'.format(super().__repr__())
|
||||
|
||||
|
||||
def clear(
|
||||
storage_id: StorageId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param storage_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['storageId'] = storage_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.clear',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables storage tracking; storage events will no longer be sent to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables storage tracking; storage events will now be delivered to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_dom_storage_items(
|
||||
storage_id: StorageId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Item]]:
|
||||
'''
|
||||
:param storage_id:
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['storageId'] = storage_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.getDOMStorageItems',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [Item.from_json(i) for i in json['entries']]
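

# Illustrative sketch, not part of the generated module: each ``Item`` is a
# flat list that, per the DOMStorage protocol, holds a key/value pair, so the
# entries returned above can be folded into a plain dict.
def _items_to_dict(items: typing.List[Item]) -> typing.Dict[str, str]:
    return {str(item[0]): str(item[1]) for item in items}
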
|
||||
|
||||
|
||||
def remove_dom_storage_item(
|
||||
storage_id: StorageId,
|
||||
key: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param storage_id:
|
||||
:param key:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['storageId'] = storage_id.to_json()
|
||||
params['key'] = key
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.removeDOMStorageItem',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_dom_storage_item(
|
||||
storage_id: StorageId,
|
||||
key: str,
|
||||
value: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
:param storage_id:
|
||||
:param key:
|
||||
:param value:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['storageId'] = storage_id.to_json()
|
||||
params['key'] = key
|
||||
params['value'] = value
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMStorage.setDOMStorageItem',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemAdded')
|
||||
@dataclass
|
||||
class DomStorageItemAdded:
|
||||
storage_id: StorageId
|
||||
key: str
|
||||
new_value: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemAdded:
|
||||
return cls(
|
||||
storage_id=StorageId.from_json(json['storageId']),
|
||||
key=str(json['key']),
|
||||
new_value=str(json['newValue'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemRemoved')
|
||||
@dataclass
|
||||
class DomStorageItemRemoved:
|
||||
storage_id: StorageId
|
||||
key: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemRemoved:
|
||||
return cls(
|
||||
storage_id=StorageId.from_json(json['storageId']),
|
||||
key=str(json['key'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemUpdated')
|
||||
@dataclass
|
||||
class DomStorageItemUpdated:
|
||||
storage_id: StorageId
|
||||
key: str
|
||||
old_value: str
|
||||
new_value: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemUpdated:
|
||||
return cls(
|
||||
storage_id=StorageId.from_json(json['storageId']),
|
||||
key=str(json['key']),
|
||||
old_value=str(json['oldValue']),
|
||||
new_value=str(json['newValue'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemsCleared')
|
||||
@dataclass
|
||||
class DomStorageItemsCleared:
|
||||
storage_id: StorageId
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemsCleared:
|
||||
return cls(
|
||||
storage_id=StorageId.from_json(json['storageId'])
|
||||
)
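

# Illustrative sketch, not part of the generated module: incoming DOMStorage
# events arrive as plain JSON dicts and can be decoded with the event classes
# above. The payload here is a made-up example, not captured browser output.
def _example_decode_event() -> DomStorageItemUpdated:
    payload = {
        'storageId': {'securityOrigin': 'https://example.com', 'isLocalStorage': True},
        'key': 'theme',
        'oldValue': 'light',
        'newValue': 'dark',
    }
    return DomStorageItemUpdated.from_json(payload)
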
|
||||
@@ -0,0 +1,804 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Emulation
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import network
|
||||
from . import page
|
||||
|
||||
|
||||
@dataclass
|
||||
class ScreenOrientation:
|
||||
'''
|
||||
Screen orientation.
|
||||
'''
|
||||
#: Orientation type.
|
||||
type_: str
|
||||
|
||||
#: Orientation angle.
|
||||
angle: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_
|
||||
json['angle'] = self.angle
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=str(json['type']),
|
||||
angle=int(json['angle']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class DisplayFeature:
|
||||
#: Orientation of a display feature in relation to the screen.
|
||||
orientation: str
|
||||
|
||||
#: The offset from the screen origin in either the x (for vertical
|
||||
#: orientation) or y (for horizontal orientation) direction.
|
||||
offset: int
|
||||
|
||||
#: A display feature may mask content such that it is not physically
|
||||
#: displayed - this length along with the offset describes this area.
|
||||
#: A display feature that only splits content will have a 0 mask_length.
|
||||
mask_length: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['orientation'] = self.orientation
|
||||
json['offset'] = self.offset
|
||||
json['maskLength'] = self.mask_length
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
orientation=str(json['orientation']),
|
||||
offset=int(json['offset']),
|
||||
mask_length=int(json['maskLength']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class MediaFeature:
|
||||
name: str
|
||||
|
||||
value: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
value=str(json['value']),
|
||||
)
|
||||
|
||||
|
||||
class VirtualTimePolicy(enum.Enum):
|
||||
'''
|
||||
advance: If the scheduler runs out of immediate work, the virtual time base may fast forward to
|
||||
allow the next delayed task (if any) to run; pause: The virtual time base may not advance;
|
||||
pauseIfNetworkFetchesPending: The virtual time base may not advance if there are any pending
|
||||
resource fetches.
|
||||
'''
|
||||
ADVANCE = "advance"
|
||||
PAUSE = "pause"
|
||||
PAUSE_IF_NETWORK_FETCHES_PENDING = "pauseIfNetworkFetchesPending"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class UserAgentBrandVersion:
|
||||
'''
|
||||
Used to specify User Agent Client Hints to emulate. See https://wicg.github.io/ua-client-hints
|
||||
'''
|
||||
brand: str
|
||||
|
||||
version: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['brand'] = self.brand
|
||||
json['version'] = self.version
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
brand=str(json['brand']),
|
||||
version=str(json['version']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class UserAgentMetadata:
|
||||
'''
|
||||
Used to specify User Agent Client Hints to emulate. See https://wicg.github.io/ua-client-hints
|
||||
Missing optional values will be filled in by the target with what it would normally use.
|
||||
'''
|
||||
platform: str
|
||||
|
||||
platform_version: str
|
||||
|
||||
architecture: str
|
||||
|
||||
model: str
|
||||
|
||||
mobile: bool
|
||||
|
||||
brands: typing.Optional[typing.List[UserAgentBrandVersion]] = None
|
||||
|
||||
full_version_list: typing.Optional[typing.List[UserAgentBrandVersion]] = None
|
||||
|
||||
full_version: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['platform'] = self.platform
|
||||
json['platformVersion'] = self.platform_version
|
||||
json['architecture'] = self.architecture
|
||||
json['model'] = self.model
|
||||
json['mobile'] = self.mobile
|
||||
if self.brands is not None:
|
||||
json['brands'] = [i.to_json() for i in self.brands]
|
||||
if self.full_version_list is not None:
|
||||
json['fullVersionList'] = [i.to_json() for i in self.full_version_list]
|
||||
if self.full_version is not None:
|
||||
json['fullVersion'] = self.full_version
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
platform=str(json['platform']),
|
||||
platform_version=str(json['platformVersion']),
|
||||
architecture=str(json['architecture']),
|
||||
model=str(json['model']),
|
||||
mobile=bool(json['mobile']),
|
||||
brands=[UserAgentBrandVersion.from_json(i) for i in json['brands']] if 'brands' in json else None,
|
||||
full_version_list=[UserAgentBrandVersion.from_json(i) for i in json['fullVersionList']] if 'fullVersionList' in json else None,
|
||||
full_version=str(json['fullVersion']) if 'fullVersion' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class DisabledImageType(enum.Enum):
|
||||
'''
|
||||
Enum of image types that can be disabled.
|
||||
'''
|
||||
AVIF = "avif"
|
||||
JXL = "jxl"
|
||||
WEBP = "webp"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
def can_emulate() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,bool]:
|
||||
'''
|
||||
Tells whether emulation is supported.
|
||||
|
||||
:returns: True if emulation is supported.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.canEmulate',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return bool(json['result'])
|
||||
|
||||
|
||||
def clear_device_metrics_override() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears the overridden device metrics.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.clearDeviceMetricsOverride',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def clear_geolocation_override() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears the overridden Geolocation Position and Error.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.clearGeolocationOverride',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def reset_page_scale_factor() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Requests that the page scale factor be reset to its initial value.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.resetPageScaleFactor',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_focus_emulation_enabled(
|
||||
enabled: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables or disables simulating a focused and active page.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param enabled: Whether to enable or disable focus emulation.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['enabled'] = enabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setFocusEmulationEnabled',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_auto_dark_mode_override(
|
||||
enabled: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Automatically render all web content using a dark theme.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param enabled: *(Optional)* Whether to enable or disable automatic dark mode. If not specified, any existing override will be cleared.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if enabled is not None:
|
||||
params['enabled'] = enabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setAutoDarkModeOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_cpu_throttling_rate(
|
||||
rate: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables CPU throttling to emulate slow CPUs.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param rate: Throttling rate as a slowdown factor (1 is no throttle, 2 is 2x slowdown, etc).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['rate'] = rate
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setCPUThrottlingRate',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_default_background_color_override(
|
||||
color: typing.Optional[dom.RGBA] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets or clears an override of the default background color of the frame. This override is used
|
||||
if the content does not specify one.
|
||||
|
||||
:param color: *(Optional)* RGBA of the default background color. If not specified, any existing override will be cleared.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if color is not None:
|
||||
params['color'] = color.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setDefaultBackgroundColorOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_device_metrics_override(
|
||||
width: int,
|
||||
height: int,
|
||||
device_scale_factor: float,
|
||||
mobile: bool,
|
||||
scale: typing.Optional[float] = None,
|
||||
screen_width: typing.Optional[int] = None,
|
||||
screen_height: typing.Optional[int] = None,
|
||||
position_x: typing.Optional[int] = None,
|
||||
position_y: typing.Optional[int] = None,
|
||||
dont_set_visible_size: typing.Optional[bool] = None,
|
||||
screen_orientation: typing.Optional[ScreenOrientation] = None,
|
||||
viewport: typing.Optional[page.Viewport] = None,
|
||||
display_feature: typing.Optional[DisplayFeature] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides the values of device screen dimensions (window.screen.width, window.screen.height,
|
||||
window.innerWidth, window.innerHeight, and "device-width"/"device-height"-related CSS media
|
||||
query results).
|
||||
|
||||
:param width: Overriding width value in pixels (minimum 0, maximum 10000000). 0 disables the override.
|
||||
:param height: Overriding height value in pixels (minimum 0, maximum 10000000). 0 disables the override.
|
||||
:param device_scale_factor: Overriding device scale factor value. 0 disables the override.
|
||||
:param mobile: Whether to emulate mobile device. This includes viewport meta tag, overlay scrollbars, text autosizing and more.
|
||||
:param scale: **(EXPERIMENTAL)** *(Optional)* Scale to apply to resulting view image.
|
||||
:param screen_width: **(EXPERIMENTAL)** *(Optional)* Overriding screen width value in pixels (minimum 0, maximum 10000000).
|
||||
:param screen_height: **(EXPERIMENTAL)** *(Optional)* Overriding screen height value in pixels (minimum 0, maximum 10000000).
|
||||
:param position_x: **(EXPERIMENTAL)** *(Optional)* Overriding view X position on screen in pixels (minimum 0, maximum 10000000).
|
||||
:param position_y: **(EXPERIMENTAL)** *(Optional)* Overriding view Y position on screen in pixels (minimum 0, maximum 10000000).
|
||||
:param dont_set_visible_size: **(EXPERIMENTAL)** *(Optional)* Do not set visible view size, rely upon explicit setVisibleSize call.
|
||||
:param screen_orientation: *(Optional)* Screen orientation override.
|
||||
:param viewport: **(EXPERIMENTAL)** *(Optional)* If set, the visible area of the page will be overridden to this viewport. This viewport change is not observed by the page, e.g. viewport-relative elements do not change positions.
|
||||
:param display_feature: **(EXPERIMENTAL)** *(Optional)* If set, the display feature of a multi-segment screen. If not set, multi-segment support is turned off.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['width'] = width
|
||||
params['height'] = height
|
||||
params['deviceScaleFactor'] = device_scale_factor
|
||||
params['mobile'] = mobile
|
||||
if scale is not None:
|
||||
params['scale'] = scale
|
||||
if screen_width is not None:
|
||||
params['screenWidth'] = screen_width
|
||||
if screen_height is not None:
|
||||
params['screenHeight'] = screen_height
|
||||
if position_x is not None:
|
||||
params['positionX'] = position_x
|
||||
if position_y is not None:
|
||||
params['positionY'] = position_y
|
||||
if dont_set_visible_size is not None:
|
||||
params['dontSetVisibleSize'] = dont_set_visible_size
|
||||
if screen_orientation is not None:
|
||||
params['screenOrientation'] = screen_orientation.to_json()
|
||||
if viewport is not None:
|
||||
params['viewport'] = viewport.to_json()
|
||||
if display_feature is not None:
|
||||
params['displayFeature'] = display_feature.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setDeviceMetricsOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
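

# Illustrative sketch, not part of the generated module: a typical mobile
# metrics override. The pixel values and scale factor are arbitrary example
# numbers, not protocol defaults; the orientation type string is one of the
# values the protocol defines for ScreenOrientation.
def _example_mobile_metrics_payload() -> T_JSON_DICT:
    gen = set_device_metrics_override(
        width=390,
        height=844,
        device_scale_factor=3.0,
        mobile=True,
        screen_orientation=ScreenOrientation(type_='portraitPrimary', angle=0),
    )
    return next(gen)    # ready to send as 'Emulation.setDeviceMetricsOverride'
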
|
||||
|
||||
|
||||
def set_scrollbars_hidden(
|
||||
hidden: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param hidden: Whether scrollbars should always be hidden.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['hidden'] = hidden
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setScrollbarsHidden',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_document_cookie_disabled(
|
||||
disabled: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param disabled: Whether the document.cookie API should be disabled.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['disabled'] = disabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setDocumentCookieDisabled',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_emit_touch_events_for_mouse(
|
||||
enabled: bool,
|
||||
configuration: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param enabled: Whether touch emulation based on mouse input should be enabled.
|
||||
:param configuration: *(Optional)* Touch/gesture events configuration. Default: current platform.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['enabled'] = enabled
|
||||
if configuration is not None:
|
||||
params['configuration'] = configuration
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setEmitTouchEventsForMouse',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_emulated_media(
|
||||
media: typing.Optional[str] = None,
|
||||
features: typing.Optional[typing.List[MediaFeature]] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Emulates the given media type or media feature for CSS media queries.
|
||||
|
||||
:param media: *(Optional)* Media type to emulate. Empty string disables the override.
|
||||
:param features: *(Optional)* Media features to emulate.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if media is not None:
|
||||
params['media'] = media
|
||||
if features is not None:
|
||||
params['features'] = [i.to_json() for i in features]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setEmulatedMedia',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
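

# Illustrative sketch, not part of the generated module: forcing the
# ``prefers-color-scheme: dark`` media feature, which is how dark-mode CSS is
# usually exercised through this command.
def _example_dark_scheme_payload() -> T_JSON_DICT:
    gen = set_emulated_media(
        features=[MediaFeature(name='prefers-color-scheme', value='dark')],
    )
    return next(gen)    # ready to send as 'Emulation.setEmulatedMedia'
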
|
||||
|
||||
|
||||
def set_emulated_vision_deficiency(
|
||||
type_: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Emulates the given vision deficiency.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param type_: Vision deficiency to emulate.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['type'] = type_
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setEmulatedVisionDeficiency',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_geolocation_override(
|
||||
latitude: typing.Optional[float] = None,
|
||||
longitude: typing.Optional[float] = None,
|
||||
accuracy: typing.Optional[float] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides the Geolocation Position or Error. Omitting any of the parameters emulates position
|
||||
unavailable.
|
||||
|
||||
:param latitude: *(Optional)* Mock latitude
|
||||
:param longitude: *(Optional)* Mock longitude
|
||||
:param accuracy: *(Optional)* Mock accuracy
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if latitude is not None:
|
||||
params['latitude'] = latitude
|
||||
if longitude is not None:
|
||||
params['longitude'] = longitude
|
||||
if accuracy is not None:
|
||||
params['accuracy'] = accuracy
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setGeolocationOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_idle_override(
|
||||
is_user_active: bool,
|
||||
is_screen_unlocked: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides the Idle state.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param is_user_active: Mock isUserActive
|
||||
:param is_screen_unlocked: Mock isScreenUnlocked
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['isUserActive'] = is_user_active
|
||||
params['isScreenUnlocked'] = is_screen_unlocked
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setIdleOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def clear_idle_override() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears Idle state overrides.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.clearIdleOverride',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_navigator_overrides(
|
||||
platform: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides the value returned by the JavaScript navigator object.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param platform: The platform navigator.platform should return.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['platform'] = platform
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setNavigatorOverrides',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_page_scale_factor(
|
||||
page_scale_factor: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets a specified page scale factor.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param page_scale_factor: Page scale factor.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['pageScaleFactor'] = page_scale_factor
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setPageScaleFactor',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_script_execution_disabled(
|
||||
value: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Switches script execution in the page.
|
||||
|
||||
:param value: Whether script execution should be disabled in the page.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['value'] = value
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setScriptExecutionDisabled',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_touch_emulation_enabled(
|
||||
enabled: bool,
|
||||
max_touch_points: typing.Optional[int] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables touch on platforms which do not support it.
|
||||
|
||||
:param enabled: Whether the touch event emulation should be enabled.
|
||||
:param max_touch_points: *(Optional)* Maximum touch points supported. Defaults to one.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['enabled'] = enabled
|
||||
if max_touch_points is not None:
|
||||
params['maxTouchPoints'] = max_touch_points
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setTouchEmulationEnabled',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_virtual_time_policy(
|
||||
policy: VirtualTimePolicy,
|
||||
budget: typing.Optional[float] = None,
|
||||
max_virtual_time_task_starvation_count: typing.Optional[int] = None,
|
||||
initial_virtual_time: typing.Optional[network.TimeSinceEpoch] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,float]:
|
||||
'''
|
||||
Turns on virtual time for all frames (replacing real-time with a synthetic time source) and sets
|
||||
the current virtual time policy. Note this supersedes any previous time budget.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param policy:
|
||||
:param budget: *(Optional)* If set, after this many virtual milliseconds have elapsed virtual time will be paused and a virtualTimeBudgetExpired event is sent.
|
||||
:param max_virtual_time_task_starvation_count: *(Optional)* If set, this specifies the maximum number of tasks that can be run before virtual time is forced forwards to prevent deadlock.
|
||||
:param initial_virtual_time: *(Optional)* If set, base::Time::Now will be overridden to initially return this value.
|
||||
:returns: Absolute timestamp at which virtual time was first enabled (up time in milliseconds).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['policy'] = policy.to_json()
|
||||
if budget is not None:
|
||||
params['budget'] = budget
|
||||
if max_virtual_time_task_starvation_count is not None:
|
||||
params['maxVirtualTimeTaskStarvationCount'] = max_virtual_time_task_starvation_count
|
||||
if initial_virtual_time is not None:
|
||||
params['initialVirtualTime'] = initial_virtual_time.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setVirtualTimePolicy',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return float(json['virtualTimeTicksBase'])
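

# Illustrative sketch, not part of the generated module: pause virtual time
# whenever network fetches are pending and grant a 5000 virtual-millisecond
# budget, after which a virtualTimeBudgetExpired event is emitted. The budget
# value is an arbitrary example.
def _example_virtual_time_payload() -> T_JSON_DICT:
    gen = set_virtual_time_policy(
        policy=VirtualTimePolicy.PAUSE_IF_NETWORK_FETCHES_PENDING,
        budget=5000.0,
    )
    return next(gen)    # ready to send as 'Emulation.setVirtualTimePolicy'
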
|
||||
|
||||
|
||||
def set_locale_override(
|
||||
locale: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides default host system locale with the specified one.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param locale: *(Optional)* ICU style C locale (e.g. "en_US"). If not specified or empty, disables the override and restores default host system locale.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if locale is not None:
|
||||
params['locale'] = locale
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setLocaleOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_timezone_override(
|
||||
timezone_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides default host system timezone with the specified one.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param timezone_id: The timezone identifier. If empty, disables the override and restores default host system timezone.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['timezoneId'] = timezone_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setTimezoneOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_visible_size(
|
||||
width: int,
|
||||
height: int
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Resizes the frame/viewport of the page. Note that this does not affect the frame's container
|
||||
(e.g. browser window). Can be used to produce screenshots of the specified size. Not supported
|
||||
on Android.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param width: Frame width (DIP).
|
||||
:param height: Frame height (DIP).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['width'] = width
|
||||
params['height'] = height
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setVisibleSize',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_disabled_image_types(
|
||||
image_types: typing.List[DisabledImageType]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param image_types: Image types to disable.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['imageTypes'] = [i.to_json() for i in image_types]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setDisabledImageTypes',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_user_agent_override(
|
||||
user_agent: str,
|
||||
accept_language: typing.Optional[str] = None,
|
||||
platform: typing.Optional[str] = None,
|
||||
user_agent_metadata: typing.Optional[UserAgentMetadata] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Allows overriding user agent with the given string.
|
||||
|
||||
:param user_agent: User agent to use.
|
||||
:param accept_language: *(Optional)* Browser language to emulate.
|
||||
:param platform: *(Optional)* The platform navigator.platform should return.
|
||||
:param user_agent_metadata: **(EXPERIMENTAL)** *(Optional)* To be sent in Sec-CH-UA-* headers and returned in navigator.userAgentData
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['userAgent'] = user_agent
|
||||
if accept_language is not None:
|
||||
params['acceptLanguage'] = accept_language
|
||||
if platform is not None:
|
||||
params['platform'] = platform
|
||||
if user_agent_metadata is not None:
|
||||
params['userAgentMetadata'] = user_agent_metadata.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setUserAgentOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
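

# Illustrative sketch, not part of the generated module: overriding the user
# agent string together with UA client hints metadata. Every string below is a
# placeholder example value, not a recommended or default configuration.
def _example_user_agent_payload() -> T_JSON_DICT:
    metadata = UserAgentMetadata(
        platform='Android',
        platform_version='13',
        architecture='arm',
        model='Pixel 7',
        mobile=True,
        brands=[UserAgentBrandVersion(brand='Chromium', version='100')],
    )
    gen = set_user_agent_override(
        user_agent='Mozilla/5.0 (Linux; Android 13) ExampleBrowser/100',
        accept_language='en-US',
        platform='Android',
        user_agent_metadata=metadata,
    )
    return next(gen)    # ready to send as 'Emulation.setUserAgentOverride'
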
|
||||
|
||||
|
||||
def set_automation_override(
|
||||
enabled: bool
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Allows overriding the automation flag.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param enabled: Whether the override should be enabled.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['enabled'] = enabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Emulation.setAutomationOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Emulation.virtualTimeBudgetExpired')
|
||||
@dataclass
|
||||
class VirtualTimeBudgetExpired:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
Notification sent after the virtual time budget for the current VirtualTimePolicy has run out.
|
||||
'''
|
||||
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> VirtualTimeBudgetExpired:
|
||||
return cls(
|
||||
|
||||
)
|
||||
@@ -0,0 +1,44 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: EventBreakpoints (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
def set_instrumentation_breakpoint(
|
||||
event_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets a breakpoint on a particular native event.
|
||||
|
||||
:param event_name: Instrumentation name to stop on.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'EventBreakpoints.setInstrumentationBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def remove_instrumentation_breakpoint(
|
||||
event_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes a breakpoint on a particular native event.
|
||||
|
||||
:param event_name: Instrumentation name to stop on.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'EventBreakpoints.removeInstrumentationBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||