okay fine

This commit is contained in:
pacnpal
2024-11-03 17:47:26 +00:00
parent 387c4740e7
commit 27f3326e22
10020 changed files with 1935769 additions and 2364 deletions

View File

@@ -0,0 +1,12 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted Internet: Asynchronous I/O and Events.
Twisted Internet is a collection of compatible event-loops for Python. It contains
the code to dispatch events to interested observers and a portable API so that
observers need not care about which event loop is running. Thus, it is possible
to use the same code for different loops, from Twisted's basic, yet portable,
select-based loop to the loops of various GUI toolkits like GTK+ or Tk.
"""

View File

@@ -0,0 +1,66 @@
# -*- test-case-name: twisted.test.test_process -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Cross-platform process-related functionality used by different
L{IReactorProcess} implementations.
"""
from typing import Optional
from twisted.logger import Logger
from twisted.python.deprecate import getWarningMethod
from twisted.python.failure import Failure
from twisted.python.reflect import qual
_log = Logger()
_missingProcessExited = (
"Since Twisted 8.2, IProcessProtocol.processExited "
"is required. %s must implement it."
)
class BaseProcess:
    """
    Shared behaviour for process transports: tracks the child's pid and
    exit status, and delivers C{processExited}/C{processEnded} to the
    protocol, clearing the protocol reference so C{processEnded} is
    delivered at most once.
    """

    pid: Optional[int] = None
    status: Optional[int] = None
    lostProcess = 0
    proto = None

    def __init__(self, protocol):
        self.proto = protocol

    def _callProcessExited(self, reason):
        """
        Deliver C{reason} to the protocol's C{processExited} method, or emit
        a deprecation warning if the protocol does not implement it.
        """
        # Sentinel rather than None: a protocol could conceivably set the
        # attribute to None, which should not count as "implemented".
        marker = object()
        handler = getattr(self.proto, "processExited", marker)
        if handler is not marker:
            with _log.failuresHandled("while calling processExited:"):
                handler(Failure(reason))
        else:
            getWarningMethod()(
                _missingProcessExited % (qual(self.proto.__class__),),
                DeprecationWarning,
                stacklevel=0,
            )

    def processEnded(self, status):
        """
        This is called when the child terminates.
        """
        self.status = status
        self.lostProcess += 1
        self.pid = None
        self._callProcessExited(self._getReason(status))
        self.maybeCallProcessEnded()

    def maybeCallProcessEnded(self):
        """
        Call processEnded on protocol after final cleanup.
        """
        if self.proto is None:
            return
        # Swap the protocol out before notifying so a re-entrant call
        # cannot deliver processEnded twice.
        proto, self.proto = self.proto, None
        with _log.failuresHandled("while calling processEnded:"):
            proto.processEnded(Failure(self._getReason(self.status)))

View File

@@ -0,0 +1,25 @@
"""
Support similar deprecation of several reactors.
"""
import warnings
from incremental import Version, getVersionString
from twisted.python.deprecate import DEPRECATION_WARNING_FORMAT
def deprecatedGnomeReactor(name: str, version: Version) -> None:
    """
    Emit a deprecation warning about a gnome-related reactor.

    @param name: The name of the reactor. For example, C{"gtk2reactor"}.
    @param version: The version in which the deprecation was introduced.
    """
    prefix = DEPRECATION_WARNING_FORMAT % {
        "fqpn": "twisted.internet." + name,
        "version": getVersionString(version),
    }
    warnings.warn(
        prefix + ". Please use twisted.internet.gireactor instead.",
        category=DeprecationWarning,
    )

View File

@@ -0,0 +1,397 @@
# -*- test-case-name: twisted.test.test_process -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Windows Process Management, used with reactor.spawnProcess
"""
import os
import sys
from zope.interface import implementer
import pywintypes
# Win32 imports
import win32api
import win32con
import win32event
import win32file
import win32pipe
import win32process
import win32security
from twisted.internet import _pollingfile, error
from twisted.internet._baseprocess import BaseProcess
from twisted.internet.interfaces import IConsumer, IProcessTransport, IProducer
from twisted.python.win32 import quoteArguments
# Security attributes for pipes
PIPE_ATTRS_INHERITABLE = win32security.SECURITY_ATTRIBUTES()
PIPE_ATTRS_INHERITABLE.bInheritHandle = 1
def debug(msg):
    """
    Print a debugging message to stdout and flush it immediately.
    """
    print(msg, flush=True)
class _Reaper(_pollingfile._PollableResource):
    """
    A pollable resource that watches a process handle and fires
    C{processEnded} on the owning process once the child has exited.
    """

    def __init__(self, proc):
        self.proc = proc

    def checkWork(self):
        handle = self.proc.hProcess
        # A zero-millisecond wait: just ask whether the process handle is
        # signalled (i.e. the child has exited) without blocking.
        if win32event.WaitForSingleObject(handle, 0) != win32event.WAIT_OBJECT_0:
            return 0
        exitCode = win32process.GetExitCodeProcess(handle)
        self.deactivate()
        self.proc.processEnded(exitCode)
        return 0
def _findShebang(filename):
"""
Look for a #! line, and return the value following the #! if one exists, or
None if this file is not a script.
I don't know if there are any conventions for quoting in Windows shebang
lines, so this doesn't support any; therefore, you may not pass any
arguments to scripts invoked as filters. That's probably wrong, so if
somebody knows more about the cultural expectations on Windows, please feel
free to fix.
This shebang line support was added in support of the CGI tests;
appropriately enough, I determined that shebang lines are culturally
accepted in the Windows world through this page::
http://www.cgi101.com/learn/connect/winxp.html
@param filename: str representing a filename
@return: a str representing another filename.
"""
with open(filename) as f:
if f.read(2) == "#!":
exe = f.readline(1024).strip("\n")
return exe
def _invalidWin32App(pywinerr):
"""
Determine if a pywintypes.error is telling us that the given process is
'not a valid win32 application', i.e. not a PE format executable.
@param pywinerr: a pywintypes.error instance raised by CreateProcess
@return: a boolean
"""
# Let's do this better in the future, but I have no idea what this error
# is; MSDN doesn't mention it, and there is no symbolic constant in
# win32process module that represents 193.
return pywinerr.args[0] == 193
@implementer(IProcessTransport, IConsumer, IProducer)
class Process(_pollingfile._PollingTimer, BaseProcess):
    """
    A process that integrates with the Twisted event loop.

    If your subprocess is a python program, you need to:

     - Run python.exe with the '-u' command line option - this turns on
       unbuffered I/O. Buffering stdout/err/in can cause problems, see e.g.
       http://support.microsoft.com/default.aspx?scid=kb;EN-US;q1903

     - If you don't want Windows messing with data passed over
       stdin/out/err, set the pipes to be in binary mode::

        import os, sys, msvcrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    """

    # Number of childConnectionLost notifications received so far (one each
    # for stdin, stdout and stderr); see connectionLostNotify and
    # maybeCallProcessEnded.
    closedNotifies = 0

    def __init__(self, reactor, protocol, command, args, environment, path):
        """
        Create a new child process.

        @param reactor: The reactor driving the pipe polling timer.
        @param protocol: The process protocol notified of child events.
        @param command: The executable to run; may also name a script with a
            shebang line (see the fallback logic below).
        @param args: The argument list, including the program name.
        @param environment: A mapping of extra environment variables layered
            on top of C{os.environ}, or a false value for none.
        @param path: The child's working directory, or a false value to
            inherit the parent's.
        """
        _pollingfile._PollingTimer.__init__(self, reactor)
        BaseProcess.__init__(self, protocol)

        # security attributes for pipes
        sAttrs = win32security.SECURITY_ATTRIBUTES()
        sAttrs.bInheritHandle = 1

        # create the pipes which will connect to the secondary process
        self.hStdoutR, hStdoutW = win32pipe.CreatePipe(sAttrs, 0)
        self.hStderrR, hStderrW = win32pipe.CreatePipe(sAttrs, 0)
        hStdinR, self.hStdinW = win32pipe.CreatePipe(sAttrs, 0)

        # Non-blocking writes, so feeding the child's stdin never stalls the
        # polling loop.
        win32pipe.SetNamedPipeHandleState(
            self.hStdinW, win32pipe.PIPE_NOWAIT, None, None
        )

        # set the info structure for the new process.
        StartupInfo = win32process.STARTUPINFO()
        StartupInfo.hStdOutput = hStdoutW
        StartupInfo.hStdError = hStderrW
        StartupInfo.hStdInput = hStdinR
        StartupInfo.dwFlags = win32process.STARTF_USESTDHANDLES

        # Create new handles whose inheritance property is false
        currentPid = win32api.GetCurrentProcess()

        tmp = win32api.DuplicateHandle(
            currentPid, self.hStdoutR, currentPid, 0, 0, win32con.DUPLICATE_SAME_ACCESS
        )
        win32file.CloseHandle(self.hStdoutR)
        self.hStdoutR = tmp

        tmp = win32api.DuplicateHandle(
            currentPid, self.hStderrR, currentPid, 0, 0, win32con.DUPLICATE_SAME_ACCESS
        )
        win32file.CloseHandle(self.hStderrR)
        self.hStderrR = tmp

        tmp = win32api.DuplicateHandle(
            currentPid, self.hStdinW, currentPid, 0, 0, win32con.DUPLICATE_SAME_ACCESS
        )
        win32file.CloseHandle(self.hStdinW)
        self.hStdinW = tmp

        # Add the specified environment to the current environment - this is
        # necessary because certain operations are only supported on Windows
        # if certain environment variables are present.
        env = os.environ.copy()
        env.update(environment or {})
        # CreateProcess wants text, not bytes, for every key and value.
        env = {os.fsdecode(key): os.fsdecode(value) for key, value in env.items()}

        # Make sure all the arguments are Unicode.
        args = [os.fsdecode(x) for x in args]

        cmdline = quoteArguments(args)

        # The command, too, needs to be Unicode, if it is a value.
        command = os.fsdecode(command) if command else command
        path = os.fsdecode(path) if path else path

        # TODO: error detection here. See #2787 and #4184.
        def doCreate():
            # Launch the child; CREATE_NO_WINDOW prevents a console window
            # from popping up for console subsystem executables.
            flags = win32con.CREATE_NO_WINDOW
            self.hProcess, self.hThread, self.pid, dwTid = win32process.CreateProcess(
                command, cmdline, None, None, 1, flags, env, path, StartupInfo
            )

        try:
            doCreate()
        except pywintypes.error as pwte:
            if not _invalidWin32App(pwte):
                # This behavior isn't _really_ documented, but let's make it
                # consistent with the behavior that is documented.
                raise OSError(pwte)
            else:
                # look for a shebang line. Insert the original 'command'
                # (actually a script) into the new arguments list.
                sheb = _findShebang(command)
                if sheb is None:
                    raise OSError(
                        "%r is neither a Windows executable, "
                        "nor a script with a shebang line" % command
                    )
                else:
                    args = list(args)
                    args.insert(0, command)
                    cmdline = quoteArguments(args)
                    origcmd = command
                    command = sheb
                    try:
                        # Let's try again.
                        doCreate()
                    except pywintypes.error as pwte2:
                        # d'oh, failed again!
                        if _invalidWin32App(pwte2):
                            raise OSError(
                                "%r has an invalid shebang line: "
                                "%r is not a valid executable" % (origcmd, sheb)
                            )
                        raise OSError(pwte2)

        # close handles which only the child will use
        win32file.CloseHandle(hStderrW)
        win32file.CloseHandle(hStdoutW)
        win32file.CloseHandle(hStdinR)

        # set up everything
        self.stdout = _pollingfile._PollableReadPipe(
            self.hStdoutR,
            lambda data: self.proto.childDataReceived(1, data),
            self.outConnectionLost,
        )
        self.stderr = _pollingfile._PollableReadPipe(
            self.hStderrR,
            lambda data: self.proto.childDataReceived(2, data),
            self.errConnectionLost,
        )
        self.stdin = _pollingfile._PollableWritePipe(
            self.hStdinW, self.inConnectionLost
        )
        for pipewatcher in self.stdout, self.stderr, self.stdin:
            self._addPollableResource(pipewatcher)

        # notify protocol
        self.proto.makeConnection(self)

        # Start watching the process handle for exit.
        self._addPollableResource(_Reaper(self))

    def signalProcess(self, signalID):
        """
        Send a signal to the child.

        Windows has no POSIX signals; C{"INT"}, C{"TERM"} and C{"KILL"} are
        all implemented by forcibly terminating the process, and any other
        signal name is silently ignored.

        @raise error.ProcessExitedAlready: If the process has already exited.
        """
        if self.pid is None:
            raise error.ProcessExitedAlready()
        if signalID in ("INT", "TERM", "KILL"):
            win32process.TerminateProcess(self.hProcess, 1)

    def _getReason(self, status):
        """
        Map an exit status to a failure: a zero status becomes
        L{error.ProcessDone}, anything else L{error.ProcessTerminated}.
        """
        if status == 0:
            return error.ProcessDone(status)
        return error.ProcessTerminated(status)

    def write(self, data):
        """
        Write data to the process' stdin.

        @type data: C{bytes}
        """
        self.stdin.write(data)

    def writeSequence(self, seq):
        """
        Write data to the process' stdin.

        @type seq: C{list} of C{bytes}
        """
        self.stdin.writeSequence(seq)

    def writeToChild(self, fd, data):
        """
        Similar to L{ITransport.write} but also allows the file descriptor in
        the child process which will receive the bytes to be specified.

        This implementation is limited to writing to the child's standard input.

        @param fd: The file descriptor to which to write. Only stdin (C{0}) is
            supported.
        @type fd: C{int}

        @param data: The bytes to write.
        @type data: C{bytes}

        @return: L{None}

        @raise KeyError: If C{fd} is anything other than the stdin file
            descriptor (C{0}).
        """
        if fd == 0:
            self.stdin.write(data)
        else:
            raise KeyError(fd)

    def closeChildFD(self, fd):
        """
        Close one of the child's standard file descriptors (0, 1 or 2).

        @raise NotImplementedError: For any other descriptor, since only the
            standard three exist on win32.
        """
        if fd == 0:
            self.closeStdin()
        elif fd == 1:
            self.closeStdout()
        elif fd == 2:
            self.closeStderr()
        else:
            raise NotImplementedError(
                "Only standard-IO file descriptors available on win32"
            )

    def closeStdin(self):
        """Close the process' stdin."""
        self.stdin.close()

    def closeStderr(self):
        """Close the process' stderr read pipe."""
        self.stderr.close()

    def closeStdout(self):
        """Close the process' stdout read pipe."""
        self.stdout.close()

    def loseConnection(self):
        """
        Close the process' stdout, in and err.
        """
        self.closeStdin()
        self.closeStdout()
        self.closeStderr()

    def outConnectionLost(self):
        """
        stdout pipe closed: notify the protocol and count the notification.
        """
        self.proto.childConnectionLost(1)
        self.connectionLostNotify()

    def errConnectionLost(self):
        """
        stderr pipe closed: notify the protocol and count the notification.
        """
        self.proto.childConnectionLost(2)
        self.connectionLostNotify()

    def inConnectionLost(self):
        """
        stdin pipe closed: notify the protocol and count the notification.
        """
        self.proto.childConnectionLost(0)
        self.connectionLostNotify()

    def connectionLostNotify(self):
        """
        Will be called 3 times, by stdout/err threads and process handle.
        """
        self.closedNotifies += 1
        self.maybeCallProcessEnded()

    def maybeCallProcessEnded(self):
        """
        Only finish up once all three pipes have reported closed AND the
        process itself has ended (lostProcess is set by
        BaseProcess.processEnded); then release the win32 handles and
        delegate the protocol notification to the base class.
        """
        if self.closedNotifies == 3 and self.lostProcess:
            win32file.CloseHandle(self.hProcess)
            win32file.CloseHandle(self.hThread)
            self.hProcess = None
            self.hThread = None
            BaseProcess.maybeCallProcessEnded(self)

    # IConsumer
    def registerProducer(self, producer, streaming):
        """
        Forward producer registration to the stdin pipe.
        """
        self.stdin.registerProducer(producer, streaming)

    def unregisterProducer(self):
        """
        Forward producer removal to the stdin pipe.
        """
        self.stdin.unregisterProducer()

    # IProducer
    def pauseProducing(self):
        """
        Pause the polling timer.
        """
        self._pause()

    def resumeProducing(self):
        """
        Resume the polling timer.
        """
        self._unpause()

    def stopProducing(self):
        """
        Stop producing by closing all of the child's pipes.
        """
        self.loseConnection()

    def getHost(self):
        # ITransport.getHost
        raise NotImplementedError("Unimplemented: Process.getHost")

    def getPeer(self):
        # ITransport.getPeer
        raise NotImplementedError("Unimplemented: Process.getPeer")

    def __repr__(self) -> str:
        """
        Return a string representation of the process.
        """
        return f"<{self.__class__.__name__} pid={self.pid}>"

View File

@@ -0,0 +1,369 @@
# -*- test-case-name: twisted.internet.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides base support for Twisted to interact with the glib/gtk
mainloops.
The classes in this module should not be used directly, but rather you should
import gireactor or gtk3reactor for GObject Introspection based applications,
or glib2reactor or gtk2reactor for applications using legacy static bindings.
"""
import sys
from typing import Any, Callable, Dict, Set
from zope.interface import implementer
from twisted.internet import posixbase
from twisted.internet.abstract import FileDescriptor
from twisted.internet.interfaces import IReactorFDSet, IReadDescriptor, IWriteDescriptor
from twisted.python import log
from twisted.python.monkey import MonkeyPatcher
from ._signals import _IWaker, _UnixWaker
def ensureNotImported(moduleNames, errorMessage, preventImports=()):
    """
    Check whether the given modules were imported, and if requested, ensure
    they will not be importable in the future.

    @param moduleNames: A list of module names we make sure aren't imported.
    @type moduleNames: iterable of C{str}

    @param preventImports: A list of module name whose future imports should
        be prevented.
    @type preventImports: iterable of C{str}

    @param errorMessage: Message to use when raising an C{ImportError}.
    @type errorMessage: C{str}

    @raise ImportError: with given error message if a given module name
        has already been imported.
    """
    # NOTE: the default is an immutable tuple rather than the classic
    # mutable-default-argument pitfall of a shared list.
    for name in moduleNames:
        if sys.modules.get(name) is not None:
            raise ImportError(errorMessage)

    # Disable module imports to avoid potential problems.  Setting an entry
    # to None makes any later "import name" raise ImportError.
    for name in preventImports:
        sys.modules[name] = None
class GlibWaker(_UnixWaker):
    """
    A waker that, in addition to draining the wakeup pipe, runs the
    reactor's scheduled events after waking up.
    """

    def __init__(self, reactor):
        _UnixWaker.__init__(self)
        self.reactor = reactor

    def doRead(self) -> None:
        # Drain the pipe first, then run whatever was queued from threads.
        _UnixWaker.doRead(self)
        self.reactor._simulate()
def _signalGlue():
    """
    Integrate glib's wakeup file descriptor usage and our own.

    Python supports only one wakeup file descriptor at a time and both Twisted
    and glib want to use it.

    This is a context manager that can be wrapped around the whole glib
    reactor main loop which makes our signal handling work with glib's signal
    handling.
    """
    from gi import _ossighelper as signalGlue

    # Telling gi's helper that the wakeup fd is already taken stops it from
    # fighting Twisted over signal.set_wakeup_fd.
    monkey = MonkeyPatcher()
    monkey.addPatch(signalGlue, "_wakeup_fd_is_active", True)
    return monkey
def _loopQuitter(
idleAdd: Callable[[Callable[[], None]], None], loopQuit: Callable[[], None]
) -> Callable[[], None]:
"""
Combine the C{glib.idle_add} and C{glib.MainLoop.quit} functions into a
function suitable for crashing the reactor.
"""
return lambda: idleAdd(loopQuit)
@implementer(IReactorFDSet)
class GlibReactorBase(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
    """
    Base class for GObject event loop reactors.

    Notification for I/O events (reads and writes on file descriptors) is done
    by the gobject-based event loop. File descriptors are registered with
    gobject with the appropriate flags for read/write/disconnect notification.

    Time-based events, the results of C{callLater} and C{callFromThread}, are
    handled differently. Rather than registering each event with gobject, a
    single gobject timeout is registered for the earliest scheduled event, the
    output of C{reactor.timeout()}. For example, if there are timeouts in 1, 2
    and 3.4 seconds, a single timeout is registered for 1 second in the
    future. When this timeout is hit, C{_simulate} is called, which calls the
    appropriate Twisted-level handlers, and a new timeout is added to gobject
    by the C{_reschedule} method.

    To handle C{callFromThread} events, we use a custom waker that calls
    C{_simulate} whenever it wakes up.

    @ivar _sources: A dictionary mapping L{FileDescriptor} instances to
        GSource handles.

    @ivar _reads: A set of L{FileDescriptor} instances currently monitored for
        reading.

    @ivar _writes: A set of L{FileDescriptor} instances currently monitored for
        writing.

    @ivar _simtag: A GSource handle for the next L{simulate} call.
    """

    # Install a waker that knows it needs to call C{_simulate} in order to run
    # callbacks queued from a thread:
    def _wakerFactory(self) -> _IWaker:
        """
        Build the reactor's waker; overridden to return a L{GlibWaker}.
        """
        return GlibWaker(self)

    def __init__(self, glib_module: Any, gtk_module: Any, useGtk: bool = False) -> None:
        """
        @param glib_module: The glib binding module providing the main loop,
            C{io_add_watch}, C{timeout_add}, etc.
        @param gtk_module: The gtk binding module.  NOTE(review): not
            referenced in this class body — presumably for subclasses;
            confirm before removing.
        @param useGtk: Apparently unused here as well — TODO confirm.
        """
        self._simtag = None
        self._reads: Set[IReadDescriptor] = set()
        self._writes: Set[IWriteDescriptor] = set()
        self._sources: Dict[FileDescriptor, int] = {}
        self._glib = glib_module
        # Condition bitmasks consumed by posixbase._PollLikeMixin's
        # _doReadOrWrite dispatch.
        self._POLL_DISCONNECTED = (
            glib_module.IOCondition.HUP
            | glib_module.IOCondition.ERR
            | glib_module.IOCondition.NVAL
        )
        self._POLL_IN = glib_module.IOCondition.IN
        self._POLL_OUT = glib_module.IOCondition.OUT

        # glib's iochannel sources won't tell us about any events that we haven't
        # asked for, even if those events aren't sensible inputs to the poll()
        # call.
        self.INFLAGS = self._POLL_IN | self._POLL_DISCONNECTED
        self.OUTFLAGS = self._POLL_OUT | self._POLL_DISCONNECTED

        super().__init__()

        # Cache bound glib entry points used on every iteration.
        self._source_remove = self._glib.source_remove
        self._timeout_add = self._glib.timeout_add

        self.context = self._glib.main_context_default()
        self._pending = self.context.pending
        self._iteration = self.context.iteration
        self.loop = self._glib.MainLoop()
        self._crash = _loopQuitter(self._glib.idle_add, self.loop.quit)
        self._run = self.loop.run

    def _reallyStartRunning(self):
        """
        Make sure the reactor's signal handlers are installed despite any
        outside interference.
        """
        # First, install SIGINT and friends:
        super()._reallyStartRunning()
        # Next, since certain versions of gtk will clobber our signal handler,
        # set all signal handlers again after the event loop has started to
        # ensure they're *really* set.
        #
        # We don't actually know which versions of gtk do this so this might
        # be obsolete. If so, that would be great and this whole method can
        # go away. Someone needs to find out, though.
        #
        # https://github.com/twisted/twisted/issues/11762
        def reinitSignals():
            self._signals.uninstall()
            self._signals.install()

        self.callLater(0, reinitSignals)

    # The input_add function in pygtk1 checks for objects with a
    # 'fileno' method and, if present, uses the result of that method
    # as the input source. The pygtk2 input_add does not do this. The
    # function below replicates the pygtk1 functionality.
    # In addition, pygtk maps gtk.input_add to _gobject.io_add_watch, and
    # g_io_add_watch() takes different condition bitfields than
    # gtk_input_add(). We use g_io_add_watch() here in case pygtk fixes this
    # bug.
    def input_add(self, source, condition, callback):
        """
        Register C{callback} with glib for I/O events on C{source},
        accepting either a raw fd or an object with a C{fileno} method.
        """
        if hasattr(source, "fileno"):
            # handle python objects
            def wrapper(ignored, condition):
                return callback(source, condition)

            fileno = source.fileno()
        else:
            fileno = source
            wrapper = callback
        return self._glib.io_add_watch(
            fileno,
            self._glib.PRIORITY_DEFAULT_IDLE,
            condition,
            wrapper,
        )

    def _ioEventCallback(self, source, condition):
        """
        Called by event loop when an I/O event occurs.
        """
        log.callWithLogger(source, self._doReadOrWrite, source, source, condition)
        return True  # True = don't auto-remove the source

    def _add(self, source, primary, other, primaryFlag, otherFlag):
        """
        Add the given L{FileDescriptor} for monitoring either for reading or
        writing. If the file is already monitored for the other operation, we
        delete the previous registration and re-register it for both reading
        and writing.
        """
        if source in primary:
            return
        flags = primaryFlag
        if source in other:
            # Already watched for the other direction; replace the single
            # GSource with one watching both.
            self._source_remove(self._sources[source])
            flags |= otherFlag
        self._sources[source] = self.input_add(source, flags, self._ioEventCallback)
        primary.add(source)

    def addReader(self, reader):
        """
        Add a L{FileDescriptor} for monitoring of data available to read.
        """
        self._add(reader, self._reads, self._writes, self.INFLAGS, self.OUTFLAGS)

    def addWriter(self, writer):
        """
        Add a L{FileDescriptor} for monitoring ability to write data.
        """
        self._add(writer, self._writes, self._reads, self.OUTFLAGS, self.INFLAGS)

    def getReaders(self):
        """
        Retrieve the list of current L{FileDescriptor} monitored for reading.
        """
        return list(self._reads)

    def getWriters(self):
        """
        Retrieve the list of current L{FileDescriptor} monitored for writing.
        """
        return list(self._writes)

    def removeAll(self):
        """
        Remove monitoring for all registered L{FileDescriptor}s.
        """
        return self._removeAll(self._reads, self._writes)

    def _remove(self, source, primary, other, flags):
        """
        Remove monitoring the given L{FileDescriptor} for either reading or
        writing. If it's still monitored for the other operation, we
        re-register the L{FileDescriptor} for only that operation.
        """
        if source not in primary:
            return
        self._source_remove(self._sources[source])
        primary.remove(source)
        if source in other:
            # Still watched for the other direction: re-register with just
            # that direction's flags.
            self._sources[source] = self.input_add(source, flags, self._ioEventCallback)
        else:
            self._sources.pop(source)

    def removeReader(self, reader):
        """
        Stop monitoring the given L{FileDescriptor} for reading.
        """
        self._remove(reader, self._reads, self._writes, self.OUTFLAGS)

    def removeWriter(self, writer):
        """
        Stop monitoring the given L{FileDescriptor} for writing.
        """
        self._remove(writer, self._writes, self._reads, self.INFLAGS)

    def iterate(self, delay=0):
        """
        One iteration of the event loop, for trial's use.

        This is not used for actual reactor runs.
        """
        # NOTE: the delay parameter is accepted for interface compatibility
        # but not used here.
        self.runUntilCurrent()
        while self._pending():
            self._iteration(0)

    def crash(self):
        """
        Crash the reactor.
        """
        posixbase.PosixReactorBase.crash(self)
        self._crash()

    def stop(self):
        """
        Stop the reactor.
        """
        posixbase.PosixReactorBase.stop(self)
        # The base implementation only sets a flag, to ensure shutting down is
        # not reentrant. Unfortunately, this flag is not meaningful to the
        # gobject event loop. We therefore call wakeUp() to ensure the event
        # loop will call back into Twisted once this iteration is done. This
        # will result in self.runUntilCurrent() being called, where the stop
        # flag will trigger the actual shutdown process, eventually calling
        # crash() which will do the actual gobject event loop shutdown.
        self.wakeUp()

    def run(self, installSignalHandlers=True):
        """
        Run the reactor.
        """
        with _signalGlue():
            self.callWhenRunning(self._reschedule)
            self.startRunning(installSignalHandlers=installSignalHandlers)
            if self._started:
                self._run()

    def callLater(self, *args, **kwargs):
        """
        Schedule a C{DelayedCall}.
        """
        result = posixbase.PosixReactorBase.callLater(self, *args, **kwargs)
        # Make sure we'll get woken up at correct time to handle this new
        # scheduled call:
        self._reschedule()
        return result

    def _reschedule(self):
        """
        Schedule a glib timeout for C{_simulate}.
        """
        if self._simtag is not None:
            # Drop the previous single pending timeout before adding a new
            # one; only one is ever registered at a time.
            self._source_remove(self._simtag)
            self._simtag = None
        timeout = self.timeout()
        if timeout is not None:
            self._simtag = self._timeout_add(
                int(timeout * 1000),
                self._simulate,
                priority=self._glib.PRIORITY_DEFAULT_IDLE,
            )

    def _simulate(self):
        """
        Run timers, and then reschedule glib timeout for next scheduled event.
        """
        self.runUntilCurrent()
        self._reschedule()

View File

@@ -0,0 +1,51 @@
# -*- test-case-name: twisted.test.test_sslverify -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Shared interface to IDNA encoding and decoding, using the C{idna} PyPI package
if available, otherwise the stdlib implementation.
"""
def _idnaBytes(text: str) -> bytes:
"""
Convert some text typed by a human into some ASCII bytes.
This is provided to allow us to use the U{partially-broken IDNA
implementation in the standard library <http://bugs.python.org/issue17305>}
if the more-correct U{idna <https://pypi.python.org/pypi/idna>} package is
not available; C{service_identity} is somewhat stricter about this.
@param text: A domain name, hopefully.
@type text: L{unicode}
@return: The domain name's IDNA representation, encoded as bytes.
@rtype: L{bytes}
"""
try:
import idna
except ImportError:
return text.encode("idna")
else:
return idna.encode(text)
def _idnaText(octets: bytes) -> str:
"""
Convert some IDNA-encoded octets into some human-readable text.
Currently only used by the tests.
@param octets: Some bytes representing a hostname.
@type octets: L{bytes}
@return: A human-readable domain name.
@rtype: L{unicode}
"""
try:
import idna
except ImportError:
return octets.decode("idna")
else:
return idna.decode(octets)

View File

@@ -0,0 +1,256 @@
# -*- test-case-name: twisted.test.test_ssl -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module implements memory BIO based TLS support. It is the preferred
implementation and will be used whenever pyOpenSSL 0.10 or newer is installed
(whenever L{twisted.protocols.tls} is importable).
@since: 11.1
"""
from zope.interface import directlyProvides
from twisted.internet.abstract import FileDescriptor
from twisted.internet.interfaces import ISSLTransport
from twisted.protocols.tls import TLSMemoryBIOFactory
class _BypassTLS:
"""
L{_BypassTLS} is used as the transport object for the TLS protocol object
used to implement C{startTLS}. Its methods skip any TLS logic which
C{startTLS} enables.
@ivar _base: A transport class L{_BypassTLS} has been mixed in with to which
methods will be forwarded. This class is only responsible for sending
bytes over the connection, not doing TLS.
@ivar _connection: A L{Connection} which TLS has been started on which will
be proxied to by this object. Any method which has its behavior
altered after C{startTLS} will be skipped in favor of the base class's
implementation. This allows the TLS protocol object to have direct
access to the transport, necessary to actually implement TLS.
"""
def __init__(self, base, connection):
self._base = base
self._connection = connection
def __getattr__(self, name):
"""
Forward any extra attribute access to the original transport object.
For example, this exposes C{getHost}, the behavior of which does not
change after TLS is enabled.
"""
return getattr(self._connection, name)
def write(self, data):
"""
Write some bytes directly to the connection.
"""
return self._base.write(self._connection, data)
def writeSequence(self, iovec):
"""
Write a some bytes directly to the connection.
"""
return self._base.writeSequence(self._connection, iovec)
def loseConnection(self, *args, **kwargs):
"""
Close the underlying connection.
"""
return self._base.loseConnection(self._connection, *args, **kwargs)
def registerProducer(self, producer, streaming):
"""
Register a producer with the underlying connection.
"""
return self._base.registerProducer(self._connection, producer, streaming)
def unregisterProducer(self):
"""
Unregister a producer with the underlying connection.
"""
return self._base.unregisterProducer(self._connection)
def startTLS(transport, contextFactory, normal, bypass):
    """
    Add a layer of SSL to a transport.

    @param transport: The transport which will be modified. This can either by
        a L{FileDescriptor<twisted.internet.abstract.FileDescriptor>} or a
        L{FileHandle<twisted.internet.iocpreactor.abstract.FileHandle>}. The
        actual requirements of this instance are that it have:

          - a C{_tlsClientDefault} attribute indicating whether the transport is
            a client (C{True}) or a server (C{False})

          - a settable C{TLS} attribute which can be used to mark the fact
            that SSL has been started

          - settable C{getHandle} and C{getPeerCertificate} attributes so
            these L{ISSLTransport} methods can be added to it

          - a C{protocol} attribute referring to the L{IProtocol} currently
            connected to the transport, which can also be set to a new
            L{IProtocol} for the transport to deliver data to

    @param contextFactory: An SSL context factory defining SSL parameters for
        the new SSL layer.
    @type contextFactory: L{twisted.internet.interfaces.IOpenSSLContextFactory}

    @param normal: A flag indicating whether SSL will go in the same direction
        as the underlying transport goes. That is, if the SSL client will be
        the underlying client and the SSL server will be the underlying server.
        C{True} means it is the same, C{False} means they are switched.
    @type normal: L{bool}

    @param bypass: A transport base class to call methods on to bypass the new
        SSL layer (so that the SSL layer itself can send its bytes).
    @type bypass: L{type}
    """
    # Figure out which direction the SSL goes in. If normal is True,
    # we'll go in the direction indicated by the subclass. Otherwise,
    # we'll go the other way (client = not normal ^ _tlsClientDefault,
    # in other words).
    if normal:
        client = transport._tlsClientDefault
    else:
        client = not transport._tlsClientDefault

    # If we have a producer, unregister it, and then re-register it below once
    # we've switched to TLS mode, so it gets hooked up correctly:
    producer, streaming = None, None
    if transport.producer is not None:
        producer, streaming = transport.producer, transport.streamingProducer
        transport.unregisterProducer()

    tlsFactory = TLSMemoryBIOFactory(contextFactory, client, None)
    tlsProtocol = tlsFactory.protocol(tlsFactory, transport.protocol, False)

    # Hook up the new TLS protocol to the transport: the application protocol
    # now talks to tlsProtocol, which wraps the original protocol.
    transport.protocol = tlsProtocol

    transport.getHandle = tlsProtocol.getHandle
    transport.getPeerCertificate = tlsProtocol.getPeerCertificate

    # Mark the transport as secure.
    directlyProvides(transport, ISSLTransport)

    # Remember we did this so that write and writeSequence can send the
    # data to the right place.
    transport.TLS = True

    # Hook it up: the TLS protocol's transport bypasses the TLS layer so its
    # own (encrypted) bytes reach the wire directly.
    transport.protocol.makeConnection(_BypassTLS(bypass, transport))

    # Restore producer if necessary:
    if producer:
        transport.registerProducer(producer, streaming)
class ConnectionMixin:
    """
    A mixin for L{twisted.internet.abstract.FileDescriptor} which adds an
    L{ITLSTransport} implementation.

    @ivar TLS: A flag indicating whether TLS is currently in use on this
        transport. This is not a good way for applications to check for TLS,
        instead use L{twisted.internet.interfaces.ISSLTransport}.
    """

    TLS = False

    def startTLS(self, ctx, normal=True):
        """
        @see: L{ITLSTransport.startTLS}
        """
        startTLS(self, ctx, normal, FileDescriptor)

    def write(self, bytes):
        """
        Write some bytes to this connection, passing them through a TLS layer if
        necessary, or discarding them if the connection has already been lost.
        """
        if not self.TLS:
            FileDescriptor.write(self, bytes)
        elif self.connected:
            self.protocol.write(bytes)

    def writeSequence(self, iovec):
        """
        Write some bytes to this connection, scatter/gather-style, passing them
        through a TLS layer if necessary, or discarding them if the connection
        has already been lost.
        """
        if not self.TLS:
            FileDescriptor.writeSequence(self, iovec)
        elif self.connected:
            self.protocol.writeSequence(iovec)

    def loseConnection(self):
        """
        Close this connection after writing all pending data.

        If TLS has been negotiated, perform a TLS shutdown.
        """
        if not self.TLS:
            FileDescriptor.loseConnection(self)
        elif self.connected and not self.disconnecting:
            self.protocol.loseConnection()

    def registerProducer(self, producer, streaming):
        """
        Register a producer.

        If TLS is enabled, the TLS connection handles this.
        """
        if not self.TLS:
            FileDescriptor.registerProducer(self, producer, streaming)
        else:
            # Registering a producer before we're connected shouldn't be a
            # problem. If we end up with a write(), that's already handled in
            # the write() code above, and there are no other potential
            # side-effects.
            self.protocol.registerProducer(producer, streaming)

    def unregisterProducer(self):
        """
        Unregister a producer.

        If TLS is enabled, the TLS connection handles this.
        """
        if not self.TLS:
            FileDescriptor.unregisterProducer(self)
        else:
            self.protocol.unregisterProducer()
class ClientMixin:
    """
    A mixin for L{twisted.internet.tcp.Client} which simply marks the
    connection as the client side for the purposes of the default TLS
    handshake.

    @ivar _tlsClientDefault: Always C{True}; when TLS is negotiated on this
        connection it will, by default, take the client role in the
        handshake.
    """

    _tlsClientDefault = True
class ServerMixin:
    """
    A mixin for L{twisted.internet.tcp.Server} which simply marks the
    connection as the server side for the purposes of the default TLS
    handshake.

    @ivar _tlsClientDefault: Always C{False}; when TLS is negotiated on this
        connection it will, by default, take the server role in the
        handshake.
    """

    _tlsClientDefault = False

View File

@@ -0,0 +1,291 @@
# -*- test-case-name: twisted.internet.test.test_pollingfile -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implements a simple polling interface for file descriptors that don't work with
select() - this is pretty much only useful on Windows.
"""
from zope.interface import implementer
from twisted.internet.interfaces import IConsumer, IPushProducer
# Bounds (in seconds) for the adaptive polling interval used by
# _PollingTimer: the timer speeds up toward MIN_TIMEOUT while resources
# report work, and backs off toward MAX_TIMEOUT while they are idle.
MIN_TIMEOUT = 0.000000001
MAX_TIMEOUT = 0.1
class _PollableResource:
active = True
def activate(self):
self.active = True
def deactivate(self):
self.active = False
class _PollingTimer:
    # Everything is private here because it is really an implementation
    # detail.  This object owns a single delayed call which periodically
    # gives every active pollable resource a chance to do work, adapting
    # the polling interval to the amount of work observed.

    def __init__(self, reactor):
        self.reactor = reactor
        self._resources = []
        self._pollTimer = None
        self._currentTimeout = MAX_TIMEOUT
        self._paused = False

    def _addPollableResource(self, res):
        self._resources.append(res)
        self._checkPollingState()

    def _checkPollingState(self):
        # Poll only while at least one resource is active.
        if any(resource.active for resource in self._resources):
            self._startPolling()
        else:
            self._stopPolling()

    def _startPolling(self):
        if self._pollTimer is None:
            self._pollTimer = self._reschedule()

    def _stopPolling(self):
        if self._pollTimer is not None:
            self._pollTimer.cancel()
            self._pollTimer = None

    def _pause(self):
        self._paused = True

    def _unpause(self):
        self._paused = False
        self._checkPollingState()

    def _reschedule(self):
        if self._paused:
            return None
        return self.reactor.callLater(self._currentTimeout, self._pollEvent)

    def _pollEvent(self):
        totalWork = 0.0
        survivors = []
        for resource in self._resources:
            if not resource.active:
                continue
            totalWork += resource.checkWork()
            # checkWork() may deactivate the resource; only resources still
            # active afterwards keep the timer alive.
            if resource.active:
                survivors.append(resource)
        if totalWork:
            # Work was done: poll faster, bounded below by MIN_TIMEOUT.
            nextTimeout = max(self._currentTimeout / (totalWork + 1.0), MIN_TIMEOUT)
        else:
            # Idle: back off exponentially, bounded above by MAX_TIMEOUT.
            nextTimeout = min(self._currentTimeout * 2.0, MAX_TIMEOUT)
        self._currentTimeout = nextTimeout
        if survivors:
            self._pollTimer = self._reschedule()
# If we ever (let's hope not) need the above functionality on UNIX, this could
# be factored into a different module.
import pywintypes
import win32api
import win32file
import win32pipe
@implementer(IPushProducer)
class _PollableReadPipe(_PollableResource):
    """
    A pollable read end of a win32 pipe which delivers whatever bytes are
    available to a callback; acts as an L{IPushProducer} so reading can be
    paused and resumed.
    """

    def __init__(self, pipe, receivedCallback, lostCallback):
        """
        @param pipe: a win32 pipe handle to read from.
        @param receivedCallback: one-argument callable invoked with each
            chunk of bytes read from the pipe.
        @param lostCallback: zero-argument callable invoked once when the
            pipe has been closed or reading fails.
        """
        # security attributes for pipes
        self.pipe = pipe
        self.receivedCallback = receivedCallback
        self.lostCallback = lostCallback

    def checkWork(self):
        finished = 0
        fullDataRead = []
        while 1:
            try:
                buffer, bytesToRead, result = win32pipe.PeekNamedPipe(self.pipe, 1)
                # finished = (result == -1)
                if not bytesToRead:
                    # Nothing more is available right now; don't block.
                    break
                hr, data = win32file.ReadFile(self.pipe, bytesToRead, None)
                fullDataRead.append(data)
            except win32api.error:
                # The pipe broke or was closed; treat as end-of-stream.
                finished = 1
                break
        dataBuf = b"".join(fullDataRead)
        if dataBuf:
            self.receivedCallback(dataBuf)
        if finished:
            self.cleanup()
        # The byte count feeds _PollingTimer's adaptive interval.
        return len(dataBuf)

    def cleanup(self):
        # Stop being polled and notify the owner exactly once.
        self.deactivate()
        self.lostCallback()

    def close(self):
        try:
            win32api.CloseHandle(self.pipe)
        except pywintypes.error:
            # You can't close std handles...?
            pass

    def stopProducing(self):
        self.close()

    def pauseProducing(self):
        self.deactivate()

    def resumeProducing(self):
        self.activate()
# Outgoing-buffer size (bytes) above which _PollableWritePipe asks its
# producer to pause.
FULL_BUFFER_SIZE = 64 * 1024
@implementer(IConsumer)
class _PollableWritePipe(_PollableResource):
    """
    A pollable, non-blocking write end of a win32 pipe which buffers output
    and implements L{IConsumer} so a producer can be throttled against it.
    """

    def __init__(self, writePipe, lostCallback):
        """
        @param writePipe: a win32 pipe handle to write to.
        @param lostCallback: zero-argument callable invoked once when the
            pipe has been closed or writing fails.
        """
        self.disconnecting = False
        self.producer = None
        # True while we have asked our producer to pause; this flag belongs
        # to this consumer, not to the producer object.
        self.producerPaused = False
        self.streamingProducer = 0
        self.outQueue = []
        self.writePipe = writePipe
        self.lostCallback = lostCallback
        try:
            win32pipe.SetNamedPipeHandleState(
                writePipe, win32pipe.PIPE_NOWAIT, None, None
            )
        except pywintypes.error:
            # Maybe it's an invalid handle.  Who knows.
            pass

    def close(self):
        # Not immediate: checkWork() finishes flushing the queue and then
        # calls writeConnectionLost().
        self.disconnecting = True

    def bufferFull(self):
        if self.producer is not None:
            self.producerPaused = True
            self.producer.pauseProducing()

    def bufferEmpty(self):
        if self.producer is not None and (
            (not self.streamingProducer) or self.producerPaused
        ):
            # Clear the paused flag on *this* consumer.  Previously this set
            # ``self.producer.producerPaused``, which stamped the flag onto
            # the producer object and left this consumer permanently marked
            # as paused after the first bufferFull()/bufferEmpty() cycle.
            self.producerPaused = False
            self.producer.resumeProducing()
            return True
        return False

    # almost-but-not-quite-exact copy-paste from abstract.FileDescriptor... ugh

    def registerProducer(self, producer, streaming):
        """Register to receive data from a producer.

        This sets this selectable to be a consumer for a producer.  When this
        selectable runs out of data on a write() call, it will ask the producer
        to resumeProducing(). A producer should implement the IProducer
        interface.

        FileDescriptor provides some infrastructure for producer methods.
        """
        if self.producer is not None:
            raise RuntimeError(
                "Cannot register producer %s, because producer %s was never "
                "unregistered." % (producer, self.producer)
            )
        if not self.active:
            # Already shut down; a new producer has nothing to feed.
            producer.stopProducing()
        else:
            self.producer = producer
            self.streamingProducer = streaming
            if not streaming:
                # Pull producers must be kicked to produce their first chunk.
                producer.resumeProducing()

    def unregisterProducer(self):
        """Stop consuming data from a producer, without disconnecting."""
        self.producer = None

    def writeConnectionLost(self):
        self.deactivate()
        try:
            win32api.CloseHandle(self.writePipe)
        except pywintypes.error:
            # OMG what
            pass
        self.lostCallback()

    def writeSequence(self, seq):
        """
        Append a C{list} or C{tuple} of bytes to the output buffer.

        @param seq: C{list} or C{tuple} of C{str} instances to be appended to
            the output buffer.

        @raise TypeError: If C{seq} contains C{unicode}.
        """
        if str in map(type, seq):
            raise TypeError("Unicode not allowed in output buffer.")
        self.outQueue.extend(seq)

    def write(self, data):
        """
        Append some bytes to the output buffer.

        @param data: C{str} to be appended to the output buffer.
        @type data: C{str}.

        @raise TypeError: If C{data} is C{unicode} instead of C{str}.
        """
        if isinstance(data, str):
            raise TypeError("Unicode not allowed in output buffer.")
        if self.disconnecting:
            # Discard writes after close() was requested.
            return
        self.outQueue.append(data)
        if sum(map(len, self.outQueue)) > FULL_BUFFER_SIZE:
            self.bufferFull()

    def checkWork(self):
        numBytesWritten = 0
        if not self.outQueue:
            if self.disconnecting:
                self.writeConnectionLost()
                return 0
            try:
                # Zero-byte probe: detects a broken pipe without consuming
                # queue data.
                win32file.WriteFile(self.writePipe, b"", None)
            except pywintypes.error:
                self.writeConnectionLost()
                return numBytesWritten
        while self.outQueue:
            data = self.outQueue.pop(0)
            errCode = 0
            try:
                errCode, nBytesWritten = win32file.WriteFile(self.writePipe, data, None)
            except win32api.error:
                self.writeConnectionLost()
                break
            else:
                # assert not errCode, "wtf an error code???"
                numBytesWritten += nBytesWritten
                if len(data) > nBytesWritten:
                    # Partial write: requeue the remainder and try again on
                    # the next poll.
                    self.outQueue.insert(0, data[nBytesWritten:])
                    break
        else:
            resumed = self.bufferEmpty()
            if not resumed and self.disconnecting:
                self.writeConnectionLost()
        return numBytesWritten

View File

@@ -0,0 +1,80 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial Port Protocol
"""
# dependent on pyserial ( http://pyserial.sf.net/ )
# only tested w/ 1.18 (5 Dec 2002)
from serial import EIGHTBITS, PARITY_NONE, STOPBITS_ONE
from twisted.internet import abstract, fdesc
from twisted.internet.serialport import BaseSerialPort
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
    """
    A select()able serial device, acting as a transport.
    """

    # The transport is considered connected as soon as it is constructed.
    connected = 1

    def __init__(
        self,
        protocol,
        deviceNameOrPortNumber,
        reactor,
        baudrate=9600,
        bytesize=EIGHTBITS,
        parity=PARITY_NONE,
        stopbits=STOPBITS_ONE,
        timeout=0,
        xonxoff=0,
        rtscts=0,
    ):
        """
        @param protocol: the protocol which will receive data from the
            serial device.
        @param deviceNameOrPortNumber: the device name (or port number)
            passed through to pyserial.
        @param reactor: the reactor with which this descriptor registers
            for read events.

        The remaining parameters are forwarded to pyserial's constructor;
        C{timeout=0} keeps reads non-blocking.
        """
        abstract.FileDescriptor.__init__(self, reactor)
        self._serial = self._serialFactory(
            deviceNameOrPortNumber,
            baudrate=baudrate,
            bytesize=bytesize,
            parity=parity,
            stopbits=stopbits,
            timeout=timeout,
            xonxoff=xonxoff,
            rtscts=rtscts,
        )
        self.reactor = reactor
        # Discard any stale data pending in either direction before the
        # protocol attaches.
        self.flushInput()
        self.flushOutput()
        self.protocol = protocol
        self.protocol.makeConnection(self)
        self.startReading()

    def fileno(self):
        # Expose the underlying serial file descriptor for select().
        return self._serial.fd

    def writeSomeData(self, data):
        """
        Write some data to the serial device.
        """
        return fdesc.writeToFD(self.fileno(), data)

    def doRead(self):
        """
        Some data's readable from serial device.
        """
        return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)

    def connectionLost(self, reason):
        """
        Called when the serial port disconnects.

        Will call C{connectionLost} on the protocol that is handling the
        serial data.
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        self._serial.close()
        self.protocol.connectionLost(reason)

View File

@@ -0,0 +1,182 @@
# -*- test-case-name: twisted.test.test_stdio -*-
"""Standard input/out/err support.
Future Plans::
support for stderr, perhaps
Rewrite to use the reactor instead of an ad-hoc mechanism for connecting
protocols to transport.
Maintainer: James Y Knight
"""
from __future__ import annotations
from zope.interface import implementer
from twisted.internet import interfaces, process
from twisted.internet.interfaces import IProtocol, IReactorFDSet
from twisted.logger import Logger
from twisted.python.failure import Failure
_log = Logger()
@implementer(interfaces.IAddress)
class PipeAddress:
    """
    A null L{IAddress} for stdio pipe transports; a pipe has no meaningful
    peer or host address.
    """

    pass
@implementer(
    interfaces.ITransport,
    interfaces.IProducer,
    interfaces.IConsumer,
    interfaces.IHalfCloseableDescriptor,
)
class StandardIO:
    """
    A transport connecting a protocol to this process's standard input and
    output, built from a L{process.ProcessReader} for stdin and a
    L{process.ProcessWriter} for stdout.
    """

    # Reader/writer halves; None once the corresponding side has shut down.
    _reader = None
    _writer = None
    # True once connectionLost() has run.
    disconnected = False
    # True once loseConnection() has been requested.
    disconnecting = False

    def __init__(
        self,
        proto: IProtocol,
        stdin: int = 0,
        stdout: int = 1,
        reactor: IReactorFDSet | None = None,
    ):
        """
        @param proto: the protocol to connect to stdio.
        @param stdin: file descriptor to read from (defaults to 0).
        @param stdout: file descriptor to write to (defaults to 1).
        @param reactor: the reactor to register with; the global reactor is
            used when not given.
        """
        if reactor is None:
            from twisted.internet import reactor  # type:ignore[assignment]
        self.protocol: IProtocol = proto
        self._writer = process.ProcessWriter(reactor, self, "write", stdout)
        self._reader = process.ProcessReader(reactor, self, "read", stdin)
        self._reader.startReading()
        self.protocol.makeConnection(self)

    # ITransport
    # XXX Actually, see #3597.
    def loseWriteConnection(self):
        if self._writer is not None:
            self._writer.loseConnection()

    def write(self, data):
        if self._writer is not None:
            self._writer.write(data)

    def writeSequence(self, data):
        if self._writer is not None:
            self._writer.writeSequence(data)

    def loseConnection(self):
        self.disconnecting = True
        if self._writer is not None:
            self._writer.loseConnection()
        if self._reader is not None:
            # Don't loseConnection, because we don't want to SIGPIPE it.
            self._reader.stopReading()

    def getPeer(self):
        return PipeAddress()

    def getHost(self):
        return PipeAddress()

    # Callbacks from process.ProcessReader/ProcessWriter

    def childDataReceived(self, fd: str, data: bytes) -> None:
        self.protocol.dataReceived(data)

    def childConnectionLost(self, fd: str, reason: Failure) -> None:
        # Ignore late notifications after full disconnection.
        if self.disconnected:
            return
        if fd == "read":
            self._readConnectionLost(reason)
        else:
            self._writeConnectionLost(reason)

    def connectionLost(self, reason):
        # Runs at most once: setting disconnected suppresses the
        # childConnectionLost callbacks above.
        self.disconnected = True
        # Make sure to cleanup the other half
        _reader = self._reader
        _writer = self._writer
        protocol = self.protocol
        # Drop references first so re-entrant calls see a dead transport.
        self._reader = self._writer = None
        self.protocol = None  # type:ignore[assignment]
        if _writer is not None and not _writer.disconnected:
            _writer.connectionLost(reason)
        if _reader is not None and not _reader.disconnected:
            _reader.connectionLost(reason)
        with _log.failuresHandled("while calling stdio connectionLost:"):
            protocol.connectionLost(reason)

    def _writeConnectionLost(self, reason: Failure) -> None:
        self._writer = None
        if self.disconnecting:
            self.connectionLost(reason)
            return
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            with _log.failuresHandled(
                "while calling stdio writeConnectionLost:"
            ) as wcl:
                p.writeConnectionLost()
            if wcl.failed:
                self.connectionLost(wcl.failure)

    def _readConnectionLost(self, reason: Failure) -> None:
        self._reader = None
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            with _log.failuresHandled("while calling stdio readConnectionLost:") as rcl:
                p.readConnectionLost()
            if rcl.failed:
                self.connectionLost(rcl.failure)
        else:
            # A protocol that can't survive a half-close loses the whole
            # connection when reading ends.
            self.connectionLost(reason)

    # IConsumer

    def registerProducer(self, producer, streaming):
        if self._writer is None:
            producer.stopProducing()
        else:
            self._writer.registerProducer(producer, streaming)

    def unregisterProducer(self):
        if self._writer is not None:
            self._writer.unregisterProducer()

    # IProducer

    def stopProducing(self):
        self.loseConnection()

    def pauseProducing(self):
        if self._reader is not None:
            self._reader.pauseProducing()

    def resumeProducing(self):
        if self._reader is not None:
            self._reader.resumeProducing()

    def stopReading(self):
        """Compatibility only, don't use. Call pauseProducing."""
        self.pauseProducing()

    def startReading(self):
        """Compatibility only, don't use. Call resumeProducing."""
        self.resumeProducing()

    def readConnectionLost(self, reason):
        # L{IHalfCloseableDescriptor.readConnectionLost}
        raise NotImplementedError()

    def writeConnectionLost(self, reason):
        # L{IHalfCloseableDescriptor.writeConnectionLost}
        raise NotImplementedError()

View File

@@ -0,0 +1,118 @@
# -*- test-case-name: twisted.protocols.test.test_tls,twisted.web.test.test_http2 -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Helpers for working with producers.
"""
from typing import List
from zope.interface import implementer
from twisted.internet.interfaces import IPushProducer
from twisted.internet.task import cooperate
from twisted.logger import Logger
_log = Logger()
# This module exports nothing public, it's for internal Twisted use only.
__all__: List[str] = []
@implementer(IPushProducer)
class _PullToPush:
    """
    An adapter that converts a non-streaming to a streaming producer.

    Because of limitations of the producer API, this adapter requires the
    cooperation of the consumer. When the consumer's C{registerProducer} is
    called with a non-streaming producer, it must wrap it with L{_PullToPush}
    and then call C{startStreaming} on the resulting object. When the
    consumer's C{unregisterProducer} is called, it must call
    C{stopStreaming} on the L{_PullToPush} instance.

    If the underlying producer throws an exception from C{resumeProducing},
    the producer will be unregistered from the consumer.

    @ivar _producer: the underlying non-streaming producer.

    @ivar _consumer: the consumer with which the underlying producer was
        registered.

    @ivar _finished: C{bool} indicating whether the producer has finished.

    @ivar _coopTask: the result of calling L{cooperate}, the task driving the
        streaming producer.
    """

    _finished = False

    def __init__(self, pullProducer, consumer):
        self._producer = pullProducer
        self._consumer = consumer

    def _pull(self):
        """
        A generator that calls C{resumeProducing} on the underlying producer
        forever.

        If C{resumeProducing} throws an exception, the producer is
        unregistered, which should result in streaming stopping.
        """
        while True:
            with _log.failuresHandled(
                "while calling resumeProducing on {producer}", producer=self._producer
            ) as op:
                self._producer.resumeProducing()
            if op.failed:
                with _log.failuresHandled(
                    "while calling unregisterProducer on {consumer}",
                    consumer=self._consumer,
                ) as handlingop:
                    self._consumer.unregisterProducer()
                if handlingop.failed:
                    # The consumer should now call stopStreaming() on us,
                    # thus stopping the streaming.
                    self._finished = True
                    return
            yield None

    def startStreaming(self):
        """
        This should be called by the consumer when the producer is registered.

        Start streaming data to the consumer.
        """
        self._coopTask = cooperate(self._pull())

    def stopStreaming(self):
        """
        This should be called by the consumer when the producer is
        unregistered.

        Stop streaming data to the consumer.
        """
        if self._finished:
            # Already stopped (e.g. by a failure in _pull); stopping the
            # cooperative task twice would raise.
            return
        self._finished = True
        self._coopTask.stop()

    def pauseProducing(self):
        """
        @see: C{IPushProducer.pauseProducing}
        """
        self._coopTask.pause()

    def resumeProducing(self):
        """
        @see: C{IPushProducer.resumeProducing}
        """
        self._coopTask.resume()

    def stopProducing(self):
        """
        @see: C{IPushProducer.stopProducing}
        """
        self.stopStreaming()
        self._producer.stopProducing()

View File

@@ -0,0 +1,342 @@
# -*- test-case-name: twisted.internet.test.test_resolver -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
IPv6-aware hostname resolution.
@see: L{IHostnameResolver}
"""
from socket import (
AF_INET,
AF_INET6,
AF_UNSPEC,
SOCK_DGRAM,
SOCK_STREAM,
AddressFamily,
SocketKind,
gaierror,
getaddrinfo,
)
from typing import (
TYPE_CHECKING,
Callable,
List,
NoReturn,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from zope.interface import implementer
from twisted.internet._idna import _idnaBytes
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.defer import Deferred
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import (
IAddress,
IHostnameResolver,
IHostResolution,
IReactorThreads,
IResolutionReceiver,
IResolverSimple,
)
from twisted.internet.threads import deferToThreadPool
from twisted.logger import Logger
from twisted.python.compat import nativeString
if TYPE_CHECKING:
from twisted.python.threadpool import ThreadPool
@implementer(IHostResolution)
class HostResolution:
    """
    The in-progress resolution of a given hostname.

    @ivar name: the hostname being resolved.
    """

    def __init__(self, name: str):
        """
        Create a L{HostResolution} with the given name.
        """
        self.name = name

    def cancel(self) -> NoReturn:
        # IHostResolution.cancel
        # Cancellation of an in-progress getaddrinfo call is not supported.
        raise NotImplementedError()
# Shorthand for "either address family".
_any = frozenset([IPv4Address, IPv6Address])

# Map the set of requested IAddress types to a getaddrinfo() address family.
_typesToAF = {
    frozenset([IPv4Address]): AF_INET,
    frozenset([IPv6Address]): AF_INET6,
    _any: AF_UNSPEC,
}

# Inverse mapping: address family to the IAddress type used for results.
_afToType = {
    AF_INET: IPv4Address,
    AF_INET6: IPv6Address,
}

# Transport semantics name ("TCP"/"UDP") to getaddrinfo() socket type...
_transportToSocket = {
    "TCP": SOCK_STREAM,
    "UDP": SOCK_DGRAM,
}

# ...and back again, for labelling resolved addresses.
_socktypeToType = {
    SOCK_STREAM: "TCP",
    SOCK_DGRAM: "UDP",
}

# The shape of socket.getaddrinfo()'s return value:
# a list of (family, socktype, proto, canonname, sockaddr) tuples.
_GETADDRINFO_RESULT = List[
    Tuple[
        AddressFamily,
        SocketKind,
        int,
        str,
        Union[Tuple[str, int], Tuple[str, int, int, int]],
    ]
]
@implementer(IHostnameResolver)
class GAIResolver:
    """
    L{IHostnameResolver} implementation that resolves hostnames by calling
    L{getaddrinfo} in a thread.
    """

    def __init__(
        self,
        reactor: IReactorThreads,
        getThreadPool: Optional[Callable[[], "ThreadPool"]] = None,
        getaddrinfo: Callable[[str, int, int, int], _GETADDRINFO_RESULT] = getaddrinfo,
    ):
        """
        Create a L{GAIResolver}.

        @param reactor: the reactor to schedule result-delivery on
        @type reactor: L{IReactorThreads}

        @param getThreadPool: a function to retrieve the thread pool to use
            for scheduling name resolutions.  If not supplied, use the given
            C{reactor}'s thread pool.
        @type getThreadPool: 0-argument callable returning a
            L{twisted.python.threadpool.ThreadPool}

        @param getaddrinfo: a reference to the L{getaddrinfo} to use - mainly
            parameterized for testing.
        @type getaddrinfo: callable with the same signature as L{getaddrinfo}
        """
        self._reactor = reactor
        self._getThreadPool = (
            reactor.getThreadPool if getThreadPool is None else getThreadPool
        )
        self._getaddrinfo = getaddrinfo

    def resolveHostName(
        self,
        resolutionReceiver: IResolutionReceiver,
        hostName: str,
        portNumber: int = 0,
        addressTypes: Optional[Sequence[Type[IAddress]]] = None,
        transportSemantics: str = "TCP",
    ) -> IHostResolution:
        """
        See L{IHostnameResolver.resolveHostName}

        @param resolutionReceiver: see interface

        @param hostName: see interface

        @param portNumber: see interface

        @param addressTypes: see interface

        @param transportSemantics: see interface

        @return: see interface
        """
        pool = self._getThreadPool()
        addressFamily = _typesToAF[
            _any if addressTypes is None else frozenset(addressTypes)
        ]
        socketType = _transportToSocket[transportSemantics]

        def get() -> _GETADDRINFO_RESULT:
            try:
                return self._getaddrinfo(
                    hostName, portNumber, addressFamily, socketType
                )
            except gaierror:
                # A lookup failure is reported as an empty result list, which
                # yields resolutionComplete() with no addresses delivered.
                return []

        d = deferToThreadPool(self._reactor, pool, get)
        resolution = HostResolution(hostName)
        resolutionReceiver.resolutionBegan(resolution)

        # deferToThreadPool delivers its result on the reactor thread, so the
        # receiver callbacks below also run on the reactor thread.
        @d.addCallback
        def deliverResults(result: _GETADDRINFO_RESULT) -> None:
            for family, socktype, proto, cannoname, sockaddr in result:
                addrType = _afToType[family]
                resolutionReceiver.addressResolved(
                    addrType(_socktypeToType.get(socktype, "TCP"), *sockaddr)
                )
            resolutionReceiver.resolutionComplete()

        return resolution
@implementer(IHostnameResolver)
class SimpleResolverComplexifier:
    """
    A converter from L{IResolverSimple} to L{IHostnameResolver}.
    """

    _log = Logger()

    def __init__(self, simpleResolver: IResolverSimple):
        """
        Construct a L{SimpleResolverComplexifier} with an L{IResolverSimple}.
        """
        self._simpleResolver = simpleResolver

    def resolveHostName(
        self,
        resolutionReceiver: IResolutionReceiver,
        hostName: str,
        portNumber: int = 0,
        addressTypes: Optional[Sequence[Type[IAddress]]] = None,
        transportSemantics: str = "TCP",
    ) -> IHostResolution:
        """
        See L{IHostnameResolver.resolveHostName}

        @param resolutionReceiver: see interface

        @param hostName: see interface

        @param portNumber: see interface

        @param addressTypes: see interface

        @param transportSemantics: see interface

        @return: see interface
        """
        # If it's str, we need to make sure that it's just ASCII.
        try:
            hostName_bytes = hostName.encode("ascii")
        except UnicodeEncodeError:
            # If it's not just ASCII, IDNA it. We don't want to give a Unicode
            # string with non-ASCII in it to Python 3, as if anyone passes that
            # to a Python 3 stdlib function, it will probably use the wrong
            # IDNA version and break absolutely everything
            hostName_bytes = _idnaBytes(hostName)
        # Make sure it's passed down as a native str, to maintain the interface
        hostName = nativeString(hostName_bytes)
        resolution = HostResolution(hostName)
        resolutionReceiver.resolutionBegan(resolution)
        # IResolverSimple only produces a single IPv4 address, so only
        # IPv4Address results are ever reported; DNSLookupError is treated as
        # "no results" rather than a logged failure.
        (
            self._simpleResolver.getHostByName(hostName)
            .addCallback(
                lambda address: resolutionReceiver.addressResolved(
                    IPv4Address("TCP", address, portNumber)
                )
            )
            .addErrback(
                lambda error: None
                if error.check(DNSLookupError)
                else self._log.failure(
                    "while looking up {name} with {resolver}",
                    error,
                    name=hostName,
                    resolver=self._simpleResolver,
                )
            )
            .addCallback(lambda nothing: resolutionReceiver.resolutionComplete())
        )
        return resolution
@implementer(IResolutionReceiver)
class FirstOneWins:
    """
    An L{IResolutionReceiver} which fires a L{Deferred} with its first result.
    """

    def __init__(self, deferred: "Deferred[str]"):
        """
        @param deferred: The L{Deferred} to fire when the first resolution
            result arrives.
        """
        self._deferred = deferred
        self._resolved = False

    def resolutionBegan(self, resolution: IHostResolution) -> None:
        """
        Remember the resolution so its name is available for error reporting.

        @param resolution: See L{IResolutionReceiver.resolutionBegan}
        """
        self._resolution = resolution

    def addressResolved(self, address: IAddress) -> None:
        """
        Fire the deferred with the first address seen; ignore every
        subsequent one.

        @param address: See L{IResolutionReceiver.addressResolved}
        """
        if not self._resolved:
            self._resolved = True
            # This is used by ComplexResolverSimplifier which specifies only
            # results of IPv4Address.
            assert isinstance(address, IPv4Address)
            self._deferred.callback(address.host)

    def resolutionComplete(self) -> None:
        """
        Fail the deferred with L{DNSLookupError} if no address was ever
        resolved.
        """
        if not self._resolved:
            self._deferred.errback(DNSLookupError(self._resolution.name))
@implementer(IResolverSimple)
class ComplexResolverSimplifier:
    """
    A converter from L{IHostnameResolver} to L{IResolverSimple}
    """

    def __init__(self, nameResolver: IHostnameResolver):
        """
        Create a L{ComplexResolverSimplifier} with an L{IHostnameResolver}.

        @param nameResolver: The L{IHostnameResolver} to use.
        """
        self._nameResolver = nameResolver

    def getHostByName(self, name: str, timeouts: Sequence[int] = ()) -> "Deferred[str]":
        """
        See L{IResolverSimple.getHostByName}

        @param name: see L{IResolverSimple.getHostByName}

        @param timeouts: see L{IResolverSimple.getHostByName}

        @return: see L{IResolverSimple.getHostByName}
        """
        # IResolverSimple can only express a single IPv4 result, so restrict
        # the lookup to IPv4Address and take the first address delivered.
        # Note: `timeouts` is accepted for interface compatibility but not
        # honoured here.
        result: "Deferred[str]" = Deferred()
        self._nameResolver.resolveHostName(FirstOneWins(result), name, 0, [IPv4Address])
        return result

View File

@@ -0,0 +1,445 @@
# -*- test-case-name: twisted.internet.test.test_sigchld -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module is used to integrate child process termination into a
reactor event loop. This is a challenging feature to provide because
most platforms indicate process termination via SIGCHLD and do not
provide a way to wait for that signal and arbitrary I/O events at the
same time. The naive implementation involves installing a Python
SIGCHLD handler; unfortunately this leads to other syscalls being
interrupted (whenever SIGCHLD is received) and failing with EINTR
(which almost no one is prepared to handle). This interruption can be
disabled via siginterrupt(2) (or one of the equivalent mechanisms);
however, if the SIGCHLD is delivered by the platform to a non-main
thread (not a common occurrence, but difficult to prove impossible),
the main thread (waiting on select() or another event notification
API) may not wake up leading to an arbitrary delay before the child
termination is noticed.
The basic solution to all these issues involves enabling SA_RESTART (ie,
disabling system call interruption) and registering a C signal handler which
writes a byte to a pipe. The other end of the pipe is registered with the
event loop, allowing it to wake up shortly after SIGCHLD is received. See
L{_SIGCHLDWaker} for the implementation of the event loop side of this
solution. The use of a pipe this way is known as the U{self-pipe
trick<http://cr.yp.to/docs/selfpipe.html>}.
From Python version 2.6, C{signal.siginterrupt} and C{signal.set_wakeup_fd}
provide the necessary C signal handler which writes to the pipe to be
registered with C{SA_RESTART}.
"""
from __future__ import annotations
import contextlib
import errno
import os
import signal
import socket
from types import FrameType
from typing import Callable, Optional, Sequence
from zope.interface import Attribute, Interface, implementer
from attrs import define, frozen
from typing_extensions import Protocol, TypeAlias
from twisted.internet.interfaces import IReadDescriptor
from twisted.python import failure, log, util
from twisted.python.runtime import platformType
if platformType == "posix":
from . import fdesc, process
SignalHandler: TypeAlias = Callable[[int, Optional[FrameType]], None]
def installHandler(fd: int) -> int:
    """
    Install a signal handler which will write a byte to C{fd} when
    I{SIGCHLD} is received.

    This is implemented by installing a SIGCHLD handler that does nothing,
    setting the I{SIGCHLD} handler as not allowed to interrupt system calls,
    and using L{signal.set_wakeup_fd} to do the actual writing.

    @param fd: The file descriptor to which to write when I{SIGCHLD} is
        received.  Pass C{-1} to uninstall and restore the default
        disposition.

    @return: The file descriptor previously configured for this use.
    """
    if fd == -1:
        # Uninstalling: restore the default SIGCHLD disposition.
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    else:

        def noopSignalHandler(*args):
            pass

        # A non-default Python-level handler must be installed for the signal
        # to be delivered; the real notification happens via the wakeup fd.
        signal.signal(signal.SIGCHLD, noopSignalHandler)
        # Equivalent to SA_RESTART: don't let SIGCHLD interrupt syscalls.
        signal.siginterrupt(signal.SIGCHLD, False)
    return signal.set_wakeup_fd(fd)
def isDefaultHandler():
    """
    Report whether I{SIGCHLD} currently has its default disposition.

    @return: C{True} if the installed handler is C{signal.SIG_DFL},
        C{False} otherwise.
    """
    currentHandler = signal.getsignal(signal.SIGCHLD)
    return currentHandler == signal.SIG_DFL
class SignalHandling(Protocol):
    """
    The L{SignalHandling} protocol enables customizable signal-handling
    behaviors for reactors.

    A value that conforms to L{SignalHandling} has install and uninstall hooks
    that are called by a reactor at the correct times to have the (typically)
    process-global effects necessary for dealing with signals.
    """

    def install(self) -> None:
        """
        Install the signal handlers.  Called by the reactor at startup.
        """

    def uninstall(self) -> None:
        """
        Restore signal handlers to their original state.  Called by the
        reactor at shutdown.
        """
@frozen
class _WithoutSignalHandling:
    """
    A L{SignalHandling} implementation that does no signal handling.

    This is the implementation of C{installSignalHandlers=False}.
    """

    def install(self) -> None:
        """
        Do not install any signal handlers.
        """

    def uninstall(self) -> None:
        """
        Do nothing because L{install} installed nothing.
        """
@frozen
class _WithSignalHandling:
    """
    A reactor core helper that can manage signals: it installs signal handlers
    at start time.
    """

    # The handlers to install for SIGINT, SIGBREAK and SIGTERM respectively.
    _sigInt: SignalHandler
    _sigBreak: SignalHandler
    _sigTerm: SignalHandler

    def install(self) -> None:
        """
        Install the signal handlers for the Twisted event loop.
        """
        if signal.getsignal(signal.SIGINT) == signal.default_int_handler:
            # only handle if there isn't already a handler, e.g. for Pdb.
            signal.signal(signal.SIGINT, self._sigInt)
        signal.signal(signal.SIGTERM, self._sigTerm)

        # Catch Ctrl-Break in windows
        # SIGBREAK only exists on Windows, hence the getattr guard.
        SIGBREAK = getattr(signal, "SIGBREAK", None)
        if SIGBREAK is not None:
            signal.signal(SIGBREAK, self._sigBreak)

    def uninstall(self) -> None:
        """
        At the moment, do nothing (for historical reasons).
        """
        # This should really do something.
        # https://github.com/twisted/twisted/issues/11761
@define
class _MultiSignalHandling:
    """
    An implementation of L{SignalHandling} which propagates protocol
    method calls to a number of other implementations.

    This supports composition of multiple signal handling implementations into
    a single object so the reactor doesn't have to be concerned with how those
    implementations are factored.

    @ivar _signalHandlings: The other C{SignalHandling} implementations to
        which to propagate calls.

    @ivar _installed: If L{install} has been called but L{uninstall} has not.
        This is used to avoid double cleanup which otherwise results (at least
        during test suite runs) because twisted.internet.reactormixins doesn't
        keep track of whether a reactor has run or not but always invokes its
        cleanup logic.
    """

    _signalHandlings: Sequence[SignalHandling]
    _installed: bool = False

    def install(self) -> None:
        """
        Install each composed behavior, in sequence order.
        """
        for d in self._signalHandlings:
            d.install()
        self._installed = True

    def uninstall(self) -> None:
        """
        Uninstall each composed behavior, but only if L{install} ran first.
        """
        if self._installed:
            for d in self._signalHandlings:
                d.uninstall()
            self._installed = False
@define
class _ChildSignalHandling:
    """
    Signal handling behavior which supports I{SIGCHLD} for notification about
    changes to child process state.

    @ivar _childWaker: L{None} or a reference to the L{_SIGCHLDWaker} which is
        used to properly notice child process termination.  This is L{None}
        when this handling behavior is not installed and non-C{None}
        otherwise.  This is mostly an unfortunate implementation detail due to
        L{_SIGCHLDWaker} allocating file descriptors as a side-effect of its
        initializer.
    """

    # Callables used to (un)register the waker with the reactor's set of
    # internal readers.
    _addInternalReader: Callable[[IReadDescriptor], object]
    _removeInternalReader: Callable[[IReadDescriptor], object]
    _childWaker: Optional[_SIGCHLDWaker] = None

    def install(self) -> None:
        """
        Extend the basic signal handling logic to also support handling
        SIGCHLD to know when to try to reap child processes.
        """
        # This conditional should probably not be necessary.
        # https://github.com/twisted/twisted/issues/11763
        if self._childWaker is None:
            self._childWaker = _SIGCHLDWaker()
            self._addInternalReader(self._childWaker)
        self._childWaker.install()

        # Also reap all processes right now, in case we missed any
        # signals before we installed the SIGCHLD waker/handler.
        # This should only happen if someone used spawnProcess
        # before calling reactor.run (and the process also exited
        # already).
        process.reapAllProcesses()

    def uninstall(self) -> None:
        """
        If a child waker was created and installed, uninstall it now.

        Since this disables reactor functionality and is only called when the
        reactor is stopping, it doesn't provide any directly useful
        functionality, but the cleanup of reactor-related process-global state
        that it does helps in unit tests involving multiple reactors and is
        generally just a nice thing.
        """
        assert self._childWaker is not None

        # XXX This would probably be an alright place to put all of the
        # cleanup code for all internal readers (here and in the base class,
        # anyway).  See #3063 for that cleanup task.
        self._removeInternalReader(self._childWaker)
        self._childWaker.uninstall()
        self._childWaker.connectionLost(failure.Failure(Exception("uninstalled")))

        # We just spoiled the current _childWaker so throw it away.  We can
        # make a new one later if need be.
        self._childWaker = None
class _IWaker(Interface):
    """
    Interface to wake up the event loop based on the self-pipe trick.

    The U{I{self-pipe trick}<http://cr.yp.to/docs/selfpipe.html>}, used to wake
    up the main loop from another thread or a signal handler.

    C{wakeUp} and C{doRead} appear together because writing to the pipe
    (C{wakeUp}) makes the reactor notice the read end is ready, and draining
    it (C{doRead}) completes the wakeup.

    This is used by threads or signals to wake up the event loop.
    """

    disconnected = Attribute("")

    def wakeUp() -> None:
        """
        Called to wake up the event loop.
        """

    def doRead() -> None:
        """
        Read some data from my connection and discard it.
        """

    def connectionLost(reason: failure.Failure) -> None:
        """
        Called when the connection is closed; closes the underlying pipes
        or sockets.
        """
@implementer(_IWaker)
class _SocketWaker(log.Logger):
    """
    The I{self-pipe trick<http://cr.yp.to/docs/selfpipe.html>}, implemented
    using a pair of sockets rather than pipes (due to the lack of support in
    select() on Windows for pipes), used to wake up the main loop from
    another thread.
    """

    disconnected = 0

    def __init__(self) -> None:
        """Initialize."""
        # Following select_trigger (from asyncore)'s example;
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        with contextlib.closing(
            socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ) as server:
            # Bind an ephemeral loopback port, connect the client to it, and
            # accept to get the read end; the listening socket itself is no
            # longer needed afterwards, hence the closing() wrapper.
            server.bind(("127.0.0.1", 0))
            server.listen(1)
            client.connect(server.getsockname())
            reader, clientaddr = server.accept()
        client.setblocking(False)
        reader.setblocking(False)
        self.r = reader
        self.w = client
        self.fileno = self.r.fileno

    def wakeUp(self):
        """Send a byte to my connection."""
        try:
            util.untilConcludes(self.w.send, b"x")
        except OSError as e:
            # A full send buffer means a wakeup is already pending, so it is
            # safe to drop this one.
            if e.args[0] != errno.WSAEWOULDBLOCK:
                raise

    def doRead(self):
        """
        Read some data from my connection and discard it.
        """
        try:
            self.r.recv(8192)
        except OSError:
            pass

    def connectionLost(self, reason):
        """Close both ends of the socket pair."""
        self.r.close()
        self.w.close()
@implementer(IReadDescriptor)
class _FDWaker(log.Logger):
    """
    The I{self-pipe trick<http://cr.yp.to/docs/selfpipe.html>}, used to wake
    up the main loop from another thread or a signal handler.

    L{_FDWaker} is a base class for waker implementations based on
    writing to a pipe being monitored by the reactor.

    @ivar o: The file descriptor for the end of the pipe which can be
        written to wake up a reactor monitoring this waker.

    @ivar i: The file descriptor which should be monitored in order to
        be awoken by this waker.
    """

    disconnected = 0

    i: int
    o: int

    def __init__(self) -> None:
        """Initialize."""
        self.i, self.o = os.pipe()
        # Non-blocking so reads and writes never stall the reactor, and
        # close-on-exec so spawned children do not inherit the pipe.
        fdesc.setNonBlocking(self.i)
        fdesc._setCloseOnExec(self.i)
        fdesc.setNonBlocking(self.o)
        fdesc._setCloseOnExec(self.o)
        self.fileno = lambda: self.i

    def doRead(self) -> None:
        """
        Read some bytes from the pipe and discard them.
        """
        fdesc.readFromFD(self.fileno(), lambda data: None)

    def connectionLost(self, reason):
        """Close both ends of my pipe."""
        # Guard against a second call: the first call deletes self.o below.
        if not hasattr(self, "o"):
            return
        for fd in self.i, self.o:
            try:
                os.close(fd)
            except OSError:
                pass
        del self.i, self.o
@implementer(_IWaker)
class _UnixWaker(_FDWaker):
    """
    This class provides a simple interface to wake up the event loop.

    This is used by threads or signals to wake up the event loop.
    """

    def wakeUp(self):
        """Write one byte to the pipe, and flush it."""
        # We don't use fdesc.writeToFD since we need to distinguish
        # between EINTR (try again) and EAGAIN (do nothing).
        # NOTE(review): _FDWaker.connectionLost() deletes self.o rather than
        # setting it to None, so this check never observes None after close;
        # confirm whether an AttributeError guard was intended instead.
        if self.o is not None:
            try:
                util.untilConcludes(os.write, self.o, b"x")
            except OSError as e:
                # XXX There is no unit test for raising the exception
                # for other errnos. See #4285.
                if e.errno != errno.EAGAIN:
                    raise
# Select the waker implementation appropriate for this platform: pipes on
# POSIX, a socket pair elsewhere (select() on Windows cannot monitor pipes).
if platformType == "posix":
    _Waker = _UnixWaker
else:
    # Primarily Windows and Jython.
    _Waker = _SocketWaker  # type: ignore[misc,assignment]
class _SIGCHLDWaker(_FDWaker):
    """
    L{_SIGCHLDWaker} can wake up a reactor whenever C{SIGCHLD} is received.
    """

    def install(self) -> None:
        """
        Install the handler necessary to make this waker active.
        """
        # Hand our pipe's write end to the signal machinery so the arrival
        # of SIGCHLD wakes this waker's read end.
        installHandler(self.o)

    def uninstall(self) -> None:
        """
        Remove the handler which makes this waker active.
        """
        # -1 disables the previously installed wakeup descriptor.
        installHandler(-1)

    def doRead(self) -> None:
        """
        Having woken up the reactor in response to receipt of
        C{SIGCHLD}, reap the process which exited.

        This is called whenever the reactor notices the waker pipe is
        writeable, which happens soon after any call to the C{wakeUp}
        method.
        """
        super().doRead()
        process.reapAllProcesses()

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,342 @@
# -*- test-case-name: twisted.test.test_internet -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Threaded select reactor
The threadedselectreactor is a specialized reactor for integrating with an
arbitrary foreign event loop, such as those you find in GUI toolkits.
There are three things you'll need to do to use this reactor.
Install the reactor at the beginning of your program, before importing the rest
of Twisted::
| from twisted.internet import _threadedselect
| _threadedselect.install()
Interleave this reactor with your foreign event loop, at some point after your
event loop is initialized::
| from twisted.internet import reactor
| reactor.interleave(foreignEventLoopWakerFunction)
| self.addSystemEventTrigger('after', 'shutdown', foreignEventLoopStop)
Instead of shutting down the foreign event loop directly, shut down the
reactor::
| from twisted.internet import reactor
| reactor.stop()
In order for Twisted to do its work in the main thread (the thread that
interleave is called from), a waker function is necessary. The waker function
will be called from a "background" thread with one argument: func. The waker
function's purpose is to call func() from the main thread. Many GUI toolkits
ship with appropriate waker functions. One example of this is wxPython's
wx.callAfter (may be wxCallAfter in older versions of wxPython). These would
be used in place of "foreignEventLoopWakerFunction" in the above example.
The other integration point at which the foreign event loop and this reactor
must integrate is shutdown. In order to ensure clean shutdown of Twisted, you
must allow for Twisted to come to a complete stop before quitting the
application. Typically, you will do this by setting up an after shutdown
trigger to stop your foreign event loop, and call reactor.stop() where you
would normally have initiated the shutdown procedure for the foreign event
loop. Shutdown functions that could be used in place of "foreignEventLoopStop"
would be the ExitMainLoop method of the wxApp instance with wxPython.
"""
from __future__ import annotations
from errno import EBADF, EINTR
from queue import Empty, Queue
from threading import Thread
from typing import Any, Callable
from zope.interface import implementer
from twisted._threads import ThreadWorker
from twisted.internet import posixbase
from twisted.internet.interfaces import IReactorFDSet, IReadDescriptor, IWriteDescriptor
from twisted.internet.selectreactor import _preenDescriptors, _select
from twisted.logger import Logger
from twisted.python.log import callWithLogger as _callWithLogger
_log = Logger()
def raiseException(e):
    """
    Re-raise the given exception instance.

    @param e: the exception instance to raise.
    """
    raise e
def _threadsafeSelect(
    timeout: float | None,
    readmap: dict[int, IReadDescriptor],
    writemap: dict[int, IWriteDescriptor],
    handleResult: Callable[
        [
            list[int],
            list[int],
            dict[int, IReadDescriptor],
            dict[int, IWriteDescriptor],
            bool,
        ],
        None,
    ],
) -> None:
    """
    Invoke C{select}. This will be called in a non-main thread, so it is very
    careful to work only on integers and avoid calling any application code.

    @param timeout: the select() timeout, or L{None} to block indefinitely.
    @param readmap: file descriptor -> reader, built by the caller.
    @param writemap: file descriptor -> writer, built by the caller.
    @param handleResult: called with the ready read fds, ready write fds,
        both maps, and a flag indicating the descriptor sets need preening
        (i.e. select() rejected one of them).
    """
    preen = False
    r = []
    w = []
    while 1:
        readints = readmap.keys()
        writeints = writemap.keys()
        try:
            result = _select(readints, writeints, [], timeout)
        except ValueError:
            # Possible problems with file descriptors that were passed:
            # ValueError may indicate that a file descriptor has gone negative.
            preen = True
            break
        except OSError as se:
            # The select() system call encountered an error.
            if se.args[0] == EINTR:
                # EINTR is hard to replicate in tests using an actual select(),
                # and I don't want to dedicate effort to testing this function
                # when it needs to be refactored with selectreactor.
                # NOTE(review): this returns without invoking handleResult,
                # so no further select is scheduled from here -- confirm
                # that is intended.
                return  # pragma: no cover
            elif se.args[0] == EBADF:
                preen = True
                break
            else:
                # OK, I really don't know what's going on. Blow up. Never
                # mind with the coverage here, since we are just trying to make
                # sure we don't swallow an exception.
                raise  # pragma: no cover
        else:
            r, w, ignored = result
            break
    # Report the outcome (ready fds, or a request to preen) to the caller.
    handleResult(r, w, readmap, writemap, preen)
@implementer(IReactorFDSet)
class ThreadedSelectReactor(posixbase.PosixReactorBase):
    """A threaded select() based reactor - runs on all POSIX platforms and on
    Win32.
    """

    def __init__(
        self, waker: Callable[[Callable[[], None]], None] | None = None
    ) -> None:
        # Selectables currently monitored for readability / writability.
        self.reads: set[IReadDescriptor] = set()
        self.writes: set[IWriteDescriptor] = set()
        posixbase.PosixReactorBase.__init__(self)
        # Worker thread that blocks in select(); created lazily by
        # _selectOnce and torn down by _cleanUpThread.
        self._selectorThread: ThreadWorker | None = None
        # Callable that schedules a function onto the foreign loop's main
        # thread; may also be set later via interleave().
        self.mainWaker = waker
        self._iterationQueue: Queue[Callable[[], None]] | None = None

    def wakeUp(self):
        # we want to wake up from any thread
        self.waker.wakeUp()

    def callLater(self, *args, **kw):
        # Wake the select thread so the new timed call's deadline is taken
        # into account the next time a select() timeout is computed.
        tple = posixbase.PosixReactorBase.callLater(self, *args, **kw)
        self.wakeUp()
        return tple

    def _doReadOrWrite(self, selectable: object, method: str) -> None:
        # Invoke doRead/doWrite with failure logging; a failure or a truthy
        # return value disconnects the selectable.
        with _log.failuresHandled(
            "while handling selectable {sel}", sel=selectable
        ) as op:
            why = getattr(selectable, method)()
        if (fail := op.failure) is not None:
            why = fail.value
        if why:
            self._disconnectSelectable(selectable, why, method == "doRead")

    def _selectOnce(self, timeout: float | None, keepGoing: bool) -> None:
        # Snapshot the current readers/writers as fd -> selectable maps so
        # the selector thread only ever touches plain integers.
        reads: dict[int, Any] = {}
        writes: dict[int, Any] = {}
        for isRead, fdmap, d in [
            (True, self.reads, reads),
            (False, self.writes, writes),
        ]:
            for each in fdmap:  # type:ignore[attr-defined]
                d[each.fileno()] = each
        mainWaker = self.mainWaker
        assert mainWaker is not None, (
            "neither .interleave() nor .mainLoop() / .run() called, "
            "but we are somehow running the reactor"
        )

        def callReadsAndWrites(
            r: list[int],
            w: list[int],
            readmap: dict[int, IReadDescriptor],
            writemap: dict[int, IWriteDescriptor],
            preen: bool,
        ) -> None:
            # Runs on the selector thread; bounces the actual event
            # dispatching back onto the main thread via mainWaker.
            @mainWaker
            def onMainThread() -> None:
                if preen:
                    _preenDescriptors(
                        self.reads, self.writes, self._disconnectSelectable
                    )
                    return
                _drdw = self._doReadOrWrite
                for readable in r:
                    rselectable = readmap[readable]
                    # Re-check membership: an earlier callback this pass may
                    # have removed the selectable.
                    if rselectable in self.reads:
                        _callWithLogger(rselectable, _drdw, rselectable, "doRead")
                for writable in w:
                    wselectable = writemap[writable]
                    if wselectable in self.writes:
                        _callWithLogger(wselectable, _drdw, wselectable, "doWrite")
                self.runUntilCurrent()
                if self._started and keepGoing:
                    # see coverage note in .interleave()
                    self._selectOnce(self.timeout(), True)  # pragma: no cover
                else:
                    self._cleanUpThread()

        if self._selectorThread is None:
            self._selectorThread = ThreadWorker(
                lambda target: Thread(target=target).start(), Queue()
            )
        self._selectorThread.do(
            lambda: _threadsafeSelect(timeout, reads, writes, callReadsAndWrites)
        )

    def _cleanUpThread(self) -> None:
        """
        Ensure that the selector thread is stopped.
        """
        oldThread, self._selectorThread = self._selectorThread, None
        if oldThread is not None:
            oldThread.quit()

    def interleave(
        self,
        waker: Callable[[Callable[[], None]], None],
        installSignalHandlers: bool = True,
    ) -> None:
        """
        interleave(waker) interleaves this reactor with the current application
        by moving the blocking parts of the reactor (select() in this case) to
        a separate thread. This is typically useful for integration with GUI
        applications which have their own event loop already running.

        See the module docstring for more information.
        """
        # TODO: This method is excluded from coverage because it only happens
        # in the case where we are actually running on a foreign event loop,
        # and twisted's test suite isn't set up that way. It would be nice to
        # add some dedicated tests for ThreadedSelectReactor that covered this
        # case.
        self.mainWaker = waker  # pragma: no cover
        self.startRunning(installSignalHandlers)  # pragma: no cover
        self._selectOnce(0.0, True)  # pragma: no cover

    def addReader(self, reader: IReadDescriptor) -> None:
        """Add a FileDescriptor for notification of data available to read."""
        self.reads.add(reader)
        self.wakeUp()

    def addWriter(self, writer: IWriteDescriptor) -> None:
        """Add a FileDescriptor for notification of data available to write."""
        self.writes.add(writer)
        self.wakeUp()

    def removeReader(self, reader: IReadDescriptor) -> None:
        """Remove a Selectable for notification of data available to read."""
        if reader in self.reads:
            self.reads.remove(reader)

    def removeWriter(self, writer: IWriteDescriptor) -> None:
        """Remove a Selectable for notification of data available to write."""
        if writer in self.writes:
            self.writes.remove(writer)

    def removeAll(self) -> list[IReadDescriptor | IWriteDescriptor]:
        """Remove all readers and writers, returning them."""
        return self._removeAll(self.reads, self.writes)  # type:ignore[no-any-return]

    def getReaders(self) -> list[IReadDescriptor]:
        """Return the currently registered readers."""
        return list(self.reads)

    def getWriters(self) -> list[IWriteDescriptor]:
        """Return the currently registered writers."""
        return list(self.writes)

    def stop(self):
        """
        Extend the base stop implementation to also wake up the select thread so
        that C{runUntilCurrent} notices the reactor should stop.
        """
        posixbase.PosixReactorBase.stop(self)
        self.wakeUp()

    def crash(self):
        # Like stop(), wake the select thread so the state change is noticed.
        posixbase.PosixReactorBase.crash(self)
        self.wakeUp()

    # The following methods are mostly for test-suite support, to make
    # ThreadedSelectReactor behave like another reactor you might call run()
    # on.
    def _testMainLoopSetup(self) -> None:
        """
        Mostly for compliance with L{IReactorCore} and usability with the
        tests, set up a fake blocking main-loop; make the "foreign" main loop
        we are interfacing with be C{self.mainLoop()}, that is reading from a
        basic Queue.
        """
        self._iterationQueue = Queue()
        self.mainWaker = self._iterationQueue.put

    def _uninstallHandler(self) -> None:
        """
        Handle uninstallation to ensure that cleanup is properly performed by
        ReactorBuilder tests.
        """
        super()._uninstallHandler()
        self._cleanUpThread()

    def iterate(self, timeout: float = 0.0) -> None:
        # Lazily set up the queue-backed fake main loop before iterating.
        if self._iterationQueue is None and self.mainWaker is None:  # pragma: no branch
            self._testMainLoopSetup()
        self.wakeUp()
        super().iterate(timeout)

    def doIteration(self, timeout: float | None) -> None:
        # Kick off one select() and run whatever work it queues for us.
        assert self._iterationQueue is not None
        self._selectOnce(timeout, False)
        try:
            work = self._iterationQueue.get(timeout=timeout)
        except Empty:
            return
        work()

    def mainLoop(self) -> None:
        """
        This should not normally be run.
        """
        self._testMainLoopSetup()
        super().mainLoop()
def install():
    """
    Install a L{ThreadedSelectReactor} as the global Twisted reactor.

    @return: the newly installed reactor.
    """
    from twisted.internet.main import installReactor

    newReactor = ThreadedSelectReactor()
    installReactor(newReactor)
    return newReactor
__all__ = ["install"]

View File

@@ -0,0 +1,155 @@
# -*- test-case-name: twisted.internet.test.test_win32serialport -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial port support for Windows.
Requires PySerial and pywin32.
"""
import win32event
import win32file
# system imports
from serial import EIGHTBITS, PARITY_NONE, STOPBITS_ONE
from serial.serialutil import to_bytes
# twisted imports
from twisted.internet import abstract
# sibling imports
from twisted.internet.serialport import BaseSerialPort
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
    """A serial device, acting as a transport, that uses a win32 event."""

    connected = 1

    def __init__(
        self,
        protocol,
        deviceNameOrPortNumber,
        reactor,
        baudrate=9600,
        bytesize=EIGHTBITS,
        parity=PARITY_NONE,
        stopbits=STOPBITS_ONE,
        xonxoff=0,
        rtscts=0,
    ):
        """
        Open the serial device, create the overlapped (asynchronous) I/O
        events, register them with the reactor, and connect C{protocol}.

        @param protocol: the protocol which receives incoming serial data.
        @param deviceNameOrPortNumber: passed to the serial factory to
            identify the port to open.
        @param reactor: a reactor providing C{addEvent}/C{removeEvent}.
        """
        self._serial = self._serialFactory(
            deviceNameOrPortNumber,
            baudrate=baudrate,
            bytesize=bytesize,
            parity=parity,
            stopbits=stopbits,
            timeout=None,
            xonxoff=xonxoff,
            rtscts=rtscts,
        )
        self.flushInput()
        self.flushOutput()
        self.reactor = reactor
        # Fixed: the protocol used to be assigned twice; once is enough.
        self.protocol = protocol
        self.outQueue = []
        self.closed = 0
        self.closedNotifies = 0
        self.writeInProgress = 0
        # Manual-reset event for reads (reset explicitly in serialReadEvent);
        # auto-reset event for writes.
        self._overlappedRead = win32file.OVERLAPPED()
        self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
        self._overlappedWrite = win32file.OVERLAPPED()
        self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)
        self.reactor.addEvent(self._overlappedRead.hEvent, self, "serialReadEvent")
        self.reactor.addEvent(self._overlappedWrite.hEvent, self, "serialWriteEvent")
        self.protocol.makeConnection(self)
        self._finishPortSetup()

    def _finishPortSetup(self):
        """
        Finish setting up the serial port.

        This is a separate method to facilitate testing.
        """
        # Clear any pending comm errors, then start the first overlapped
        # one-byte read; completion is signalled via _overlappedRead.hEvent.
        flags, comstat = self._clearCommError()
        rc, self.read_buf = win32file.ReadFile(
            self._serial._port_handle,
            win32file.AllocateReadBuffer(1),
            self._overlappedRead,
        )

    def _clearCommError(self):
        """
        Return C{(flags, comstat)} from C{ClearCommError} for this port.
        """
        return win32file.ClearCommError(self._serial._port_handle)

    def serialReadEvent(self):
        """
        Handle completion of an overlapped read: deliver the awaited byte
        plus anything else already buffered, then start the next read.
        """
        # get that character we set up
        n = win32file.GetOverlappedResult(
            self._serial._port_handle, self._overlappedRead, 0
        )
        first = to_bytes(self.read_buf[:n])
        # now we should get everything that is already in the buffer
        flags, comstat = self._clearCommError()
        if comstat.cbInQue:
            win32event.ResetEvent(self._overlappedRead.hEvent)
            rc, buf = win32file.ReadFile(
                self._serial._port_handle,
                win32file.AllocateReadBuffer(comstat.cbInQue),
                self._overlappedRead,
            )
            n = win32file.GetOverlappedResult(
                self._serial._port_handle, self._overlappedRead, 1
            )
            # handle all the received data:
            self.protocol.dataReceived(first + to_bytes(buf[:n]))
        else:
            # handle all the received data:
            self.protocol.dataReceived(first)
        # set up next one
        win32event.ResetEvent(self._overlappedRead.hEvent)
        rc, self.read_buf = win32file.ReadFile(
            self._serial._port_handle,
            win32file.AllocateReadBuffer(1),
            self._overlappedRead,
        )

    def write(self, data):
        """
        Write C{data} to the port, queueing it if a write is in progress.
        """
        if data:
            if self.writeInProgress:
                self.outQueue.append(data)
            else:
                self.writeInProgress = 1
                win32file.WriteFile(
                    self._serial._port_handle, data, self._overlappedWrite
                )

    def serialWriteEvent(self):
        """
        Handle completion of an overlapped write: start the next queued
        write, or mark the writer idle when the queue is empty.
        """
        try:
            dataToWrite = self.outQueue.pop(0)
        except IndexError:
            self.writeInProgress = 0
            return
        else:
            win32file.WriteFile(
                self._serial._port_handle, dataToWrite, self._overlappedWrite
            )

    def connectionLost(self, reason):
        """
        Called when the serial port disconnects.

        Will call C{connectionLost} on the protocol that is handling the
        serial data.
        """
        self.reactor.removeEvent(self._overlappedRead.hEvent)
        self.reactor.removeEvent(self._overlappedWrite.hEvent)
        abstract.FileDescriptor.connectionLost(self, reason)
        self._serial.close()
        self.protocol.connectionLost(reason)

View File

@@ -0,0 +1,136 @@
# -*- test-case-name: twisted.test.test_stdio -*-
"""
Windows-specific implementation of the L{twisted.internet.stdio} interface.
"""
import msvcrt
import os
from zope.interface import implementer
import win32api
from twisted.internet import _pollingfile, main
from twisted.internet.interfaces import (
IAddress,
IConsumer,
IHalfCloseableProtocol,
IPushProducer,
ITransport,
)
from twisted.logger import Logger
from twisted.python.failure import Failure
_log = Logger()
@implementer(IAddress)
class Win32PipeAddress:
    """
    An L{IAddress} returned by L{StandardIO.getHost}/C{getPeer} to represent
    this process's standard I/O pipe endpoints on Windows. It carries no
    further addressing information.
    """

    pass
@implementer(ITransport, IConsumer, IPushProducer)
class StandardIO(_pollingfile._PollingTimer):
    """
    A transport over this process's own stdin/stdout, implemented by
    polling Windows pipe handles.
    """

    disconnecting = False
    disconnected = False

    def __init__(self, proto, reactor=None):
        """
        Start talking to standard IO with the given protocol.

        Also, put stdin/stdout/stderr into binary mode.
        """
        if reactor is None:
            from twisted.internet import reactor
        # BUG FIX: this previously iterated ``range(0, 1, 2)``, which yields
        # only 0, so stdout (1) and stderr (2) were never switched to binary
        # mode.  Iterate all three standard descriptors.
        for stdfd in (0, 1, 2):
            msvcrt.setmode(stdfd, os.O_BINARY)
        _pollingfile._PollingTimer.__init__(self, reactor)
        self.proto = proto
        hstdin = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
        hstdout = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
        self.stdin = _pollingfile._PollableReadPipe(
            hstdin, self.dataReceived, self.readConnectionLost
        )
        self.stdout = _pollingfile._PollableWritePipe(hstdout, self.writeConnectionLost)
        self._addPollableResource(self.stdin)
        self._addPollableResource(self.stdout)
        self.proto.makeConnection(self)

    def dataReceived(self, data):
        """Forward bytes read from stdin to the protocol."""
        self.proto.dataReceived(data)

    def readConnectionLost(self):
        """
        The read side (stdin) closed: notify half-closeable protocols and
        account for the lost half-connection.
        """
        with _log.failuresHandled("read connection lost") as op:
            if IHalfCloseableProtocol.providedBy(self.proto):
                self.proto.readConnectionLost()
            self.checkConnLost()
        if not op.succeeded and not self.disconnecting:
            self.loseConnection()

    def writeConnectionLost(self):
        """
        The write side (stdout) closed: notify half-closeable protocols and
        account for the lost half-connection.
        """
        with _log.failuresHandled("write connection lost") as op:
            if IHalfCloseableProtocol.providedBy(self.proto):
                self.proto.writeConnectionLost()
            self.checkConnLost()
        if not op.succeeded and not self.disconnecting:
            self.loseConnection()

    # Count of half-connections (read side, write side) lost so far.
    connsLost = 0

    def checkConnLost(self):
        """Once both halves are gone, report full connection loss."""
        self.connsLost += 1
        if self.connsLost >= 2:
            self.disconnecting = True
            self.disconnected = True
            self.proto.connectionLost(Failure(main.CONNECTION_DONE))

    # ITransport

    def write(self, data):
        self.stdout.write(data)

    def writeSequence(self, seq):
        self.stdout.write(b"".join(seq))

    def loseConnection(self):
        self.disconnecting = True
        self.stdin.close()
        self.stdout.close()

    def getPeer(self):
        return Win32PipeAddress()

    def getHost(self):
        return Win32PipeAddress()

    # IConsumer

    def registerProducer(self, producer, streaming):
        return self.stdout.registerProducer(producer, streaming)

    def unregisterProducer(self):
        return self.stdout.unregisterProducer()

    # def write() above

    # IProducer

    def stopProducing(self):
        self.stdin.stopProducing()

    # IPushProducer

    def pauseProducing(self):
        self.stdin.pauseProducing()

    def resumeProducing(self):
        self.stdin.resumeProducing()

View File

@@ -0,0 +1,542 @@
# -*- test-case-name: twisted.test.test_abstract -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for generic select()able objects.
"""
from socket import AF_INET, AF_INET6, inet_pton
from typing import Iterable, List, Optional
from zope.interface import implementer
from twisted.internet import interfaces, main
from twisted.python import failure, reflect
# Twisted Imports
from twisted.python.compat import lazyByteSlice
def _dataMustBeBytes(obj):
if not isinstance(obj, bytes): # no, really, I mean it
raise TypeError("Data must be bytes")
# Python 3.4+ can join bytes and memoryviews; using a
# memoryview prevents the slice from copying
def _concatenate(bObj, offset, bArray):
return b"".join([memoryview(bObj)[offset:]] + bArray)
class _ConsumerMixin:
"""
L{IConsumer} implementations can mix this in to get C{registerProducer} and
C{unregisterProducer} methods which take care of keeping track of a
producer's state.
Subclasses must provide three attributes which L{_ConsumerMixin} will read
but not write:
- connected: A C{bool} which is C{True} as long as the consumer has
someplace to send bytes (for example, a TCP connection), and then
C{False} when it no longer does.
- disconnecting: A C{bool} which is C{False} until something like
L{ITransport.loseConnection} is called, indicating that the send buffer
should be flushed and the connection lost afterwards. Afterwards,
C{True}.
- disconnected: A C{bool} which is C{False} until the consumer no longer
has a place to send bytes, then C{True}.
Subclasses must also override the C{startWriting} method.
@ivar producer: L{None} if no producer is registered, otherwise the
registered producer.
@ivar producerPaused: A flag indicating whether the producer is currently
paused.
@type producerPaused: L{bool}
@ivar streamingProducer: A flag indicating whether the producer was
registered as a streaming (ie push) producer or not (ie a pull
producer). This will determine whether the consumer may ever need to
pause and resume it, or if it can merely call C{resumeProducing} on it
when buffer space is available.
@ivar streamingProducer: C{bool} or C{int}
"""
producer = None
producerPaused = False
streamingProducer = False
def startWriting(self):
"""
Override in a subclass to cause the reactor to monitor this selectable
for write events. This will be called once in C{unregisterProducer} if
C{loseConnection} has previously been called, so that the connection can
actually close.
"""
raise NotImplementedError("%r did not implement startWriting")
def registerProducer(self, producer, streaming):
"""
Register to receive data from a producer.
This sets this selectable to be a consumer for a producer. When this
selectable runs out of data on a write() call, it will ask the producer
to resumeProducing(). When the FileDescriptor's internal data buffer is
filled, it will ask the producer to pauseProducing(). If the connection
is lost, FileDescriptor calls producer's stopProducing() method.
If streaming is true, the producer should provide the IPushProducer
interface. Otherwise, it is assumed that producer provides the
IPullProducer interface. In this case, the producer won't be asked to
pauseProducing(), but it has to be careful to write() data only when its
resumeProducing() method is called.
"""
if self.producer is not None:
raise RuntimeError(
"Cannot register producer %s, because producer %s was never "
"unregistered." % (producer, self.producer)
)
if self.disconnected:
producer.stopProducing()
else:
self.producer = producer
self.streamingProducer = streaming
if not streaming:
producer.resumeProducing()
def unregisterProducer(self):
"""
Stop consuming data from a producer, without disconnecting.
"""
self.producer = None
if self.connected and self.disconnecting:
self.startWriting()
@implementer(interfaces.ILoggingContext)
class _LogOwner:
    """
    Mixin to help implement L{interfaces.ILoggingContext} for transports which
    have a protocol, the log prefix of which should also appear in the
    transport's log prefix.
    """

    def _getLogPrefix(self, applicationObject: object) -> str:
        """
        Determine the log prefix to use for messages related to
        C{applicationObject}, which may or may not be an
        L{interfaces.ILoggingContext} provider.

        @return: A C{str} giving the log prefix to use.
        """
        if not interfaces.ILoggingContext.providedBy(applicationObject):
            # Non-providers fall back to their class name.
            return applicationObject.__class__.__name__
        return applicationObject.logPrefix()

    def logPrefix(self):
        """
        Override this method to insert custom logging behavior.  Its
        return value will be inserted in front of every line.  It may
        be called more times than the number of output lines.
        """
        return "-"
@implementer(
interfaces.IPushProducer,
interfaces.IReadWriteDescriptor,
interfaces.IConsumer,
interfaces.ITransport,
interfaces.IHalfCloseableDescriptor,
)
class FileDescriptor(_ConsumerMixin, _LogOwner):
"""
An object which can be operated on by select().
This is an abstract superclass of all objects which may be notified when
they are readable or writable; e.g. they have a file-descriptor that is
valid to be passed to select(2).
"""
connected = 0
disconnected = 0
disconnecting = 0
_writeDisconnecting = False
_writeDisconnected = False
dataBuffer = b""
offset = 0
SEND_LIMIT = 128 * 1024
def __init__(self, reactor: Optional[interfaces.IReactorFDSet] = None):
"""
@param reactor: An L{IReactorFDSet} provider which this descriptor will
use to get readable and writeable event notifications. If no value
is given, the global reactor will be used.
"""
if not reactor:
from twisted.internet import reactor as _reactor
reactor = _reactor # type: ignore[assignment]
self.reactor = reactor
# will be added to dataBuffer in doWrite
self._tempDataBuffer: List[bytes] = []
self._tempDataLen = 0
    def connectionLost(self, reason):
        """The connection was lost.

        This is called when the connection on a selectable object has been
        lost.  It will be called whether the connection was closed explicitly,
        an exception occurred in an event handler, or the other end of the
        connection closed it first.

        Clean up state here, but make sure to call back up to FileDescriptor.

        @param reason: a L{failure.Failure} wrapping the cause of the loss.
        """
        self.disconnected = 1
        self.connected = 0
        if self.producer is not None:
            # A producer with no consumer left must be told to stop.
            self.producer.stopProducing()
            self.producer = None
        self.stopReading()
        self.stopWriting()
def writeSomeData(self, data: bytes) -> None:
"""
Write as much as possible of the given data, immediately.
This is called to invoke the lower-level writing functionality, such
as a socket's send() method, or a file's write(); this method
returns an integer or an exception. If an integer, it is the number
of bytes written (possibly zero); if an exception, it indicates the
connection was lost.
"""
raise NotImplementedError(
"%s does not implement writeSomeData" % reflect.qual(self.__class__)
)
def doRead(self):
"""
Called when data is available for reading.
Subclasses must override this method. The result will be interpreted
in the same way as a result of doWrite().
"""
raise NotImplementedError(
"%s does not implement doRead" % reflect.qual(self.__class__)
)
    def doWrite(self):
        """
        Called when data can be written.

        @return: L{None} on success, an exception or a negative integer on
            failure.

        @see: L{twisted.internet.interfaces.IWriteDescriptor.doWrite}.
        """
        if len(self.dataBuffer) - self.offset < self.SEND_LIMIT:
            # If there is currently less than SEND_LIMIT bytes left to send
            # in the string, extend it with the array data.
            self.dataBuffer = _concatenate(
                self.dataBuffer, self.offset, self._tempDataBuffer
            )
            self.offset = 0
            self._tempDataBuffer = []
            self._tempDataLen = 0
        # Send as much data as you can.  ``l`` is the number of bytes written
        # or an Exception indicating the connection was lost.
        if self.offset:
            l = self.writeSomeData(lazyByteSlice(self.dataBuffer, self.offset))
        else:
            l = self.writeSomeData(self.dataBuffer)
        # There is no writeSomeData implementation in Twisted which returns
        # < 0, but the documentation for writeSomeData used to claim negative
        # integers meant connection lost.  Keep supporting this here,
        # although it may be worth deprecating and removing at some point.
        if isinstance(l, Exception) or l < 0:
            return l
        self.offset += l
        # If there is nothing left to send,
        if self.offset == len(self.dataBuffer) and not self._tempDataLen:
            self.dataBuffer = b""
            self.offset = 0
            # stop writing.
            self.stopWriting()
            # If I've got a producer who is supposed to supply me with data,
            if self.producer is not None and (
                (not self.streamingProducer) or self.producerPaused
            ):
                # tell them to supply some more.
                self.producerPaused = False
                self.producer.resumeProducing()
            elif self.disconnecting:
                # But if I was previously asked to let the connection die, do
                # so.
                return self._postLoseConnection()
            elif self._writeDisconnecting:
                # I was previously asked to half-close the connection.  We
                # set _writeDisconnected before calling handler, in case the
                # handler calls loseConnection(), which will want to check for
                # this attribute.
                self._writeDisconnected = True
                result = self._closeWriteConnection()
                return result
        return None
def _postLoseConnection(self):
"""Called after a loseConnection(), when all data has been written.
Whatever this returns is then returned by doWrite.
"""
# default implementation, telling reactor we're finished
return main.CONNECTION_DONE
    def _closeWriteConnection(self):
        # Override in subclasses to actually shut down the write side of the
        # underlying connection; the base implementation does nothing.
        pass
    def writeConnectionLost(self, reason):
        # In current code this should never be called: losing the write side
        # is treated as losing the whole connection.
        self.connectionLost(reason)
    def readConnectionLost(self, reason: failure.Failure) -> None:
        # Override in subclasses that support half-close; by default, losing
        # the read side loses the whole connection.
        self.connectionLost(reason)
    def getHost(self):
        """
        See L{interfaces.ITransport.getHost}; concrete transports must
        override this.
        """
        # ITransport.getHost
        raise NotImplementedError()
    def getPeer(self):
        """
        See L{interfaces.ITransport.getPeer}; concrete transports must
        override this.
        """
        # ITransport.getPeer
        raise NotImplementedError()
def _isSendBufferFull(self):
"""
Determine whether the user-space send buffer for this transport is full
or not.
When the buffer contains more than C{self.bufferSize} bytes, it is
considered full. This might be improved by considering the size of the
kernel send buffer and how much of it is free.
@return: C{True} if it is full, C{False} otherwise.
"""
return len(self.dataBuffer) + self._tempDataLen > self.bufferSize
def _maybePauseProducer(self):
"""
Possibly pause a producer, if there is one and the send buffer is full.
"""
# If we are responsible for pausing our producer,
if self.producer is not None and self.streamingProducer:
# and our buffer is full,
if self._isSendBufferFull():
# pause it.
self.producerPaused = True
self.producer.pauseProducing()
def write(self, data: bytes) -> None:
"""Reliably write some data.
The data is buffered until the underlying file descriptor is ready
for writing. If there is more than C{self.bufferSize} data in the
buffer and this descriptor has a registered streaming producer, its
C{pauseProducing()} method will be called.
"""
_dataMustBeBytes(data)
if not self.connected or self._writeDisconnected:
return
if data:
self._tempDataBuffer.append(data)
self._tempDataLen += len(data)
self._maybePauseProducer()
self.startWriting()
def writeSequence(self, iovec: Iterable[bytes]) -> None:
"""
Reliably write a sequence of data.
Currently, this is a convenience method roughly equivalent to::
for chunk in iovec:
fd.write(chunk)
It may have a more efficient implementation at a later time or in a
different reactor.
As with the C{write()} method, if a buffer size limit is reached and a
streaming producer is registered, it will be paused until the buffered
data is written to the underlying file descriptor.
"""
for i in iovec:
_dataMustBeBytes(i)
if not self.connected or not iovec or self._writeDisconnected:
return
self._tempDataBuffer.extend(iovec)
for i in iovec:
self._tempDataLen += len(i)
self._maybePauseProducer()
self.startWriting()
def loseConnection(self):
"""Close the connection at the next available opportunity.
Call this to cause this FileDescriptor to lose its connection. It will
first write any data that it has buffered.
If there is data buffered yet to be written, this method will cause the
transport to lose its connection as soon as it's done flushing its
write buffer. If you have a producer registered, the connection won't
be closed until the producer is finished. Therefore, make sure you
unregister your producer when it's finished, or the connection will
never close.
"""
if self.connected and not self.disconnecting:
if self._writeDisconnected:
# doWrite won't trigger the connection close anymore
self.stopReading()
self.stopWriting()
self.connectionLost(failure.Failure(main.CONNECTION_DONE))
else:
self.stopReading()
self.startWriting()
self.disconnecting = 1
def loseWriteConnection(self):
self._writeDisconnecting = True
self.startWriting()
def stopReading(self):
"""Stop waiting for read availability.
Call this to remove this selectable from being notified when it is
ready for reading.
"""
self.reactor.removeReader(self)
def stopWriting(self):
"""Stop waiting for write availability.
Call this to remove this selectable from being notified when it is ready
for writing.
"""
self.reactor.removeWriter(self)
def startReading(self):
"""Start waiting for read availability."""
self.reactor.addReader(self)
def startWriting(self):
"""Start waiting for write availability.
Call this to have this FileDescriptor be notified whenever it is ready for
writing.
"""
self.reactor.addWriter(self)
# Producer/consumer implementation
# first, the consumer stuff. This requires no additional work, as
# any object you can write to can be a consumer, really.
producer = None
bufferSize = 2**2**2**2
def stopConsuming(self):
"""Stop consuming data.
This is called when a producer has lost its connection, to tell the
consumer to go lose its connection (and break potential circular
references).
"""
self.unregisterProducer()
self.loseConnection()
# producer interface implementation
def resumeProducing(self):
if self.connected and not self.disconnecting:
self.startReading()
def pauseProducing(self):
self.stopReading()
def stopProducing(self):
self.loseConnection()
def fileno(self):
"""File Descriptor number for select().
This method must be overridden or assigned in subclasses to
indicate a valid file descriptor for the operating system.
"""
return -1
def isIPAddress(addr: str, family: int = AF_INET) -> bool:
    """
    Determine whether the given string represents an IP address of the given
    family; by default, an IPv4 address.

    @param addr: A string (or ASCII-decodable byte string) which may or may
        not be the textual representation of an address of the given family.

    @param family: The address family to test for; one of the C{AF_*}
        constants from the L{socket} module.  (This parameter has only been
        available since Twisted 17.1.0; previously L{isIPAddress} could only
        test for IPv4 addresses.)

    @return: C{True} if C{addr} represents an address of the given family,
        C{False} otherwise.

    @raise ValueError: if C{family} is neither C{AF_INET} nor C{AF_INET6}.
    """
    if isinstance(addr, bytes):  # type: ignore[unreachable]
        try:  # type: ignore[unreachable]
            addr = addr.decode("ascii")
        except UnicodeDecodeError:
            # Non-ASCII bytes can never spell an IP address.
            return False

    if family == AF_INET6:
        # On some platforms, inet_ntop fails unless the scope ID is valid;
        # this is a test of whether the string *is* an IP address, so drop
        # any scope ID before checking.
        addr = addr.split("%", 1)[0]
    elif family == AF_INET:
        # On Windows, where 3.5+ implement inet_pton, "0" is considered a
        # valid IPv4 address; insist on the full dotted quad.
        if addr.count(".") != 3:
            return False
    else:
        raise ValueError(f"unknown address family {family!r}")

    try:
        # Native inet_pton, or the fallback from twisted.python.compat.
        inet_pton(family, addr)
    except (ValueError, OSError):
        return False
    return True
def isIPv6Address(addr: str) -> bool:
    """
    Determine whether the given string represents an IPv6 address.

    @param addr: A string which may or may not be the hex representation of
        an IPv6 address.
    @type addr: C{str}

    @return: C{True} if C{addr} represents an IPv6 address, C{False}
        otherwise.
    @rtype: C{bool}
    """
    return isIPAddress(addr, family=AF_INET6)
# Explicit public API of this module.
__all__ = ["FileDescriptor", "isIPAddress", "isIPv6Address"]

View File

@@ -0,0 +1,182 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Address objects for network connections.
"""
import os
from typing import Optional, Union
from warnings import warn
from zope.interface import implementer
import attr
from typing_extensions import Literal
from twisted.internet.interfaces import IAddress
from twisted.python.filepath import _asFilesystemBytes, _coerceToFilesystemEncoding
from twisted.python.runtime import platform
@implementer(IAddress)
@attr.s(unsafe_hash=True, auto_attribs=True)
class IPv4Address:
    """
    An L{IPv4Address} represents the address of an IPv4 socket endpoint.

    Instances are hashable and compare by value (via C{attrs}).

    @ivar type: A string describing the type of transport, either 'TCP' or
        'UDP'.

    @ivar host: A string containing a dotted-quad IPv4 address; for example,
        "127.0.0.1".
    @type host: C{str}

    @ivar port: An integer representing the port number.
    @type port: C{int}
    """

    # The validator rejects any transport type other than "TCP"/"UDP" at
    # construction time.
    type: Union[Literal["TCP"], Literal["UDP"]] = attr.ib(
        validator=attr.validators.in_(["TCP", "UDP"])
    )
    host: str
    port: int
@implementer(IAddress)
@attr.s(unsafe_hash=True, auto_attribs=True)
class IPv6Address:
    """
    An L{IPv6Address} represents the address of an IPv6 socket endpoint.

    Instances are hashable and compare by value (via C{attrs}).

    @ivar type: A string describing the type of transport, either 'TCP' or
        'UDP'.

    @ivar host: A string containing a colon-separated, hexadecimal formatted
        IPv6 address; for example, "::1".
    @type host: C{str}

    @ivar port: An integer representing the port number.
    @type port: C{int}

    @ivar flowInfo: the IPv6 flow label.  This can be used by QoS routers to
        identify flows of traffic; you may generally safely ignore it.
    @type flowInfo: L{int}

    @ivar scopeID: the IPv6 scope identifier - roughly analogous to what
        interface traffic destined for this address must be transmitted over.
    @type scopeID: L{int} or L{str}
    """

    # The validator rejects any transport type other than "TCP"/"UDP" at
    # construction time.
    type: Union[Literal["TCP"], Literal["UDP"]] = attr.ib(
        validator=attr.validators.in_(["TCP", "UDP"])
    )
    host: str
    port: int
    flowInfo: int = 0
    scopeID: Union[str, int] = 0
@implementer(IAddress)
class _ProcessAddress:
    """
    An L{interfaces.IAddress} provider for process transports.

    A process has no host or port, so this is a bare marker class carrying
    no attributes.
    """
@attr.s(unsafe_hash=True, auto_attribs=True)
@implementer(IAddress)
class HostnameAddress:
    """
    A L{HostnameAddress} represents the address of a L{HostnameEndpoint}.

    Instances are hashable and compare by value (via C{attrs}).

    @ivar hostname: A hostname byte string; for example, b"example.com".
    @type hostname: L{bytes}

    @ivar port: An integer representing the port number.
    @type port: L{int}
    """

    # Note: the hostname is kept as bytes, not str.
    hostname: bytes
    port: int
@attr.s(unsafe_hash=False, repr=False, eq=False, auto_attribs=True)
@implementer(IAddress)
class UNIXAddress:
    """
    Object representing a UNIX socket endpoint.

    Equality and hashing are hand-written (C{attrs} generation is disabled)
    so that two different paths referring to the same socket file can still
    compare equal via C{os.path.samefile}.

    @ivar name: The filename associated with this socket.
    @type name: C{bytes}
    """

    # Coerced to filesystem-encoded bytes; None means an unnamed socket.
    name: Optional[bytes] = attr.ib(
        converter=attr.converters.optional(_asFilesystemBytes)
    )

    if getattr(os.path, "samefile", None) is not None:

        def __eq__(self, other: object) -> bool:
            """
            Overriding C{attrs} to ensure the os level samefile
            check is done if the name attributes do not match.
            """
            if not isinstance(other, self.__class__):
                return NotImplemented
            res = self.name == other.name
            if not res and self.name and other.name:
                try:
                    return os.path.samefile(self.name, other.name)
                except OSError:
                    # Either path may simply not exist; fall through to the
                    # plain name comparison result.
                    pass
                except (TypeError, ValueError) as e:
                    # On Linux, abstract namespace UNIX sockets start with a
                    # \0, which os.path doesn't like.
                    if not platform.isLinux():
                        raise e
            return res

    else:
        # Platform without os.path.samefile: plain name equality only.
        def __eq__(self, other: object) -> bool:
            if isinstance(other, self.__class__):
                return self.name == other.name
            return NotImplemented

    def __repr__(self) -> str:
        name = self.name
        show = _coerceToFilesystemEncoding("", name) if name is not None else None
        return f"UNIXAddress({show!r})"

    def __hash__(self):
        # Hash by the file's identity (inode, device) when the path can be
        # stat'd, so two paths to the same socket hash alike - consistent
        # with __eq__'s samefile check; otherwise fall back to the raw name.
        if self.name is None:
            return hash((self.__class__, None))
        try:
            s1 = os.stat(self.name)
            return hash((s1.st_ino, s1.st_dev))
        except OSError:
            return hash(self.name)
# These are for buildFactory backwards compatibility due to
# stupidity-induced inconsistency.
class _ServerFactoryIPv4Address(IPv4Address):
    """Backwards compatibility hack. Just like IPv4Address in practice."""

    # NOTE(review): defining __eq__ without __hash__ makes instances of this
    # subclass unhashable (Python sets __hash__ to None), unlike its parent.
    # Presumably acceptable here - confirm these are never used as dict keys.
    def __eq__(self, other: object) -> bool:
        if isinstance(other, tuple):
            # Legacy callers compared addresses against (host, port) tuples;
            # keep supporting that, but warn.
            warn(
                "IPv4Address.__getitem__ is deprecated. " "Use attributes instead.",
                category=DeprecationWarning,
                stacklevel=2,
            )
            return (self.host, self.port) == other
        elif isinstance(other, IPv4Address):
            # Compare by value against any IPv4Address, not just this
            # subclass.
            a = (self.type, self.host, self.port)
            b = (other.type, other.host, other.port)
            return a == b
        return NotImplemented

View File

@@ -0,0 +1,307 @@
# -*- test-case-name: twisted.test.test_internet -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
asyncio-based reactor implementation.
"""
import errno
import sys
from asyncio import AbstractEventLoop, get_event_loop
from typing import Dict, Optional, Type
from zope.interface import implementer
from twisted.internet.abstract import FileDescriptor
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet.posixbase import (
_NO_FILEDESC,
PosixReactorBase,
_ContinuousPolling,
)
from twisted.logger import Logger
from twisted.python.log import callWithLogger
@implementer(IReactorFDSet)
class AsyncioSelectorReactor(PosixReactorBase):
    """
    Reactor running on top of L{asyncio.SelectorEventLoop}.

    On POSIX platforms, the default event loop is
    L{asyncio.SelectorEventLoop}.

    On Windows, the default event loop on Python 3.7 and older
    is C{asyncio.WindowsSelectorEventLoop}, but on Python 3.8 and newer
    the default event loop is C{asyncio.WindowsProactorEventLoop} which
    is incompatible with L{AsyncioSelectorReactor}.

    Applications that use L{AsyncioSelectorReactor} on Windows
    with Python 3.8+ must call
    C{asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())}
    before instantiating and running L{AsyncioSelectorReactor}.
    """

    _asyncClosed = False
    _log = Logger()

    def __init__(self, eventloop: Optional[AbstractEventLoop] = None):
        """
        @param eventloop: The asyncio event loop to wrap; when not given,
            the global loop is used.  Proactor loops are rejected, since
            they cannot watch arbitrary file descriptors.
        """
        if eventloop is None:
            _eventloop: AbstractEventLoop = get_event_loop()
        else:
            _eventloop = eventloop

        # On Python 3.8+, asyncio.get_event_loop() on
        # Windows was changed to return a ProactorEventLoop
        # unless the loop policy has been changed.
        if sys.platform == "win32":
            from asyncio import ProactorEventLoop

            if isinstance(_eventloop, ProactorEventLoop):
                raise TypeError(
                    f"ProactorEventLoop is not supported, got: {_eventloop}"
                )

        self._asyncioEventloop: AbstractEventLoop = _eventloop
        # Each maps a registered descriptor *instance* to the fileno it was
        # registered under, so it can still be unregistered after fileno()
        # starts returning -1.
        self._writers: Dict[FileDescriptor, int] = {}
        self._readers: Dict[FileDescriptor, int] = {}
        # Fallback poller for descriptors the platform selector rejects
        # (e.g. regular files under epoll).
        self._continuousPolling = _ContinuousPolling(self)

        # Absolute loop time at which _onTimer is scheduled, or None.
        self._scheduledAt = None
        self._timerHandle = None

        super().__init__()

    def _unregisterFDInAsyncio(self, fd):
        """
        Compensate for a bug in asyncio where it will not unregister a FD that
        it cannot handle in the epoll loop. It touches internal asyncio code.

        A description of the bug by markrwilliams:

        The C{add_writer} method of asyncio event loops isn't atomic because
        all the Selector classes in the selector module internally record a
        file object before passing it to the platform's selector
        implementation. If the platform's selector decides the file object
        isn't acceptable, the resulting exception doesn't cause the Selector to
        un-track the file object.

        The failing/hanging stdio test goes through the following sequence of
        events (roughly):

        * The first C{connection.write(intToByte(value))} call hits the asyncio
        reactor's C{addWriter} method.

        * C{addWriter} calls the asyncio loop's C{add_writer} method, which
        happens to live on C{_BaseSelectorEventLoop}.

        * The asyncio loop's C{add_writer} method checks if the file object has
        been registered before via the selector's C{get_key} method.

        * It hasn't, so the KeyError block runs and calls the selector's
        register method

        * Code examples that follow use EpollSelector, but the code flow holds
        true for any other selector implementation. The selector's register
        method first calls through to the next register method in the MRO

        * That next method is always C{_BaseSelectorImpl.register} which
        creates a C{SelectorKey} instance for the file object, stores it under
        the file object's file descriptor, and then returns it.

        * Control returns to the concrete selector implementation, which asks
        the operating system to track the file descriptor using the right API.

        * The operating system refuses! An exception is raised that, in this
        case, the asyncio reactor handles by creating a C{_ContinuousPolling}
        object to watch the file descriptor.

        * The second C{connection.write(intToByte(value))} call hits the
        asyncio reactor's C{addWriter} method, which hits the C{add_writer}
        method. But the loop's selector's get_key method now returns a
        C{SelectorKey}! Now the asyncio reactor's C{addWriter} method thinks
        the asyncio loop will watch the file descriptor, even though it won't.
        """
        try:
            self._asyncioEventloop._selector.unregister(fd)
        except BaseException:
            # Best-effort cleanup: the FD may never have been registered.
            pass

    def _readOrWrite(self, selectable, read):
        """
        Dispatch a readiness notification to C{selectable.doRead} or
        C{doWrite}, disconnecting it if the call fails or the descriptor
        is gone.
        """
        method = selectable.doRead if read else selectable.doWrite

        if selectable.fileno() == -1:
            self._disconnectSelectable(selectable, _NO_FILEDESC, read)
            return

        try:
            why = method()
        except Exception as e:
            why = e
            self._log.failure(None)
        if why:
            self._disconnectSelectable(selectable, why, read)

    def addReader(self, reader):
        # Implement L{IReactorFDSet.addReader}; idempotent.
        if reader in self._readers.keys() or reader in self._continuousPolling._readers:
            return

        fd = reader.fileno()
        try:
            self._asyncioEventloop.add_reader(
                fd, callWithLogger, reader, self._readOrWrite, reader, True
            )
            self._readers[reader] = fd
        except OSError as e:
            self._unregisterFDInAsyncio(fd)
            if e.errno == errno.EPERM:
                # epoll(7) doesn't support certain file descriptors,
                # e.g. filesystem files, so for those we just poll
                # continuously:
                self._continuousPolling.addReader(reader)
            else:
                raise

    def addWriter(self, writer):
        # Implement L{IReactorFDSet.addWriter}; idempotent.
        if writer in self._writers.keys() or writer in self._continuousPolling._writers:
            return

        fd = writer.fileno()
        try:
            self._asyncioEventloop.add_writer(
                fd, callWithLogger, writer, self._readOrWrite, writer, False
            )
            self._writers[writer] = fd
        except PermissionError:
            self._unregisterFDInAsyncio(fd)
            # epoll(7) doesn't support certain file descriptors,
            # e.g. filesystem files, so for those we just poll
            # continuously:
            self._continuousPolling.addWriter(writer)
        except BrokenPipeError:
            # The kqueuereactor will raise this if there is a broken pipe
            self._unregisterFDInAsyncio(fd)
        except BaseException:
            self._unregisterFDInAsyncio(fd)
            raise

    def removeReader(self, reader):
        # Implement L{IReactorFDSet.removeReader}.

        # First, see if they're trying to remove a reader that we don't have.
        if not (
            reader in self._readers.keys() or self._continuousPolling.isReading(reader)
        ):
            # We don't have it, so just return OK.
            return

        # If it was a cont. polling reader, check there first.
        if self._continuousPolling.isReading(reader):
            self._continuousPolling.removeReader(reader)
            return

        fd = reader.fileno()
        if fd == -1:
            # If the FD is -1, we want to know what its original FD was, to
            # remove it.
            fd = self._readers.pop(reader)
        else:
            self._readers.pop(reader)

        self._asyncioEventloop.remove_reader(fd)

    def removeWriter(self, writer):
        # Implement L{IReactorFDSet.removeWriter}.

        # First, see if they're trying to remove a writer that we don't have.
        if not (
            writer in self._writers.keys() or self._continuousPolling.isWriting(writer)
        ):
            # We don't have it, so just return OK.
            return

        # If it was a cont. polling writer, check there first.
        if self._continuousPolling.isWriting(writer):
            self._continuousPolling.removeWriter(writer)
            return

        fd = writer.fileno()
        if fd == -1:
            # If the FD is -1, we want to know what its original FD was, to
            # remove it.
            fd = self._writers.pop(writer)
        else:
            self._writers.pop(writer)

        self._asyncioEventloop.remove_writer(fd)

    def removeAll(self):
        # Remove every watched descriptor (both selector-based and
        # continuously-polled) and return them all.
        return (
            self._removeAll(self._readers.keys(), self._writers.keys())
            + self._continuousPolling.removeAll()
        )

    def getReaders(self):
        # All currently watched readers, from both mechanisms.
        return list(self._readers.keys()) + self._continuousPolling.getReaders()

    def getWriters(self):
        # All currently watched writers, from both mechanisms.
        return list(self._writers.keys()) + self._continuousPolling.getWriters()

    def iterate(self, timeout):
        # Run the loop for roughly `timeout` seconds by scheduling a stop;
        # the extra 0.01s gives pending callbacks a chance to run.
        self._asyncioEventloop.call_later(timeout + 0.01, self._asyncioEventloop.stop)
        self._asyncioEventloop.run_forever()

    def run(self, installSignalHandlers=True):
        self.startRunning(installSignalHandlers=installSignalHandlers)
        self._asyncioEventloop.run_forever()
        if self._justStopped:
            self._justStopped = False

    def stop(self):
        super().stop()
        # This will cause runUntilCurrent which in its turn
        # will call fireSystemEvent("shutdown")
        self.callLater(0, lambda: None)

    def crash(self):
        # Stop immediately, without the graceful shutdown event.
        super().crash()
        self._asyncioEventloop.stop()

    def _onTimer(self):
        # Fired by the asyncio loop: run Twisted's due calls, then schedule
        # the next wakeup.
        self._scheduledAt = None
        self.runUntilCurrent()
        self._reschedule()

    def _reschedule(self):
        # (Re)arm the asyncio timer for Twisted's next pending timed call,
        # cancelling any previously armed one.
        timeout = self.timeout()
        if timeout is not None:
            abs_time = self._asyncioEventloop.time() + timeout
            self._scheduledAt = abs_time
            if self._timerHandle is not None:
                self._timerHandle.cancel()
            self._timerHandle = self._asyncioEventloop.call_at(abs_time, self._onTimer)

    def _moveCallLaterSooner(self, tple):
        # A delayed call was reset earlier: re-arm the asyncio timer so the
        # loop wakes up in time for it.
        PosixReactorBase._moveCallLaterSooner(self, tple)
        self._reschedule()

    def callLater(self, seconds, f, *args, **kwargs):
        # Schedule with PosixReactorBase's queue, then make sure the asyncio
        # timer fires early enough for the new call.
        dc = PosixReactorBase.callLater(self, seconds, f, *args, **kwargs)
        abs_time = self._asyncioEventloop.time() + self.timeout()
        if self._scheduledAt is None or abs_time < self._scheduledAt:
            self._reschedule()
        return dc

    def callFromThread(self, f, *args, **kwargs):
        # Hop to the loop thread, then defer to callLater(0, ...) so the
        # call goes through Twisted's normal scheduling.
        g = lambda: self.callLater(0, f, *args, **kwargs)
        self._asyncioEventloop.call_soon_threadsafe(g)
def install(eventloop=None):
    """
    Install an asyncio-based reactor.

    @param eventloop: The asyncio eventloop to wrap. If default, the global one
        is selected.
    """
    newReactor = AsyncioSelectorReactor(eventloop)

    # Imported here to avoid a circular import at module load time.
    from twisted.internet.main import installReactor

    installReactor(newReactor)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,587 @@
# -*- test-case-name: twisted.internet.test.test_core -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A reactor for integrating with U{CFRunLoop<http://bit.ly/cfrunloop>}, the
CoreFoundation main loop used by macOS.
This is useful for integrating Twisted with U{PyObjC<http://pyobjc.sf.net/>}
applications.
"""
from __future__ import annotations
__all__ = ["install", "CFReactor"]
import sys
from zope.interface import implementer
from CFNetwork import (
CFSocketCreateRunLoopSource,
CFSocketCreateWithNative,
CFSocketDisableCallBacks,
CFSocketEnableCallBacks,
CFSocketInvalidate,
CFSocketSetSocketFlags,
kCFSocketAutomaticallyReenableReadCallBack,
kCFSocketAutomaticallyReenableWriteCallBack,
kCFSocketConnectCallBack,
kCFSocketReadCallBack,
kCFSocketWriteCallBack,
)
from CoreFoundation import (
CFAbsoluteTimeGetCurrent,
CFRunLoopAddSource,
CFRunLoopAddTimer,
CFRunLoopGetCurrent,
CFRunLoopRemoveSource,
CFRunLoopRun,
CFRunLoopStop,
CFRunLoopTimerCreate,
CFRunLoopTimerInvalidate,
kCFAllocatorDefault,
kCFRunLoopCommonModes,
)
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet.posixbase import _NO_FILEDESC, PosixReactorBase
from twisted.python import log
# We know that we're going to run on macOS so we can just pick the
# POSIX-appropriate waker. This also avoids having a dynamic base class and
# so lets more things get type checked.
from ._signals import _UnixWaker
# Indices into the 2-element read/write state list kept per descriptor in
# CFReactor._fdmap.
_READ = 0
_WRITE = 1
# CFSocket flag bit passed to CFSocketSetSocketFlags so CF does not
# (destructively) retrieve SO_ERROR, which tcp.BaseClient needs to read.
_preserveSOError = 1 << 6
class _WakerPlus(_UnixWaker):
    """
    A waker which, beyond unblocking the main loop, also ensures the pending
    work actually gets processed.

    The normal Twisted waker only needs to wake the loop, because every
    iteration then runs the thread queue, timed calls, and file descriptors
    via L{ReactorBase.runUntilCurrent}.  L{CFReactor} works differently:
    each callback is dispatched individually by the CFRunLoop observer that
    triggered it.  So this waker additionally reschedules the invocation of
    C{runUntilCurrent} to be immediate (0 seconds from now) even if there is
    no timed call work to do.
    """

    def __init__(self, reactor):
        super().__init__()
        self.reactor = reactor

    def doRead(self):
        """
        Wake up the loop and force C{runUntilCurrent} to run immediately in
        the next timed iteration.
        """
        wokeUp = super().doRead()
        # Waking the loop alone is not enough under CFRunLoop; force the
        # pending Twisted work onto the schedule.
        self.reactor._scheduleSimulate()
        return wokeUp
@implementer(IReactorFDSet)
class CFReactor(PosixReactorBase):
"""
The CoreFoundation reactor.
You probably want to use this via the L{install} API.
@ivar _fdmap: a dictionary, mapping an integer (a file descriptor) to a
4-tuple of:
- source: a C{CFRunLoopSource}; the source associated with this
socket.
- socket: a C{CFSocket} wrapping the file descriptor.
- descriptor: an L{IReadDescriptor} and/or L{IWriteDescriptor}
provider.
- read-write: a 2-C{list} of booleans: respectively, whether this
descriptor is currently registered for reading or registered for
writing.
@ivar _idmap: a dictionary, mapping the id() of an L{IReadDescriptor} or
L{IWriteDescriptor} to a C{fd} in L{_fdmap}. Implemented in this
manner so that we don't have to rely (even more) on the hashability of
L{IReadDescriptor} providers, and we know that they won't be collected
since these are kept in sync with C{_fdmap}. Necessary because the
.fileno() of a file descriptor may change at will, so we need to be
able to look up what its file descriptor I{used} to be, so that we can
look it up in C{_fdmap}
@ivar _cfrunloop: the C{CFRunLoop} pyobjc object wrapped
by this reactor.
@ivar _inCFLoop: Is C{CFRunLoopRun} currently running?
@type _inCFLoop: L{bool}
@ivar _currentSimulator: if a CFTimer is currently scheduled with the CF
run loop to run Twisted callLater calls, this is a reference to it.
Otherwise, it is L{None}
"""
def __init__(self, runLoop=None, runner=None):
self._fdmap = {}
self._idmap = {}
if runner is None:
runner = CFRunLoopRun
self._runner = runner
if runLoop is None:
runLoop = CFRunLoopGetCurrent()
self._cfrunloop = runLoop
PosixReactorBase.__init__(self)
def _wakerFactory(self) -> _WakerPlus:
return _WakerPlus(self)
def _socketCallback(
self, cfSocket, callbackType, ignoredAddress, ignoredData, context
):
"""
The socket callback issued by CFRunLoop. This will issue C{doRead} or
C{doWrite} calls to the L{IReadDescriptor} and L{IWriteDescriptor}
registered with the file descriptor that we are being notified of.
@param cfSocket: The C{CFSocket} which has got some activity.
@param callbackType: The type of activity that we are being notified
of. Either C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}.
@param ignoredAddress: Unused, because this is not used for either of
the callback types we register for.
@param ignoredData: Unused, because this is not used for either of the
callback types we register for.
@param context: The data associated with this callback by
C{CFSocketCreateWithNative} (in C{CFReactor._watchFD}). A 2-tuple
of C{(int, CFRunLoopSource)}.
"""
(fd, smugglesrc) = context
if fd not in self._fdmap:
# Spurious notifications seem to be generated sometimes if you
# CFSocketDisableCallBacks in the middle of an event. I don't know
# about this FD, any more, so let's get rid of it.
CFRunLoopRemoveSource(self._cfrunloop, smugglesrc, kCFRunLoopCommonModes)
return
src, skt, readWriteDescriptor, rw = self._fdmap[fd]
def _drdw():
why = None
isRead = False
try:
if readWriteDescriptor.fileno() == -1:
why = _NO_FILEDESC
else:
isRead = callbackType == kCFSocketReadCallBack
# CFSocket seems to deliver duplicate read/write
# notifications sometimes, especially a duplicate
# writability notification when first registering the
# socket. This bears further investigation, since I may
# have been mis-interpreting the behavior I was seeing.
# (Running the full Twisted test suite, while thorough, is
# not always entirely clear.) Until this has been more
# thoroughly investigated , we consult our own
# reading/writing state flags to determine whether we
# should actually attempt a doRead/doWrite first. -glyph
if isRead:
if rw[_READ]:
why = readWriteDescriptor.doRead()
else:
if rw[_WRITE]:
why = readWriteDescriptor.doWrite()
except BaseException:
why = sys.exc_info()[1]
log.err()
if why:
self._disconnectSelectable(readWriteDescriptor, why, isRead)
log.callWithLogger(readWriteDescriptor, _drdw)
def _watchFD(self, fd, descr, flag):
"""
Register a file descriptor with the C{CFRunLoop}, or modify its state
so that it's listening for both notifications (read and write) rather
than just one; used to implement C{addReader} and C{addWriter}.
@param fd: The file descriptor.
@type fd: L{int}
@param descr: the L{IReadDescriptor} or L{IWriteDescriptor}
@param flag: the flag to register for callbacks on, either
C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}
"""
if fd == -1:
raise RuntimeError("Invalid file descriptor.")
if fd in self._fdmap:
src, cfs, gotdescr, rw = self._fdmap[fd]
# do I need to verify that it's the same descr?
else:
ctx = []
ctx.append(fd)
cfs = CFSocketCreateWithNative(
kCFAllocatorDefault,
fd,
kCFSocketReadCallBack
| kCFSocketWriteCallBack
| kCFSocketConnectCallBack,
self._socketCallback,
ctx,
)
CFSocketSetSocketFlags(
cfs,
kCFSocketAutomaticallyReenableReadCallBack
| kCFSocketAutomaticallyReenableWriteCallBack
|
# This extra flag is to ensure that CF doesn't (destructively,
# because destructively is the only way to do it) retrieve
# SO_ERROR and thereby break twisted.internet.tcp.BaseClient,
# which needs SO_ERROR to tell it whether or not it needs to
# call connect_ex a second time.
_preserveSOError,
)
src = CFSocketCreateRunLoopSource(kCFAllocatorDefault, cfs, 0)
ctx.append(src)
CFRunLoopAddSource(self._cfrunloop, src, kCFRunLoopCommonModes)
CFSocketDisableCallBacks(
cfs,
kCFSocketReadCallBack
| kCFSocketWriteCallBack
| kCFSocketConnectCallBack,
)
rw = [False, False]
self._idmap[id(descr)] = fd
self._fdmap[fd] = src, cfs, descr, rw
rw[self._flag2idx(flag)] = True
CFSocketEnableCallBacks(cfs, flag)
def _flag2idx(self, flag):
"""
Convert a C{kCFSocket...} constant to an index into the read/write
state list (C{_READ} or C{_WRITE}) (the 4th element of the value of
C{self._fdmap}).
@param flag: C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}
@return: C{_READ} or C{_WRITE}
"""
return {kCFSocketReadCallBack: _READ, kCFSocketWriteCallBack: _WRITE}[flag]
def _unwatchFD(self, fd, descr, flag):
"""
Unregister a file descriptor with the C{CFRunLoop}, or modify its state
so that it's listening for only one notification (read or write) as
opposed to both; used to implement C{removeReader} and C{removeWriter}.
@param fd: a file descriptor
@type fd: C{int}
@param descr: an L{IReadDescriptor} or L{IWriteDescriptor}
@param flag: C{kCFSocketWriteCallBack} C{kCFSocketReadCallBack}
"""
if id(descr) not in self._idmap:
return
if fd == -1:
# need to deal with it in this case, I think.
realfd = self._idmap[id(descr)]
else:
realfd = fd
src, cfs, descr, rw = self._fdmap[realfd]
CFSocketDisableCallBacks(cfs, flag)
rw[self._flag2idx(flag)] = False
if not rw[_READ] and not rw[_WRITE]:
del self._idmap[id(descr)]
del self._fdmap[realfd]
CFRunLoopRemoveSource(self._cfrunloop, src, kCFRunLoopCommonModes)
CFSocketInvalidate(cfs)
def addReader(self, reader):
"""
Implement L{IReactorFDSet.addReader}.
"""
self._watchFD(reader.fileno(), reader, kCFSocketReadCallBack)
def addWriter(self, writer):
"""
Implement L{IReactorFDSet.addWriter}.
"""
self._watchFD(writer.fileno(), writer, kCFSocketWriteCallBack)
def removeReader(self, reader):
"""
Implement L{IReactorFDSet.removeReader}.
"""
self._unwatchFD(reader.fileno(), reader, kCFSocketReadCallBack)
def removeWriter(self, writer):
"""
Implement L{IReactorFDSet.removeWriter}.
"""
self._unwatchFD(writer.fileno(), writer, kCFSocketWriteCallBack)
def removeAll(self):
"""
Implement L{IReactorFDSet.removeAll}.
"""
allDesc = {descr for src, cfs, descr, rw in self._fdmap.values()}
allDesc -= set(self._internalReaders)
for desc in allDesc:
self.removeReader(desc)
self.removeWriter(desc)
return list(allDesc)
def getReaders(self):
"""
Implement L{IReactorFDSet.getReaders}.
"""
return [descr for src, cfs, descr, rw in self._fdmap.values() if rw[_READ]]
def getWriters(self):
"""
Implement L{IReactorFDSet.getWriters}.
"""
return [descr for src, cfs, descr, rw in self._fdmap.values() if rw[_WRITE]]
def _moveCallLaterSooner(self, tple):
"""
Override L{PosixReactorBase}'s implementation of L{IDelayedCall.reset}
so that it will immediately reschedule. Normally
C{_moveCallLaterSooner} depends on the fact that C{runUntilCurrent} is
always run before the mainloop goes back to sleep, so this forces it to
immediately recompute how long the loop needs to stay asleep.
"""
result = PosixReactorBase._moveCallLaterSooner(self, tple)
self._scheduleSimulate()
return result
def startRunning(self, installSignalHandlers: bool = True) -> None:
"""
Start running the reactor, then kick off the timer that advances
Twisted's clock to keep pace with CFRunLoop's.
"""
super().startRunning(installSignalHandlers)
# Before 'startRunning' is called, the reactor is not attached to the
# CFRunLoop[1]; specifically, the CFTimer that runs all of Twisted's
# timers is not active and will not have been added to the loop by any
# application code. Now that _running is probably[2] True, we need to
# ensure that timed calls will actually run on the main loop. This
# call needs to be here, rather than at the top of mainLoop, because
# it's possible to use startRunning to *attach* a reactor to an
# already-running CFRunLoop, i.e. within a plugin for an application
# that doesn't otherwise use Twisted, rather than calling it via run().
self._scheduleSimulate()
# [1]: readers & writers are still active in the loop, but arguably
# they should not be.
# [2]: application code within a 'startup' system event trigger *may*
# have already crashed the reactor and thus set _started to False,
# but that specific case is handled by mainLoop, since that case
# is inherently irrelevant in an attach-to-application case and is
# only necessary to handle mainLoop spuriously blocking.
_inCFLoop = False
def mainLoop(self) -> None:
    """
    Run the runner (C{CFRunLoopRun} or something that calls it), which runs
    the run loop until C{crash()} is called.

    Loops as long as C{self._started} is true, re-entering the runner if it
    returns without the reactor having been stopped.
    """
    if not self._started:
        # If we arrive here, we were crashed by application code in a
        # 'startup' system event trigger, (or crashed manually before the
        # application calls 'mainLoop' directly for whatever reason; sigh,
        # this method should not be public). However, application code
        # doing obscure things will expect an invocation of this loop to
        # have at least *one* pass over ready readers, writers, and delayed
        # calls. iterate(), in particular, is emulated in exactly this way
        # in this reactor implementation. In order to ensure that we enter
        # the real implementation of the mainloop and do all of those
        # things, we need to set _started back to True so that callLater
        # actually schedules itself against the CFRunLoop, but immediately
        # crash once we are in the context of the loop where we've run
        # ready I/O and timers.

        def docrash() -> None:
            self.crash()

        self._started = True
        self.callLater(0, docrash)
    already = False
    try:
        while self._started:
            if already:
                # Sometimes CFRunLoopRun (or its equivalents) may exit
                # without CFRunLoopStop being called.

                # This is really only *supposed* to happen when it runs out
                # of sources & timers to process.  However, in full Twisted
                # test-suite runs we have observed, extremely rarely (once
                # in every 3000 tests or so) CFRunLoopRun exiting in cases
                # where it seems as though there *is* still some work to
                # do.  However, given the difficulty of reproducing the
                # race conditions necessary to make this happen, it's
                # possible that we have missed some nuance of when
                # CFRunLoop considers the list of work "empty" and various
                # callbacks and timers to be "invalidated".  Therefore we
                # are not fully confident that this is a platform bug, but
                # it is nevertheless unexpected behavior from our reading
                # of the documentation.

                # To accommodate this rare and slightly ambiguous stress
                # case, we make extra sure that our scheduled timer is
                # re-created on the loop as a CFRunLoopTimer, which
                # reliably gives the loop some work to do and 'fixes' it if
                # it exited due to having no active sources or timers.
                self._scheduleSimulate()

                # At this point, there may be a little more code that we
                # would need to put here for full correctness for a very
                # peculiar type of application: if you're writing a
                # command-line tool using CFReactor, adding *nothing* to
                # the reactor itself, disabling even the internal Waker
                # file descriptors, then there's a possibility that
                # CFRunLoopRun will exit early, and if we have no timers,
                # we might busy-loop here.  Because we cannot seem to force
                # this to happen under normal circumstances, we're leaving
                # that code out.
            already = True
            self._inCFLoop = True
            try:
                self._runner()
            finally:
                self._inCFLoop = False
    finally:
        self._stopSimulating()
_currentSimulator: object | None = None
def _stopSimulating(self) -> None:
"""
If we have a CFRunLoopTimer registered with the CFRunLoop, invalidate
it and set it to None.
"""
if self._currentSimulator is None:
return
CFRunLoopTimerInvalidate(self._currentSimulator)
self._currentSimulator = None
def _scheduleSimulate(self) -> None:
    """
    Schedule a call to C{self.runUntilCurrent}. This will cancel the
    currently scheduled call if it is already scheduled.
    """
    self._stopSimulating()
    if not self._started:
        # If the reactor is not running (e.g. we are scheduling callLater
        # calls before starting the reactor) we should not be scheduling
        # CFRunLoopTimers against the global CFRunLoop.
        return
    # runUntilCurrent acts on 3 things: _justStopped to process the
    # side-effect of reactor.stop(), threadCallQueue to handle any calls
    # from threads, and _pendingTimedCalls.
    timeout = 0.0 if (self._justStopped or self.threadCallQueue) else self.timeout()
    if timeout is None:
        # Nothing is pending at all; leave no timer on the loop.
        return
    fireDate = CFAbsoluteTimeGetCurrent() + timeout

    def simulate(cftimer, extra):
        # The timer is one-shot; forget it before running timed calls,
        # which may themselves call _scheduleSimulate() again.
        self._currentSimulator = None
        self.runUntilCurrent()
        self._scheduleSimulate()

    c = self._currentSimulator = CFRunLoopTimerCreate(
        kCFAllocatorDefault, fireDate, 0, 0, 0, simulate, None
    )
    CFRunLoopAddTimer(self._cfrunloop, c, kCFRunLoopCommonModes)
def callLater(self, _seconds, _f, *args, **kw):
    """
    Implement L{IReactorTime.callLater}: delegate scheduling to the base
    class, then refresh the CFRunLoopTimer so the new deadline is honored.
    """
    scheduled = PosixReactorBase.callLater(self, _seconds, _f, *args, **kw)
    # The new call may be the soonest one; re-arm the wakeup timer.
    self._scheduleSimulate()
    return scheduled
def stop(self) -> None:
    """
    Implement L{IReactorCore.stop}.
    """
    PosixReactorBase.stop(self)
    # Re-arm the simulation timer so the loop wakes up promptly and
    # processes the side effects of stopping.
    self._scheduleSimulate()
def crash(self):
    """
    Implement L{IReactorCore.crash}.

    Mark the reactor as stopped via the base class and, if we are
    currently inside the CFRunLoop runner, force the loop to return.
    """
    PosixReactorBase.crash(self)
    if self._inCFLoop:
        CFRunLoopStop(self._cfrunloop)
def iterate(self, delay=0):
    """
    Emulate the behavior of C{iterate()} for things that want to call it,
    by letting the loop run for a little while and then scheduling a timed
    call to exit it.
    """
    # CFRunLoop has no concept of "run one iteration", so approximate one:
    # mark ourselves started, arrange an immediate crash, and enter the
    # main loop; it performs a pass over ready I/O and timers before the
    # crash call exits it.  Using crash and mainLoop here means that it's
    # safe (as safe as anything using "iterate" can be) to do this
    # repeatedly.
    self._started = True
    self.callLater(0, self.crash)
    self.mainLoop()
def install(runLoop=None, runner=None):
    """
    Configure the twisted mainloop to be run inside CFRunLoop.

    @param runLoop: the run loop to use.

    @param runner: the function to call in order to actually invoke the main
        loop.  This will default to C{CFRunLoopRun} if not specified.  However,
        this is not an appropriate choice for GUI applications, as you need to
        run NSApplicationMain (or something like it).  For example, to run the
        Twisted mainloop in a PyObjC application, your C{main.py} should look
        something like this::

            from PyObjCTools import AppHelper
            from twisted.internet.cfreactor import install
            install(runner=AppHelper.runEventLoop)
            # initialize your application
            reactor.run()

    @return: The installed reactor.
    @rtype: C{CFReactor}
    """
    newReactor = CFReactor(runLoop=runLoop, runner=runner)
    from twisted.internet.main import installReactor

    installReactor(newReactor)
    return newReactor

View File

@@ -0,0 +1,55 @@
# -*- test-case-name: twisted.internet.test.test_default -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The most suitable default reactor for the current platform.
Depending on a specific application's needs, some other reactor may in
fact be better.
"""
__all__ = ["install"]
from twisted.python.runtime import platform
def _getInstallFunction(platform):
    """
    Return a function to install the reactor most suited for the given platform.

    @param platform: The platform for which to select a reactor.
    @type platform: L{twisted.python.runtime.Platform}

    @return: A zero-argument callable which will install the selected
        reactor.
    """
    # Linux: epoll(7) is the default, since it scales well.
    #
    # macOS: poll(2) is not exposed by Python because it doesn't support all
    # file descriptors (in particular, lack of PTY support is a problem) --
    # see <http://bugs.python.org/issue5154>. kqueue has the same restrictions
    # as poll(2) as far PTY support goes.
    #
    # Windows: IOCP should eventually be default, but still has some serious
    # bugs, e.g. <http://twistedmatrix.com/trac/ticket/4667>.
    #
    # We therefore choose epoll(7) on Linux, poll(2) on other non-macOS POSIX
    # platforms, and select(2) everywhere else.
    try:
        if platform.isLinux():
            try:
                from twisted.internet.epollreactor import install
            except ImportError:
                # epoll is unavailable on this kernel/build; poll still works.
                from twisted.internet.pollreactor import install
        elif platform.getType() == "posix" and not platform.isMacOSX():
            from twisted.internet.pollreactor import install
        else:
            from twisted.internet.selectreactor import install
    except ImportError:
        # Final fallback: select(2) is available on every supported platform.
        from twisted.internet.selectreactor import install
    return install
install = _getInstallFunction(platform)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,259 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An epoll() based implementation of the twisted main loop.
To install the event loop (and you should do this before any connections,
listeners or connectors are added)::
from twisted.internet import epollreactor
epollreactor.install()
"""
import errno
import select
from zope.interface import implementer
from twisted.internet import posixbase
from twisted.internet.interfaces import IReactorFDSet
from twisted.python import log
try:
# This is to keep mypy from complaining
# We don't use type: ignore[attr-defined] on import, because mypy only complains
# on on some platforms, and then the unused ignore is an issue if the undefined
# attribute isn't.
epoll = getattr(select, "epoll")
EPOLLHUP = getattr(select, "EPOLLHUP")
EPOLLERR = getattr(select, "EPOLLERR")
EPOLLIN = getattr(select, "EPOLLIN")
EPOLLOUT = getattr(select, "EPOLLOUT")
except AttributeError as e:
raise ImportError(e)
@implementer(IReactorFDSet)
class EPollReactor(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
    """
    A reactor that uses epoll(7).

    @ivar _poller: A C{epoll} which will be used to check for I/O
        readiness.

    @ivar _selectables: A dictionary mapping integer file descriptors to
        instances of C{FileDescriptor} which have been registered with the
        reactor.  All C{FileDescriptors} which are currently receiving read or
        write readiness notifications will be present as values in this
        dictionary.

    @ivar _reads: A set containing integer file descriptors.  Values in this
        set will be registered with C{_poller} for read readiness notifications
        which will be dispatched to the corresponding C{FileDescriptor}
        instances in C{_selectables}.

    @ivar _writes: A set containing integer file descriptors.  Values in this
        set will be registered with C{_poller} for write readiness
        notifications which will be dispatched to the corresponding
        C{FileDescriptor} instances in C{_selectables}.

    @ivar _continuousPolling: A L{_ContinuousPolling} instance, used to handle
        file descriptors (e.g. filesystem files) that are not supported by
        C{epoll(7)}.
    """

    # Attributes for _PollLikeMixin
    _POLL_DISCONNECTED = EPOLLHUP | EPOLLERR
    _POLL_IN = EPOLLIN
    _POLL_OUT = EPOLLOUT

    def __init__(self):
        """
        Initialize epoll object, file descriptor tracking dictionaries, and the
        base class.
        """
        # Create the poller we're going to use.  The 1024 here is just a hint
        # to the kernel, it is not a hard maximum.  After Linux 2.6.8, the size
        # argument is completely ignored.
        self._poller = epoll(1024)
        self._reads = set()
        self._writes = set()
        self._selectables = {}
        self._continuousPolling = posixbase._ContinuousPolling(self)
        posixbase.PosixReactorBase.__init__(self)

    def _add(self, xer, primary, other, selectables, event, antievent):
        """
        Private method for adding a descriptor from the event loop.

        It takes care of adding it if new or modifying it if already added
        for another state (read -> read/write for example).
        """
        fd = xer.fileno()
        if fd not in primary:
            flags = event
            # epoll_ctl can raise all kinds of IOErrors, and every one
            # indicates a bug either in the reactor or application-code.
            # Let them all through so someone sees a traceback and fixes
            # something.  We'll do the same thing for every other call to
            # this method in this file.
            if fd in other:
                flags |= antievent
                self._poller.modify(fd, flags)
            else:
                self._poller.register(fd, flags)

            # Update our own tracking state *only* after the epoll call has
            # succeeded.  Otherwise we may get out of sync.
            primary.add(fd)
            selectables[fd] = xer

    def addReader(self, reader):
        """
        Add a FileDescriptor for notification of data available to read.
        """
        try:
            self._add(
                reader, self._reads, self._writes, self._selectables, EPOLLIN, EPOLLOUT
            )
        except OSError as e:
            if e.errno == errno.EPERM:
                # epoll(7) doesn't support certain file descriptors,
                # e.g. filesystem files, so for those we just poll
                # continuously:
                self._continuousPolling.addReader(reader)
            else:
                raise

    def addWriter(self, writer):
        """
        Add a FileDescriptor for notification of data available to write.
        """
        try:
            self._add(
                writer, self._writes, self._reads, self._selectables, EPOLLOUT, EPOLLIN
            )
        except OSError as e:
            if e.errno == errno.EPERM:
                # epoll(7) doesn't support certain file descriptors,
                # e.g. filesystem files, so for those we just poll
                # continuously:
                self._continuousPolling.addWriter(writer)
            else:
                raise

    def _remove(self, xer, primary, other, selectables, event, antievent):
        """
        Private method for removing a descriptor from the event loop.

        It does the inverse job of _add, and also add a check in case of the fd
        has gone away.
        """
        fd = xer.fileno()
        if fd == -1:
            # The descriptor has already been closed; recover the fd it was
            # registered under by searching the tracked selectables.
            for fd, fdes in selectables.items():
                if xer is fdes:
                    break
            else:
                return
        if fd in primary:
            if fd in other:
                # Still wanted for the other event type: downgrade rather
                # than unregister.
                flags = antievent
                # See comment above modify call in _add.
                self._poller.modify(fd, flags)
            else:
                del selectables[fd]
                # See comment above _control call in _add.
                self._poller.unregister(fd)
            primary.remove(fd)

    def removeReader(self, reader):
        """
        Remove a Selectable for notification of data available to read.
        """
        if self._continuousPolling.isReading(reader):
            # It was being continuously polled, not epoll()ed.
            self._continuousPolling.removeReader(reader)
            return
        self._remove(
            reader, self._reads, self._writes, self._selectables, EPOLLIN, EPOLLOUT
        )

    def removeWriter(self, writer):
        """
        Remove a Selectable for notification of data available to write.
        """
        if self._continuousPolling.isWriting(writer):
            # It was being continuously polled, not epoll()ed.
            self._continuousPolling.removeWriter(writer)
            return
        self._remove(
            writer, self._writes, self._reads, self._selectables, EPOLLOUT, EPOLLIN
        )

    def removeAll(self):
        """
        Remove all selectables, and return a list of them.
        """
        return (
            self._removeAll(
                [self._selectables[fd] for fd in self._reads],
                [self._selectables[fd] for fd in self._writes],
            )
            + self._continuousPolling.removeAll()
        )

    def getReaders(self):
        """
        Return the list of registered readers, including any handled by the
        continuous-polling fallback.
        """
        return [
            self._selectables[fd] for fd in self._reads
        ] + self._continuousPolling.getReaders()

    def getWriters(self):
        """
        Return the list of registered writers, including any handled by the
        continuous-polling fallback.
        """
        return [
            self._selectables[fd] for fd in self._writes
        ] + self._continuousPolling.getWriters()

    def doPoll(self, timeout):
        """
        Poll the poller for new events.
        """
        if timeout is None:
            timeout = -1  # Wait indefinitely.

        try:
            # Limit the number of events to the number of io objects we're
            # currently tracking (because that's maybe a good heuristic) and
            # the amount of time we block to the value specified by our
            # caller.
            l = self._poller.poll(timeout, len(self._selectables))
        except OSError as err:
            if err.errno == errno.EINTR:
                # Interrupted by a signal; simply poll again next iteration.
                return
            # See epoll_wait(2) for documentation on the other conditions
            # under which this can fail.  They can only be due to a serious
            # programming error on our part, so let's just announce them
            # loudly.
            raise

        _drdw = self._doReadOrWrite
        for fd, event in l:
            try:
                selectable = self._selectables[fd]
            except KeyError:
                # The selectable was removed while processing this batch of
                # events; nothing to dispatch to.
                pass
            else:
                log.callWithLogger(selectable, _drdw, selectable, fd, event)

    doIteration = doPoll
def install():
    """
    Install the epoll() reactor.
    """
    newReactor = EPollReactor()
    from twisted.internet.main import installReactor

    installReactor(newReactor)
__all__ = ["EPollReactor", "install"]

View File

@@ -0,0 +1,510 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Exceptions and errors for use in twisted.internet modules.
"""
import socket
from incremental import Version
from twisted.python import deprecate
class BindError(Exception):
    __doc__ = MESSAGE = "An error occurred binding to an interface"

    def __str__(self) -> str:
        # Base message, any detail arguments joined by spaces, then a period.
        message = self.MESSAGE
        if self.args:
            detail = " ".join(self.args)
            message = f"{message}: {detail}"
        return f"{message}."
class CannotListenError(BindError):
    """
    Raised by a call to startListening, when the object cannot start
    listening.

    @ivar interface: the interface I tried to listen on
    @ivar port: the port I tried to listen on
    @ivar socketError: the exception I got when I tried to listen
    @type socketError: L{socket.error}
    """

    def __init__(self, interface, port, socketError):
        BindError.__init__(self, interface, port, socketError)
        self.interface = interface
        self.port = port
        self.socketError = socketError

    def __str__(self) -> str:
        # An empty/None interface means we tried to listen on all of them.
        where = self.interface if self.interface else "any"
        return f"Couldn't listen on {where}:{self.port}: {self.socketError}."
class MulticastJoinError(Exception):
"""
An attempt to join a multicast group failed.
"""
class MessageLengthError(Exception):
    __doc__ = MESSAGE = "Message is too long to send"

    def __str__(self) -> str:
        # Base message, optional details, trailing period.
        text = self.MESSAGE
        if self.args:
            text = f"{text}: {' '.join(self.args)}"
        return text + "."
class DNSLookupError(IOError):
    __doc__ = MESSAGE = "DNS lookup failed"

    def __str__(self) -> str:
        # Assemble the message from pieces rather than repeated formatting.
        pieces = [self.MESSAGE]
        if self.args:
            pieces.append(": ")
            pieces.append(" ".join(self.args))
        pieces.append(".")
        return "".join(pieces)
class ConnectInProgressError(Exception):
"""A connect operation was started and isn't done yet."""
# connection errors
class ConnectError(Exception):
    __doc__ = MESSAGE = "An error occurred while connecting"

    def __init__(self, osError=None, string=""):
        # Keep the OS-level error around; the detail string becomes args[0].
        self.osError = osError
        Exception.__init__(self, string)

    def __str__(self) -> str:
        # Join the message, the OS error (if any), and the detail string
        # (if any) with ": " separators, then terminate with a period.
        pieces = [self.MESSAGE]
        if self.osError:
            pieces.append(str(self.osError))
        if self.args[0]:
            pieces.append(str(self.args[0]))
        return ": ".join(pieces) + "."
class ConnectBindError(ConnectError):
__doc__ = MESSAGE = "Couldn't bind"
class UnknownHostError(ConnectError):
__doc__ = MESSAGE = "Hostname couldn't be looked up"
class NoRouteError(ConnectError):
__doc__ = MESSAGE = "No route to host"
class ConnectionRefusedError(ConnectError):
__doc__ = MESSAGE = "Connection was refused by other side"
class TCPTimedOutError(ConnectError):
__doc__ = MESSAGE = "TCP connection timed out"
class BadFileError(ConnectError):
__doc__ = MESSAGE = "File used for UNIX socket is no good"
class ServiceNameUnknownError(ConnectError):
__doc__ = MESSAGE = "Service name given as port is unknown"
class UserError(ConnectError):
__doc__ = MESSAGE = "User aborted connection"
class TimeoutError(UserError):
__doc__ = MESSAGE = "User timeout caused connection failure"
class SSLError(ConnectError):
__doc__ = MESSAGE = "An SSL error occurred"
class VerifyError(Exception):
__doc__ = MESSAGE = "Could not verify something that was supposed to be signed."
class PeerVerifyError(VerifyError):
__doc__ = MESSAGE = "The peer rejected our verify error."
class CertificateError(Exception):
__doc__ = MESSAGE = "We did not find a certificate where we expected to find one."
try:
import errno
errnoMapping = {
errno.ENETUNREACH: NoRouteError,
errno.ECONNREFUSED: ConnectionRefusedError,
errno.ETIMEDOUT: TCPTimedOutError,
}
if hasattr(errno, "WSAECONNREFUSED"):
errnoMapping[errno.WSAECONNREFUSED] = ConnectionRefusedError
errnoMapping[errno.WSAENETUNREACH] = NoRouteError # type: ignore[attr-defined]
except ImportError:
errnoMapping = {}
def getConnectError(e):
    """
    Given a socket exception (or a raw C{(errno, message)} pair), return the
    corresponding L{ConnectError} subclass instance.
    """
    args = e.args if isinstance(e, Exception) else e
    try:
        number, string = args
    except ValueError:
        # Not an (errno, message) pair; wrap the whole thing as the detail.
        return ConnectError(string=e)

    if hasattr(socket, "gaierror") and isinstance(e, socket.gaierror):
        # Only works in 2.2 in newer. Really that means always; #5978 covers
        # this and other weirdnesses in this function.
        klass = UnknownHostError
    else:
        klass = errnoMapping.get(number, ConnectError)
    return klass(number, string)
class ConnectionClosed(Exception):
"""
Connection was closed, whether cleanly or non-cleanly.
"""
class ConnectionLost(ConnectionClosed):
    __doc__ = MESSAGE = """
    Connection to the other side was lost in a non-clean fashion
    """

    def __str__(self) -> str:
        # MESSAGE is a multi-line docstring; only its first stripped line is
        # used for the rendered message.
        parts = self.MESSAGE.strip().splitlines()[:1]
        if self.args:
            parts.append(": ")
            parts.append(" ".join(self.args))
        parts.append(".")
        return "".join(parts)
class ConnectionAborted(ConnectionLost):
"""
Connection was aborted locally, using
L{twisted.internet.interfaces.ITCPTransport.abortConnection}.
@since: 11.1
"""
MESSAGE = "Connection was aborted locally using " "ITCPTransport.abortConnection"
class ConnectionDone(ConnectionClosed):
    __doc__ = MESSAGE = "Connection was closed cleanly"

    def __str__(self) -> str:
        # Base message, optional details, trailing period.
        text = self.MESSAGE
        if self.args:
            text = f"{text}: {' '.join(self.args)}"
        return text + "."
class FileDescriptorOverrun(ConnectionLost):
"""
A mis-use of L{IUNIXTransport.sendFileDescriptor} caused the connection to
be closed.
Each file descriptor sent using C{sendFileDescriptor} must be associated
with at least one byte sent using L{ITransport.write}. If at any point
fewer bytes have been written than file descriptors have been sent, the
connection is closed with this exception.
"""
MESSAGE = (
"A mis-use of IUNIXTransport.sendFileDescriptor caused "
"the connection to be closed."
)
class ConnectionFdescWentAway(ConnectionLost):
__doc__ = MESSAGE = "Uh" # TODO
class AlreadyCalled(ValueError):
    __doc__ = MESSAGE = "Tried to cancel an already-called event"

    def __str__(self) -> str:
        # Base message, optional details, trailing period.
        text = self.MESSAGE
        if self.args:
            text = f"{text}: {' '.join(self.args)}"
        return f"{text}."
class AlreadyCancelled(ValueError):
__doc__ = MESSAGE = "Tried to cancel an already-cancelled event"
def __str__(self) -> str:
s = self.MESSAGE
if self.args:
s = "{}: {}".format(s, " ".join(self.args))
s = "%s." % s
return s
class PotentialZombieWarning(Warning):
"""
Emitted when L{IReactorProcess.spawnProcess} is called in a way which may
result in termination of the created child process not being reported.
Deprecated in Twisted 10.0.
"""
MESSAGE = (
"spawnProcess called, but the SIGCHLD handler is not "
"installed. This probably means you have not yet "
"called reactor.run, or called "
"reactor.run(installSignalHandler=0). You will probably "
"never see this process finish, and it may become a "
"zombie process."
)
deprecate.deprecatedModuleAttribute(
Version("Twisted", 10, 0, 0),
"There is no longer any potential for zombie process.",
__name__,
"PotentialZombieWarning",
)
class ProcessDone(ConnectionDone):
    __doc__ = MESSAGE = "A process has ended without apparent errors"

    def __init__(self, status):
        # A clean exit is, by definition, exit code 0 and no signal.
        Exception.__init__(self, "process finished with exit code 0")
        self.exitCode = 0
        self.signal = None
        self.status = status
class ProcessTerminated(ConnectionLost):
    __doc__ = MESSAGE = """
    A process has ended with a probable error condition

    @ivar exitCode: See L{__init__}
    @ivar signal: See L{__init__}
    @ivar status: See L{__init__}
    """

    def __init__(self, exitCode=None, signal=None, status=None):
        """
        @param exitCode: The exit status of the process.  This is roughly like
            the value you might pass to L{os._exit}.  This is L{None} if the
            process exited due to a signal.
        @type exitCode: L{int} or L{None}

        @param signal: The exit signal of the process.  This is L{None} if the
            process did not exit due to a signal.
        @type signal: L{int} or L{None}

        @param status: The exit code of the process.  This is a platform
            specific combination of the exit code and the exit signal.  See
            L{os.WIFEXITED} and related functions.
        @type status: L{int}
        """
        self.exitCode = exitCode
        self.signal = signal
        self.status = status
        description = "process ended"
        if exitCode is not None:
            description += f" with exit code {exitCode}"
        if signal is not None:
            description += f" by signal {signal}"
        Exception.__init__(self, description)
class ProcessExitedAlready(Exception):
"""
The process has already exited and the operation requested can no longer
be performed.
"""
class NotConnectingError(RuntimeError):
__doc__ = (
MESSAGE
) = "The Connector was not connecting when it was asked to stop connecting"
def __str__(self) -> str:
s = self.MESSAGE
if self.args:
s = "{}: {}".format(s, " ".join(self.args))
s = "%s." % s
return s
class NotListeningError(RuntimeError):
__doc__ = MESSAGE = "The Port was not listening when it was asked to stop listening"
def __str__(self) -> str:
s = self.MESSAGE
if self.args:
s = "{}: {}".format(s, " ".join(self.args))
s = "%s." % s
return s
class ReactorNotRunning(RuntimeError):
"""
Error raised when trying to stop a reactor which is not running.
"""
class ReactorNotRestartable(RuntimeError):
"""
Error raised when trying to run a reactor which was stopped.
"""
class ReactorAlreadyRunning(RuntimeError):
"""
Error raised when trying to start the reactor multiple times.
"""
class ReactorAlreadyInstalledError(AssertionError):
"""
Could not install reactor because one is already installed.
"""
class ConnectingCancelledError(Exception):
    """
    An C{Exception} that will be raised when an L{IStreamClientEndpoint} is
    cancelled before it connects.

    @ivar address: The L{IAddress} that is the destination of the
        cancelled L{IStreamClientEndpoint}.
    """

    def __init__(self, address):
        """
        @param address: The L{IAddress} that is the destination of the
            L{IStreamClientEndpoint} that was cancelled.
        """
        super().__init__(address)
        self.address = address
class NoProtocol(Exception):
"""
An C{Exception} that will be raised when the factory given to a
L{IStreamClientEndpoint} returns L{None} from C{buildProtocol}.
"""
class UnsupportedAddressFamily(Exception):
"""
An attempt was made to use a socket with an address family (eg I{AF_INET},
I{AF_INET6}, etc) which is not supported by the reactor.
"""
class UnsupportedSocketType(Exception):
"""
An attempt was made to use a socket of a type (eg I{SOCK_STREAM},
I{SOCK_DGRAM}, etc) which is not supported by the reactor.
"""
class AlreadyListened(Exception):
"""
An attempt was made to listen on a file descriptor which can only be
listened on once.
"""
class InvalidAddressError(ValueError):
    """
    An invalid address was specified (i.e. neither IPv4 or IPv6, or expected
    one and got the other).

    @ivar address: See L{__init__}
    @ivar message: See L{__init__}
    """

    def __init__(self, address, message):
        """
        @param address: The address that was provided.
        @type address: L{bytes}

        @param message: A native string of additional information provided by
            the calling context.
        @type message: L{str}
        """
        self.address = address
        self.message = message
__all__ = [
"BindError",
"CannotListenError",
"MulticastJoinError",
"MessageLengthError",
"DNSLookupError",
"ConnectInProgressError",
"ConnectError",
"ConnectBindError",
"UnknownHostError",
"NoRouteError",
"ConnectionRefusedError",
"TCPTimedOutError",
"BadFileError",
"ServiceNameUnknownError",
"UserError",
"TimeoutError",
"SSLError",
"VerifyError",
"PeerVerifyError",
"CertificateError",
"getConnectError",
"ConnectionClosed",
"ConnectionLost",
"ConnectionDone",
"ConnectionFdescWentAway",
"AlreadyCalled",
"AlreadyCancelled",
"PotentialZombieWarning",
"ProcessDone",
"ProcessTerminated",
"ProcessExitedAlready",
"NotConnectingError",
"NotListeningError",
"ReactorNotRunning",
"ReactorAlreadyRunning",
"ReactorAlreadyInstalledError",
"ConnectingCancelledError",
"UnsupportedAddressFamily",
"UnsupportedSocketType",
"InvalidAddressError",
]

View File

@@ -0,0 +1,121 @@
# -*- test-case-name: twisted.test.test_fdesc -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utility functions for dealing with POSIX file descriptors.
"""
import errno
import os
try:
import fcntl as _fcntl
except ImportError:
fcntl = None
else:
fcntl = _fcntl
# twisted imports
from twisted.internet.main import CONNECTION_DONE, CONNECTION_LOST
def setNonBlocking(fd):
    """
    Switch the given file descriptor into non-blocking mode by setting
    C{O_NONBLOCK} in its status flags.
    """
    current = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, current | os.O_NONBLOCK)
def setBlocking(fd):
    """
    Switch the given file descriptor into blocking mode by clearing
    C{O_NONBLOCK} from its status flags.
    """
    current = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, current & ~os.O_NONBLOCK)
if fcntl is None:
    # fcntl isn't available on Windows.  By default, handles aren't
    # inherited on Windows, so we can do nothing here.
    _setCloseOnExec = _unsetCloseOnExec = lambda fd: None
else:

    def _setCloseOnExec(fd):
        """
        Make a file descriptor close-on-exec.
        """
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        flags = flags | fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)

    def _unsetCloseOnExec(fd):
        """
        Make a file descriptor I{not} close-on-exec, i.e. clear its
        C{FD_CLOEXEC} flag so it is inherited across C{exec}.
        """
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        flags = flags & ~fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def readFromFD(fd, callback):
    """
    Read from file descriptor, calling callback with resulting data.

    If data is read, 'callback' is invoked with that data as its single
    argument; exceptions raised by the callback are the caller's problem.
    If the descriptor is still connected but no data is available, None is
    returned and the callback is not called.

    @type fd: C{int}
    @param fd: non-blocking file descriptor to be read from.
    @param callback: a callable which accepts a single argument.

    @return: CONNECTION_LOST on error, CONNECTION_DONE when fd is
        closed, otherwise None.
    """
    try:
        data = os.read(fd, 8192)
    except OSError as err:
        # A retryable error means "no data right now", not a failure.
        if err.args[0] in (errno.EAGAIN, errno.EINTR):
            return None
        return CONNECTION_LOST
    if data:
        callback(data)
        return None
    # Zero bytes from a readable fd means end-of-file.
    return CONNECTION_DONE
def writeToFD(fd, data):
    """
    Write data to file descriptor.

    Returns same thing FileDescriptor.writeSomeData would.

    @type fd: C{int}
    @param fd: non-blocking file descriptor to be written to.
    @type data: C{str} or C{buffer}
    @param data: bytes to write to fd.

    @return: number of bytes written, or CONNECTION_LOST.
    """
    try:
        count = os.write(fd, data)
    except OSError as err:
        # A retryable error counts as "wrote nothing", not a failure.
        if err.errno in (errno.EAGAIN, errno.EINTR):
            return 0
        return CONNECTION_LOST
    return count
__all__ = ["setNonBlocking", "setBlocking", "readFromFD", "writeToFD"]

View File

@@ -0,0 +1,122 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with the glib
mainloop via GObject Introspection.
In order to use this support, simply do the following::
from twisted.internet import gireactor
gireactor.install()
If you wish to use a GApplication, register it with the reactor::
from twisted.internet import reactor
reactor.registerGApplication(app)
Then use twisted.internet APIs as usual.
On Python 3, pygobject v3.4 or later is required.
"""
from typing import Union
from gi.repository import GLib
from twisted.internet import _glibbase
from twisted.internet.error import ReactorAlreadyRunning
from twisted.python import runtime
if getattr(GLib, "threads_init", None) is not None:
GLib.threads_init()
class GIReactor(_glibbase.GlibReactorBase):
    """
    GObject-introspection event loop reactor.

    @ivar _gapplication: A C{Gio.Application} instance that was registered
        with C{registerGApplication}.
    """

    # By default no Application is registered:
    _gapplication = None

    def __init__(self, useGtk=False):
        # NOTE(review): useGtk is accepted for signature compatibility with
        # the other glib-based reactors but is not used by this one.
        _glibbase.GlibReactorBase.__init__(self, GLib, None)

    def registerGApplication(self, app):
        """
        Register a C{Gio.Application} or C{Gtk.Application}, whose main loop
        will be used instead of the default one.

        We will C{hold} the application so it doesn't exit on its own.  In
        versions of C{python-gi} 3.2 and later, we exit the event loop using
        the C{app.quit} method which overrides any holds.  Older versions are
        not supported.
        """
        if self._gapplication is not None:
            raise RuntimeError("Can't register more than one application instance.")
        if self._started:
            raise ReactorAlreadyRunning(
                "Can't register application after reactor was started."
            )
        if not hasattr(app, "quit"):
            raise RuntimeError(
                "Application registration is not supported in"
                " versions of PyGObject prior to 3.2."
            )
        self._gapplication = app

        def run():
            # Hold the application so it keeps running until quit is called.
            app.hold()
            app.run(None)

        self._run = run
        self._crash = app.quit
class PortableGIReactor(_glibbase.GlibReactorBase):
    """
    Portable GObject Introspection event loop reactor.
    """

    def __init__(self, useGtk=False):
        super().__init__(GLib, None, useGtk=useGtk)

    def registerGApplication(self, app):
        """
        Register a C{Gio.Application} or C{Gtk.Application}, whose main loop
        will be used instead of the default one.

        @raise NotImplementedError: always, on this reactor.
        """
        raise NotImplementedError("GApplication is not currently supported on Windows.")

    def simulate(self) -> None:
        """
        For compatibility only.  Do nothing.
        """
def install(useGtk: bool = False) -> Union[GIReactor, PortableGIReactor]:
    """
    Configure the twisted mainloop to be run inside the glib mainloop.

    @param useGtk: A hint that the Gtk GUI will or will not be used.  Currently
        does not modify any behavior.
    """
    # GApplication support only exists on POSIX; everywhere else use the
    # portable variant.
    newReactor: Union[GIReactor, PortableGIReactor]
    if runtime.platform.getType() == "posix":
        newReactor = GIReactor(useGtk=useGtk)
    else:
        newReactor = PortableGIReactor(useGtk=useGtk)

    from twisted.internet.main import installReactor

    installReactor(newReactor)
    return newReactor
__all__ = ["install"]

View File

@@ -0,0 +1,50 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with the glib mainloop.
This is like gtk2, but slightly faster and does not require a working
$DISPLAY. However, you cannot run GUIs under this reactor: for that you must
use the gtk2reactor instead.
In order to use this support, simply do the following::
from twisted.internet import glib2reactor
glib2reactor.install()
Then use twisted.internet APIs as usual. The other methods here are not
intended to be called directly.
"""
from incremental import Version
from ._deprecate import deprecatedGnomeReactor
deprecatedGnomeReactor("glib2reactor", Version("Twisted", 23, 8, 0))
from twisted.internet import gtk2reactor
class Glib2Reactor(gtk2reactor.Gtk2Reactor):
    """
    The reactor using the glib mainloop.
    """

    def __init__(self):
        """
        Override init to set the C{useGtk} flag.
        """
        # Single inheritance: super() resolves to Gtk2Reactor, so this is
        # equivalent to the explicit base-class call.
        super().__init__(useGtk=False)
def install():
    """
    Configure the twisted mainloop to be run inside the glib mainloop.

    @return: the newly installed reactor.  (Returning it makes this function
        consistent with the sibling C{install()} functions in
        C{gireactor} and C{gtk2reactor}; existing callers that ignore the
        return value are unaffected.)
    """
    reactor = Glib2Reactor()
    from twisted.internet.main import installReactor

    installReactor(reactor)
    return reactor
__all__ = ["install"]

View File

@@ -0,0 +1,119 @@
# -*- test-case-name: twisted.internet.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with the glib/gtk2
mainloop.
In order to use this support, simply do the following::
from twisted.internet import gtk2reactor
gtk2reactor.install()
Then use twisted.internet APIs as usual. The other methods here are not
intended to be called directly.
"""
from incremental import Version
from ._deprecate import deprecatedGnomeReactor
deprecatedGnomeReactor("gtk2reactor", Version("Twisted", 23, 8, 0))
# System Imports
import sys
# Twisted Imports
from twisted.internet import _glibbase
from twisted.python import runtime
# Certain old versions of pygtk and gi crash if imported at the same
# time. This is a problem when running Twisted's unit tests, since they will
# attempt to run both gtk2 and gtk3/gi tests. However, gireactor makes sure
# that if we are in such an old version, and gireactor was imported,
# gtk2reactor will not be importable. So we don't *need* to enforce that here
# as well; whichever is imported first will still win. Moreover, additional
# enforcement in this module is unnecessary in modern versions, and downright
# problematic in certain versions where for some reason importing gtk also
# imports some subset of gi. So we do nothing here, relying on gireactor to
# prevent the crash.
try:
if not hasattr(sys, "frozen"):
# Don't want to check this for py2exe
import pygtk
pygtk.require("2.0")
except (ImportError, AttributeError):
pass # maybe we're using pygtk before this hack existed.
import gobject
if not hasattr(gobject, "IO_HUP"):
# gi.repository's legacy compatibility helper raises an AttributeError with
# a custom error message rather than a useful ImportError, so things tend
# to fail loudly. Things that import this module expect an ImportError if,
# well, something failed to import, and treat an AttributeError as an
# arbitrary application code failure, so we satisfy that expectation here.
raise ImportError("pygobject 2.x is not installed. Use the `gi` reactor.")
if hasattr(gobject, "threads_init"):
# recent versions of python-gtk expose this. python-gtk=2.4.1
# (wrapping glib-2.4.7) does. python-gtk=2.0.0 (wrapping
# glib-2.2.3) does not.
gobject.threads_init()
class Gtk2Reactor(_glibbase.GlibReactorBase):
    """
    PyGTK+ 2 event loop reactor.
    """

    def __init__(self, useGtk=True):
        # Only import gtk when the GUI loop was requested; with
        # useGtk=False the base class is handed None for the GUI module
        # (used by glib2reactor, which runs without a GUI).
        _gtk = None
        if useGtk is True:
            import gtk as _gtk

        _glibbase.GlibReactorBase.__init__(self, gobject, _gtk, useGtk=useGtk)
# We don't bother deprecating the PortableGtkReactor.
# The original code was removed and replaced with the
# backward compatible generic GTK reactor.
PortableGtkReactor = Gtk2Reactor
def install(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.

    @param useGtk: if true, use the GTK+ event loop; otherwise the plain
        glib loop (slightly faster, but does not support a GUI).

    @return: the newly installed reactor.
    """
    from twisted.internet.main import installReactor

    reactor = Gtk2Reactor(useGtk)
    installReactor(reactor)
    return reactor
def portableInstall(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.

    @param useGtk: if true, use the GTK+ event loop; otherwise the plain
        glib loop.

    @return: the newly installed reactor.
    """
    # Fix: honour the useGtk argument.  It was previously accepted but
    # never passed to the reactor constructor, so an explicit
    # portableInstall(useGtk=False) was silently ignored (the constructor
    # default is True).  PortableGtkReactor is an alias of Gtk2Reactor,
    # which accepts the flag.
    reactor = PortableGtkReactor(useGtk)
    from twisted.internet.main import installReactor

    installReactor(reactor)
    return reactor
if runtime.platform.getType() == "posix":
install = install
else:
install = portableInstall
__all__ = ["install"]

View File

@@ -0,0 +1,22 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module is a legacy compatibility alias for L{twisted.internet.gireactor}.
See that module instead.
"""
from incremental import Version
from ._deprecate import deprecatedGnomeReactor
deprecatedGnomeReactor("gtk3reactor", Version("Twisted", 23, 8, 0))
from twisted.internet import gireactor
Gtk3Reactor = gireactor.GIReactor
PortableGtk3Reactor = gireactor.PortableGIReactor
install = gireactor.install
__all__ = ["install"]

View File

@@ -0,0 +1,426 @@
# -*- test-case-name: twisted.internet.test.test_inotify -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to linux inotify API.
In order to use this support, simply do the following (and start a reactor
at some point)::
from twisted.internet import inotify
from twisted.python import filepath
def notify(ignored, filepath, mask):
\"""
For historical reasons, an opaque handle is passed as first
parameter. This object should never be used.
@param filepath: FilePath on which the event happened.
@param mask: inotify event as hexadecimal masks
\"""
print("event %s on %s" % (
', '.join(inotify.humanReadableMask(mask)), filepath))
notifier = inotify.INotify()
notifier.startReading()
notifier.watch(filepath.FilePath("/some/directory"), callbacks=[notify])
notifier.watch(filepath.FilePath(b"/some/directory2"), callbacks=[notify])
Note that in the above example, a L{FilePath} which is a L{bytes} path name
or L{str} path name may be used. However, no matter what type of
L{FilePath} is passed to this module, internally the L{FilePath} is
converted to L{bytes} according to L{sys.getfilesystemencoding}.
For any L{FilePath} returned by this module, the caller is responsible for
converting from a L{bytes} path name to a L{str} path name.
@since: 10.1
"""
import os
import struct
from twisted.internet import fdesc
from twisted.internet.abstract import FileDescriptor
from twisted.python import _inotify, log
# from /usr/src/linux/include/linux/inotify.h
# from /usr/src/linux/include/linux/inotify.h
IN_ACCESS = 0x00000001  # File was accessed
IN_MODIFY = 0x00000002  # File was modified
IN_ATTRIB = 0x00000004  # Metadata changed
IN_CLOSE_WRITE = 0x00000008  # Writeable file was closed
IN_CLOSE_NOWRITE = 0x00000010  # Unwriteable file closed
IN_OPEN = 0x00000020  # File was opened
IN_MOVED_FROM = 0x00000040  # File was moved from X
IN_MOVED_TO = 0x00000080  # File was moved to Y
IN_CREATE = 0x00000100  # Subfile was created
IN_DELETE = 0x00000200  # Subfile was delete
IN_DELETE_SELF = 0x00000400  # Self was deleted
IN_MOVE_SELF = 0x00000800  # Self was moved
IN_UNMOUNT = 0x00002000  # Backing fs was unmounted
IN_Q_OVERFLOW = 0x00004000  # Event queued overflowed
IN_IGNORED = 0x00008000  # File was ignored
IN_ONLYDIR = 0x01000000  # only watch the path if it is a directory
IN_DONT_FOLLOW = 0x02000000  # don't follow a sym link
IN_MASK_ADD = 0x20000000  # add to the mask of an already existing watch
IN_ISDIR = 0x40000000  # event occurred against dir
IN_ONESHOT = 0x80000000  # only send event once

# Convenience unions of related events.
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE  # closes
IN_MOVED = IN_MOVED_FROM | IN_MOVED_TO  # moves
IN_CHANGED = IN_MODIFY | IN_ATTRIB  # changes

# Default event set used when no explicit mask is supplied to watch().
IN_WATCH_MASK = (
    IN_MODIFY
    | IN_ATTRIB
    | IN_CREATE
    | IN_DELETE
    | IN_DELETE_SELF
    | IN_MOVE_SELF
    | IN_UNMOUNT
    | IN_MOVED_FROM
    | IN_MOVED_TO
)

# (flag, human readable name) pairs; humanReadableMask reports names in
# this order.
_FLAG_TO_HUMAN = [
    (IN_ACCESS, "access"),
    (IN_MODIFY, "modify"),
    (IN_ATTRIB, "attrib"),
    (IN_CLOSE_WRITE, "close_write"),
    (IN_CLOSE_NOWRITE, "close_nowrite"),
    (IN_OPEN, "open"),
    (IN_MOVED_FROM, "moved_from"),
    (IN_MOVED_TO, "moved_to"),
    (IN_CREATE, "create"),
    (IN_DELETE, "delete"),
    (IN_DELETE_SELF, "delete_self"),
    (IN_MOVE_SELF, "move_self"),
    (IN_UNMOUNT, "unmount"),
    (IN_Q_OVERFLOW, "queue_overflow"),
    (IN_IGNORED, "ignored"),
    (IN_ONLYDIR, "only_dir"),
    (IN_DONT_FOLLOW, "dont_follow"),
    (IN_MASK_ADD, "mask_add"),
    (IN_ISDIR, "is_dir"),
    (IN_ONESHOT, "one_shot"),
]


def humanReadableMask(mask):
    """
    Auxiliary function that converts a hexadecimal mask into a series
    of human readable flags.

    @param mask: a bitwise OR of C{IN_*} event flags
    @return: a L{list} of flag names, in C{_FLAG_TO_HUMAN} order
    """
    return [name for flag, name in _FLAG_TO_HUMAN if flag & mask]
class _Watch:
    """
    Watch object that represents a Watch point in the filesystem. The
    user should let INotify to create these objects

    @ivar path: The path over which this watch point is monitoring
    @ivar mask: The events monitored by this watchpoint
    @ivar autoAdd: Flag that determines whether this watch point
        should automatically add created subdirectories
    @ivar callbacks: L{list} of callback functions that will be called
        when an event occurs on this watch.
    """

    def __init__(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None):
        # Paths are normalized to bytes mode internally (see module docstring).
        self.path = path.asBytesMode()
        self.mask = mask
        self.autoAdd = autoAdd
        # Fresh list per instance when no callbacks were supplied.
        self.callbacks = [] if callbacks is None else callbacks

    def _notify(self, filepath, events):
        """
        Callback function used by L{INotify} to dispatch an event.
        """
        bytesPath = filepath.asBytesMode()
        for dispatch in self.callbacks:
            dispatch(self, bytesPath, events)
class INotify(FileDescriptor):
    """
    The INotify file descriptor, it basically does everything related
    to INotify, from reading to notifying watch points.

    @ivar _buffer: a L{bytes} containing the data read from the inotify fd.
    @ivar _watchpoints: a L{dict} that maps from inotify watch ids to
        watchpoints objects
    @ivar _watchpaths: a L{dict} that maps from watched paths to the
        inotify watch ids
    """

    # Class attribute so the binding can be overridden (see the comment in
    # __init__ about parametrization for testing).
    _inotify = _inotify

    def __init__(self, reactor=None):
        FileDescriptor.__init__(self, reactor=reactor)

        # Smart way to allow parametrization of libc so I can override
        # it and test for the system errors.
        self._fd = self._inotify.init()

        fdesc.setNonBlocking(self._fd)
        fdesc._setCloseOnExec(self._fd)

        # The next 2 lines are needed to have self.loseConnection()
        # to call connectionLost() on us. Since we already created the
        # fd that talks to inotify we want to be notified even if we
        # haven't yet started reading.
        self.connected = 1
        self._writeDisconnected = True

        self._buffer = b""
        self._watchpoints = {}
        self._watchpaths = {}

    def _addWatch(self, path, mask, autoAdd, callbacks):
        """
        Private helper that abstracts the use of ctypes.

        Calls the internal inotify API and checks for any errors after the
        call. If there's an error L{INotify._addWatch} can raise an
        INotifyError. If there's no error it proceeds creating a watchpoint and
        adding a watchpath for inverse lookup of the file descriptor from the
        path.
        """
        path = path.asBytesMode()
        wd = self._inotify.add(self._fd, path, mask)

        iwp = _Watch(path, mask, autoAdd, callbacks)

        # Forward and inverse mappings: wd -> watchpoint, path -> wd.
        self._watchpoints[wd] = iwp
        self._watchpaths[path] = wd

        return wd

    def _rmWatch(self, wd):
        """
        Private helper that abstracts the use of ctypes.

        Calls the internal inotify API to remove an fd from inotify then
        removes the corresponding watchpoint from the internal mapping together
        with the file descriptor from the watchpath.
        """
        self._inotify.remove(self._fd, wd)
        iwp = self._watchpoints.pop(wd)
        self._watchpaths.pop(iwp.path)

    def connectionLost(self, reason):
        """
        Release the inotify file descriptor and do the necessary cleanup
        """
        FileDescriptor.connectionLost(self, reason)
        if self._fd >= 0:
            try:
                os.close(self._fd)
            except OSError as e:
                # Best effort: log rather than propagate from teardown.
                log.err(e, "Couldn't close INotify file descriptor.")

    def fileno(self):
        """
        Get the underlying file descriptor from this inotify observer.
        Required by L{abstract.FileDescriptor} subclasses.
        """
        return self._fd

    def doRead(self):
        """
        Read some data from the observed file descriptors
        """
        fdesc.readFromFD(self._fd, self._doRead)

    def _doRead(self, in_):
        """
        Work on the data just read from the file descriptor.
        """
        self._buffer += in_
        while len(self._buffer) >= 16:
            # Each inotify event starts with a fixed 16-byte header:
            # watch descriptor, event mask, cookie, and length of the
            # optional NUL-padded name that follows.
            wd, mask, cookie, size = struct.unpack("=LLLL", self._buffer[0:16])

            if size:
                name = self._buffer[16 : 16 + size].rstrip(b"\0")
            else:
                name = None

            self._buffer = self._buffer[16 + size :]

            try:
                iwp = self._watchpoints[wd]
            except KeyError:
                # Event for a watch we no longer track; skip it.
                continue

            path = iwp.path.asBytesMode()
            if name:
                path = path.child(name)
            iwp._notify(path, mask)

            if iwp.autoAdd and mask & IN_ISDIR and mask & IN_CREATE:
                # mask & IN_ISDIR already guarantees that the path is a
                # directory. There's no way you can get here without a
                # directory anyway, so no point in checking for that again.
                new_wd = self.watch(
                    path, mask=iwp.mask, autoAdd=True, callbacks=iwp.callbacks
                )
                # This is very very very hacky and I'd rather not do this but
                # we have no other alternative that is less hacky other than
                # surrender. We use callLater because we don't want to have
                # too many events waiting while we process these subdirs, we
                # must always answer events as fast as possible or the overflow
                # might come.
                self.reactor.callLater(0, self._addChildren, self._watchpoints[new_wd])
            if mask & IN_DELETE_SELF:
                self._rmWatch(wd)
                # NOTE(review): losing the whole connection when a single
                # watched path is deleted looks aggressive — confirm this is
                # intended rather than only removing the one watch.
                self.loseConnection()

    def _addChildren(self, iwp):
        """
        This is a very private method, please don't even think about using it.

        Note that this is a fricking hack... it's because we cannot be fast
        enough in adding a watch to a directory and so we basically end up
        getting here too late if some operations have already been going on in
        the subdir, we basically need to catchup. This eventually ends up
        meaning that we generate double events, your app must be resistant.
        """
        try:
            listdir = iwp.path.children()
        except OSError:
            # Somebody or something (like a test) removed this directory while
            # we were in the callLater(0...) waiting. It doesn't make sense to
            # process it anymore
            return

        # note that it's true that listdir will only see the subdirs inside
        # path at the moment of the call but path is monitored already so if
        # something is created we will receive an event.
        for f in listdir:
            # It's a directory, watch it and then add its children
            if f.isdir():
                wd = self.watch(f, mask=iwp.mask, autoAdd=True, callbacks=iwp.callbacks)
                iwp._notify(f, IN_ISDIR | IN_CREATE)
                # now f is watched, we can add its children the callLater is to
                # avoid recursion
                self.reactor.callLater(0, self._addChildren, self._watchpoints[wd])
            # It's a file and we notify it.
            if f.isfile():
                iwp._notify(f, IN_CREATE | IN_CLOSE_WRITE)

    def watch(
        self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False
    ):
        """
        Watch the 'mask' events in given path. Can raise C{INotifyError} when
        there's a problem while adding a directory.

        @param path: The path needing monitoring
        @type path: L{FilePath}

        @param mask: The events that should be watched
        @type mask: L{int}

        @param autoAdd: if True automatically add newly created
            subdirectories
        @type autoAdd: L{bool}

        @param callbacks: A list of callbacks that should be called
            when an event happens in the given path.
            The callback should accept 3 arguments:
            (ignored, filepath, mask)
        @type callbacks: L{list} of callables

        @param recursive: Also add all the subdirectories in this path
        @type recursive: L{bool}
        """
        if recursive:
            # This behavior is needed to be compatible with the windows
            # interface for filesystem changes:
            # http://msdn.microsoft.com/en-us/library/aa365465(VS.85).aspx
            # ReadDirectoryChangesW can do bWatchSubtree so it doesn't
            # make sense to implement this at a higher abstraction
            # level when other platforms support it already
            for child in path.walk():
                if child.isdir():
                    self.watch(child, mask, autoAdd, callbacks, recursive=False)
        else:
            wd = self._isWatched(path)
            if wd:
                # Already watched: return the existing watch descriptor.
                return wd

            mask = mask | IN_DELETE_SELF  # need this to remove the watch

            return self._addWatch(path, mask, autoAdd, callbacks)

    def ignore(self, path):
        """
        Remove the watch point monitoring the given path

        @param path: The path that should be ignored
        @type path: L{FilePath}
        """
        path = path.asBytesMode()
        wd = self._isWatched(path)
        if wd is None:
            raise KeyError(f"{path!r} is not watched")
        else:
            self._rmWatch(wd)

    def _isWatched(self, path):
        """
        Helper function that checks if the path is already monitored
        and returns its watchdescriptor if so or None otherwise.

        @param path: The path that should be checked
        @type path: L{FilePath}
        """
        path = path.asBytesMode()
        return self._watchpaths.get(path, None)
INotifyError = _inotify.INotifyError
__all__ = [
"INotify",
"humanReadableMask",
"IN_WATCH_MASK",
"IN_ACCESS",
"IN_MODIFY",
"IN_ATTRIB",
"IN_CLOSE_NOWRITE",
"IN_CLOSE_WRITE",
"IN_OPEN",
"IN_MOVED_FROM",
"IN_MOVED_TO",
"IN_CREATE",
"IN_DELETE",
"IN_DELETE_SELF",
"IN_MOVE_SELF",
"IN_UNMOUNT",
"IN_Q_OVERFLOW",
"IN_IGNORED",
"IN_ONLYDIR",
"IN_DONT_FOLLOW",
"IN_MASK_ADD",
"IN_ISDIR",
"IN_ONESHOT",
"IN_CLOSE",
"IN_MOVED",
"IN_CHANGED",
]

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,10 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I/O Completion Ports reactor
"""
from twisted.internet.iocpreactor.reactor import install
__all__ = ["install"]

View File

@@ -0,0 +1,387 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Abstract file handle class
"""
import errno
from zope.interface import implementer
from twisted.internet import error, interfaces, main
from twisted.internet.abstract import _ConsumerMixin, _dataMustBeBytes, _LogOwner
from twisted.internet.iocpreactor import iocpsupport as _iocp
from twisted.internet.iocpreactor.const import ERROR_HANDLE_EOF, ERROR_IO_PENDING
from twisted.python import failure
@implementer(
    interfaces.IPushProducer,
    interfaces.IConsumer,
    interfaces.ITransport,
    interfaces.IHalfCloseableDescriptor,
)
class FileHandle(_ConsumerMixin, _LogOwner):
    """
    File handle that can read and write asynchronously
    """

    # read stuff
    maxReadBuffers = 16
    readBufferSize = 4096
    reading = False
    dynamicReadBuffers = True  # set this to false if subclass doesn't do iovecs
    _readNextBuffer = 0
    _readSize = 0  # how much data we have in the read buffer
    _readScheduled = None
    _readScheduledInOS = False

    def startReading(self):
        """
        Begin delivering data: dispatch any already-buffered data on the next
        reactor iteration, then issue overlapped reads.
        """
        self.reactor.addActiveHandle(self)
        if not self._readScheduled and not self.reading:
            self.reading = True
            self._readScheduled = self.reactor.callLater(0, self._resumeReading)

    def stopReading(self):
        """
        Stop delivering data; cancels any pending resume.
        """
        if self._readScheduled:
            self._readScheduled.cancel()
            self._readScheduled = None
        self.reading = False

    def _resumeReading(self):
        self._readScheduled = None
        # Only issue a new OS-level read once buffered data is dispatched and
        # no read is already pending in the OS.
        if self._dispatchData() and not self._readScheduledInOS:
            self.doRead()

    def _dispatchData(self):
        """
        Dispatch previously read data. Return True if self.reading and we don't
        have any more data
        """
        if not self._readSize:
            return self.reading
        size = self._readSize
        full_buffers = size // self.readBufferSize
        while self._readNextBuffer < full_buffers:
            self.dataReceived(self._readBuffers[self._readNextBuffer])
            self._readNextBuffer += 1
            if not self.reading:
                # dataReceived may have called stopReading/loseConnection.
                return False
        remainder = size % self.readBufferSize
        if remainder:
            self.dataReceived(self._readBuffers[full_buffers][0:remainder])
        if self.dynamicReadBuffers:
            total_buffer_size = self.readBufferSize * len(self._readBuffers)
            # we have one buffer too many
            if size < total_buffer_size - self.readBufferSize:
                del self._readBuffers[-1]
            # we filled all buffers, so allocate one more
            elif (
                size == total_buffer_size
                and len(self._readBuffers) < self.maxReadBuffers
            ):
                self._readBuffers.append(bytearray(self.readBufferSize))
        self._readNextBuffer = 0
        self._readSize = 0
        return self.reading

    def _cbRead(self, rc, data, evt):
        self._readScheduledInOS = False
        if self._handleRead(rc, data, evt):
            self.doRead()

    def _handleRead(self, rc, data, evt):
        """
        Returns False if we should stop reading for now
        """
        if self.disconnected:
            return False
        # graceful disconnection
        if (not (rc or data)) or rc in (errno.WSAEDISCON, ERROR_HANDLE_EOF):
            self.reactor.removeActiveHandle(self)
            self.readConnectionLost(failure.Failure(main.CONNECTION_DONE))
            return False
        # XXX: not handling WSAEWOULDBLOCK
        # ("too many outstanding overlapped I/O requests")
        elif rc:
            self.connectionLost(
                failure.Failure(
                    error.ConnectionLost(
                        "read error -- %s (%s)"
                        % (errno.errorcode.get(rc, "unknown"), rc)
                    )
                )
            )
            return False
        else:
            assert self._readSize == 0
            assert self._readNextBuffer == 0
            self._readSize = data
            return self._dispatchData()

    def doRead(self):
        evt = _iocp.Event(self._cbRead, self)

        evt.buff = buff = self._readBuffers
        rc, numBytesRead = self.readFromHandle(buff, evt)

        if not rc or rc == ERROR_IO_PENDING:
            self._readScheduledInOS = True
        else:
            self._handleRead(rc, numBytesRead, evt)

    def readFromHandle(self, bufflist, evt):
        raise NotImplementedError()  # TODO: this should default to ReadFile

    def dataReceived(self, data):
        raise NotImplementedError

    def readConnectionLost(self, reason):
        self.connectionLost(reason)

    # write stuff
    dataBuffer = b""
    offset = 0
    writing = False
    _writeScheduled = None
    _writeDisconnecting = False
    _writeDisconnected = False
    writeBufferSize = 2**2**2**2

    def loseWriteConnection(self):
        self._writeDisconnecting = True
        self.startWriting()

    def _closeWriteConnection(self):
        # override in subclasses
        pass

    def writeConnectionLost(self, reason):
        # in current code should never be called
        self.connectionLost(reason)

    def startWriting(self):
        self.reactor.addActiveHandle(self)
        if not self._writeScheduled and not self.writing:
            self.writing = True
            self._writeScheduled = self.reactor.callLater(0, self._resumeWriting)

    def stopWriting(self):
        if self._writeScheduled:
            self._writeScheduled.cancel()
            self._writeScheduled = None
        self.writing = False

    def _resumeWriting(self):
        self._writeScheduled = None
        self.doWrite()

    def _cbWrite(self, rc, numBytesWritten, evt):
        if self._handleWrite(rc, numBytesWritten, evt):
            self.doWrite()

    def _handleWrite(self, rc, numBytesWritten, evt):
        """
        Returns false if we should stop writing for now
        """
        if self.disconnected or self._writeDisconnected:
            return False
        # XXX: not handling WSAEWOULDBLOCK
        # ("too many outstanding overlapped I/O requests")
        if rc:
            self.connectionLost(
                failure.Failure(
                    error.ConnectionLost(
                        "write error -- %s (%s)"
                        % (errno.errorcode.get(rc, "unknown"), rc)
                    )
                )
            )
            return False
        else:
            self.offset += numBytesWritten
            # If there is nothing left to send,
            if self.offset == len(self.dataBuffer) and not self._tempDataLen:
                self.dataBuffer = b""
                self.offset = 0
                # stop writing
                self.stopWriting()
                # If I've got a producer who is supposed to supply me with data
                if self.producer is not None and (
                    (not self.streamingProducer) or self.producerPaused
                ):
                    # tell them to supply some more.
                    # BUGFIX: clear the paused flag when resuming.  It was
                    # previously set to True here, which contradicts the
                    # pause bookkeeping in write()/writeSequence() (they set
                    # producerPaused = True when *pausing*).
                    self.producerPaused = False
                    self.producer.resumeProducing()
                elif self.disconnecting:
                    # But if I was previously asked to let the connection die,
                    # do so.
                    self.connectionLost(failure.Failure(main.CONNECTION_DONE))
                elif self._writeDisconnecting:
                    # I was previously asked to half-close the connection.
                    self._writeDisconnected = True
                    self._closeWriteConnection()
                return False
            else:
                return True

    def doWrite(self):
        if len(self.dataBuffer) - self.offset < self.SEND_LIMIT:
            # If there is currently less than SEND_LIMIT bytes left to send
            # in the string, extend it with the array data.
            self.dataBuffer = self.dataBuffer[self.offset :] + b"".join(
                self._tempDataBuffer
            )
            self.offset = 0
            self._tempDataBuffer = []
            self._tempDataLen = 0

        evt = _iocp.Event(self._cbWrite, self)

        # Send as much data as you can.
        if self.offset:
            sendView = memoryview(self.dataBuffer)
            evt.buff = buff = sendView[self.offset :]
        else:
            evt.buff = buff = self.dataBuffer
        rc, data = self.writeToHandle(buff, evt)
        if rc and rc != ERROR_IO_PENDING:
            self._handleWrite(rc, data, evt)

    def writeToHandle(self, buff, evt):
        raise NotImplementedError()  # TODO: this should default to WriteFile

    def write(self, data):
        """Reliably write some data.

        The data is buffered until his file descriptor is ready for writing.
        """
        _dataMustBeBytes(data)
        if not self.connected or self._writeDisconnected:
            return
        if data:
            self._tempDataBuffer.append(data)
            self._tempDataLen += len(data)
            if self.producer is not None and self.streamingProducer:
                if len(self.dataBuffer) + self._tempDataLen > self.writeBufferSize:
                    self.producerPaused = True
                    self.producer.pauseProducing()
            self.startWriting()

    def writeSequence(self, iovec):
        for i in iovec:
            _dataMustBeBytes(i)
        if not self.connected or not iovec or self._writeDisconnected:
            return
        self._tempDataBuffer.extend(iovec)
        for i in iovec:
            self._tempDataLen += len(i)
        if self.producer is not None and self.streamingProducer:
            if len(self.dataBuffer) + self._tempDataLen > self.writeBufferSize:
                self.producerPaused = True
                self.producer.pauseProducing()
        self.startWriting()

    # general stuff
    connected = False
    disconnected = False
    disconnecting = False
    logstr = "Uninitialized"

    SEND_LIMIT = 128 * 1024

    def __init__(self, reactor=None):
        if not reactor:
            from twisted.internet import reactor
        self.reactor = reactor
        self._tempDataBuffer = []  # will be added to dataBuffer in doWrite
        self._tempDataLen = 0
        self._readBuffers = [bytearray(self.readBufferSize)]

    def connectionLost(self, reason):
        """
        The connection was lost.

        This is called when the connection on a selectable object has been
        lost. It will be called whether the connection was closed explicitly,
        an exception occurred in an event handler, or the other end of the
        connection closed it first.

        Clean up state here, but make sure to call back up to FileDescriptor.
        """
        self.disconnected = True
        self.connected = False
        if self.producer is not None:
            self.producer.stopProducing()
            self.producer = None
        self.stopReading()
        self.stopWriting()
        self.reactor.removeActiveHandle(self)

    def getFileHandle(self):
        return -1

    def loseConnection(self, _connDone=failure.Failure(main.CONNECTION_DONE)):
        """
        Close the connection at the next available opportunity.

        Call this to cause this FileDescriptor to lose its connection. It will
        first write any data that it has buffered.

        If there is data buffered yet to be written, this method will cause the
        transport to lose its connection as soon as it's done flushing its
        write buffer. If you have a producer registered, the connection won't
        be closed until the producer is finished. Therefore, make sure you
        unregister your producer when it's finished, or the connection will
        never close.
        """
        if self.connected and not self.disconnecting:
            if self._writeDisconnected:
                # doWrite won't trigger the connection close anymore
                self.stopReading()
                # BUGFIX: stopWriting was referenced without being called
                # (bare "self.stopWriting"), so the write side stayed
                # scheduled across the disconnect.
                self.stopWriting()
                self.connectionLost(_connDone)
            else:
                self.stopReading()
                self.startWriting()
                self.disconnecting = 1

    # Producer/consumer implementation

    def stopConsuming(self):
        """
        Stop consuming data.

        This is called when a producer has lost its connection, to tell the
        consumer to go lose its connection (and break potential circular
        references).
        """
        self.unregisterProducer()
        self.loseConnection()

    # producer interface implementation

    def resumeProducing(self):
        if self.connected and not self.disconnecting:
            self.startReading()

    def pauseProducing(self):
        self.stopReading()

    def stopProducing(self):
        self.loseConnection()

    def getHost(self):
        # ITransport.getHost
        raise NotImplementedError()

    def getPeer(self):
        # ITransport.getPeer
        raise NotImplementedError()

View File

@@ -0,0 +1,25 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Windows constants for IOCP
"""
# this stuff should really be gotten from Windows headers via pyrex, but it
# probably is not going to change
ERROR_PORT_UNREACHABLE = 1234
ERROR_NETWORK_UNREACHABLE = 1231
ERROR_CONNECTION_REFUSED = 1225
ERROR_IO_PENDING = 997
ERROR_OPERATION_ABORTED = 995
WAIT_TIMEOUT = 258
ERROR_NETNAME_DELETED = 64
ERROR_HANDLE_EOF = 38
INFINITE = -1
SO_UPDATE_CONNECT_CONTEXT = 0x7010
SO_UPDATE_ACCEPT_CONTEXT = 0x700B

View File

@@ -0,0 +1,42 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Interfaces for iocpreactor
"""
from zope.interface import Interface
class IReadHandle(Interface):
    """
    A handle that supports overlapped (IOCP) reads.
    """

    def readFromHandle(bufflist, evt):
        """
        Read into the given buffers from this handle.

        @param bufflist: the buffers to read into
        @type bufflist: list of objects implementing the read/write buffer protocol

        @param evt: an IOCP Event object

        @return: tuple (return code, number of bytes read)
        """
class IWriteHandle(Interface):
    """
    A handle that supports overlapped (IOCP) writes.
    """

    def writeToHandle(buff, evt):
        """
        Write the given buffer to this handle.

        @param buff: the buffer to write
        @type buff: any object implementing the buffer protocol

        @param evt: an IOCP Event object

        @return: tuple (return code, number of bytes written)
        """
class IReadWriteHandle(IReadHandle, IWriteHandle):
    """
    A handle that supports both overlapped reads and writes.
    """

    pass

View File

@@ -0,0 +1,27 @@
__all__ = [
"CompletionPort",
"Event",
"accept",
"connect",
"get_accept_addrs",
"have_connectex",
"makesockaddr",
"maxAddrLen",
"recv",
"recvfrom",
"send",
]
from twisted_iocpsupport.iocpsupport import ( # type: ignore[import-not-found]
CompletionPort,
Event,
accept,
connect,
get_accept_addrs,
have_connectex,
makesockaddr,
maxAddrLen,
recv,
recvfrom,
send,
)

View File

@@ -0,0 +1,24 @@
test specifically:
failed accept error message -- similar to test_tcp_internals
immediate success on accept/connect/recv, including Event.ignore
parametrize iocpsupport somehow -- via reactor?
do:
break handling -- WaitForSingleObject on the IOCP handle?
iovecs for write buffer
do not wait for a mainloop iteration if resumeProducing (in _handleWrite) does startWriting
don't addActiveHandle in every call to startWriting/startReading
iocpified process support
win32er-in-a-thread (or run GQCS in a thread -- it can't receive SIGBREAK)
blocking in sendto() -- I think Windows can do that, especially with local UDP
buildbot:
run in vmware
start from a persistent snapshot
use a stub inside the vm to svnup/run tests/collect stdio
lift logs through SMB? or ship them via tcp beams to the VM host
have a timeout on the test run
if we time out, take a screenshot, save it, kill the VM

View File

@@ -0,0 +1,285 @@
# -*- test-case-name: twisted.internet.test.test_iocp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Reactor that uses IO completion ports
"""
import socket
import sys
import warnings
from typing import Tuple, Type
from zope.interface import implementer
from twisted.internet import base, error, interfaces, main
from twisted.internet._dumbwin32proc import Process
from twisted.internet.iocpreactor import iocpsupport as _iocp, tcp, udp
from twisted.internet.iocpreactor.const import WAIT_TIMEOUT
from twisted.internet.win32eventreactor import _ThreadedWin32EventsMixin
from twisted.python import failure, log
try:
from twisted.protocols.tls import TLSMemoryBIOFactory as _TLSMemoryBIOFactory
except ImportError:
TLSMemoryBIOFactory = None
# Either pyOpenSSL isn't installed, or it is too old for this code to work.
# The reactor won't provide IReactorSSL.
_extraInterfaces: Tuple[Type[interfaces.IReactorSSL], ...] = ()
warnings.warn(
"pyOpenSSL 0.10 or newer is required for SSL support in iocpreactor. "
"It is missing, so the reactor will not support SSL APIs."
)
else:
TLSMemoryBIOFactory = _TLSMemoryBIOFactory
_extraInterfaces = (interfaces.IReactorSSL,)
MAX_TIMEOUT = 2000 # 2 seconds, see doIteration for explanation
EVENTS_PER_LOOP = 1000 # XXX: what's a good value here?
# keys to associate with normal and waker events
KEY_NORMAL, KEY_WAKEUP = range(2)
_NO_GETHANDLE = error.ConnectionFdescWentAway("Handler has no getFileHandle method")
_NO_FILEDESC = error.ConnectionFdescWentAway("Filedescriptor went away")
@implementer(
interfaces.IReactorTCP,
interfaces.IReactorUDP,
interfaces.IReactorMulticast,
interfaces.IReactorProcess,
*_extraInterfaces,
)
class IOCPReactor(base.ReactorBase, _ThreadedWin32EventsMixin):
port = None
    def __init__(self):
        base.ReactorBase.__init__(self)
        # One completion port receives the completion packets for every
        # handle driven by this reactor (see doIteration).
        self.port = _iocp.CompletionPort()
        # Set of handles currently registered as active via
        # addActiveHandle/removeActiveHandle.
        self.handles = set()
    def addActiveHandle(self, handle):
        """
        Record C{handle} as active; idempotent since the container is a set.
        """
        self.handles.add(handle)
    def removeActiveHandle(self, handle):
        """
        Forget C{handle}; a no-op if it was never added (C{set.discard}).
        """
        self.handles.discard(handle)
def doIteration(self, timeout):
"""
Poll the IO completion port for new events.
"""
# This function sits and waits for an IO completion event.
#
# There are two requirements: process IO events as soon as they arrive
# and process ctrl-break from the user in a reasonable amount of time.
#
# There are three kinds of waiting.
# 1) GetQueuedCompletionStatus (self.port.getEvent) to wait for IO
# events only.
# 2) Msg* family of wait functions that can stop waiting when
# ctrl-break is detected (then, I think, Python converts it into a
# KeyboardInterrupt)
# 3) *Ex family of wait functions that put the thread into an
# "alertable" wait state which is supposedly triggered by IO completion
#
# 2) and 3) can be combined. Trouble is, my IO completion is not
# causing 3) to trigger, possibly because I do not use an IO completion
# callback. Windows is weird.
# There are two ways to handle this. I could use MsgWaitForSingleObject
# here and GetQueuedCompletionStatus in a thread. Or I could poll with
# a reasonable interval. Guess what! Threads are hard.
processed_events = 0
if timeout is None:
timeout = MAX_TIMEOUT
else:
timeout = min(MAX_TIMEOUT, int(1000 * timeout))
rc, numBytes, key, evt = self.port.getEvent(timeout)
while 1:
if rc == WAIT_TIMEOUT:
break
if key != KEY_WAKEUP:
assert key == KEY_NORMAL
log.callWithLogger(
evt.owner, self._callEventCallback, rc, numBytes, evt
)
processed_events += 1
if processed_events >= EVENTS_PER_LOOP:
break
rc, numBytes, key, evt = self.port.getEvent(0)
def _callEventCallback(self, rc, numBytes, evt):
owner = evt.owner
why = None
try:
evt.callback(rc, numBytes, evt)
handfn = getattr(owner, "getFileHandle", None)
if not handfn:
why = _NO_GETHANDLE
elif handfn() == -1:
why = _NO_FILEDESC
if why:
return # ignore handles that were closed
except BaseException:
why = sys.exc_info()[1]
log.err()
if why:
owner.loseConnection(failure.Failure(why))
def installWaker(self):
pass
def wakeUp(self):
self.port.postEvent(0, KEY_WAKEUP, None)
def registerHandle(self, handle):
self.port.addHandle(handle, KEY_NORMAL)
def createSocket(self, af, stype):
skt = socket.socket(af, stype)
self.registerHandle(skt.fileno())
return skt
def listenTCP(self, port, factory, backlog=50, interface=""):
"""
@see: twisted.internet.interfaces.IReactorTCP.listenTCP
"""
p = tcp.Port(port, factory, backlog, interface, self)
p.startListening()
return p
def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
"""
@see: twisted.internet.interfaces.IReactorTCP.connectTCP
"""
c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
c.connect()
return c
if TLSMemoryBIOFactory is not None:
def listenSSL(self, port, factory, contextFactory, backlog=50, interface=""):
"""
@see: twisted.internet.interfaces.IReactorSSL.listenSSL
"""
port = self.listenTCP(
port,
TLSMemoryBIOFactory(contextFactory, False, factory),
backlog,
interface,
)
port._type = "TLS"
return port
def connectSSL(
self, host, port, factory, contextFactory, timeout=30, bindAddress=None
):
"""
@see: twisted.internet.interfaces.IReactorSSL.connectSSL
"""
return self.connectTCP(
host,
port,
TLSMemoryBIOFactory(contextFactory, True, factory),
timeout,
bindAddress,
)
else:
def listenSSL(self, port, factory, contextFactory, backlog=50, interface=""):
"""
Non-implementation of L{IReactorSSL.listenSSL}. Some dependency
is not satisfied. This implementation always raises
L{NotImplementedError}.
"""
raise NotImplementedError(
"pyOpenSSL 0.10 or newer is required for SSL support in "
"iocpreactor. It is missing, so the reactor does not support "
"SSL APIs."
)
def connectSSL(
self, host, port, factory, contextFactory, timeout=30, bindAddress=None
):
"""
Non-implementation of L{IReactorSSL.connectSSL}. Some dependency
is not satisfied. This implementation always raises
L{NotImplementedError}.
"""
raise NotImplementedError(
"pyOpenSSL 0.10 or newer is required for SSL support in "
"iocpreactor. It is missing, so the reactor does not support "
"SSL APIs."
)
def listenUDP(self, port, protocol, interface="", maxPacketSize=8192):
"""
Connects a given L{DatagramProtocol} to the given numeric UDP port.
@returns: object conforming to L{IListeningPort}.
"""
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
return p
def listenMulticast(
self, port, protocol, interface="", maxPacketSize=8192, listenMultiple=False
):
"""
Connects a given DatagramProtocol to the given numeric UDP port.
EXPERIMENTAL.
@returns: object conforming to IListeningPort.
"""
p = udp.MulticastPort(
port, protocol, interface, maxPacketSize, self, listenMultiple
)
p.startListening()
return p
def spawnProcess(
self,
processProtocol,
executable,
args=(),
env={},
path=None,
uid=None,
gid=None,
usePTY=0,
childFDs=None,
):
"""
Spawn a process.
"""
if uid is not None:
raise ValueError("Setting UID is unsupported on this platform.")
if gid is not None:
raise ValueError("Setting GID is unsupported on this platform.")
if usePTY:
raise ValueError("PTYs are unsupported on this platform.")
if childFDs is not None:
raise ValueError(
"Custom child file descriptor mappings are unsupported on "
"this platform."
)
return Process(self, processProtocol, executable, args, env, path)
def removeAll(self):
res = list(self.handles)
self.handles.clear()
return res
def install():
    """
    Create an L{IOCPReactor} and install it as the global Twisted reactor.
    """
    main.installReactor(IOCPReactor())
__all__ = ["IOCPReactor", "install"]

View File

@@ -0,0 +1,623 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
TCP support for IOCP reactor
"""
from __future__ import annotations
import errno
import socket
import struct
from typing import TYPE_CHECKING, Optional, Union
from zope.interface import classImplements, implementer
from twisted.internet import address, defer, error, interfaces, main
from twisted.internet.abstract import _LogOwner, isIPv6Address
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.interfaces import IProtocol
from twisted.internet.iocpreactor import abstract, iocpsupport as _iocp
from twisted.internet.iocpreactor.const import (
ERROR_CONNECTION_REFUSED,
ERROR_IO_PENDING,
ERROR_NETWORK_UNREACHABLE,
SO_UPDATE_ACCEPT_CONTEXT,
SO_UPDATE_CONNECT_CONTEXT,
)
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
from twisted.internet.protocol import Protocol
from twisted.internet.tcp import (
Connector as TCPConnector,
_AbortingMixin,
_BaseBaseClient,
_BaseTCPClient,
_getsockname,
_resolveIPv6,
_SocketCloser,
)
from twisted.python import failure, log, reflect
try:
from twisted.internet._newtls import startTLS as __startTLS
except ImportError:
_startTLS = None
else:
_startTLS = __startTLS
if TYPE_CHECKING:
# Circular import only to describe a type.
from twisted.internet.iocpreactor.reactor import IOCPReactor
# ConnectEx returns these. XXX: find out what it does for timeout
connectExErrors = {
ERROR_CONNECTION_REFUSED: errno.WSAECONNREFUSED, # type: ignore[attr-defined]
ERROR_NETWORK_UNREACHABLE: errno.WSAENETUNREACH, # type: ignore[attr-defined]
}
@implementer(IReadWriteHandle, interfaces.ITCPTransport, interfaces.ISystemHandle)
class Connection(abstract.FileHandle, _SocketCloser, _AbortingMixin):
    """
    A TCP connection transport for the IOCP reactor.

    @ivar TLS: C{False} to indicate the connection is in normal TCP mode,
        C{True} to indicate that TLS has been started and that operations must
        be routed through the L{TLSMemoryBIOProtocol} instance.
    """

    TLS = False

    def __init__(self, sock, proto, reactor=None):
        """
        @param sock: the connected socket this transport wraps.
        @param proto: the protocol receiving events from this transport.
        @param reactor: the IOCP reactor, or C{None} for the global one.
        """
        abstract.FileHandle.__init__(self, reactor)
        self.socket = sock
        self.getFileHandle = sock.fileno
        self.protocol = proto

    def getHandle(self):
        """
        Return the underlying socket object (ISystemHandle).
        """
        return self.socket

    def dataReceived(self, rbuffer):
        """
        Deliver received data to the protocol, normalizing it to L{bytes}.

        @param rbuffer: Data received.
        @type rbuffer: L{bytes} or L{bytearray}

        @raise TypeError: if C{rbuffer} is neither bytes nor bytearray.
        """
        if isinstance(rbuffer, bytes):
            pass
        elif isinstance(rbuffer, bytearray):
            # XXX: some day, we'll have protocols that can handle raw buffers
            rbuffer = bytes(rbuffer)
        else:
            # Fixed: ``"..." + type(rbuffer)`` concatenated str and type,
            # which itself raised TypeError and masked this error message.
            raise TypeError(
                f"data must be bytes or bytearray, not {type(rbuffer)}"
            )
        self.protocol.dataReceived(rbuffer)

    def readFromHandle(self, bufflist, evt):
        """
        Issue an overlapped read into C{bufflist}, completing via C{evt}.
        """
        return _iocp.recv(self.getFileHandle(), bufflist, evt)

    def writeToHandle(self, buff, evt):
        """
        Send C{buff} to current file handle using C{_iocp.send}. The buffer
        sent is limited to a size of C{self.SEND_LIMIT}.
        """
        writeView = memoryview(buff)
        return _iocp.send(
            self.getFileHandle(), writeView[0 : self.SEND_LIMIT].tobytes(), evt
        )

    def _closeWriteConnection(self):
        # Half-close the write side of the socket; notify the protocol if it
        # supports IHalfCloseableProtocol.
        try:
            self.socket.shutdown(1)
        except OSError:
            pass
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.writeConnectionLost()
            except BaseException:
                f = failure.Failure()
                log.err()
                self.connectionLost(f)

    def readConnectionLost(self, reason):
        # The read side went away; a half-closeable protocol is notified,
        # anything else loses the whole connection.
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.readConnectionLost()
            except BaseException:
                log.err()
                self.connectionLost(failure.Failure())
        else:
            self.connectionLost(reason)

    def connectionLost(self, reason):
        """
        Tear the connection down and break reference cycles to the socket.
        """
        if self.disconnected:
            return
        abstract.FileHandle.connectionLost(self, reason)
        isClean = reason is None or not reason.check(error.ConnectionAborted)
        self._closeSocket(isClean)
        protocol = self.protocol
        # Drop references before notifying the protocol, so the protocol
        # cannot resurrect a half-torn-down transport.
        del self.protocol
        del self.socket
        del self.getFileHandle
        protocol.connectionLost(reason)

    def logPrefix(self):
        """
        Return the prefix to log with when I own the logging thread.
        """
        return self.logstr

    def getTcpNoDelay(self):
        """
        Return whether TCP_NODELAY (Nagle's algorithm disabled) is set.
        """
        return bool(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))

    def setTcpNoDelay(self, enabled):
        """
        Enable or disable TCP_NODELAY on the socket.
        """
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)

    def getTcpKeepAlive(self):
        """
        Return whether SO_KEEPALIVE is set on the socket.
        """
        return bool(self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE))

    def setTcpKeepAlive(self, enabled):
        """
        Enable or disable SO_KEEPALIVE on the socket.
        """
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)

    if _startTLS is not None:

        def startTLS(self, contextFactory, normal=True):
            """
            @see: L{ITLSTransport.startTLS}
            """
            _startTLS(self, contextFactory, normal, abstract.FileHandle)

    def write(self, data):
        """
        Write some data, either directly to the underlying handle or, if TLS
        has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
        send.

        @see: L{twisted.internet.interfaces.ITransport.write}
        """
        if self.disconnected:
            return
        if self.TLS:
            self.protocol.write(data)
        else:
            abstract.FileHandle.write(self, data)

    def writeSequence(self, iovec):
        """
        Write some data, either directly to the underlying handle or, if TLS
        has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
        send.

        @see: L{twisted.internet.interfaces.ITransport.writeSequence}
        """
        if self.disconnected:
            return
        if self.TLS:
            self.protocol.writeSequence(iovec)
        else:
            abstract.FileHandle.writeSequence(self, iovec)

    def loseConnection(self, reason=None):
        """
        Close the underlying handle or, if TLS has been started, first shut it
        down.

        @see: L{twisted.internet.interfaces.ITransport.loseConnection}
        """
        if self.TLS:
            if self.connected and not self.disconnecting:
                self.protocol.loseConnection()
        else:
            abstract.FileHandle.loseConnection(self, reason)

    def registerProducer(self, producer, streaming):
        """
        Register a producer.

        If TLS is enabled, the TLS connection handles this.
        """
        if self.TLS:
            # Registering a producer before we're connected shouldn't be a
            # problem. If we end up with a write(), that's already handled in
            # the write() code above, and there are no other potential
            # side-effects.
            self.protocol.registerProducer(producer, streaming)
        else:
            abstract.FileHandle.registerProducer(self, producer, streaming)

    def unregisterProducer(self):
        """
        Unregister a producer.

        If TLS is enabled, the TLS connection handles this.
        """
        if self.TLS:
            self.protocol.unregisterProducer()
        else:
            abstract.FileHandle.unregisterProducer(self)

    def getHost(self):
        # ITCPTransport.getHost -- implemented by subclasses (Client/Server),
        # which know their concrete addresses.
        pass

    def getPeer(self):
        # ITCPTransport.getPeer -- implemented by subclasses.
        pass
if _startTLS is not None:
classImplements(Connection, interfaces.ITLSTransport)
class Client(_BaseBaseClient, _BaseTCPClient, Connection):
    """
    A client-side TCP connection transport for the IOCP reactor, connected
    via an overlapped ConnectEx call.

    @ivar _tlsClientDefault: Always C{True}, indicating that this is a client
        connection, and by default when TLS is negotiated this class will act as
        a TLS client.
    """

    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM

    _tlsClientDefault = True
    # Base class used by _BaseBaseClient when re-wrapping the connection.
    _commonConnection = Connection

    def __init__(self, host, port, bindAddress, connector, reactor):
        # ConnectEx documentation says socket _has_ to be bound
        if bindAddress is None:
            bindAddress = ("", 0)
        self.reactor = reactor  # createInternetSocket needs this
        _BaseTCPClient.__init__(self, host, port, bindAddress, connector, reactor)

    def createInternetSocket(self):
        """
        Create a socket registered with the IOCP reactor.

        @see: L{_BaseTCPClient}
        """
        return self.reactor.createSocket(self.addressFamily, self.socketType)

    def _collectSocketDetails(self):
        """
        Clean up potentially circular references to the socket and to its
        C{getFileHandle} method.

        @see: L{_BaseBaseClient}
        """
        del self.socket, self.getFileHandle

    def _stopReadingAndWriting(self):
        """
        Remove the active handle from the reactor.

        @see: L{_BaseBaseClient}
        """
        self.reactor.removeActiveHandle(self)

    def cbConnect(self, rc, data, evt):
        # Completion callback for the overlapped ConnectEx issued in
        # doConnect(); rc is the Windows result code (0 on success).
        if rc:
            # Translate ConnectEx-specific result codes into winsock errno
            # values so error.getConnectError can map them to exceptions.
            rc = connectExErrors.get(rc, rc)
            self.failIfNotConnected(
                error.getConnectError((rc, errno.errorcode.get(rc, "Unknown error")))
            )
        else:
            self.socket.setsockopt(
                socket.SOL_SOCKET,
                SO_UPDATE_CONNECT_CONTEXT,
                struct.pack("P", self.socket.fileno()),
            )
            self.protocol = self.connector.buildProtocol(self.getPeer())
            self.connected = True
            logPrefix = self._getLogPrefix(self.protocol)
            self.logstr = logPrefix + ",client"
            if self.protocol is None:
                # Factory.buildProtocol is allowed to return None. In that
                # case, make up a protocol to satisfy the rest of the
                # implementation; connectionLost is going to be called on
                # something, for example. This is easier than adding special
                # case support for a None protocol throughout the rest of the
                # transport implementation.
                self.protocol = Protocol()
                # But dispose of the connection quickly.
                self.loseConnection()
            else:
                self.protocol.makeConnection(self)
                self.startReading()

    def doConnect(self):
        # Issue the overlapped ConnectEx call.
        if not hasattr(self, "connector"):
            # this happens if we connector.stopConnecting in
            # factory.startedConnecting
            return
        assert _iocp.have_connectex
        self.reactor.addActiveHandle(self)
        evt = _iocp.Event(self.cbConnect, self)
        rc = _iocp.connect(self.socket.fileno(), self.realAddress, evt)
        if rc and rc != ERROR_IO_PENDING:
            # ConnectEx failed synchronously: no completion will be posted,
            # so deliver the result to the callback ourselves.
            self.cbConnect(rc, 0, evt)
class Server(Connection):
    """
    Serverside socket-stream connection class.

    A server-side network connection transport: wraps a socket which came
    from an accept() on a listening port.

    @ivar _tlsClientDefault: Always C{False}, indicating that this is a server
        connection, and by default when TLS is negotiated this class will act as
        a TLS server.
    """

    _tlsClientDefault = False

    def __init__(
        self,
        sock: socket.socket,
        protocol: IProtocol,
        clientAddr: Union[IPv4Address, IPv6Address],
        serverAddr: Union[IPv4Address, IPv6Address],
        sessionno: int,
        reactor: IOCPReactor,
    ):
        """
        Server(sock, protocol, client, server, sessionno)

        Initialize me with a socket, a protocol, a descriptor for my peer (a
        tuple of host, port describing the other end of the connection), an
        instance of Port, and a session number.
        """
        Connection.__init__(self, sock, protocol, reactor)
        self.serverAddr = serverAddr
        self.clientAddr = clientAddr
        self.sessionno = sessionno
        prefix = self._getLogPrefix(self.protocol)
        # Same "<logPrefix>,<sessionno>,<client host>" string as always.
        self.logstr = "{},{},{}".format(prefix, sessionno, self.clientAddr.host)
        self.repstr: str = (
            f"<{self.protocol.__class__.__name__} "
            f"#{self.sessionno} on {self.serverAddr.port}>"
        )
        self.connected = True
        self.startReading()

    def __repr__(self) -> str:
        """
        A string representation of this connection.
        """
        return self.repstr

    def getHost(self):
        """
        Returns an IPv4Address.

        This indicates the server's address.
        """
        return self.serverAddr

    def getPeer(self):
        """
        Returns an IPv4Address.

        This indicates the client's address.
        """
        return self.clientAddr
class Connector(TCPConnector):
    """
    IOCP flavour of the TCP connector; it produces IOCP L{Client} transports.
    """

    def _makeTransport(self):
        """
        Build the L{Client} transport that will perform the connection.
        """
        transport = Client(self.host, self.port, self.bindAddress, self, self.reactor)
        return transport
@implementer(interfaces.IListeningPort)
class Port(_SocketCloser, _LogOwner):
    """
    A TCP listening port driven by overlapped AcceptEx calls on the IOCP
    reactor.
    """

    connected = False
    disconnected = False
    disconnecting = False

    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM
    _addressType = address.IPv4Address
    # Monotonically increasing id handed to each accepted connection.
    sessionno = 0

    # Actual port number being listened on, only set to a non-None
    # value when we are actually listening.
    _realPortNumber: Optional[int] = None

    # A string describing the connections which will be created by this port.
    # Normally this is C{"TCP"}, since this is a TCP port, but when the TLS
    # implementation re-uses this class it overrides the value with C{"TLS"}.
    # Only used for logging.
    _type = "TCP"

    def __init__(self, port, factory, backlog=50, interface="", reactor=None):
        """
        @param port: numeric port to listen on.
        @param factory: factory used to build a protocol per connection.
        @param backlog: listen(2) backlog size.
        @param interface: local address to bind; an IPv6 literal switches
            this port into AF_INET6 mode.
        @param reactor: the IOCP reactor that owns this port.
        """
        self.port = port
        self.factory = factory
        self.backlog = backlog
        self.interface = interface
        self.reactor = reactor
        if isIPv6Address(interface):
            self.addressFamily = socket.AF_INET6
            self._addressType = address.IPv6Address

    def __repr__(self) -> str:
        if self._realPortNumber is not None:
            return "<{} of {} on {}>".format(
                self.__class__,
                self.factory.__class__,
                self._realPortNumber,
            )
        else:
            return "<{} of {} (not listening)>".format(
                self.__class__,
                self.factory.__class__,
            )

    def startListening(self):
        """
        Bind the listening socket, start the factory and issue the first
        overlapped accept.

        @raise error.CannotListenError: if the socket cannot be bound.
        """
        try:
            skt = self.reactor.createSocket(self.addressFamily, self.socketType)
            # TODO: resolve self.interface if necessary
            if self.addressFamily == socket.AF_INET6:
                addr = _resolveIPv6(self.interface, self.port)
            else:
                addr = (self.interface, self.port)
            skt.bind(addr)
        except OSError as le:
            raise error.CannotListenError(self.interface, self.port, le)
        # Size of the address buffers AcceptEx needs for this family.
        self.addrLen = _iocp.maxAddrLen(skt.fileno())
        # Make sure that if we listened on port 0, we update that to
        # reflect what the OS actually assigned us.
        self._realPortNumber = skt.getsockname()[1]
        log.msg(
            "%s starting on %s"
            % (self._getLogPrefix(self.factory), self._realPortNumber)
        )
        self.factory.doStart()
        skt.listen(self.backlog)
        self.connected = True
        self.disconnected = False
        self.reactor.addActiveHandle(self)
        self.socket = skt
        self.getFileHandle = self.socket.fileno
        self.doAccept()

    def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
        """
        Stop accepting connections on this port.

        This will shut down my socket and call self.connectionLost().
        It returns a deferred which will fire successfully when the
        port is actually closed.
        """
        # NOTE(review): the default Failure is evaluated once at class
        # definition time and shared between calls; it is never mutated.
        self.disconnecting = True
        if self.connected:
            self.deferred = defer.Deferred()
            self.reactor.callLater(0, self.connectionLost, connDone)
            return self.deferred

    stopListening = loseConnection

    def _logConnectionLostMsg(self):
        """
        Log message for closing port
        """
        log.msg(f"({self._type} Port {self._realPortNumber} Closed)")

    def connectionLost(self, reason):
        """
        Cleans up the socket.
        """
        self._logConnectionLostMsg()
        self._realPortNumber = None
        d = None
        if hasattr(self, "deferred"):
            d = self.deferred
            del self.deferred
        self.disconnected = True
        self.reactor.removeActiveHandle(self)
        self.connected = False
        self._closeSocket(True)
        del self.socket
        del self.getFileHandle
        try:
            self.factory.doStop()
        except BaseException:
            self.disconnecting = False
            if d is not None:
                # Route the doStop failure through the stopListening deferred.
                d.errback(failure.Failure())
            else:
                raise
        else:
            self.disconnecting = False
            if d is not None:
                d.callback(None)

    def logPrefix(self):
        """
        Returns the name of my class, to prefix log entries with.
        """
        return reflect.qual(self.factory.__class__)

    def getHost(self):
        """
        Returns an IPv4Address or IPv6Address.

        This indicates the server's address.
        """
        return self._addressType("TCP", *_getsockname(self.socket))

    def cbAccept(self, rc, data, evt):
        # Completion callback: handle this accept, then queue the next one
        # unless the port is shutting down.
        self.handleAccept(rc, evt)
        if not (self.disconnecting or self.disconnected):
            self.doAccept()

    def handleAccept(self, rc, evt):
        """
        Process one completed AcceptEx; build the protocol/transport pair
        for the new connection.  Returns whether accepting should continue.
        """
        if self.disconnecting or self.disconnected:
            return False
        # possible errors:
        # (WSAEMFILE, WSAENOBUFS, WSAENFILE, WSAENOMEM, WSAECONNABORTED)
        if rc:
            log.msg(
                "Could not accept new connection -- %s (%s)"
                % (errno.errorcode.get(rc, "unknown error"), rc)
            )
            return False
        else:
            # Inherit the properties from the listening port socket as
            # documented in the `Remarks` section of AcceptEx.
            # https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
            # In this way we can call getsockname and getpeername on the
            # accepted socket.
            evt.newskt.setsockopt(
                socket.SOL_SOCKET,
                SO_UPDATE_ACCEPT_CONTEXT,
                struct.pack("P", self.socket.fileno()),
            )
            family, lAddr, rAddr = _iocp.get_accept_addrs(evt.newskt.fileno(), evt.buff)
            assert family == self.addressFamily
            # Build an IPv6 address that includes the scopeID, if necessary
            if "%" in lAddr[0]:
                scope = int(lAddr[0].split("%")[1])
                lAddr = (lAddr[0], lAddr[1], 0, scope)
            if "%" in rAddr[0]:
                scope = int(rAddr[0].split("%")[1])
                rAddr = (rAddr[0], rAddr[1], 0, scope)
            protocol = self.factory.buildProtocol(self._addressType("TCP", *rAddr))
            if protocol is None:
                # Factory declined the connection.
                evt.newskt.close()
            else:
                s = self.sessionno
                self.sessionno = s + 1
                transport = Server(
                    evt.newskt,
                    protocol,
                    self._addressType("TCP", *rAddr),
                    self._addressType("TCP", *lAddr),
                    s,
                    self.reactor,
                )
                protocol.makeConnection(transport)
            return True

    def doAccept(self):
        """
        Issue one overlapped AcceptEx with a freshly created socket.
        """
        evt = _iocp.Event(self.cbAccept, self)
        # see AcceptEx documentation
        evt.buff = buff = bytearray(2 * (self.addrLen + 16))
        evt.newskt = newskt = self.reactor.createSocket(
            self.addressFamily, self.socketType
        )
        rc = _iocp.accept(self.socket.fileno(), newskt.fileno(), buff, evt)
        if rc and rc != ERROR_IO_PENDING:
            # Synchronous failure: no completion will be posted, so handle
            # the result immediately.
            self.handleAccept(rc, evt)

View File

@@ -0,0 +1,428 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
UDP support for IOCP reactor
"""
import errno
import socket
import struct
import warnings
from typing import Optional
from zope.interface import implementer
from twisted.internet import address, defer, error, interfaces
from twisted.internet.abstract import isIPAddress, isIPv6Address
from twisted.internet.iocpreactor import abstract, iocpsupport as _iocp
from twisted.internet.iocpreactor.const import (
ERROR_CONNECTION_REFUSED,
ERROR_IO_PENDING,
ERROR_PORT_UNREACHABLE,
)
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
from twisted.python import failure, log
@implementer(
    IReadWriteHandle,
    interfaces.IListeningPort,
    interfaces.IUDPTransport,
    interfaces.ISystemHandle,
)
class Port(abstract.FileHandle):
    """
    UDP port, listening for packets.

    @ivar addressFamily: L{socket.AF_INET} or L{socket.AF_INET6}, depending on
        whether this port is listening on an IPv4 address or an IPv6 address.
    """

    addressFamily = socket.AF_INET
    socketType = socket.SOCK_DGRAM
    # Datagram reads always use the single fixed-size buffer allocated from
    # maxPacketSize; the buffer never grows.
    dynamicReadBuffers = False

    # Actual port number being listened on, only set to a non-None
    # value when we are actually listening.
    _realPortNumber: Optional[int] = None

    def __init__(self, port, proto, interface="", maxPacketSize=8192, reactor=None):
        """
        Initialize with a numeric port to listen on.

        @param port: numeric UDP port.
        @param proto: the DatagramProtocol served by this port.
        @param interface: local IP address to bind to (IPv4 or IPv6 literal).
        @param maxPacketSize: size of the receive buffer.
        @param reactor: the IOCP reactor, or C{None} for the global one.
        """
        self.port = port
        self.protocol = proto
        self.readBufferSize = maxPacketSize
        self.interface = interface
        self.setLogStr()
        self._connectedAddr = None
        self._setAddressFamily()
        abstract.FileHandle.__init__(self, reactor)
        # A throwaway socket is created only so _iocp.maxAddrLen can report
        # how large a sockaddr buffer this address family needs; the real
        # listening socket is created later in _bindSocket().
        skt = socket.socket(self.addressFamily, self.socketType)
        addrLen = _iocp.maxAddrLen(skt.fileno())
        self.addressBuffer = bytearray(addrLen)
        # WSARecvFrom takes an int
        self.addressLengthBuffer = bytearray(struct.calcsize("i"))

    def _setAddressFamily(self):
        """
        Resolve address family for the socket.

        @raise error.InvalidAddressError: if C{self.interface} is non-empty
            but is not an IP address literal.
        """
        if isIPv6Address(self.interface):
            self.addressFamily = socket.AF_INET6
        elif isIPAddress(self.interface):
            self.addressFamily = socket.AF_INET
        elif self.interface:
            raise error.InvalidAddressError(
                self.interface, "not an IPv4 or IPv6 address"
            )

    def __repr__(self) -> str:
        if self._realPortNumber is not None:
            return f"<{self.protocol.__class__} on {self._realPortNumber}>"
        else:
            return f"<{self.protocol.__class__} not connected>"

    def getHandle(self):
        """
        Return a socket object.
        """
        return self.socket

    def startListening(self):
        """
        Create and bind my socket, and begin listening on it.

        This is called on unserialization, and must be called after creating a
        server to begin listening on the specified port.
        """
        self._bindSocket()
        self._connectToProtocol()

    def createSocket(self):
        """
        Create a datagram socket registered with the IOCP reactor.
        """
        return self.reactor.createSocket(self.addressFamily, self.socketType)

    def _bindSocket(self):
        """
        Create the listening socket and bind it to the requested address.

        @raise error.CannotListenError: if the bind fails.
        """
        try:
            skt = self.createSocket()
            skt.bind((self.interface, self.port))
        except OSError as le:
            raise error.CannotListenError(self.interface, self.port, le)
        # Make sure that if we listened on port 0, we update that to
        # reflect what the OS actually assigned us.
        self._realPortNumber = skt.getsockname()[1]
        log.msg(
            "%s starting on %s"
            % (self._getLogPrefix(self.protocol), self._realPortNumber)
        )
        self.connected = True
        self.socket = skt
        self.getFileHandle = self.socket.fileno

    def _connectToProtocol(self):
        # Hook the protocol up to this transport and issue the first
        # overlapped read.
        self.protocol.makeConnection(self)
        self.startReading()
        self.reactor.addActiveHandle(self)

    def cbRead(self, rc, data, evt):
        # Completion callback for an overlapped recvfrom; keep reading for
        # as long as we have not been stopped.
        if self.reading:
            self.handleRead(rc, data, evt)
            self.doRead()

    def handleRead(self, rc, data, evt):
        """
        Dispatch one completed read: report a refused connection, log an
        unexpected error, or deliver the datagram to the protocol.
        """
        if rc in (
            errno.WSAECONNREFUSED,
            errno.WSAECONNRESET,
            ERROR_CONNECTION_REFUSED,
            ERROR_PORT_UNREACHABLE,
        ):
            if self._connectedAddr:
                self.protocol.connectionRefused()
        elif rc:
            log.msg(
                "error in recvfrom -- %s (%s)"
                % (errno.errorcode.get(rc, "unknown error"), rc)
            )
        else:
            try:
                self.protocol.datagramReceived(
                    bytes(evt.buff[:data]), _iocp.makesockaddr(evt.addr_buff)
                )
            except BaseException:
                log.err()

    def doRead(self):
        """
        Issue one overlapped recvfrom on the socket.
        """
        evt = _iocp.Event(self.cbRead, self)
        evt.buff = buff = self._readBuffers[0]
        evt.addr_buff = addr_buff = self.addressBuffer
        evt.addr_len_buff = addr_len_buff = self.addressLengthBuffer
        rc, data = _iocp.recvfrom(
            self.getFileHandle(), buff, addr_buff, addr_len_buff, evt
        )
        if rc and rc != ERROR_IO_PENDING:
            # If the error was not 0 or IO_PENDING then that means recvfrom() hit a
            # failure condition. In this situation recvfrom() gives us our response
            # right away and we don't need to wait for Windows to call the callback
            # on our event. In fact, windows will not call it for us so we must call it
            # ourselves manually
            self.reactor.callLater(0, self.cbRead, rc, data, evt)

    def write(self, datagram, addr=None):
        """
        Write a datagram.

        @param addr: should be a tuple (ip, port), can be None in connected
            mode.

        @raise error.MessageLengthError: if the datagram is too large.
        @raise error.InvalidAddressError: if C{addr} is not a literal IP
            address of this port's address family.
        """
        if self._connectedAddr:
            assert addr in (None, self._connectedAddr)
            try:
                return self.socket.send(datagram)
            except OSError as se:
                no = se.args[0]
                if no == errno.WSAEINTR:
                    # Interrupted system call; retry the send.
                    return self.write(datagram)
                elif no == errno.WSAEMSGSIZE:
                    raise error.MessageLengthError("message too long")
                elif no in (
                    errno.WSAECONNREFUSED,
                    errno.WSAECONNRESET,
                    ERROR_CONNECTION_REFUSED,
                    ERROR_PORT_UNREACHABLE,
                ):
                    self.protocol.connectionRefused()
                else:
                    raise
        else:
            # Fixed idiom: use an identity check against None rather than
            # "addr != None", which invoked __ne__ on arbitrary objects.
            assert addr is not None
            if (
                not isIPAddress(addr[0])
                and not isIPv6Address(addr[0])
                and addr[0] != "<broadcast>"
            ):
                raise error.InvalidAddressError(
                    addr[0], "write() only accepts IP addresses, not hostnames"
                )
            if isIPAddress(addr[0]) and self.addressFamily == socket.AF_INET6:
                raise error.InvalidAddressError(
                    addr[0], "IPv6 port write() called with IPv4 address"
                )
            if isIPv6Address(addr[0]) and self.addressFamily == socket.AF_INET:
                raise error.InvalidAddressError(
                    addr[0], "IPv4 port write() called with IPv6 address"
                )
            try:
                return self.socket.sendto(datagram, addr)
            except OSError as se:
                no = se.args[0]
                if no == errno.WSAEINTR:
                    # Interrupted system call; retry the send.
                    return self.write(datagram, addr)
                elif no == errno.WSAEMSGSIZE:
                    raise error.MessageLengthError("message too long")
                elif no in (
                    errno.WSAECONNREFUSED,
                    errno.WSAECONNRESET,
                    ERROR_CONNECTION_REFUSED,
                    ERROR_PORT_UNREACHABLE,
                ):
                    # in non-connected UDP ECONNREFUSED is platform dependent,
                    # I think and the info is not necessarily useful.
                    # Nevertheless maybe we should call connectionRefused? XXX
                    return
                else:
                    raise

    def writeSequence(self, seq, addr):
        """
        Write an iterable of byte strings as a single datagram.
        """
        self.write(b"".join(seq), addr)

    def connect(self, host, port):
        """
        'Connect' to remote server.

        @raise RuntimeError: if the port is already connected.
        @raise error.InvalidAddressError: if C{host} is not an IP literal.
        """
        if self._connectedAddr:
            raise RuntimeError(
                "already connected, reconnecting is not currently supported "
                "(talk to itamar if you want this)"
            )
        if not isIPAddress(host) and not isIPv6Address(host):
            raise error.InvalidAddressError(host, "not an IPv4 or IPv6 address.")
        self._connectedAddr = (host, port)
        self.socket.connect((host, port))

    def _loseConnection(self):
        # Stop issuing reads and schedule the actual teardown.
        self.stopReading()
        self.reactor.removeActiveHandle(self)
        if self.connected:  # actually means if we are *listening*
            self.reactor.callLater(0, self.connectionLost)

    def stopListening(self):
        """
        Stop listening.

        @return: a Deferred firing when the port is closed, or C{None} if
            the port was not listening.
        """
        if self.connected:
            result = self.d = defer.Deferred()
        else:
            result = None
        self._loseConnection()
        return result

    def loseConnection(self):
        """
        Deprecated alias for L{stopListening}.
        """
        warnings.warn(
            "Please use stopListening() to disconnect port",
            DeprecationWarning,
            stacklevel=2,
        )
        self.stopListening()

    def connectionLost(self, reason=None):
        """
        Cleans up my socket.
        """
        log.msg("(UDP Port %s Closed)" % self._realPortNumber)
        self._realPortNumber = None
        abstract.FileHandle.connectionLost(self, reason)
        self.protocol.doStop()
        self.socket.close()
        del self.socket
        del self.getFileHandle
        if hasattr(self, "d"):
            # Fire the deferred handed out by stopListening().
            self.d.callback(None)
            del self.d

    def setLogStr(self):
        """
        Initialize the C{logstr} attribute to be used by C{logPrefix}.
        """
        logPrefix = self._getLogPrefix(self.protocol)
        self.logstr = "%s (UDP)" % logPrefix

    def logPrefix(self):
        """
        Returns the name of my class, to prefix log entries with.
        """
        return self.logstr

    def getHost(self):
        """
        Return the local address of the UDP connection

        @returns: the local address of the UDP connection
        @rtype: L{IPv4Address} or L{IPv6Address}
        """
        addr = self.socket.getsockname()
        if self.addressFamily == socket.AF_INET:
            return address.IPv4Address("UDP", *addr)
        elif self.addressFamily == socket.AF_INET6:
            return address.IPv6Address("UDP", *(addr[:2]))

    def setBroadcastAllowed(self, enabled):
        """
        Set whether this port may broadcast. This is disabled by default.

        @param enabled: Whether the port may broadcast.
        @type enabled: L{bool}
        """
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, enabled)

    def getBroadcastAllowed(self):
        """
        Checks if broadcast is currently allowed on this port.

        @return: Whether this port may broadcast.
        @rtype: L{bool}
        """
        return bool(self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST))
class MulticastMixin:
    """
    Implement multicast functionality.
    """

    def getOutgoingInterface(self):
        """
        Return the outgoing multicast interface as a dotted-quad string.
        """
        raw = self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF)
        return socket.inet_ntoa(struct.pack("@i", raw))

    def setOutgoingInterface(self, addr):
        """
        Returns Deferred of success.
        """
        d = self.reactor.resolve(addr)
        return d.addCallback(self._setInterface)

    def _setInterface(self, addr):
        # setsockopt wants the interface address packed in binary form.
        packed = socket.inet_aton(addr)
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, packed)
        return 1

    def getLoopbackMode(self):
        """
        Return whether sent multicast packets are looped back to this host.
        """
        return self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP)

    def setLoopbackMode(self, mode):
        """
        Enable or disable multicast loopback on the socket.
        """
        packed = struct.pack("b", bool(mode))
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, packed)

    def getTTL(self):
        """
        Return the multicast time-to-live configured on the socket.
        """
        return self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL)

    def setTTL(self, ttl):
        """
        Set the multicast time-to-live on the socket.
        """
        self.socket.setsockopt(
            socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("B", ttl)
        )

    def joinGroup(self, addr, interface=""):
        """
        Join a multicast group. Returns Deferred of success.
        """
        return self.reactor.resolve(addr).addCallback(self._joinAddr1, interface, 1)

    def _joinAddr1(self, addr, interface, join):
        # The interface also needs resolving before the setsockopt call.
        return self.reactor.resolve(interface).addCallback(self._joinAddr2, addr, join)

    def _joinAddr2(self, interface, addr, join):
        addr = socket.inet_aton(addr)
        interface = socket.inet_aton(interface)
        cmd = socket.IP_ADD_MEMBERSHIP if join else socket.IP_DROP_MEMBERSHIP
        try:
            self.socket.setsockopt(socket.IPPROTO_IP, cmd, addr + interface)
        except OSError as e:
            return failure.Failure(error.MulticastJoinError(addr, interface, *e.args))

    def leaveGroup(self, addr, interface=""):
        """
        Leave multicast group, return Deferred of success.
        """
        return self.reactor.resolve(addr).addCallback(self._joinAddr1, interface, 0)
@implementer(interfaces.IMulticastTransport)
class MulticastPort(MulticastMixin, Port):
    """
    UDP Port that supports multicasting.
    """

    def __init__(
        self,
        port,
        proto,
        interface="",
        maxPacketSize=8192,
        reactor=None,
        listenMultiple=False,
    ):
        Port.__init__(self, port, proto, interface, maxPacketSize, reactor)
        # When true, allow several sockets to bind this multicast port.
        self.listenMultiple = listenMultiple

    def createSocket(self):
        """
        Create the datagram socket, enabling address (and, where available,
        port) reuse when multiple listeners were requested.
        """
        sock = Port.createSocket(self)
        if self.listenMultiple:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            # SO_REUSEPORT does not exist on every platform.
            reuseport = getattr(socket, "SO_REUSEPORT", None)
            if reuseport is not None:
                sock.setsockopt(socket.SOL_SOCKET, reuseport, 1)
        return sock

View File

@@ -0,0 +1,324 @@
# -*- test-case-name: twisted.test.test_kqueuereactor -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A kqueue()/kevent() based implementation of the Twisted main loop.
To use this reactor, start your application specifying the kqueue reactor::
twistd --reactor kqueue ...
To install the event loop from code (and you should do this before any
connections, listeners or connectors are added)::
from twisted.internet import kqreactor
kqreactor.install()
"""
import errno
import select
from zope.interface import Attribute, Interface, declarations, implementer
from twisted.internet import main, posixbase
from twisted.internet.interfaces import IReactorDaemonize, IReactorFDSet
from twisted.python import failure, log
try:
# This is to keep mypy from complaining
# We don't use type: ignore[attr-defined] on import, because mypy only complains
# on on some platforms, and then the unused ignore is an issue if the undefined
# attribute isn't.
KQ_EV_ADD = getattr(select, "KQ_EV_ADD")
KQ_EV_DELETE = getattr(select, "KQ_EV_DELETE")
KQ_EV_EOF = getattr(select, "KQ_EV_EOF")
KQ_FILTER_READ = getattr(select, "KQ_FILTER_READ")
KQ_FILTER_WRITE = getattr(select, "KQ_FILTER_WRITE")
except AttributeError as e:
raise ImportError(e)
class _IKQueue(Interface):
    """
    An interface for KQueue implementations.
    """

    kqueue = Attribute("An implementation of kqueue(2).")
    kevent = Attribute("An implementation of kevent(2).")


# The stdlib select module itself satisfies _IKQueue (it exposes both
# kqueue() and kevent()), so mark it as a provider for the default case.
declarations.directlyProvides(select, _IKQueue)
@implementer(IReactorFDSet, IReactorDaemonize)
class KQueueReactor(posixbase.PosixReactorBase):
    """
    A reactor that uses kqueue(2)/kevent(2) and relies on Python 2.6 or higher
    which has built in support for kqueue in the select module.

    @ivar _kq: A C{kqueue} which will be used to check for I/O readiness.

    @ivar _impl: The implementation of L{_IKQueue} to use.

    @ivar _selectables: A dictionary mapping integer file descriptors to
        instances of L{FileDescriptor} which have been registered with the
        reactor.  All L{FileDescriptor}s which are currently receiving read or
        write readiness notifications will be present as values in this
        dictionary.

    @ivar _reads: A set containing integer file descriptors.  Values in this
        set will be registered with C{_kq} for read readiness notifications
        which will be dispatched to the corresponding L{FileDescriptor}
        instances in C{_selectables}.

    @ivar _writes: A set containing integer file descriptors.  Values in this
        set will be registered with C{_kq} for write readiness notifications
        which will be dispatched to the corresponding L{FileDescriptor}
        instances in C{_selectables}.
    """

    def __init__(self, _kqueueImpl=select):
        """
        Initialize kqueue object, file descriptor tracking dictionaries, and
        the base class.

        See:
            - http://docs.python.org/library/select.html
            - www.freebsd.org/cgi/man.cgi?query=kqueue
            - people.freebsd.org/~jlemon/papers/kqueue.pdf

        @param _kqueueImpl: The implementation of L{_IKQueue} to use. A
            hook for testing.
        """
        self._impl = _kqueueImpl
        self._kq = self._impl.kqueue()
        self._reads = set()
        self._writes = set()
        self._selectables = {}
        posixbase.PosixReactorBase.__init__(self)

    def _updateRegistration(self, fd, filter, op):
        """
        Private method for changing kqueue registration on a given FD
        filtering for events given filter/op.  This will never block and
        returns nothing.
        """
        # max_events=0 / timeout=0 makes this a pure registration call.
        self._kq.control([self._impl.kevent(fd, filter, op)], 0, 0)

    def beforeDaemonize(self):
        """
        Implement L{IReactorDaemonize.beforeDaemonize}.
        """
        # Twisted-internal method called during daemonization (when application
        # is started via twistd).  This is called right before the magic double
        # forking done for daemonization.  We cleanly close the kqueue() and later
        # recreate it.  This is needed since a) kqueue() are not inherited across
        # forks and b) twistd will create the reactor already before daemonization
        # (and will also add at least 1 reader to the reactor, an instance of
        # twisted.internet.posixbase._UnixWaker).
        #
        # See: twisted.scripts._twistd_unix.daemonize()
        self._kq.close()
        self._kq = None

    def afterDaemonize(self):
        """
        Implement L{IReactorDaemonize.afterDaemonize}.
        """
        # Twisted-internal method called during daemonization.  This is called right
        # after daemonization and recreates the kqueue() and any readers/writers
        # that were added before.  Note that you MUST NOT call any reactor methods
        # in between beforeDaemonize() and afterDaemonize()!
        self._kq = self._impl.kqueue()
        for fd in self._reads:
            self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ADD)
        for fd in self._writes:
            self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ADD)

    def addReader(self, reader):
        """
        Implement L{IReactorFDSet.addReader}.
        """
        fd = reader.fileno()
        if fd not in self._reads:
            try:
                self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ADD)
            except OSError:
                # Registration can fail for descriptors kqueue cannot watch;
                # the selectable is still tracked below regardless.
                pass
            finally:
                self._selectables[fd] = reader
                self._reads.add(fd)

    def addWriter(self, writer):
        """
        Implement L{IReactorFDSet.addWriter}.
        """
        fd = writer.fileno()
        if fd not in self._writes:
            try:
                self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ADD)
            except OSError:
                # Same best-effort registration as addReader.
                pass
            finally:
                self._selectables[fd] = writer
                self._writes.add(fd)

    def removeReader(self, reader):
        """
        Implement L{IReactorFDSet.removeReader}.
        """
        wasLost = False
        try:
            fd = reader.fileno()
        except BaseException:
            # fileno() can vanish underneath us; fall back to a scan below.
            fd = -1
        if fd == -1:
            for fd, fdes in self._selectables.items():
                if reader is fdes:
                    wasLost = True
                    break
            else:
                return
        if fd in self._reads:
            self._reads.remove(fd)
            if fd not in self._writes:
                del self._selectables[fd]
            if not wasLost:
                try:
                    self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_DELETE)
                except OSError:
                    pass

    def removeWriter(self, writer):
        """
        Implement L{IReactorFDSet.removeWriter}.
        """
        wasLost = False
        try:
            fd = writer.fileno()
        except BaseException:
            # fileno() can vanish underneath us; fall back to a scan below.
            fd = -1
        if fd == -1:
            for fd, fdes in self._selectables.items():
                if writer is fdes:
                    wasLost = True
                    break
            else:
                return
        if fd in self._writes:
            self._writes.remove(fd)
            if fd not in self._reads:
                del self._selectables[fd]
            if not wasLost:
                try:
                    self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_DELETE)
                except OSError:
                    pass

    def removeAll(self):
        """
        Implement L{IReactorFDSet.removeAll}.
        """
        return self._removeAll(
            [self._selectables[fd] for fd in self._reads],
            [self._selectables[fd] for fd in self._writes],
        )

    def getReaders(self):
        """
        Implement L{IReactorFDSet.getReaders}.
        """
        return [self._selectables[fd] for fd in self._reads]

    def getWriters(self):
        """
        Implement L{IReactorFDSet.getWriters}.
        """
        return [self._selectables[fd] for fd in self._writes]

    def doKEvent(self, timeout):
        """
        Poll the kqueue for new events.
        """
        if timeout is None:
            timeout = 1

        try:
            events = self._kq.control([], len(self._selectables), timeout)
        except OSError as e:
            # Since this command blocks for potentially a while, it's possible
            # EINTR can be raised for various reasons (for example, if the user
            # hits ^C).
            if e.errno == errno.EINTR:
                return
            else:
                raise

        _drdw = self._doWriteOrRead
        for event in events:
            fd = event.ident
            try:
                selectable = self._selectables[fd]
            except KeyError:
                # Handles the infrequent case where one selectable's
                # handler disconnects another.
                continue
            else:
                log.callWithLogger(selectable, _drdw, selectable, fd, event)

    def _doWriteOrRead(self, selectable, fd, event):
        """
        Private method called when a FD is ready for reading, writing or was
        lost.  Do the work and raise errors where necessary.
        """
        why = None
        inRead = False
        (filter, flags, data, fflags) = (
            event.filter,
            event.flags,
            event.data,
            event.fflags,
        )

        if flags & KQ_EV_EOF and data and fflags:
            # NOTE(review): EOF with nonzero data/fflags is treated as an
            # abrupt loss rather than a clean close -- confirm against
            # kevent(2) semantics before changing.
            why = main.CONNECTION_LOST
        else:
            try:
                if selectable.fileno() == -1:
                    inRead = False
                    why = posixbase._NO_FILEDESC
                else:
                    if filter == KQ_FILTER_READ:
                        inRead = True
                        why = selectable.doRead()
                    if filter == KQ_FILTER_WRITE:
                        inRead = False
                        why = selectable.doWrite()
            except BaseException:
                # Any exception from application code gets logged and will
                # cause us to disconnect the selectable.
                why = failure.Failure()
                log.err(
                    why,
                    "An exception was raised from application code"
                    " while processing a reactor selectable",
                )

        if why:
            self._disconnectSelectable(selectable, why, inRead)

    doIteration = doKEvent
def install():
    """
    Install the kqueue() reactor.
    """
    from twisted.internet.main import installReactor

    reactor = KQueueReactor()
    installReactor(reactor)


__all__ = ["KQueueReactor", "install"]

View File

@@ -0,0 +1,37 @@
# -*- test-case-name: twisted.internet.test.test_main -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Backwards compatibility, and utility functions.
In general, this module should not be used, other than by reactor authors
who need to use the 'installReactor' method.
"""
from twisted.internet import error
# Shared, pre-built disconnection reasons used throughout the reactor code.
CONNECTION_DONE = error.ConnectionDone("Connection done")
CONNECTION_LOST = error.ConnectionLost("Connection lost")


def installReactor(reactor):
    """
    Install reactor C{reactor}.

    @param reactor: An object that provides one or more IReactor* interfaces.
    """
    # This stuff should be common to all reactors.
    import sys

    import twisted.internet

    if "twisted.internet.reactor" in sys.modules:
        raise error.ReactorAlreadyInstalledError("reactor already installed")
    # Publish the instance both as an attribute and as an importable module.
    twisted.internet.reactor = reactor
    sys.modules["twisted.internet.reactor"] = reactor


__all__ = ["CONNECTION_LOST", "CONNECTION_DONE", "installReactor"]

View File

@@ -0,0 +1,189 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A poll() based implementation of the twisted main loop.
To install the event loop (and you should do this before any connections,
listeners or connectors are added)::
from twisted.internet import pollreactor
pollreactor.install()
"""
# System imports
import errno
from select import (
POLLERR,
POLLHUP,
POLLIN,
POLLNVAL,
POLLOUT,
error as SelectError,
poll,
)
from zope.interface import implementer
from twisted.internet import posixbase
from twisted.internet.interfaces import IReactorFDSet
# Twisted imports
from twisted.python import log
@implementer(IReactorFDSet)
class PollReactor(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
    """
    A reactor that uses poll(2).

    @ivar _poller: A L{select.poll} which will be used to check for I/O
        readiness.

    @ivar _selectables: A dictionary mapping integer file descriptors to
        instances of L{FileDescriptor} which have been registered with the
        reactor.  All L{FileDescriptor}s which are currently receiving read or
        write readiness notifications will be present as values in this
        dictionary.

    @ivar _reads: A dictionary mapping integer file descriptors to arbitrary
        values (this is essentially a set).  Keys in this dictionary will be
        registered with C{_poller} for read readiness notifications which will
        be dispatched to the corresponding L{FileDescriptor} instances in
        C{_selectables}.

    @ivar _writes: A dictionary mapping integer file descriptors to arbitrary
        values (this is essentially a set).  Keys in this dictionary will be
        registered with C{_poller} for write readiness notifications which will
        be dispatched to the corresponding L{FileDescriptor} instances in
        C{_selectables}.
    """

    # Event masks consumed by posixbase._PollLikeMixin._doReadOrWrite:
    _POLL_DISCONNECTED = POLLHUP | POLLERR | POLLNVAL
    _POLL_IN = POLLIN
    _POLL_OUT = POLLOUT

    def __init__(self):
        """
        Initialize polling object, file descriptor tracking dictionaries, and
        the base class.
        """
        self._poller = poll()
        self._selectables = {}
        self._reads = {}
        self._writes = {}
        posixbase.PosixReactorBase.__init__(self)

    def _updateRegistration(self, fd):
        """Register/unregister an fd with the poller."""
        try:
            self._poller.unregister(fd)
        except KeyError:
            # The fd was not previously registered; that's fine.
            pass
        mask = 0
        if fd in self._reads:
            mask = mask | POLLIN
        if fd in self._writes:
            mask = mask | POLLOUT
        if mask != 0:
            self._poller.register(fd, mask)
        else:
            # Nobody is interested in this fd any more; forget its selectable.
            if fd in self._selectables:
                del self._selectables[fd]

    def _dictRemove(self, selectable, mdict):
        # Shared implementation of removeReader/removeWriter: find the fd for
        # `selectable` and drop it from the given tracking dict.
        try:
            # the easy way
            fd = selectable.fileno()
            # make sure the fd is actually real. In some situations we can get
            # -1 here.
            mdict[fd]
        except BaseException:
            # the hard way: necessary because fileno() may disappear at any
            # moment, thanks to python's underlying sockets impl
            for fd, fdes in self._selectables.items():
                if selectable is fdes:
                    break
            else:
                # Hmm, maybe not the right course of action? This method can't
                # fail, because it happens inside error detection...
                return
        if fd in mdict:
            del mdict[fd]
            self._updateRegistration(fd)

    def addReader(self, reader):
        """Add a FileDescriptor for notification of data available to read."""
        fd = reader.fileno()
        if fd not in self._reads:
            self._selectables[fd] = reader
            self._reads[fd] = 1
            self._updateRegistration(fd)

    def addWriter(self, writer):
        """Add a FileDescriptor for notification of data available to write."""
        fd = writer.fileno()
        if fd not in self._writes:
            self._selectables[fd] = writer
            self._writes[fd] = 1
            self._updateRegistration(fd)

    def removeReader(self, reader):
        """Remove a Selectable for notification of data available to read."""
        return self._dictRemove(reader, self._reads)

    def removeWriter(self, writer):
        """Remove a Selectable for notification of data available to write."""
        return self._dictRemove(writer, self._writes)

    def removeAll(self):
        """
        Remove all selectables, and return a list of them.
        """
        return self._removeAll(
            [self._selectables[fd] for fd in self._reads],
            [self._selectables[fd] for fd in self._writes],
        )

    def doPoll(self, timeout):
        """Poll the poller for new events."""
        if timeout is not None:
            timeout = int(timeout * 1000)  # convert seconds to milliseconds

        try:
            l = self._poller.poll(timeout)
        except SelectError as e:
            if e.args[0] == errno.EINTR:
                # Interrupted by a signal (e.g. ^C); just return and let the
                # main loop call us again.
                return
            else:
                raise
        _drdw = self._doReadOrWrite
        for fd, event in l:
            try:
                selectable = self._selectables[fd]
            except KeyError:
                # Handles the infrequent case where one selectable's
                # handler disconnects another.
                continue
            log.callWithLogger(selectable, _drdw, selectable, fd, event)

    doIteration = doPoll

    def getReaders(self):
        # All selectables currently registered for read notifications.
        return [self._selectables[fd] for fd in self._reads]

    def getWriters(self):
        # All selectables currently registered for write notifications.
        return [self._selectables[fd] for fd in self._writes]
def install():
    """Install the poll() reactor."""
    from twisted.internet.main import installReactor

    reactor = PollReactor()
    installReactor(reactor)


__all__ = ["PollReactor", "install"]

View File

@@ -0,0 +1,652 @@
# -*- test-case-name: twisted.test.test_internet,twisted.internet.test.test_posixbase -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Posix reactor base class
"""
import socket
import sys
from typing import Sequence
from zope.interface import classImplements, implementer
from twisted.internet import error, tcp, udp
from twisted.internet.base import ReactorBase
from twisted.internet.interfaces import (
IHalfCloseableDescriptor,
IReactorFDSet,
IReactorMulticast,
IReactorProcess,
IReactorSocket,
IReactorSSL,
IReactorTCP,
IReactorUDP,
IReactorUNIX,
IReactorUNIXDatagram,
)
from twisted.internet.main import CONNECTION_DONE, CONNECTION_LOST
from twisted.python import failure, log
from twisted.python.runtime import platform, platformType
from ._signals import (
SignalHandling,
_ChildSignalHandling,
_IWaker,
_MultiSignalHandling,
_Waker,
)
# Exceptions that doSelect might return frequently
_NO_FILEDESC = error.ConnectionFdescWentAway("File descriptor lost")
try:
from twisted.protocols import tls as _tls
except ImportError:
tls = None
else:
tls = _tls
try:
from twisted.internet import ssl as _ssl
except ImportError:
ssl = None
else:
ssl = _ssl
unixEnabled = platformType == "posix"
processEnabled = False
if unixEnabled:
from twisted.internet import process, unix
processEnabled = True
if platform.isWindows():
try:
import win32process
processEnabled = True
except ImportError:
win32process = None
class _DisconnectSelectableMixin:
    """
    Mixin providing the C{_disconnectSelectable} method.
    """

    def _disconnectSelectable(
        self,
        selectable,
        why,
        isRead,
        # The mutable default is intentional: it is a pre-built, read-only
        # cache of Failure instances for the two common disconnect reasons.
        faildict={
            error.ConnectionDone: failure.Failure(error.ConnectionDone()),
            error.ConnectionLost: failure.Failure(error.ConnectionLost()),
        },
    ):
        """
        Utility function for disconnecting a selectable.

        Supports half-close notification, isRead should be boolean indicating
        whether error resulted from doRead().
        """
        self.removeReader(selectable)
        f = faildict.get(why.__class__)
        if f:
            if (
                isRead
                and why.__class__ == error.ConnectionDone
                and IHalfCloseableDescriptor.providedBy(selectable)
            ):
                # Clean close of the read side on a half-closeable transport:
                # only the read direction is shut down; writing may continue.
                selectable.readConnectionLost(f)
            else:
                self.removeWriter(selectable)
                selectable.connectionLost(f)
        else:
            # Reason not in the cache: wrap it in a fresh Failure.
            self.removeWriter(selectable)
            selectable.connectionLost(failure.Failure(why))
@implementer(IReactorTCP, IReactorUDP, IReactorMulticast)
class PosixReactorBase(_DisconnectSelectableMixin, ReactorBase):
    """
    A basis for reactors that use file descriptors.

    @ivar _childWaker: L{None} or a reference to the L{_SIGCHLDWaker}
        which is used to properly notice child process termination.
    """

    _childWaker = None

    # Callable that creates a waker, overrideable so that subclasses can
    # substitute their own implementation:
    def _wakerFactory(self) -> _IWaker:
        return _Waker()

    def installWaker(self):
        """
        Install a `waker' to allow threads and signals to wake up the IO thread.

        We use the self-pipe trick (http://cr.yp.to/docs/selfpipe.html) to wake
        the reactor. On Windows we use a pair of sockets.
        """
        if not self.waker:
            self.waker = self._wakerFactory()
            # Track the waker as an internal reader so removeAll() never
            # hands it back to application code.
            self._internalReaders.add(self.waker)
            self.addReader(self.waker)

    def _signalsFactory(self) -> SignalHandling:
        """
        Customize reactor signal handling to support child processes on POSIX
        platforms.
        """
        baseHandling = super()._signalsFactory()
        # If we're on a platform that uses signals for process event signaling
        if platformType == "posix":
            # Compose ...
            return _MultiSignalHandling(
                (
                    # the base signal handling behavior ...
                    baseHandling,
                    # with our extra SIGCHLD handling behavior.
                    _ChildSignalHandling(
                        self._addInternalReader,
                        self._removeInternalReader,
                    ),
                )
            )
        # Otherwise just use the base behavior
        return baseHandling

    # IReactorProcess

    def spawnProcess(
        self,
        processProtocol,
        executable,
        args=(),
        # Mutable default is safe here: env is only forwarded, never mutated.
        env={},
        path=None,
        uid=None,
        gid=None,
        usePTY=0,
        childFDs=None,
    ):
        """
        Spawn a child process, dispatching to the appropriate platform
        implementation.

        @raise ValueError: for option combinations unsupported on the
            current platform.
        @raise NotImplementedError: when process support is unavailable.
        """
        if platformType == "posix":
            if usePTY:
                if childFDs is not None:
                    raise ValueError(
                        "Using childFDs is not supported with usePTY=True."
                    )
                return process.PTYProcess(
                    self, executable, args, env, path, processProtocol, uid, gid, usePTY
                )
            else:
                return process.Process(
                    self,
                    executable,
                    args,
                    env,
                    path,
                    processProtocol,
                    uid,
                    gid,
                    childFDs,
                )
        elif platformType == "win32":
            if uid is not None:
                raise ValueError("Setting UID is unsupported on this platform.")
            if gid is not None:
                raise ValueError("Setting GID is unsupported on this platform.")
            if usePTY:
                raise ValueError("The usePTY parameter is not supported on Windows.")
            if childFDs:
                raise ValueError("Customizing childFDs is not supported on Windows.")

            if win32process:
                from twisted.internet._dumbwin32proc import Process

                return Process(self, processProtocol, executable, args, env, path)
            else:
                raise NotImplementedError(
                    "spawnProcess not available since pywin32 is not installed."
                )
        else:
            raise NotImplementedError(
                "spawnProcess only available on Windows or POSIX."
            )

    # IReactorUDP

    def listenUDP(self, port, protocol, interface="", maxPacketSize=8192):
        """Connects a given L{DatagramProtocol} to the given numeric UDP port.

        @returns: object conforming to L{IListeningPort}.
        """
        p = udp.Port(port, protocol, interface, maxPacketSize, self)
        p.startListening()
        return p

    # IReactorMulticast

    def listenMulticast(
        self, port, protocol, interface="", maxPacketSize=8192, listenMultiple=False
    ):
        """Connects a given DatagramProtocol to the given numeric UDP port.

        EXPERIMENTAL.

        @returns: object conforming to IListeningPort.
        """
        p = udp.MulticastPort(
            port, protocol, interface, maxPacketSize, self, listenMultiple
        )
        p.startListening()
        return p

    # IReactorUNIX

    def connectUNIX(self, address, factory, timeout=30, checkPID=0):
        assert unixEnabled, "UNIX support is not present"
        c = unix.Connector(address, factory, timeout, self, checkPID)
        c.connect()
        return c

    def listenUNIX(self, address, factory, backlog=50, mode=0o666, wantPID=0):
        assert unixEnabled, "UNIX support is not present"
        p = unix.Port(address, factory, backlog, mode, self, wantPID)
        p.startListening()
        return p

    # IReactorUNIXDatagram

    def listenUNIXDatagram(self, address, protocol, maxPacketSize=8192, mode=0o666):
        """
        Connects a given L{DatagramProtocol} to the given path.

        EXPERIMENTAL.

        @returns: object conforming to L{IListeningPort}.
        """
        assert unixEnabled, "UNIX support is not present"
        p = unix.DatagramPort(address, protocol, maxPacketSize, mode, self)
        p.startListening()
        return p

    def connectUNIXDatagram(
        self, address, protocol, maxPacketSize=8192, mode=0o666, bindAddress=None
    ):
        """
        Connects a L{ConnectedDatagramProtocol} instance to a path.

        EXPERIMENTAL.
        """
        assert unixEnabled, "UNIX support is not present"
        p = unix.ConnectedDatagramPort(
            address, protocol, maxPacketSize, mode, bindAddress, self
        )
        p.startListening()
        return p

    # IReactorSocket (no AF_UNIX on Windows)

    if unixEnabled:
        _supportedAddressFamilies: Sequence[socket.AddressFamily] = (
            socket.AF_INET,
            socket.AF_INET6,
            socket.AF_UNIX,
        )
    else:
        _supportedAddressFamilies = (
            socket.AF_INET,
            socket.AF_INET6,
        )

    def adoptStreamPort(self, fileDescriptor, addressFamily, factory):
        """
        Create a new L{IListeningPort} from an already-initialized socket.

        This just dispatches to a suitable port implementation (eg from
        L{IReactorTCP}, etc) based on the specified C{addressFamily}.

        @see: L{twisted.internet.interfaces.IReactorSocket.adoptStreamPort}
        """
        if addressFamily not in self._supportedAddressFamilies:
            raise error.UnsupportedAddressFamily(addressFamily)

        if unixEnabled and addressFamily == socket.AF_UNIX:
            p = unix.Port._fromListeningDescriptor(self, fileDescriptor, factory)
        else:
            p = tcp.Port._fromListeningDescriptor(
                self, fileDescriptor, addressFamily, factory
            )
        p.startListening()
        return p

    def adoptStreamConnection(self, fileDescriptor, addressFamily, factory):
        """
        @see:
            L{twisted.internet.interfaces.IReactorSocket.adoptStreamConnection}
        """
        if addressFamily not in self._supportedAddressFamilies:
            raise error.UnsupportedAddressFamily(addressFamily)

        if unixEnabled and addressFamily == socket.AF_UNIX:
            return unix.Server._fromConnectedSocket(fileDescriptor, factory, self)
        else:
            return tcp.Server._fromConnectedSocket(
                fileDescriptor, addressFamily, factory, self
            )

    def adoptDatagramPort(
        self, fileDescriptor, addressFamily, protocol, maxPacketSize=8192
    ):
        """
        Create a listening UDP port from an already-initialized datagram
        socket.  Only AF_INET and AF_INET6 are accepted here.
        """
        if addressFamily not in (socket.AF_INET, socket.AF_INET6):
            raise error.UnsupportedAddressFamily(addressFamily)

        p = udp.Port._fromListeningDescriptor(
            self, fileDescriptor, addressFamily, protocol, maxPacketSize=maxPacketSize
        )
        p.startListening()
        return p

    # IReactorTCP

    def listenTCP(self, port, factory, backlog=50, interface=""):
        p = tcp.Port(port, factory, backlog, interface, self)
        p.startListening()
        return p

    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
        c.connect()
        return c

    # IReactorSSL (sometimes, not implemented)

    def connectSSL(
        self, host, port, factory, contextFactory, timeout=30, bindAddress=None
    ):
        # Prefer the memory-BIO TLS implementation when available; fall back
        # to the legacy ssl connector otherwise.
        if tls is not None:
            tlsFactory = tls.TLSMemoryBIOFactory(contextFactory, True, factory)
            return self.connectTCP(host, port, tlsFactory, timeout, bindAddress)
        elif ssl is not None:
            c = ssl.Connector(
                host, port, factory, contextFactory, timeout, bindAddress, self
            )
            c.connect()
            return c
        else:
            # NOTE(review): assert is stripped under -O; an exception might
            # be preferable here.
            assert False, "SSL support is not present"

    def listenSSL(self, port, factory, contextFactory, backlog=50, interface=""):
        if tls is not None:
            tlsFactory = tls.TLSMemoryBIOFactory(contextFactory, False, factory)
            port = self.listenTCP(port, tlsFactory, backlog, interface)
            port._type = "TLS"
            return port
        elif ssl is not None:
            p = ssl.Port(port, factory, contextFactory, backlog, interface, self)
            p.startListening()
            return p
        else:
            # NOTE(review): assert is stripped under -O; see connectSSL.
            assert False, "SSL support is not present"

    def _removeAll(self, readers, writers):
        """
        Remove all readers and writers, and list of removed L{IReadDescriptor}s
        and L{IWriteDescriptor}s.

        Meant for calling from subclasses, to implement removeAll, like::

          def removeAll(self):
              return self._removeAll(self._reads, self._writes)

        where C{self._reads} and C{self._writes} are iterables.
        """
        # Internal readers (e.g. the waker) are deliberately excluded from
        # the returned list and are left installed.
        removedReaders = set(readers) - self._internalReaders
        for reader in removedReaders:
            self.removeReader(reader)

        removedWriters = set(writers)
        for writer in removedWriters:
            self.removeWriter(writer)

        return list(removedReaders | removedWriters)
class _PollLikeMixin:
    """
    Mixin for poll-like reactors.

    Subclasses must define the following attributes::

      - _POLL_DISCONNECTED - Bitmask for events indicating a connection was
        lost.
      - _POLL_IN - Bitmask for events indicating there is input to read.
      - _POLL_OUT - Bitmask for events indicating output can be written.

    Must be mixed in to a subclass of PosixReactorBase (for
    _disconnectSelectable).
    """

    def _doReadOrWrite(self, selectable, fd, event):
        """
        fd is available for read or write, do the work and raise errors if
        necessary.

        @param selectable: the registered L{FileDescriptor} for C{fd}.
        @param fd: the integer file descriptor the event is for.
        @param event: the poll-style event bitmask to dispatch on.
        """
        why = None
        inRead = False
        if event & self._POLL_DISCONNECTED and not (event & self._POLL_IN):
            # Handle disconnection.  But only if we finished processing all
            # the pending input.
            if fd in self._reads:
                # If we were reading from the descriptor then this is a
                # clean shutdown.  We know there are no read events pending
                # because we just checked above.  It also might be a
                # half-close (which is why we have to keep track of inRead).
                inRead = True
                why = CONNECTION_DONE
            else:
                # If we weren't reading, this is an error shutdown of some
                # sort.
                why = CONNECTION_LOST
        else:
            # Any non-disconnect event turns into a doRead or a doWrite.
            try:
                # First check to see if the descriptor is still valid.  This
                # gives fileno() a chance to raise an exception, too.
                # Ideally, disconnection would always be indicated by the
                # return value of doRead or doWrite (or an exception from
                # one of those methods), but calling fileno here helps make
                # buggy applications more transparent.
                if selectable.fileno() == -1:
                    # -1 is sort of a historical Python artifact.  Python
                    # files and sockets used to change their file descriptor
                    # to -1 when they closed.  For the time being, we'll
                    # continue to support this anyway in case applications
                    # replicated it, plus abstract.FileDescriptor.fileno
                    # returns -1.  Eventually it'd be good to deprecate this
                    # case.
                    why = _NO_FILEDESC
                else:
                    if event & self._POLL_IN:
                        # Handle a read event.
                        why = selectable.doRead()
                        inRead = True
                    if not why and event & self._POLL_OUT:
                        # Handle a write event, as long as doRead didn't
                        # disconnect us.
                        why = selectable.doWrite()
                        inRead = False
            except BaseException:
                # Any exception from application code gets logged and will
                # cause us to disconnect the selectable.
                why = sys.exc_info()[1]
                log.err()
        if why:
            self._disconnectSelectable(selectable, why, inRead)
@implementer(IReactorFDSet)
class _ContinuousPolling(_PollLikeMixin, _DisconnectSelectableMixin):
    """
    Drive reads and writes by the passage of time rather than by OS
    readiness notification.

    This is useful for supporting polling filesystem files, which C{epoll(7)}
    does not support.

    The implementation uses L{_PollLikeMixin}, which is a bit hacky, but
    re-implementing and testing the relevant code yet again is unappealing.

    @ivar _reactor: The L{EPollReactor} that is using this instance.

    @ivar _loop: A C{LoopingCall} that drives the polling, or L{None}.

    @ivar _readers: A C{set} of C{FileDescriptor} objects that should be read
        from.

    @ivar _writers: A C{set} of C{FileDescriptor} objects that should be
        written to.
    """

    # Event-mask constants consumed by _PollLikeMixin:
    _POLL_DISCONNECTED = 1
    _POLL_IN = 2
    _POLL_OUT = 4

    def __init__(self, reactor):
        self._reactor = reactor
        self._loop = None
        self._readers = set()
        self._writers = set()

    def _checkLoop(self):
        """
        Start or stop the driving C{LoopingCall} so that it runs exactly
        when there is at least one reader or writer registered.
        """
        active = bool(self._readers) or bool(self._writers)
        if active and self._loop is None:
            from twisted.internet.task import _EPSILON, LoopingCall

            self._loop = LoopingCall(self.iterate)
            self._loop.clock = self._reactor
            # LoopingCall seems unhappy with a timeout of 0, so use a very
            # small number instead:
            self._loop.start(_EPSILON, now=False)
        elif not active and self._loop:
            self._loop.stop()
            self._loop = None

    def iterate(self):
        """
        Invoke C{doRead} on every reader and C{doWrite} on every writer.
        """
        for descriptor in list(self._readers):
            self._doReadOrWrite(descriptor, descriptor, self._POLL_IN)
        for descriptor in list(self._writers):
            self._doReadOrWrite(descriptor, descriptor, self._POLL_OUT)

    def addReader(self, reader):
        """
        Register a C{FileDescriptor} for periodic C{doRead} calls.
        """
        self._readers.add(reader)
        self._checkLoop()

    def addWriter(self, writer):
        """
        Register a C{FileDescriptor} for periodic C{doWrite} calls.
        """
        self._writers.add(writer)
        self._checkLoop()

    def removeReader(self, reader):
        """
        Unregister a reader; silently ignore one that was never added.
        """
        if reader in self._readers:
            self._readers.discard(reader)
            self._checkLoop()

    def removeWriter(self, writer):
        """
        Unregister a writer; silently ignore one that was never added.
        """
        if writer in self._writers:
            self._writers.discard(writer)
            self._checkLoop()

    def removeAll(self):
        """
        Forget every reader and writer and return them as a list.
        """
        removed = list(self._readers | self._writers)
        # Clear the existing sets in place rather than rebinding them, since
        # isReading/isWriting keep referring to these same instances:
        self._readers.clear()
        self._writers.clear()
        return removed

    def getReaders(self):
        """
        Return a list of the registered readers.
        """
        return list(self._readers)

    def getWriters(self):
        """
        Return a list of the registered writers.
        """
        return list(self._writers)

    def isReading(self, fd):
        """
        Checks if the file descriptor is currently being observed for read
        readiness.

        @param fd: The file descriptor being checked.
        @type fd: L{twisted.internet.abstract.FileDescriptor}

        @return: C{True} if the file descriptor is being observed for read
            readiness, C{False} otherwise.
        @rtype: C{bool}
        """
        return fd in self._readers

    def isWriting(self, fd):
        """
        Checks if the file descriptor is currently being observed for write
        readiness.

        @param fd: The file descriptor being checked.
        @type fd: L{twisted.internet.abstract.FileDescriptor}

        @return: C{True} if the file descriptor is being observed for write
            readiness, C{False} otherwise.
        @rtype: C{bool}
        """
        return fd in self._writers
# Conditionally declare optional interfaces on PosixReactorBase depending on
# what this platform/build actually supports.
if tls is not None or ssl is not None:
    classImplements(PosixReactorBase, IReactorSSL)
if unixEnabled:
    classImplements(PosixReactorBase, IReactorUNIX, IReactorUNIXDatagram)
if processEnabled:
    classImplements(PosixReactorBase, IReactorProcess)
if getattr(socket, "fromfd", None) is not None:
    classImplements(PosixReactorBase, IReactorSocket)


__all__ = ["PosixReactorBase"]

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,900 @@
# -*- test-case-name: twisted.test.test_factories,twisted.internet.test.test_protocol -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Standard implementations of Twisted protocol-related interfaces.
Start here if you are looking to write a new protocol implementation for
Twisted. The Protocol class contains some introductory material.
"""
import random
from typing import Any, Callable, Optional
from zope.interface import implementer
from twisted.internet import defer, error, interfaces
from twisted.internet.interfaces import IAddress, ITransport
from twisted.logger import _loggerFor
from twisted.python import components, failure, log
@implementer(interfaces.IProtocolFactory, interfaces.ILoggingContext)
class Factory:
"""
This is a factory which produces protocols.
By default, buildProtocol will create a protocol of the class given in
self.protocol.
"""
protocol: "Optional[Callable[[], Protocol]]" = None
numPorts = 0
noisy = True
@classmethod
def forProtocol(cls, protocol, *args, **kwargs):
    """
    Create a factory for the given protocol.

    It sets the C{protocol} attribute and returns the constructed factory
    instance.

    @param protocol: A L{Protocol} subclass

    @param args: Positional arguments for the factory.

    @param kwargs: Keyword arguments for the factory.

    @return: A L{Factory} instance wired up to C{protocol}.
    """
    instance = cls(*args, **kwargs)
    instance.protocol = protocol
    return instance
def logPrefix(self):
"""
Describe this factory for log messages.
"""
return self.__class__.__name__
def doStart(self):
"""
Make sure startFactory is called.
Users should not call this function themselves!
"""
if not self.numPorts:
if self.noisy:
_loggerFor(self).info("Starting factory {factory!r}", factory=self)
self.startFactory()
self.numPorts = self.numPorts + 1
def doStop(self):
"""
Make sure stopFactory is called.
Users should not call this function themselves!
"""
if self.numPorts == 0:
# This shouldn't happen, but does sometimes and this is better
# than blowing up in assert as we did previously.
return
self.numPorts = self.numPorts - 1
if not self.numPorts:
if self.noisy:
_loggerFor(self).info("Stopping factory {factory!r}", factory=self)
self.stopFactory()
def startFactory(self):
"""
This will be called before I begin listening on a Port or Connector.
It will only be called once, even if the factory is connected
to multiple ports.
This can be used to perform 'unserialization' tasks that
are best put off until things are actually running, such
as connecting to a database, opening files, etcetera.
"""
def stopFactory(self):
"""
This will be called before I stop listening on all Ports/Connectors.
This can be overridden to perform 'shutdown' tasks such as disconnecting
database connections, closing files, etc.
It will be called, for example, before an application shuts down,
if it was connected to a port. User code should not call this function
directly.
"""
def buildProtocol(self, addr: IAddress) -> "Optional[Protocol]":
"""
Create an instance of a subclass of Protocol.
The returned instance will handle input on an incoming server
connection, and an attribute "factory" pointing to the creating
factory.
Alternatively, L{None} may be returned to immediately close the
new connection.
Override this method to alter how Protocol instances get created.
@param addr: an object implementing L{IAddress}
"""
assert self.protocol is not None
p = self.protocol()
p.factory = self
return p
class ClientFactory(Factory):
    """
    A protocol factory intended for the client side of connections.

    Instances are passed to the reactor's various C{connectXXX} methods.
    """

    def startedConnecting(self, connector):
        """
        Notification that a connection attempt has begun.

        Calling C{connector.stopConnecting()} aborts the attempt.

        @param connector: a Connector object.
        """

    def clientConnectionFailed(self, connector, reason):
        """
        Notification that a connection attempt did not succeed.

        Calling C{connector.connect()} retries the connection.

        @type reason: L{twisted.python.failure.Failure}
        """

    def clientConnectionLost(self, connector, reason):
        """
        Notification that an established connection was lost.

        Calling C{connector.connect()} reconnects.

        @type reason: L{twisted.python.failure.Failure}
        """
class _InstanceFactory(ClientFactory):
    """
    Internal factory backing L{ClientCreator}: hands out one pre-built
    protocol instance and reports the outcome through a L{Deferred}.

    @ivar deferred: The L{Deferred} which represents this connection attempt
        and which will be fired when it succeeds or fails.

    @ivar pending: After a connection attempt succeeds or fails, a delayed
        call which will fire the L{Deferred} representing this connection
        attempt.
    """

    noisy = False
    pending = None

    def __init__(self, reactor, instance, deferred):
        self.reactor = reactor
        self.instance = instance
        self.deferred = deferred

    def __repr__(self) -> str:
        return f"<ClientCreator factory: {self.instance!r}>"

    def buildProtocol(self, addr):
        """
        Hand back the pre-constructed protocol and schedule the success
        callback for the waiting L{Deferred}.
        """
        waiting, self.deferred = self.deferred, None
        self.pending = self.reactor.callLater(
            0, self.fire, waiting.callback, self.instance
        )
        return self.instance

    def clientConnectionFailed(self, connector, reason):
        """
        Schedule the errback for the waiting L{Deferred} with the reason the
        connection could not be established.
        """
        waiting, self.deferred = self.deferred, None
        self.pending = self.reactor.callLater(0, self.fire, waiting.errback, reason)

    def fire(self, func, value):
        """
        Drop the C{pending} reference (breaking a reference cycle), then
        deliver C{value} to C{func}.
        """
        self.pending = None
        func(value)
class ClientCreator:
    """
    Factory-free client connections.

    Each connect* method instantiates the configured protocol class with the
    stored arguments, connects it, and returns a L{Deferred} firing with the
    connected protocol instance.  Handy when there is no shared state between
    protocol instances and no reconnection logic.

    The C{connectTCP}, C{connectUNIX}, and C{connectSSL} methods each return
    a L{Deferred} which will fire with an instance of the protocol class
    passed to L{ClientCreator.__init__}.  These Deferred can be cancelled to
    abort the connection attempt (in a very unlikely case, cancelling the
    Deferred may not prevent the protocol from being instantiated and
    connected to a transport; if this happens, it will be disconnected
    immediately afterwards and the Deferred will still errback with
    L{CancelledError}).
    """

    def __init__(self, reactor, protocolClass, *args, **kwargs):
        self.reactor = reactor
        self.protocolClass = protocolClass
        self.args = args
        self.kwargs = kwargs

    def _connect(self, method, *args, **kwargs):
        """
        Kick off one connection attempt through C{method}.

        @param method: A callable which will actually start the connection
            attempt.  For example, C{reactor.connectTCP}.

        @param args: Positional arguments to pass to C{method}, excluding the
            factory.

        @param kwargs: Keyword arguments to pass to C{method}.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if
            the connection cannot be set up for some reason.
        """

        def cancelConnect(deferred):
            # Abort the in-flight attempt and any already-scheduled firing.
            conn.disconnect()
            if instanceFactory.pending is not None:
                instanceFactory.pending.cancel()

        result = defer.Deferred(cancelConnect)
        instanceFactory = _InstanceFactory(
            self.reactor, self.protocolClass(*self.args, **self.kwargs), result
        )
        conn = method(factory=instanceFactory, *args, **kwargs)
        return result

    def connectTCP(self, host, port, timeout=30, bindAddress=None):
        """
        Connect to a TCP server.

        The parameters are all the same as to L{IReactorTCP.connectTCP}
        except that the factory parameter is omitted.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if
            the connection cannot be set up for some reason.
        """
        return self._connect(
            self.reactor.connectTCP,
            host,
            port,
            timeout=timeout,
            bindAddress=bindAddress,
        )

    def connectUNIX(self, address, timeout=30, checkPID=False):
        """
        Connect to a Unix socket.

        The parameters are all the same as to L{IReactorUNIX.connectUNIX}
        except that the factory parameter is omitted.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if
            the connection cannot be set up for some reason.
        """
        return self._connect(
            self.reactor.connectUNIX, address, timeout=timeout, checkPID=checkPID
        )

    def connectSSL(self, host, port, contextFactory, timeout=30, bindAddress=None):
        """
        Connect to an SSL server.

        The parameters are all the same as to L{IReactorSSL.connectSSL}
        except that the factory parameter is omitted.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if
            the connection cannot be set up for some reason.
        """
        return self._connect(
            self.reactor.connectSSL,
            host,
            port,
            contextFactory=contextFactory,
            timeout=timeout,
            bindAddress=bindAddress,
        )
class ReconnectingClientFactory(ClientFactory):
    """
    Factory which auto-reconnects clients with an exponential back-off.

    Note that clients should call my resetDelay method after they have
    connected successfully.

    @ivar maxDelay: Maximum number of seconds between connection attempts.
    @ivar initialDelay: Delay for the first reconnection attempt.
    @ivar factor: A multiplicative factor by which the delay grows on each
        consecutive failure.
    @ivar jitter: Percentage of randomness to introduce into the delay length
        to prevent stampeding.
    @ivar clock: The clock used to schedule reconnection. It's mainly useful
        to be parametrized in tests. If the factory is serialized, this
        attribute will not be serialized, and the default value (the reactor)
        will be restored when deserialized.
    @type clock: L{IReactorTime}
    @ivar maxRetries: Maximum number of consecutive unsuccessful connection
        attempts, after which no further connection attempts will be made. If
        this is not explicitly set, no maximum is applied.
    """

    maxDelay = 3600
    initialDelay = 1.0
    # Note: These highly sensitive factors have been precisely measured by
    # the National Institute of Science and Technology.  Take extreme care
    # in altering them, or you may damage your Internet!
    # (Seriously: <http://physics.nist.gov/cuu/Constants/index.html>)
    factor = 2.7182818284590451  # (math.e)
    # Phi = 1.6180339887498948 # (Phi is acceptable for use as a
    # factor if e is too large for your application.)
    # This is the value of the molar Planck constant times c, joule
    # meter/mole.  The value is attributable to
    # https://physics.nist.gov/cgi-bin/cuu/Value?nahc|search_for=molar+planck+constant+times+c
    jitter = 0.119626565582

    # Mutable per-connection state, reset by resetDelay() and excluded from
    # serialization by __getstate__().
    delay = initialDelay
    retries = 0
    maxRetries = None
    _callID = None  # pending IDelayedCall for the next reconnect, if any
    connector = None  # most recent connector, reused by retry()
    clock = None  # lazily defaulted to the global reactor in retry()
    continueTrying = 1  # cleared by stopTrying() to abandon reconnection

    def clientConnectionFailed(self, connector, reason):
        # A failed attempt counts like a lost connection: schedule a retry
        # unless stopTrying() was called.
        if self.continueTrying:
            self.connector = connector
            self.retry()

    def clientConnectionLost(self, connector, unused_reason):
        if self.continueTrying:
            self.connector = connector
            self.retry()

    def retry(self, connector=None):
        """
        Have this connector connect again, after a suitable delay.

        @param connector: the connector to reuse; defaults to the one stored
            by the last connection failure/loss.
        @raise ValueError: if no connector was given and none is stored.
        """
        if not self.continueTrying:
            if self.noisy:
                log.msg(f"Abandoning {connector} on explicit request")
            return

        if connector is None:
            if self.connector is None:
                raise ValueError("no connector to retry")
            else:
                connector = self.connector

        self.retries += 1
        if self.maxRetries is not None and (self.retries > self.maxRetries):
            if self.noisy:
                log.msg("Abandoning %s after %d retries." % (connector, self.retries))
            return

        # Exponential back-off, capped at maxDelay.  Note the jitter below is
        # applied after the cap, so the actual delay can slightly exceed
        # maxDelay.
        self.delay = min(self.delay * self.factor, self.maxDelay)
        if self.jitter:
            # Randomize around the nominal delay to avoid reconnection
            # stampedes from many clients at once.
            self.delay = random.normalvariate(self.delay, self.delay * self.jitter)

        if self.noisy:
            log.msg(
                "%s will retry in %d seconds"
                % (
                    connector,
                    self.delay,
                )
            )

        def reconnector():
            # Clear _callID first so a failure inside connect() does not
            # leave a stale delayed-call reference behind.
            self._callID = None
            connector.connect()

        if self.clock is None:
            from twisted.internet import reactor

            self.clock = reactor
        self._callID = self.clock.callLater(self.delay, reconnector)

    def stopTrying(self):
        """
        Put a stop to any attempt to reconnect in progress.
        """
        # ??? Is this function really stopFactory?
        if self._callID:
            self._callID.cancel()
            self._callID = None
        self.continueTrying = 0
        if self.connector:
            try:
                self.connector.stopConnecting()
            except error.NotConnectingError:
                # Already past the connecting stage; nothing to cancel.
                pass

    def resetDelay(self):
        """
        Call this method after a successful connection: it resets the delay
        and the retry counter.
        """
        self.delay = self.initialDelay
        self.retries = 0
        self._callID = None
        self.continueTrying = 1

    def __getstate__(self):
        """
        Remove all of the state which is mutated by connection attempts and
        failures, returning just the state which describes how reconnections
        should be attempted.  This will make the unserialized instance
        behave just as this one did when it was first instantiated.
        """
        state = self.__dict__.copy()
        for key in [
            "connector",
            "retries",
            "delay",
            "continueTrying",
            "_callID",
            "clock",
        ]:
            if key in state:
                del state[key]
        return state
class ServerFactory(Factory):
    """
    Marker subclass: a protocol factory meant only for server-side use.
    """
class BaseProtocol:
    """
    Abstract superclass shared by all protocols.

    Provides the connection bookkeeping common to the more interesting
    direct subclasses, L{Protocol} and L{ProcessProtocol}: the C{transport}
    attribute and the C{makeConnection}/C{connectionMade} handshake.
    """

    # Non-zero once makeConnection has been invoked.
    connected = 0
    transport: Optional[ITransport] = None

    def makeConnection(self, transport):
        """
        Associate this protocol with C{transport} and fire
        C{connectionMade}.
        """
        self.connected = 1
        self.transport = transport
        self.connectionMade()

    def connectionMade(self):
        """
        Hook invoked once the connection is established.

        Effectively the protocol's initializer: for clients it runs when
        the connection to the server completes; for servers it runs once a
        new incoming connection has been accepted.  Send any greeting or
        initial message from here.
        """
# Shared Failure wrapping ConnectionDone: the default ``reason`` for
# Protocol.connectionLost when the connection closed cleanly.  cleanFailure()
# strips traceback/frame references so this module-level singleton does not
# pin live frames in memory.
connectionDone = failure.Failure(error.ConnectionDone())
connectionDone.cleanFailure()
@implementer(interfaces.IProtocol, interfaces.ILoggingContext)
class Protocol(BaseProtocol):
    """
    Base class for streaming, connection-oriented protocols.

    Start here when writing a new connection-oriented protocol for Twisted,
    whether client or server.  Implement L{dataReceived} to consume input;
    write output through the C{transport} attribute, an instance that
    implements L{twisted.internet.interfaces.ITransport}.  Override
    C{connectionLost} to learn when the connection ends.

    Ready-made helpers for common protocol shapes live in
    L{twisted.protocols.basic}.
    """

    factory: Optional[Factory] = None

    def logPrefix(self):
        """
        Identify this protocol in log messages by its class name.
        """
        return type(self).__name__

    def dataReceived(self, data: bytes) -> None:
        """
        Hook invoked whenever data arrives.

        Translate the received bytes into higher-level messages here,
        typically issuing one callback per complete protocol message.  Be
        prepared to buffer: partial (or multiple) protocol messages may
        arrive in a single call, so unit tests should feed this method
        varying chunk sizes, down to one byte at a time.

        @param data: a string of indeterminate length.
        """

    def connectionLost(self, reason: failure.Failure = connectionDone) -> None:
        """
        Hook invoked when the connection is shut down.

        Break any circular references and drop external references to this
        protocol here; the connection is gone.

        @type reason: L{twisted.python.failure.Failure}
        """
@implementer(interfaces.IConsumer)
class ProtocolToConsumerAdapter(components.Adapter):
    """
    Present an L{IProtocol} as an L{IConsumer}: each C{write} is forwarded
    as a C{dataReceived} call; producer registration is a no-op.
    """

    def write(self, data: bytes) -> None:
        self.original.dataReceived(data)

    def registerProducer(self, producer, streaming):
        # No flow control is applied through this adapter.
        pass

    def unregisterProducer(self):
        pass


components.registerAdapter(
    ProtocolToConsumerAdapter, interfaces.IProtocol, interfaces.IConsumer
)
@implementer(interfaces.IProtocol)
class ConsumerToProtocolAdapter(components.Adapter):
    """
    Present an L{IConsumer} as an L{IProtocol}: received data is forwarded
    to the consumer's C{write}; connection lifecycle events are ignored.
    """

    def dataReceived(self, data: bytes) -> None:
        self.original.write(data)

    def connectionLost(self, reason: failure.Failure) -> None:
        pass

    def makeConnection(self, transport):
        pass

    def connectionMade(self):
        pass


components.registerAdapter(
    ConsumerToProtocolAdapter, interfaces.IConsumer, interfaces.IProtocol
)
@implementer(interfaces.IProcessProtocol)
class ProcessProtocol(BaseProtocol):
    """
    Base process protocol that routes events for the standard child file
    descriptors (stdin=0, stdout=1, stderr=2) to dedicated callbacks.
    """

    transport: Optional[interfaces.IProcessTransport] = None

    def childDataReceived(self, childFD: int, data: bytes) -> None:
        # Dispatch stdout/stderr data; other descriptors are ignored here.
        handler = {1: self.outReceived, 2: self.errReceived}.get(childFD)
        if handler is not None:
            handler(data)

    def outReceived(self, data: bytes) -> None:
        """
        Handle data arriving on the child's stdout.
        """

    def errReceived(self, data: bytes) -> None:
        """
        Handle data arriving on the child's stderr.
        """

    def childConnectionLost(self, childFD: int) -> None:
        # Dispatch closure of the three standard descriptors.
        handler = {
            0: self.inConnectionLost,
            1: self.outConnectionLost,
            2: self.errConnectionLost,
        }.get(childFD)
        if handler is not None:
            handler()

    def inConnectionLost(self):
        """
        Notification that the child's stdin has been closed.
        """

    def outConnectionLost(self):
        """
        Notification that the child's stdout has been closed.
        """

    def errConnectionLost(self):
        """
        Notification that the child's stderr has been closed.
        """

    def processExited(self, reason: failure.Failure) -> None:
        """
        Notification that the subprocess itself has exited.

        @type reason: L{twisted.python.failure.Failure}
        """

    def processEnded(self, reason: failure.Failure) -> None:
        """
        Notification that the child process exited and every file
        descriptor associated with it has been closed.

        @type reason: L{twisted.python.failure.Failure}
        """
class AbstractDatagramProtocol:
    """
    Abstract protocol for datagram-oriented transports, e.g. IP, ICMP, ARP,
    UDP.

    Tracks how many ports currently use this protocol so that
    L{startProtocol}/L{stopProtocol} fire exactly once around the first
    start and last stop.
    """

    # The transport this protocol is attached to, or None while unattached.
    transport = None
    # Count of ports currently connected to this protocol.
    numPorts = 0
    # When true, protocol start/stop events are logged.
    noisy = True

    def __getstate__(self):
        """
        Serialize without the transport, which cannot be usefully persisted.
        """
        d = self.__dict__.copy()
        d["transport"] = None
        return d

    def doStart(self):
        """
        Run L{startProtocol} exactly once, on the first port start.

        This will be called by makeConnection(), users should not call it.
        """
        if not self.numPorts:
            if self.noisy:
                log.msg("Starting protocol %s" % self)
            self.startProtocol()
        self.numPorts = self.numPorts + 1

    def doStop(self):
        """
        Run L{stopProtocol} exactly once, after the last port stops.

        This will be called by the port, users should not call it.
        """
        assert self.numPorts > 0
        self.numPorts = self.numPorts - 1
        self.transport = None
        if not self.numPorts:
            if self.noisy:
                log.msg("Stopping protocol %s" % self)
            self.stopProtocol()

    def startProtocol(self):
        """
        Hook called when a transport is connected to this protocol.

        Will only be called once, even if multiple ports are connected.
        """

    def stopProtocol(self):
        """
        Hook called when the transport is disconnected.

        Will only be called once, after all ports are disconnected.
        """

    def makeConnection(self, transport):
        """
        Attach C{transport} to this protocol and trigger L{doStart}.

        @raise AssertionError: if a transport is already attached.
        """
        # Identity test per PEP 8: None is a singleton, and ``is`` avoids
        # invoking a transport's custom __eq__ (the original used ``== None``).
        assert self.transport is None
        self.transport = transport
        self.doStart()

    def datagramReceived(self, datagram: bytes, addr: Any) -> None:
        """
        Hook called for each received datagram.

        @param datagram: the bytes received from the transport.
        @param addr: tuple of source of datagram.
        """
@implementer(interfaces.ILoggingContext)
class DatagramProtocol(AbstractDatagramProtocol):
    """
    Protocol for datagram-oriented transport, e.g. UDP.

    @type transport: L{None} or
        L{IUDPTransport<twisted.internet.interfaces.IUDPTransport>} provider
    @ivar transport: The transport with which this protocol is associated,
        if it is associated with one.
    """

    def logPrefix(self):
        """
        Identify this protocol in log messages by its class name.
        """
        return type(self).__name__

    def connectionRefused(self):
        """
        Hook called when a write in connected mode failed.

        The failure is signalled by an ICMP message generated in response
        to a *previous* write.
        """
class ConnectedDatagramProtocol(DatagramProtocol):
    """
    Protocol for connected datagram-oriented transport.

    No longer necessary for UDP.
    """

    def datagramReceived(self, datagram):
        """
        Hook called for each received datagram.

        @param datagram: the string received from the transport.
        """

    def connectionFailed(self, failure: failure.Failure) -> None:
        """
        Hook called if connecting failed, usually due to a DNS lookup
        failure.
        """
@implementer(interfaces.ITransport)
class FileWrapper:
    """
    Present a file-like object as an L{ITransport}.

    Writes go straight to the wrapped file; nothing is streamed back to an
    attached protocol, which makes this mainly a protocol-debugging
    utility.
    """

    closed = 0
    disconnecting = 0
    producer = None
    streamingProducer = 0

    def __init__(self, file):
        self.file = file

    def write(self, data: bytes) -> None:
        try:
            self.file.write(data)
        except BaseException:
            # Errors from the wrapped file are routed to handleException.
            self.handleException()

    def _checkProducer(self):
        # Cheating; this is called at "idle" times to allow producers to be
        # found and dealt with.
        if self.producer:
            self.producer.resumeProducing()

    def registerProducer(self, producer, streaming):
        """
        From abstract.FileDescriptor: remember the producer; pull-style
        producers get an immediate resumeProducing().
        """
        self.producer = producer
        self.streamingProducer = streaming
        if not streaming:
            producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

    def stopConsuming(self):
        self.unregisterProducer()
        self.loseConnection()

    def writeSequence(self, iovec):
        # Coalesce into a single write of the wrapped file.
        self.write(b"".join(iovec))

    def loseConnection(self):
        self.closed = 1
        try:
            self.file.close()
        except OSError:
            self.handleException()

    def getPeer(self):
        # FIXME: https://twistedmatrix.com/trac/ticket/7820
        # According to ITransport, this should return an IAddress!
        return "file", "file"

    def getHost(self):
        # FIXME: https://twistedmatrix.com/trac/ticket/7820
        # According to ITransport, this should return an IAddress!
        return "file"

    def handleException(self):
        # Deliberately swallow errors from the wrapped file.
        pass

    def resumeProducing(self):
        # Never sends data anyways
        pass

    def pauseProducing(self):
        # Never sends data anyways
        pass

    def stopProducing(self):
        self.loseConnection()
# Public API of this module (twisted.internet.protocol).
__all__ = [
    "Factory",
    "ClientFactory",
    "ReconnectingClientFactory",
    "connectionDone",
    "Protocol",
    "ProcessProtocol",
    "FileWrapper",
    "ServerFactory",
    "AbstractDatagramProtocol",
    "DatagramProtocol",
    "ConnectedDatagramProtocol",
    "ClientCreator",
]

View File

@@ -0,0 +1,39 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module integrates PyUI with twisted.internet's mainloop.
Maintainer: Jp Calderone
See doc/examples/pyuidemo.py for example usage.
"""
# System imports
import pyui # type: ignore[import-not-found]
def _guiUpdate(reactor, delay):
    """
    Draw one PyUI frame, then either reschedule this updater after C{delay}
    seconds or — once PyUI reports it has finished — shut PyUI down and stop
    the reactor.
    """
    pyui.draw()
    if pyui.update() != 0:
        reactor.callLater(delay, _guiUpdate, reactor, delay)
    else:
        pyui.quit()
        reactor.stop()
def install(ms=10, reactor=None, args=(), kw=None):
    """
    Schedule PyUI's display to be updated approximately every C{ms}
    milliseconds, and initialize PyUI with the specified arguments.

    @param ms: target update interval in milliseconds.
    @param reactor: the reactor driving the updates; defaults to the global
        reactor.
    @param args: positional arguments passed to C{pyui.init}.
    @param kw: keyword arguments passed to C{pyui.init}; defaults to none.
    @return: whatever C{pyui.init} returns.
    """
    # ``kw=None`` replaces the mutable-default-argument anti-pattern
    # (``kw={}``); behavior is unchanged since the dict was only read.
    d = pyui.init(*args, **({} if kw is None else kw))

    if reactor is None:
        from twisted.internet import reactor
    _guiUpdate(reactor, ms / 1000.0)
    return d
__all__ = ["install"]

View File

@@ -0,0 +1,40 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The reactor is the Twisted event loop within Twisted, the loop which drives
applications using Twisted. The reactor provides APIs for networking,
threading, dispatching events, and more.
The default reactor depends on the platform and will be installed if this
module is imported without another reactor being explicitly installed
beforehand. Regardless of which reactor is installed, importing this module is
the correct way to get a reference to it.
New application code should prefer to pass and accept the reactor as a
parameter where it is needed, rather than relying on being able to import this
module to get a reference. This simplifies unit testing and may make it easier
to one day support multiple reactors (as a performance enhancement), though
this is not currently possible.
@see: L{IReactorCore<twisted.internet.interfaces.IReactorCore>}
@see: L{IReactorTime<twisted.internet.interfaces.IReactorTime>}
@see: L{IReactorProcess<twisted.internet.interfaces.IReactorProcess>}
@see: L{IReactorTCP<twisted.internet.interfaces.IReactorTCP>}
@see: L{IReactorSSL<twisted.internet.interfaces.IReactorSSL>}
@see: L{IReactorUDP<twisted.internet.interfaces.IReactorUDP>}
@see: L{IReactorMulticast<twisted.internet.interfaces.IReactorMulticast>}
@see: L{IReactorUNIX<twisted.internet.interfaces.IReactorUNIX>}
@see: L{IReactorUNIXDatagram<twisted.internet.interfaces.IReactorUNIXDatagram>}
@see: L{IReactorFDSet<twisted.internet.interfaces.IReactorFDSet>}
@see: L{IReactorThreads<twisted.internet.interfaces.IReactorThreads>}
@see: L{IReactorPluggableResolver<twisted.internet.interfaces.IReactorPluggableResolver>}
"""
import sys

# Self-replacing module trick: remove this module object from sys.modules so
# that the reactor installation triggered below can publish the installed
# reactor under the name "twisted.internet.reactor" (see
# twisted.internet.main.installReactor, invoked via default.install()).
del sys.modules["twisted.internet.reactor"]
from twisted.internet import default

default.install()

View File

@@ -0,0 +1,214 @@
# -*- test-case-name: twisted.test.test_internet -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Select reactor
"""
from __future__ import annotations
import select
import sys
from errno import EBADF, EINTR
from time import sleep
from typing import Callable, Type, TypeVar
from zope.interface import implementer
from twisted.internet import posixbase
from twisted.internet.interfaces import IReactorFDSet, IReadDescriptor, IWriteDescriptor
from twisted.python import log
from twisted.python.runtime import platformType
def win32select(r, w, e, timeout=None):
    """
    Wrap C{select.select} for Windows.

    Works around two winsock quirks: select() errors out when given no
    sockets at all (so we just sleep instead), and select() is never
    interrupted by signals (so the wait is capped to keep Ctrl-C
    responsive).  Error conditions are polled via the write set and folded
    into the returned writable list.
    """
    if not r and not w:
        # Nothing to monitor: emulate the wait with a short sleep.
        sleep(0.01 if timeout is None else min(timeout, 0.001))
        return [], [], []
    # Cap the wait so keyboard interrupts are noticed promptly.
    if timeout is None or timeout > 0.5:
        timeout = 0.5
    readable, writable, errored = select.select(r, w, w, timeout)
    return readable, writable + errored, []
# On Windows the stock select() cannot be used directly (it fails with no
# sockets and never sees Ctrl-C — see win32select above), so substitute the
# wrapper there; everywhere else use select.select unchanged.
if platformType == "win32":
    _select = win32select
else:
    _select = select.select

# The threaded win32-events mixin is importable only where its Windows
# dependencies exist; elsewhere fall back to a plain ``object`` base so
# SelectReactor's base-class tuple stays valid.
try:
    from twisted.internet.win32eventreactor import _ThreadedWin32EventsMixin
except ImportError:
    _extraBase: Type[object] = object
else:
    _extraBase = _ThreadedWin32EventsMixin

# Type variable shared by the descriptor-preening helpers below.
_T = TypeVar("_T")
def _onePreen(
toPreen: list[_T],
preenInto: set[_T],
disconnect: Callable[[_T, Exception, bool], None],
) -> None:
preenInto.clear()
for selectable in toPreen:
try:
select.select([selectable], [selectable], [selectable], 0)
except Exception as e:
log.msg("bad descriptor %s" % selectable)
disconnect(selectable, e, False)
else:
preenInto.add(selectable)
def _preenDescriptors(
    reads: set[IReadDescriptor],
    writes: set[IWriteDescriptor],
    disconnect: Callable[[IReadDescriptor | IWriteDescriptor, Exception, bool], None],
) -> None:
    """
    Rebuild both selectable sets after select() rejected one of their
    members, dropping (and disconnecting) whichever descriptors are bad.
    """
    log.msg("Malformed file descriptor found.  Preening lists.")
    _onePreen(list(reads), reads, disconnect)
    _onePreen(list(writes), writes, disconnect)
@implementer(IReactorFDSet)
class SelectReactor(posixbase.PosixReactorBase, _extraBase):  # type: ignore[misc,valid-type]
    """
    A select() based reactor - runs on all POSIX platforms and on Win32.

    @ivar _reads: A set containing L{FileDescriptor} instances which will be
        checked for read events.

    @ivar _writes: A set containing L{FileDescriptor} instances which will be
        checked for writability.
    """

    def __init__(self) -> None:
        """
        Initialize file descriptor tracking dictionaries and the base class.
        """
        self._reads: set[IReadDescriptor] = set()
        self._writes: set[IWriteDescriptor] = set()
        posixbase.PosixReactorBase.__init__(self)

    def _preenDescriptors(self) -> None:
        # Delegate to the module-level helper, which rebuilds both sets and
        # disconnects any descriptor that select() rejects.
        _preenDescriptors(self._reads, self._writes, self._disconnectSelectable)

    def doSelect(self, timeout):
        """
        Run one iteration of the I/O monitor loop.

        This will run all selectables who had input or output readiness
        waiting for them.
        """
        try:
            r, w, ignored = _select(self._reads, self._writes, [], timeout)
        except ValueError:
            # Possibly a file descriptor has gone negative?
            self._preenDescriptors()
            return
        except TypeError:
            # Something *totally* invalid (object w/o fileno, non-integral
            # result) was passed
            log.err()
            self._preenDescriptors()
            return
        except OSError as se:
            # select(2) encountered an error, perhaps while calling the fileno()
            # method of a socket.  (Python 2.6 socket.error is an IOError
            # subclass, but on Python 2.5 and earlier it is not.)
            if se.args[0] in (0, 2):
                # windows does this if it got an empty list
                if (not self._reads) and (not self._writes):
                    return
                else:
                    raise
            elif se.args[0] == EINTR:
                # Interrupted by a signal: just give up this iteration.
                return
            elif se.args[0] == EBADF:
                # A stale/closed descriptor slipped in; weed it out.
                self._preenDescriptors()
                return
            else:
                # OK, I really don't know what's going on.  Blow up.
                raise

        # Bind to locals: these are looked up once per ready selectable in
        # the hot loop below.
        _drdw = self._doReadOrWrite
        _logrun = log.callWithLogger
        for selectables, method, fdset in (
            (r, "doRead", self._reads),
            (w, "doWrite", self._writes),
        ):
            for selectable in selectables:
                # If this selectable was removed (e.g. disconnected from
                # another thread) while processing earlier events, skip it.
                if selectable not in fdset:  # type:ignore[operator]
                    continue
                # This for pausing input when we're not ready for more.
                _logrun(selectable, _drdw, selectable, method)

    # doSelect is this reactor's implementation of the iteration primitive.
    doIteration = doSelect

    def _doReadOrWrite(self, selectable, method):
        # Invoke doRead/doWrite; a truthy return (or an exception) is the
        # "reason" used to disconnect the selectable.
        try:
            why = getattr(selectable, method)()
        except BaseException:
            why = sys.exc_info()[1]
            log.err()
        if why:
            self._disconnectSelectable(selectable, why, method == "doRead")

    def addReader(self, reader):
        """
        Add a FileDescriptor for notification of data available to read.
        """
        self._reads.add(reader)

    def addWriter(self, writer):
        """
        Add a FileDescriptor for notification of data available to write.
        """
        self._writes.add(writer)

    def removeReader(self, reader):
        """
        Remove a Selectable for notification of data available to read.
        """
        self._reads.discard(reader)

    def removeWriter(self, writer):
        """
        Remove a Selectable for notification of data available to write.
        """
        self._writes.discard(writer)

    def removeAll(self):
        """
        Remove every reader and writer, returning the removed selectables.
        """
        return self._removeAll(self._reads, self._writes)

    def getReaders(self):
        """
        Return a list of the currently monitored readers.
        """
        return list(self._reads)

    def getWriters(self):
        """
        Return a list of the currently monitored writers.
        """
        return list(self._writes)
def install():
    """
    Install a select()-based reactor as the global Twisted reactor.
    """
    newReactor = SelectReactor()
    from twisted.internet.main import installReactor

    installReactor(newReactor)
__all__ = ["install"]

View File

@@ -0,0 +1,100 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial Port Protocol
"""
# http://twistedmatrix.com/trac/ticket/3725#comment:24
# Apparently applications use these names even though they should
# be imported from pyserial
# Re-exported pyserial names kept for backward compatibility (see the ticket
# referenced above), plus this module's own export.
__all__ = [
    "serial",
    "PARITY_ODD",
    "PARITY_EVEN",
    "PARITY_NONE",
    "STOPBITS_TWO",
    "STOPBITS_ONE",
    "FIVEBITS",
    "EIGHTBITS",
    "SEVENBITS",
    "SIXBITS",
    # Name this module is actually trying to export
    "SerialPort",
]
# all of them require pyserial at the moment, so check that first
import serial
from serial import (
EIGHTBITS,
FIVEBITS,
PARITY_EVEN,
PARITY_NONE,
PARITY_ODD,
SEVENBITS,
SIXBITS,
STOPBITS_ONE,
STOPBITS_TWO,
)
from twisted.python.runtime import platform
class BaseSerialPort:
    """
    Base class for Windows and POSIX serial ports.

    Every method here is a thin delegation to the wrapped pyserial port;
    subclasses are expected to create and store it as C{self._serial}.

    @ivar _serialFactory: a pyserial C{serial.Serial} factory, used to create
        the instance stored in C{self._serial}. Overrideable to enable easier
        testing.

    @ivar _serial: a pyserial C{serial.Serial} instance used to manage the
        options on the serial port.
    """

    _serialFactory = serial.Serial

    def setBaudRate(self, baudrate):
        """
        Change the port's baud rate, preferring pyserial's ``setBaudrate``
        spelling when available and falling back to ``setBaudRate``.
        """
        if hasattr(self._serial, "setBaudrate"):
            self._serial.setBaudrate(baudrate)
        else:
            self._serial.setBaudRate(baudrate)

    def inWaiting(self):
        """Return the count of bytes waiting in the input buffer."""
        return self._serial.inWaiting()

    def flushInput(self):
        """Discard the contents of the input buffer."""
        self._serial.flushInput()

    def flushOutput(self):
        """Discard the contents of the output buffer."""
        self._serial.flushOutput()

    def sendBreak(self):
        """Transmit a serial break condition."""
        self._serial.sendBreak()

    def getDSR(self):
        """Return the state of the DSR (data set ready) line."""
        return self._serial.getDSR()

    def getCD(self):
        """Return the state of the CD (carrier detect) line."""
        return self._serial.getCD()

    def getRI(self):
        """Return the state of the RI (ring indicator) line."""
        return self._serial.getRI()

    def getCTS(self):
        """Return the state of the CTS (clear to send) line."""
        return self._serial.getCTS()

    def setDTR(self, on=1):
        """Set the DTR (data terminal ready) line to C{on}."""
        self._serial.setDTR(on)

    def setRTS(self, on=1):
        """Set the RTS (request to send) line to C{on}."""
        self._serial.setRTS(on)
# Export the appropriate platform implementation of SerialPort.
if platform.isWindows():
    from twisted.internet._win32serialport import SerialPort
else:
    from twisted.internet._posixserialport import SerialPort  # type: ignore[assignment]

View File

@@ -0,0 +1,281 @@
# -*- test-case-name: twisted.test.test_ssl -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module implements Transport Layer Security (TLS) support for Twisted. It
requires U{PyOpenSSL <https://pypi.python.org/pypi/pyOpenSSL>}.
If you wish to establish a TLS connection, please use one of the following
APIs:
- SSL endpoints for L{servers
<twisted.internet.endpoints.SSL4ServerEndpoint>} and L{clients
<twisted.internet.endpoints.SSL4ClientEndpoint>}
- L{startTLS <twisted.internet.interfaces.ITLSTransport.startTLS>}
- L{connectSSL <twisted.internet.interfaces.IReactorSSL.connectSSL>}
- L{listenSSL <twisted.internet.interfaces.IReactorSSL.listenSSL>}
These APIs all require a C{contextFactory} argument that specifies their
security properties, such as certificate, private key, certificate authorities
to verify the peer, allowed TLS protocol versions, cipher suites, and so on.
The recommended value for this argument is a L{CertificateOptions} instance;
see its documentation for an explanation of the available options.
The C{contextFactory} name is a bit of an anachronism now, as context factories
have been replaced with "connection creators", but these objects serve the same
role.
Be warned that implementing your own connection creator (i.e.: value for the
C{contextFactory}) is both difficult and dangerous; the Twisted team has worked
hard to make L{CertificateOptions}' API comprehensible and unsurprising, and
the Twisted team is actively maintaining it to ensure that it becomes more
secure over time.
If you are really absolutely sure that you want to take on the risk of
implementing your own connection creator based on the pyOpenSSL API, see the
L{server connection creator
<twisted.internet.interfaces.IOpenSSLServerConnectionCreator>} and L{client
connection creator
<twisted.internet.interfaces.IOpenSSLClientConnectionCreator>} interfaces.
Developers using Twisted, please ignore the L{Port}, L{Connector}, and
L{Client} classes defined here, as these are details of certain reactors' TLS
implementations, exposed by accident (and remaining here only for compatibility
reasons). If you wish to establish a TLS connection, please use one of the
APIs listed above.
@note: "SSL" (Secure Sockets Layer) is an antiquated synonym for "TLS"
(Transport Layer Security). You may see these terms used interchangeably
throughout the documentation.
"""
from __future__ import annotations
from zope.interface import implementedBy, implementer, implementer_only
# System imports
from OpenSSL import SSL
# Twisted imports
from twisted.internet import interfaces, tcp
supported = True
@implementer(interfaces.IOpenSSLContextFactory)
class ContextFactory:
    """
    An abstract factory producing SSL context objects for server-side
    connections.
    """

    # Distinguishes server-side factories (0) from client-side ones (1).
    isClient = 0

    def getContext(self):
        """
        Return an C{SSL.Context} object.  Subclasses must override this.
        """
        raise NotImplementedError
class DefaultOpenSSLContextFactory(ContextFactory):
    """
    A server-side SSL context factory configured from a private key file
    and a certificate file.  The contexts it produces define parameters of
    the SSL handshake and of the connection that follows it.

    @ivar _contextFactory: the callable invoked to build new context
        objects; normally L{OpenSSL.SSL.Context}.
    """

    _context = None

    def __init__(
        self,
        privateKeyFileName,
        certificateFileName,
        sslmethod=SSL.TLS_METHOD,
        _contextFactory=SSL.Context,
    ):
        """
        @param privateKeyFileName: Name of a file containing a private key
        @param certificateFileName: Name of a file containing a certificate
        @param sslmethod: The SSL method to use
        """
        self.privateKeyFileName = privateKeyFileName
        self.certificateFileName = certificateFileName
        self.sslmethod = sslmethod
        self._contextFactory = _contextFactory
        # Build a context immediately so that bad parameters are reported
        # here, at construction time, rather than on the first connection.
        self.cacheContext()

    def cacheContext(self):
        """Create and cache the SSL context, if not already cached."""
        if self._context is not None:
            return
        context = self._contextFactory(self.sslmethod)
        # SSLv2 is hopelessly insecure and has been obsolete since 1996;
        # refuse to negotiate it.
        context.set_options(SSL.OP_NO_SSLv2)
        context.use_certificate_file(self.certificateFileName)
        context.use_privatekey_file(self.privateKeyFileName)
        self._context = context

    def __getstate__(self):
        # The OpenSSL context object is not picklable; drop it from the
        # serialized state.
        state = self.__dict__.copy()
        del state["_context"]
        return state

    def __setstate__(self, state):
        self.__dict__ = state

    def getContext(self):
        """
        Return the cached SSL context.
        """
        return self._context
@implementer(interfaces.IOpenSSLContextFactory)
class ClientContextFactory:
    """
    A context factory for client-side SSL connections.
    """

    isClient = 1

    # TLS_METHOD negotiates the highest TLS version both peers support.
    method = SSL.TLS_METHOD

    _contextFactory = SSL.Context

    def getContext(self):
        """
        Build and return a fresh client context with the legacy protocol
        versions (SSLv2, SSLv3, TLS 1.0 and TLS 1.1) disabled.
        """
        context = self._contextFactory(self.method)
        context.set_options(
            SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1
        )
        return context
# Declare ISSLTransport instead of ITLSTransport: this transport is
# encrypted from the moment the connection is made, so ITLSTransport's
# start-TLS-later API is deliberately excluded from its interfaces.
@implementer_only(
    interfaces.ISSLTransport,
    *(i for i in implementedBy(tcp.Client) if i != interfaces.ITLSTransport),
)
class Client(tcp.Client):
    """
    I am an SSL client.
    """
    def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
        # tcp.Client.__init__ depends on self.ctxFactory being set
        self.ctxFactory = ctxFactory
        tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)
    def _connectDone(self):
        # Begin TLS negotiation as soon as the TCP connection succeeds,
        # then resume writing and delegate to the base implementation.
        self.startTLS(self.ctxFactory)
        self.startWriting()
        tcp.Client._connectDone(self)
@implementer(interfaces.ISSLTransport)
class Server(tcp.Server):
    """
    I am an SSL server.
    """
    # The listening Port that accepted this connection; presumably set by
    # tcp.Server before the ctxFactory access below — not visible here.
    server: Port
    def __init__(self, *args, **kwargs):
        tcp.Server.__init__(self, *args, **kwargs)
        # Every accepted connection speaks TLS from the start, using the
        # context factory configured on the listening Port.
        self.startTLS(self.server.ctxFactory)
    def getPeerCertificate(self):
        # ISSLTransport.getPeerCertificate
        raise NotImplementedError("Server.getPeerCertificate")
class Port(tcp.Port):
    """
    A listening TCP port that speaks TLS with every client that connects.
    """

    transport = Server
    _type = "TLS"

    def __init__(
        self, port, factory, ctxFactory, backlog=50, interface="", reactor=None
    ):
        tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
        # Remembered here so each accepted Server transport can start TLS
        # with the same configuration.
        self.ctxFactory = ctxFactory

    def _getLogPrefix(self, factory):
        """
        Extend the base log prefix with a marker indicating that this port
        accepts TLS connections.
        """
        prefix = tcp.Port._getLogPrefix(self, factory)
        return prefix + " (TLS)"
class Connector(tcp.Connector):
    """
    A TCP connector that builds TLS client transports.
    """

    def __init__(
        self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None
    ):
        self.contextFactory = contextFactory
        tcp.Connector.__init__(self, host, port, factory, timeout, bindAddress, reactor)
        # Ask pyOpenSSL to validate the configuration now; failing here is
        # friendlier than failing after the transport has been set up.
        contextFactory.getContext()

    def _makeTransport(self):
        """Build the TLS Client transport for this connection attempt."""
        return Client(
            self.host, self.port, self.bindAddress, self.contextFactory, self, self.reactor
        )
from twisted.internet._sslverify import (
DN,
Certificate,
CertificateRequest,
DistinguishedName,
KeyPair,
OpenSSLAcceptableCiphers as AcceptableCiphers,
OpenSSLCertificateOptions as CertificateOptions,
OpenSSLDefaultPaths,
OpenSSLDiffieHellmanParameters as DiffieHellmanParameters,
PrivateCertificate,
ProtocolNegotiationSupport,
TLSVersion,
VerificationError,
optionsForClientTLS,
platformTrust,
protocolNegotiationMechanisms,
trustRootFromCertificates,
)
__all__ = [
"ContextFactory",
"DefaultOpenSSLContextFactory",
"ClientContextFactory",
"DistinguishedName",
"DN",
"Certificate",
"CertificateRequest",
"PrivateCertificate",
"KeyPair",
"AcceptableCiphers",
"CertificateOptions",
"DiffieHellmanParameters",
"platformTrust",
"OpenSSLDefaultPaths",
"TLSVersion",
"VerificationError",
"optionsForClientTLS",
"ProtocolNegotiationSupport",
"protocolNegotiationMechanisms",
"trustRootFromCertificates",
]

View File

@@ -0,0 +1,37 @@
# -*- test-case-name: twisted.test.test_stdio -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Standard input/out/err support.
This module exposes one name, StandardIO, which is a factory that takes an
IProtocol provider as an argument. It connects that protocol to standard input
and output on the current process.
It should work on any UNIX and also on Win32 (with some caveats: due to
platform limitations, it will perform very poorly on Win32).
Future Plans::
support for stderr, perhaps
Rewrite to use the reactor instead of an ad-hoc mechanism for connecting
protocols to transport.
Maintainer: James Y Knight
"""
from twisted.python.runtime import platform
if platform.isWindows():
from twisted.internet._win32stdio import StandardIO, Win32PipeAddress as PipeAddress
else:
from twisted.internet._posixstdio import ( # type: ignore[assignment]
PipeAddress,
StandardIO,
)
__all__ = ["StandardIO", "PipeAddress"]

View File

@@ -0,0 +1,976 @@
# -*- test-case-name: twisted.test.test_task,twisted.test.test_cooperator -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Scheduling utility methods and classes.
"""
import sys
import time
import warnings
from typing import (
Callable,
Coroutine,
Iterable,
Iterator,
List,
NoReturn,
Optional,
Sequence,
TypeVar,
Union,
cast,
)
from zope.interface import implementer
from incremental import Version
from twisted.internet.base import DelayedCall
from twisted.internet.defer import Deferred, ensureDeferred, maybeDeferred
from twisted.internet.error import ReactorNotRunning
from twisted.internet.interfaces import IDelayedCall, IReactorCore, IReactorTime
from twisted.python import log, reflect
from twisted.python.deprecate import _getDeprecationWarningString
from twisted.python.failure import Failure
_T = TypeVar("_T")
class LoopingCall:
    """Call a function repeatedly.
    If C{f} returns a deferred, rescheduling will not take place until the
    deferred has fired. The result value is ignored.
    @ivar f: The function to call.
    @ivar a: A tuple of arguments to pass the function.
    @ivar kw: A dictionary of keyword arguments to pass to the function.
    @ivar clock: A provider of
        L{twisted.internet.interfaces.IReactorTime}. The default is
        L{twisted.internet.reactor}. Feel free to set this to
        something else, but it probably ought to be set *before*
        calling L{start}.
    @ivar running: A flag which is C{True} while C{f} is scheduled to be called
        (or is currently being called). It is set to C{True} when L{start} is
        called and set to C{False} when L{stop} is called or if C{f} raises an
        exception. In either case, it will be C{False} by the time the
        C{Deferred} returned by L{start} fires its callback or errback.
    @ivar _realLastTime: When counting skips, the time at which the skip
        counter was last invoked.
    @ivar _runAtStart: A flag indicating whether the 'now' argument was passed
        to L{LoopingCall.start}.
    """
    # The pending IDelayedCall for the next iteration, or None when no
    # iteration is currently scheduled.
    call: Optional[IDelayedCall] = None
    running = False
    # Fires when the loop stops (callback) or f fails (errback); created by
    # start() and consumed exactly once.
    _deferred: Optional[Deferred["LoopingCall"]] = None
    interval: Optional[float] = None
    _runAtStart = False
    # Clock time at which start() was invoked (or reset() last rebased it).
    starttime: Optional[float] = None
    _realLastTime: Optional[float] = None
    def __init__(self, f: Callable[..., object], *a: object, **kw: object) -> None:
        self.f = f
        self.a = a
        self.kw = kw
        # Imported here rather than at module scope, so the global reactor
        # is bound when an instance is created, not when the module loads.
        from twisted.internet import reactor
        self.clock = cast(IReactorTime, reactor)
    @property
    def deferred(self) -> Optional[Deferred["LoopingCall"]]:
        """
        DEPRECATED. L{Deferred} fired when loop stops or fails.
        Use the L{Deferred} returned by L{LoopingCall.start}.
        """
        warningString = _getDeprecationWarningString(
            "twisted.internet.task.LoopingCall.deferred",
            Version("Twisted", 16, 0, 0),
            replacement="the deferred returned by start()",
        )
        warnings.warn(warningString, DeprecationWarning, stacklevel=2)
        return self._deferred
    @classmethod
    def withCount(cls, countCallable: Callable[[int], object]) -> "LoopingCall":
        """
        An alternate constructor for L{LoopingCall} that makes available the
        number of calls which should have occurred since it was last invoked.
        Note that this number is an C{int} value; It represents the discrete
        number of calls that should have been made. For example, if you are
        using a looping call to display an animation with discrete frames, this
        number would be the number of frames to advance.
        The count is normally 1, but can be higher. For example, if the reactor
        is blocked and takes too long to invoke the L{LoopingCall}, a Deferred
        returned from a previous call is not fired before an interval has
        elapsed, or if the callable itself blocks for longer than an interval,
        preventing I{itself} from being called.
        When running with an interval of 0, count will be always 1.
        @param countCallable: A callable that will be invoked each time the
            resulting LoopingCall is run, with an integer specifying the number
            of calls that should have been invoked.
        @return: An instance of L{LoopingCall} with call counting enabled,
            which provides the count as the first positional argument.
        @since: 9.0
        """
        def counter() -> object:
            now = self.clock.seconds()
            if self.interval == 0:
                self._realLastTime = now
                return countCallable(1)
            lastTime = self._realLastTime
            if lastTime is None:
                assert (
                    self.starttime is not None
                ), "LoopingCall called before it was started"
                lastTime = self.starttime
                if self._runAtStart:
                    assert (
                        self.interval is not None
                    ), "Looping call called with None interval"
                    lastTime -= self.interval
            lastInterval = self._intervalOf(lastTime)
            thisInterval = self._intervalOf(now)
            count = thisInterval - lastInterval
            if count > 0:
                self._realLastTime = now
                return countCallable(count)
            return None
        # counter() closes over ``self``, which is only bound on the next
        # line; that is safe because counter cannot run before start().
        self = cls(counter)
        return self
    def _intervalOf(self, t: float) -> int:
        """
        Determine the number of intervals passed as of the given point in
        time.
        @param t: The specified time (from the start of the L{LoopingCall}) to
            be measured in intervals
        @return: The C{int} number of intervals which have passed as of the
            given point in time.
        """
        assert self.starttime is not None
        assert self.interval is not None
        elapsedTime = t - self.starttime
        intervalNum = int(elapsedTime / self.interval)
        return intervalNum
    def start(self, interval: float, now: bool = True) -> Deferred["LoopingCall"]:
        """
        Start running function every interval seconds.
        @param interval: The number of seconds between calls. May be
            less than one. Precision will depend on the underlying
            platform, the available hardware, and the load on the system.
        @param now: If True, run this call right now. Otherwise, wait
            until the interval has elapsed before beginning.
        @return: A Deferred whose callback will be invoked with
            C{self} when C{self.stop} is called, or whose errback will be
            invoked when the function raises an exception or returned a
            deferred that has its errback invoked.
        """
        assert not self.running, "Tried to start an already running " "LoopingCall."
        if interval < 0:
            raise ValueError("interval must be >= 0")
        self.running = True
        # Loop might fail to start and then self._deferred will be cleared.
        # This why the local C{deferred} variable is used.
        deferred = self._deferred = Deferred()
        self.starttime = self.clock.seconds()
        self.interval = interval
        self._runAtStart = now
        if now:
            self()
        else:
            self._scheduleFrom(self.starttime)
        return deferred
    def stop(self) -> None:
        """Stop running function."""
        assert self.running, "Tried to stop a LoopingCall that was " "not running."
        self.running = False
        if self.call is not None:
            self.call.cancel()
            self.call = None
        # Clear self._deferred before firing, so a reentrant start() from a
        # callback sees a consistent state.
        d, self._deferred = self._deferred, None
        assert d is not None
        d.callback(self)
    def reset(self) -> None:
        """
        Skip the next iteration and reset the timer.
        @since: 11.1
        """
        assert self.running, "Tried to reset a LoopingCall that was " "not running."
        if self.call is not None:
            self.call.cancel()
            self.call = None
            self.starttime = self.clock.seconds()
            self._scheduleFrom(self.starttime)
    def __call__(self) -> None:
        def cb(result: object) -> None:
            if self.running:
                self._scheduleFrom(self.clock.seconds())
            else:
                d, self._deferred = self._deferred, None
                assert d is not None
                d.callback(self)
        def eb(failure: Failure) -> None:
            self.running = False
            d, self._deferred = self._deferred, None
            assert d is not None
            d.errback(failure)
        # The delayed call that invoked us has now fired; forget it before
        # running f, which may itself stop or reset this LoopingCall.
        self.call = None
        d = maybeDeferred(self.f, *self.a, **self.kw)
        d.addCallback(cb)
        d.addErrback(eb)
    def _scheduleFrom(self, when: float) -> None:
        """
        Schedule the next iteration of this looping call.
        @param when: The present time from whence the call is scheduled.
        """
        def howLong() -> float:
            # How long should it take until the next invocation of our
            # callable? Split out into a function because there are multiple
            # places we want to 'return' out of this.
            if self.interval == 0:
                # If the interval is 0, just go as fast as possible, always
                # return zero, call ourselves ASAP.
                return 0
            # Compute the time until the next interval; how long has this call
            # been running for?
            assert self.starttime is not None
            runningFor = when - self.starttime
            # And based on that start time, when does the current interval end?
            assert self.interval is not None
            untilNextInterval = self.interval - (runningFor % self.interval)
            # Now that we know how long it would be, we have to tell if the
            # number is effectively zero. However, we can't just test against
            # zero. If a number with a small exponent is added to a number
            # with a large exponent, it may be so small that the digits just
            # fall off the end, which means that adding the increment makes no
            # difference; it's time to tick over into the next interval.
            if when == when + untilNextInterval:
                # If it's effectively zero, then we need to add another
                # interval.
                return self.interval
            # Finally, if everything else is normal, we just return the
            # computed delay.
            return untilNextInterval
        self.call = self.clock.callLater(howLong(), self)
    def __repr__(self) -> str:
        # This code should be replaced by a utility function in reflect;
        # see ticket #6066:
        func = getattr(self.f, "__qualname__", None)
        if func is None:
            func = getattr(self.f, "__name__", None)
            if func is not None:
                imClass = getattr(self.f, "im_class", None)
                if imClass is not None:
                    func = f"{imClass}.{func}"
        if func is None:
            func = reflect.safe_repr(self.f)
        return "LoopingCall<{!r}>({}, *{}, **{})".format(
            self.interval,
            func,
            reflect.safe_repr(self.a),
            reflect.safe_repr(self.kw),
        )
class SchedulerError(Exception):
    """
    Raised when a scheduler, or one of its tasks, is in a state that does
    not permit the requested operation.  Not raised directly; it is the
    common superclass of the more specific scheduler-state exceptions.
    """
class SchedulerStopped(SchedulerError):
    """
    Raised when an operation cannot complete because the scheduler was
    stopped while it was in progress, or had already been stopped.
    """
class TaskFinished(SchedulerError):
    """
    Raised when an operation cannot complete because the task has already
    completed, been stopped, failed, or otherwise permanently ceased
    running.
    """
class TaskDone(TaskFinished):
    """
    Raised when an operation cannot complete because the task has already
    run to completion.
    """
class TaskStopped(TaskFinished):
    """
    Raised when an operation cannot complete because the task was stopped.
    """
class TaskFailed(TaskFinished):
    """
    Raised when an operation cannot complete because the task terminated
    with an unhandled error.
    """
class NotPaused(SchedulerError):
    """
    Raised when a task is resumed without having previously been paused.
    """
class _Timer:
MAX_SLICE = 0.01
def __init__(self) -> None:
self.end = time.time() + self.MAX_SLICE
def __call__(self) -> bool:
return time.time() >= self.end
_EPSILON = 0.00000001
def _defaultScheduler(callable: Callable[[], None]) -> IDelayedCall:
    """
    Schedule C{callable} on the global reactor after a negligible delay.
    """
    from twisted.internet import reactor
    clock = cast(IReactorTime, reactor)
    return clock.callLater(_EPSILON, callable)
_TaskResultT = TypeVar("_TaskResultT")
class CooperativeTask:
    """
    A L{CooperativeTask} is a task object inside a L{Cooperator}, which can be
    paused, resumed, and stopped. It can also have its completion (or
    termination) monitored.
    @see: L{Cooperator.cooperate}
    @ivar _iterator: the iterator to iterate when this L{CooperativeTask} is
        asked to do work.
    @ivar _cooperator: the L{Cooperator} that this L{CooperativeTask}
        participates in, which is used to re-insert it upon resume.
    @ivar _deferreds: the list of L{Deferred}s to fire when this task
        completes, fails, or finishes.
    @ivar _pauseCount: the number of times that this L{CooperativeTask} has
        been paused; if 0, it is running.
    @ivar _completionState: The completion-state of this L{CooperativeTask}.
        L{None} if the task is not yet completed, an instance of L{TaskStopped}
        if C{stop} was called to stop this task early, of L{TaskFailed} if the
        application code in the iterator raised an exception which caused it to
        terminate, and of L{TaskDone} if it terminated normally via raising
        C{StopIteration}.
    """
    def __init__(
        self, iterator: Iterator[_TaskResultT], cooperator: "Cooperator"
    ) -> None:
        """
        A private constructor: to create a new L{CooperativeTask}, see
        L{Cooperator.cooperate}.
        """
        self._iterator = iterator
        self._cooperator = cooperator
        self._deferreds: List[Deferred[Iterator[_TaskResultT]]] = []
        self._pauseCount = 0
        self._completionState: Optional[SchedulerError] = None
        self._completionResult: Optional[Union[Iterator[_TaskResultT], Failure]] = None
        # Registering with the cooperator schedules this task for work.
        cooperator._addTask(self)
    def whenDone(self) -> Deferred[Iterator[_TaskResultT]]:
        """
        Get a L{Deferred} notification of when this task is complete.
        @return: a L{Deferred} that fires with the C{iterator} that this
            L{CooperativeTask} was created with when the iterator has been
            exhausted (i.e. its C{next} method has raised C{StopIteration}), or
            fails with the exception raised by C{next} if it raises some other
            exception.
        @rtype: L{Deferred}
        """
        d: Deferred[Iterator[_TaskResultT]] = Deferred()
        if self._completionState is None:
            self._deferreds.append(d)
        else:
            # Already finished: fire the new Deferred immediately with the
            # recorded result rather than queueing it.
            assert self._completionResult is not None
            d.callback(self._completionResult)
        return d
    def pause(self) -> None:
        """
        Pause this L{CooperativeTask}.  Stop doing work until
        L{CooperativeTask.resume} is called.  If C{pause} is called more than
        once, C{resume} must be called an equal number of times to resume this
        task.
        @raise TaskFinished: if this task has already finished or completed.
        """
        self._checkFinish()
        self._pauseCount += 1
        if self._pauseCount == 1:
            # First pause: leave the cooperator's work rotation.
            self._cooperator._removeTask(self)
    def resume(self) -> None:
        """
        Resume processing of a paused L{CooperativeTask}.
        @raise NotPaused: if this L{CooperativeTask} is not paused.
        """
        if self._pauseCount == 0:
            raise NotPaused()
        self._pauseCount -= 1
        # Rejoin the cooperator only when fully unpaused and still
        # incomplete; a task that finished while paused stays out.
        if self._pauseCount == 0 and self._completionState is None:
            self._cooperator._addTask(self)
    def _completeWith(
        self,
        completionState: SchedulerError,
        deferredResult: Union[Iterator[_TaskResultT], Failure],
    ) -> None:
        """
        @param completionState: a L{SchedulerError} exception or a subclass
            thereof, indicating what exception should be raised when subsequent
            operations are performed.
        @param deferredResult: the result to fire all the deferreds with.
        """
        self._completionState = completionState
        self._completionResult = deferredResult
        if not self._pauseCount:
            self._cooperator._removeTask(self)
        # The Deferreds need to be invoked after all this is completed, because
        # a Deferred may want to manipulate other tasks in a Cooperator. For
        # example, if you call "stop()" on a cooperator in a callback on a
        # Deferred returned from whenDone(), this CooperativeTask must be gone
        # from the Cooperator by that point so that _completeWith is not
        # invoked reentrantly; that would cause these Deferreds to blow up with
        # an AlreadyCalledError, or the _removeTask to fail with a ValueError.
        for d in self._deferreds:
            d.callback(deferredResult)
    def stop(self) -> None:
        """
        Stop further processing of this task.
        @raise TaskFinished: if this L{CooperativeTask} has previously
            completed, via C{stop}, completion, or failure.
        """
        self._checkFinish()
        self._completeWith(TaskStopped(), Failure(TaskStopped()))
    def _checkFinish(self) -> None:
        """
        If this task has been stopped, raise the appropriate subclass of
        L{TaskFinished}.
        """
        if self._completionState is not None:
            raise self._completionState
    def _oneWorkUnit(self) -> None:
        """
        Perform one unit of work for this task, retrieving one item from its
        iterator, stopping if there are no further items in the iterator, and
        pausing if the result was a L{Deferred}.
        """
        try:
            result = next(self._iterator)
        except StopIteration:
            self._completeWith(TaskDone(), self._iterator)
        except BaseException:
            self._completeWith(TaskFailed(), Failure())
        else:
            if isinstance(result, Deferred):
                # A yielded Deferred pauses this task; it resumes when the
                # Deferred fires, or completes as failed if it errbacks.
                self.pause()
                def failLater(failure: Failure) -> None:
                    self._completeWith(TaskFailed(), failure)
                result.addCallbacks(lambda result: self.resume(), failLater)
class Cooperator:
    """
    Cooperative task scheduler.
    A cooperative task is an iterator where each iteration represents an
    atomic unit of work.  When the iterator yields, it allows the
    L{Cooperator} to decide which of its tasks to execute next.  If the
    iterator yields a L{Deferred} then work will pause until the
    L{Deferred} fires and completes its callback chain.
    When a L{Cooperator} has more than one task, it distributes work between
    all tasks.
    There are two ways to add tasks to a L{Cooperator}, L{cooperate} and
    L{coiterate}.  L{cooperate} is the more useful of the two, as it returns a
    L{CooperativeTask}, which can be L{paused<CooperativeTask.pause>},
    L{resumed<CooperativeTask.resume>} and L{waited
    on<CooperativeTask.whenDone>}.  L{coiterate} has the same effect, but
    returns only a L{Deferred} that fires when the task is done.
    L{Cooperator} can be used for many things, including but not limited to:
      - running one or more computationally intensive tasks without blocking
      - limiting parallelism by running a subset of the total tasks
        simultaneously
      - doing one thing, waiting for a L{Deferred} to fire,
        doing the next thing, repeat (i.e. serializing a sequence of
        asynchronous tasks)
    Multiple L{Cooperator}s do not cooperate with each other, so for most
    cases you should use the L{global cooperator<task.cooperate>}.
    """
    def __init__(
        self,
        terminationPredicateFactory: Callable[[], Callable[[], bool]] = _Timer,
        scheduler: Callable[[Callable[[], None]], IDelayedCall] = _defaultScheduler,
        started: bool = True,
    ):
        """
        Create a scheduler-like object to which iterators may be added.
        @param terminationPredicateFactory: A no-argument callable which will
            be invoked at the beginning of each step and should return a
            no-argument callable which will return True when the step should be
            terminated. The default factory is time-based and allows iterators to
            run for 1/100th of a second at a time.
        @param scheduler: A one-argument callable which takes a no-argument
            callable and should invoke it at some future point. This will be used
            to schedule each step of this Cooperator.
        @param started: A boolean which indicates whether iterators should be
            stepped as soon as they are added, or if they will be queued up until
            L{Cooperator.start} is called.
        """
        self._tasks: List[CooperativeTask] = []
        # Persistent iterator over self._tasks, so that each tick resumes
        # the round-robin where the previous one left off.
        self._metarator: Iterator[CooperativeTask] = iter(())
        self._terminationPredicateFactory = terminationPredicateFactory
        self._scheduler = scheduler
        self._delayedCall: Optional[IDelayedCall] = None
        self._stopped = False
        self._started = started
    def coiterate(
        self,
        iterator: Iterator[_TaskResultT],
        doneDeferred: Optional[Deferred[Iterator[_TaskResultT]]] = None,
    ) -> Deferred[Iterator[_TaskResultT]]:
        """
        Add an iterator to the list of iterators this L{Cooperator} is
        currently running.
        Equivalent to L{cooperate}, but returns a L{Deferred} that will
        be fired when the task is done.
        @param doneDeferred: If specified, this will be the Deferred used as
            the completion deferred. It is suggested that you use the default,
            which creates a new Deferred for you.
        @return: a Deferred that will fire when the iterator finishes.
        """
        if doneDeferred is None:
            doneDeferred = Deferred()
        whenDone: Deferred[Iterator[_TaskResultT]] = CooperativeTask(
            iterator, self
        ).whenDone()
        whenDone.chainDeferred(doneDeferred)
        return doneDeferred
    def cooperate(self, iterator: Iterator[_TaskResultT]) -> CooperativeTask:
        """
        Start running the given iterator as a long-running cooperative task, by
        calling next() on it as a periodic timed event.
        @param iterator: the iterator to invoke.
        @return: a L{CooperativeTask} object representing this task.
        """
        return CooperativeTask(iterator, self)
    def _addTask(self, task: CooperativeTask) -> None:
        """
        Add a L{CooperativeTask} object to this L{Cooperator}.
        """
        if self._stopped:
            self._tasks.append(task)  # XXX silly, I know, but _completeWith
            # does the inverse
            task._completeWith(SchedulerStopped(), Failure(SchedulerStopped()))
        else:
            self._tasks.append(task)
            self._reschedule()
    def _removeTask(self, task: CooperativeTask) -> None:
        """
        Remove a L{CooperativeTask} from this L{Cooperator}.
        """
        self._tasks.remove(task)
        # If no work left to do, cancel the delayed call:
        if not self._tasks and self._delayedCall:
            self._delayedCall.cancel()
            self._delayedCall = None
    def _tasksWhileNotStopped(self) -> Iterable[CooperativeTask]:
        """
        Yield all L{CooperativeTask} objects in a loop as long as this
        L{Cooperator}'s termination condition has not been met.
        """
        terminator = self._terminationPredicateFactory()
        while self._tasks:
            # Drain the persistent metarator first so a tick starts where
            # the previous tick's time slice ran out.
            for t in self._metarator:
                yield t
                if terminator():
                    return
            self._metarator = iter(self._tasks)
    def _tick(self) -> None:
        """
        Run one scheduler tick.
        """
        self._delayedCall = None
        for taskObj in self._tasksWhileNotStopped():
            taskObj._oneWorkUnit()
        self._reschedule()
    # Set when _reschedule() is called while not started; consumed by
    # start() to schedule the deferred work.
    _mustScheduleOnStart = False
    def _reschedule(self) -> None:
        if not self._started:
            self._mustScheduleOnStart = True
            return
        if self._delayedCall is None and self._tasks:
            self._delayedCall = self._scheduler(self._tick)
    def start(self) -> None:
        """
        Begin scheduling steps.
        """
        self._stopped = False
        self._started = True
        if self._mustScheduleOnStart:
            # Remove the instance attribute so the class-level default
            # (False) shows through again.
            del self._mustScheduleOnStart
            self._reschedule()
    def stop(self) -> None:
        """
        Stop scheduling steps.  Errback the completion Deferreds of all
        iterators which have been added and forget about them.
        """
        self._stopped = True
        for taskObj in self._tasks:
            taskObj._completeWith(SchedulerStopped(), Failure(SchedulerStopped()))
        self._tasks = []
        if self._delayedCall is not None:
            self._delayedCall.cancel()
            self._delayedCall = None
    @property
    def running(self) -> bool:
        """
        Is this L{Cooperator} is currently running?
        @return: C{True} if the L{Cooperator} is running, C{False} otherwise.
        @rtype: C{bool}
        """
        return self._started and not self._stopped
_theCooperator = Cooperator()
def coiterate(iterator: Iterator[_T]) -> Deferred[Iterator[_T]]:
    """
    Run C{iterator} on the global L{Cooperator}, sharing runtime with every
    other iterator that has been passed to this function and is not yet
    exhausted.
    @param iterator: the iterator to invoke.
    @return: a Deferred that will fire when the iterator finishes.
    """
    return _theCooperator.coiterate(iterator)
def cooperate(iterator: Iterator[_T]) -> CooperativeTask:
    """
    Run C{iterator} as a long-lived cooperative task on the global
    L{Cooperator}, which calls next() on it as a periodic timed event.
    This is very useful for computationally expensive work that must not
    block the reactor: arrange for each task to yield frequently, hand it
    to this function, and the global L{Cooperator} will interleave it with
    any other such tasks without blocking for longer than a single
    iteration of a single task.
    @param iterator: the iterator to invoke.
    @return: a L{CooperativeTask} object representing this task.
    """
    return _theCooperator.cooperate(iterator)
@implementer(IReactorTime)
class Clock:
    """
    A deterministic, manually advanced implementation of
    L{IReactorTime.callLater}, useful for writing reproducible unit tests
    of code that schedules timed events through that API.
    """

    # The current simulated time, in seconds.
    rightNow = 0.0

    def __init__(self) -> None:
        self.calls: List[DelayedCall] = []

    def seconds(self) -> float:
        """
        Stand in for time.time().  Used internally when an operation such
        as L{IDelayedCall.reset} needs a value relative to the current
        time.
        @return: The time which should be considered the current time.
        """
        return self.rightNow

    def _sortCalls(self) -> None:
        """
        Order the pending calls by their scheduled firing time.
        """
        self.calls.sort(key=lambda pending: pending.getTime())

    def callLater(
        self, delay: float, callable: Callable[..., object], *args: object, **kw: object
    ) -> IDelayedCall:
        """
        See L{twisted.internet.interfaces.IReactorTime.callLater}.
        """
        scheduled = DelayedCall(
            self.seconds() + delay,
            callable,
            args,
            kw,
            self.calls.remove,
            lambda c: None,
            self.seconds,
        )
        self.calls.append(scheduled)
        self._sortCalls()
        return scheduled

    def getDelayedCalls(self) -> Sequence[IDelayedCall]:
        """
        See L{twisted.internet.interfaces.IReactorTime.getDelayedCalls}
        """
        return self.calls

    def advance(self, amount: float) -> None:
        """
        Move this clock forward by C{amount} seconds and run every pending
        call whose scheduled time has been reached.
        @param amount: The number of seconds by which to advance this
            clock's time.
        """
        self.rightNow += amount
        self._sortCalls()
        # A fired call may itself schedule new calls, so re-sort after
        # each invocation before inspecting the head of the queue again.
        while self.calls and self.calls[0].getTime() <= self.seconds():
            call = self.calls.pop(0)
            call.called = 1
            call.func(*call.args, **call.kw)
            self._sortCalls()

    def pump(self, timings: Iterable[float]) -> None:
        """
        Advance this clock incrementally, once per element of C{timings}.
        """
        for amount in timings:
            self.advance(amount)
def deferLater(
    clock: IReactorTime,
    delay: float,
    callable: Optional[Callable[..., _T]] = None,
    *args: object,
    **kw: object,
) -> Deferred[_T]:
    """
    Invoke C{callable} once C{delay} seconds have elapsed on C{clock}.

    @param clock: The object which will be used to schedule the delayed
        call.
    @param delay: The number of seconds to wait before calling the function.
    @param callable: The callable to call after the delay, or C{None}.
    @param args: The positional arguments to pass to C{callable}.
    @param kw: The keyword arguments to pass to C{callable}.

    @return: A deferred that fires with the result of the callable when the
        specified time has elapsed.
    """

    def _cancel(deferred: Deferred[object]) -> None:
        # Cancelling the Deferred cancels the underlying scheduled call.
        pending.cancel()

    def _invoke(ignored: object) -> _T:
        if callable is None:
            return None  # type: ignore[return-value]
        return callable(*args, **kw)

    deferred: Deferred[_T] = Deferred(_cancel)
    deferred.addCallback(_invoke)
    pending = clock.callLater(delay, deferred.callback, None)
    return deferred
def react(
    main: Callable[
        ...,
        Union[Deferred[_T], Coroutine["Deferred[_T]", object, _T]],
    ],
    argv: Iterable[object] = (),
    _reactor: Optional[IReactorCore] = None,
) -> NoReturn:
    """
    Call C{main} and run the reactor until the L{Deferred} it returns fires or
    the coroutine it returns completes.

    This is intended as the way to start up an application with a well-defined
    completion condition.  Use it to write clients or one-off asynchronous
    operations.  Prefer this to calling C{reactor.run} directly, as this
    function will also:

        - Take care to call C{reactor.stop} once and only once, and at the
          right time.
        - Log any failures from the C{Deferred} returned by C{main}.
        - Exit the application when done, with exit code 0 in case of success
          and 1 in case of failure.  If C{main} fails with a C{SystemExit}
          error, the code returned is used.

    The following demonstrates the signature of a C{main} function which can be
    used with L{react}::

        async def main(reactor, username, password):
            return "ok"

        task.react(main, ("alice", "secret"))

    @param main: A callable which returns a L{Deferred} or
        coroutine. It should take the reactor as its first
        parameter, followed by the elements of C{argv}.

    @param argv: A list of arguments to pass to C{main}. If omitted the
        callable will be invoked with no additional arguments.

    @param _reactor: An implementation detail to allow easier unit testing. Do
        not supply this parameter.

    @since: 12.3
    """
    if _reactor is None:
        from twisted.internet import reactor

        _reactor = cast(IReactorCore, reactor)
    # Start main() immediately; its Deferred may even have fired already by
    # the time the reactor starts.
    finished = ensureDeferred(main(_reactor, *argv))
    code = 0

    # Tracks whether shutdown was initiated externally (e.g. SIGINT), in
    # which case cbFinish must not try to stop the reactor a second time.
    stopping = False

    def onShutdown() -> None:
        nonlocal stopping
        stopping = True

    _reactor.addSystemEventTrigger("before", "shutdown", onShutdown)

    def stop(result: object, stopReactor: bool) -> None:
        if stopReactor:
            assert _reactor is not None
            try:
                _reactor.stop()
            except ReactorNotRunning:
                # The reactor already stopped on its own; nothing to do.
                pass

        if isinstance(result, Failure):
            nonlocal code
            if result.check(SystemExit) is not None:
                # Honour an explicit exit code requested by main().
                code = result.value.code
            else:
                log.err(result, "main function encountered error")
                code = 1

    def cbFinish(result: object) -> None:
        if stopping:
            # Shutdown already in progress; just record the outcome.
            stop(result, False)
        else:
            assert _reactor is not None
            # Defer the stop until the reactor is actually running, in case
            # main()'s Deferred fired synchronously before reactor.run().
            _reactor.callWhenRunning(stop, result, True)

    finished.addBoth(cbFinish)
    _reactor.run()
    sys.exit(code)
# Names exported by ``from twisted.internet.task import *``.  ``cooperate``
# is a documented public API of this module and was previously missing from
# this list, so wildcard imports silently omitted it.
__all__ = [
    "LoopingCall",
    "Clock",
    "SchedulerStopped",
    "Cooperator",
    "coiterate",
    "cooperate",
    "deferLater",
    "react",
]

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,6 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet}.
"""

View File

@@ -0,0 +1,169 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
POSIX implementation of local network interface enumeration.
"""
import socket
import sys
from ctypes import (
CDLL,
POINTER,
Structure,
c_char_p,
c_int,
c_ubyte,
c_uint8,
c_uint32,
c_ushort,
c_void_p,
cast,
pointer,
)
from ctypes.util import find_library
from socket import AF_INET, AF_INET6, inet_ntop
from typing import Any, List, Tuple
from twisted.python.compat import nativeString
# Handle on the platform C library; find_library may return None on some
# platforms, in which case CDLL("") loads the current process's namespace.
libc = CDLL(find_library("c") or "")

# BSD-derived platforms (FreeBSD, macOS) prefix every sockaddr with a length
# byte followed by a one-byte family; other platforms (e.g. Linux) begin
# directly with a 16-bit family field.  _sockaddrCommon is that shared prefix.
if sys.platform.startswith("freebsd") or sys.platform == "darwin":
    _sockaddrCommon: List[Tuple[str, Any]] = [
        ("sin_len", c_uint8),
        ("sin_family", c_uint8),
    ]
else:
    _sockaddrCommon: List[Tuple[str, Any]] = [
        ("sin_family", c_ushort),
    ]
class in_addr(Structure):
    # Mirrors C ``struct in_addr``: a packed 4-byte IPv4 address.
    _fields_ = [
        ("in_addr", c_ubyte * 4),
    ]
class in6_addr(Structure):
    # Mirrors C ``struct in6_addr``: a packed 16-byte IPv6 address.
    _fields_ = [
        ("in_addr", c_ubyte * 16),
    ]
class sockaddr(Structure):
    # Generic sockaddr header: just the platform-dependent common prefix plus
    # a port field; only used here to read the family before re-casting.
    _fields_ = _sockaddrCommon + [
        ("sin_port", c_ushort),
    ]
class sockaddr_in(Structure):
    # Mirrors C ``struct sockaddr_in`` (IPv4): common prefix, port, address.
    _fields_ = _sockaddrCommon + [
        ("sin_port", c_ushort),
        ("sin_addr", in_addr),
    ]
class sockaddr_in6(Structure):
    # Mirrors the leading fields of C ``struct sockaddr_in6`` (IPv6); only
    # the fields needed to extract the packed address are declared.
    _fields_ = _sockaddrCommon + [
        ("sin_port", c_ushort),
        ("sin_flowinfo", c_uint32),
        ("sin_addr", in6_addr),
    ]
class ifaddrs(Structure):
    """
    Mirror of the C{struct ifaddrs} linked-list node filled in by
    I{getifaddrs(3)}.  Fields are assigned after the class statement because
    the structure refers to itself via C{ifa_next}.
    """
ifaddrs_p = POINTER(ifaddrs)
# Assigned out-of-line because ifa_next points at another ifaddrs node.
ifaddrs._fields_ = [
    ("ifa_next", ifaddrs_p),
    ("ifa_name", c_char_p),
    ("ifa_flags", c_uint32),
    ("ifa_addr", POINTER(sockaddr)),
    ("ifa_netmask", POINTER(sockaddr)),
    ("ifa_dstaddr", POINTER(sockaddr)),
    ("ifa_data", c_void_p),
]

# C entry points from libc; see getifaddrs(3).
getifaddrs = libc.getifaddrs
getifaddrs.argtypes = [POINTER(ifaddrs_p)]
getifaddrs.restype = c_int
freeifaddrs = libc.freeifaddrs
freeifaddrs.argtypes = [ifaddrs_p]
def _maybeCleanupScopeIndex(family, packed):
"""
On FreeBSD, kill the embedded interface indices in link-local scoped
addresses.
@param family: The address family of the packed address - one of the
I{socket.AF_*} constants.
@param packed: The packed representation of the address (ie, the bytes of a
I{in_addr} field).
@type packed: L{bytes}
@return: The packed address with any FreeBSD-specific extra bits cleared.
@rtype: L{bytes}
@see: U{https://twistedmatrix.com/trac/ticket/6843}
@see: U{http://www.freebsd.org/doc/en/books/developers-handbook/ipv6.html#ipv6-scope-index}
@note: Indications are that the need for this will be gone in FreeBSD >=10.
"""
if sys.platform.startswith("freebsd") and packed[:2] == b"\xfe\x80":
return packed[:2] + b"\x00\x00" + packed[4:]
return packed
def _interfaces():
    """
    Call C{getifaddrs(3)} and return a list of tuples of interface name,
    address family, and human-readable address representing its results.

    @raise OSError: If the underlying C{getifaddrs} call fails.
    """
    root = ifaddrs_p()
    if getifaddrs(pointer(root)) < 0:
        raise OSError()
    results = []
    try:
        # Walk the linked list with a separate cursor so the head pointer
        # survives for freeifaddrs().  The previous implementation advanced
        # the same variable it later freed, so freeifaddrs() received the
        # exhausted (NULL) tail pointer and the whole list leaked.
        cursor = root
        while cursor:
            if cursor[0].ifa_addr:
                family = cursor[0].ifa_addr[0].sin_family
                if family == AF_INET:
                    addr = cast(cursor[0].ifa_addr, POINTER(sockaddr_in))
                elif family == AF_INET6:
                    addr = cast(cursor[0].ifa_addr, POINTER(sockaddr_in6))
                else:
                    # Skip families we cannot decode (e.g. AF_PACKET).
                    addr = None
                if addr:
                    packed = bytes(addr[0].sin_addr.in_addr[:])
                    packed = _maybeCleanupScopeIndex(family, packed)
                    results.append(
                        (cursor[0].ifa_name, family, inet_ntop(family, packed))
                    )
            cursor = cursor[0].ifa_next
    finally:
        # getifaddrs(3) requires the *head* of the list to be freed.
        freeifaddrs(root)
    return results
def posixGetLinkLocalIPv6Addresses():
    """
    Return a list of strings in colon-hex format representing all the link
    local IPv6 addresses available on the system, as reported by
    I{getifaddrs(3)}.  Each entry carries its interface as a C{%} suffix.
    """
    addresses = []
    for rawName, family, rawAddress in _interfaces():
        if family != socket.AF_INET6:
            continue
        name = nativeString(rawName)
        address = nativeString(rawAddress)
        # Link-local addresses live in fe80::/10; only those are wanted.
        if address.startswith("fe80:"):
            addresses.append(f"{address}%{name}")
    return addresses

View File

@@ -0,0 +1,137 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Windows implementation of local network interface enumeration.
"""
from ctypes import ( # type: ignore[attr-defined]
POINTER,
Structure,
WinDLL,
byref,
c_int,
c_void_p,
cast,
create_string_buffer,
create_unicode_buffer,
wstring_at,
)
from socket import AF_INET6, SOCK_STREAM, socket
WS2_32 = WinDLL("ws2_32")

# Aliases for the Win32 typedefs used in the prototypes below; handles and
# pointer arguments are passed as opaque integer/void-pointer values.
SOCKET = c_int
DWORD = c_int
LPVOID = c_void_p
LPSOCKADDR = c_void_p
LPWSAPROTOCOL_INFO = c_void_p
LPTSTR = c_void_p
LPDWORD = c_void_p
LPWSAOVERLAPPED = c_void_p
LPWSAOVERLAPPED_COMPLETION_ROUTINE = c_void_p

# http://msdn.microsoft.com/en-us/library/ms741621(v=VS.85).aspx
# int WSAIoctl(
#   __in   SOCKET s,
#   __in   DWORD dwIoControlCode,
#   __in   LPVOID lpvInBuffer,
#   __in   DWORD cbInBuffer,
#   __out  LPVOID lpvOutBuffer,
#   __in   DWORD cbOutBuffer,
#   __out  LPDWORD lpcbBytesReturned,
#   __in   LPWSAOVERLAPPED lpOverlapped,
#   __in   LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine
# );
WSAIoctl = WS2_32.WSAIoctl
WSAIoctl.argtypes = [
    SOCKET,
    DWORD,
    LPVOID,
    DWORD,
    LPVOID,
    DWORD,
    LPDWORD,
    LPWSAOVERLAPPED,
    LPWSAOVERLAPPED_COMPLETION_ROUTINE,
]
WSAIoctl.restype = c_int

# http://msdn.microsoft.com/en-us/library/ms741516(VS.85).aspx
# INT WSAAPI WSAAddressToString(
#   __in      LPSOCKADDR lpsaAddress,
#   __in      DWORD dwAddressLength,
#   __in_opt  LPWSAPROTOCOL_INFO lpProtocolInfo,
#   __inout   LPTSTR lpszAddressString,
#   __inout   LPDWORD lpdwAddressStringLength
# );
# The W (wide/unicode) variant is used so results come back as str.
WSAAddressToString = WS2_32.WSAAddressToStringW
WSAAddressToString.argtypes = [LPSOCKADDR, DWORD, LPWSAPROTOCOL_INFO, LPTSTR, LPDWORD]
WSAAddressToString.restype = c_int

# The ioctl used to enumerate local addresses, and the Winsock error code
# returned when the supplied output buffer is too small.
SIO_ADDRESS_LIST_QUERY = 0x48000016
WSAEFAULT = 10014
class SOCKET_ADDRESS(Structure):
    # Mirrors the Win32 SOCKET_ADDRESS struct: pointer to a sockaddr plus its
    # length in bytes.
    _fields_ = [("lpSockaddr", c_void_p), ("iSockaddrLength", c_int)]
def make_SAL(ln):
    """
    Build a C{SOCKET_ADDRESS_LIST} structure type whose C{Address} array has
    room for exactly C{ln} entries.  A fresh type is created per call because
    ctypes array lengths are fixed at class-creation time.
    """
    fields = [("iAddressCount", c_int), ("Address", SOCKET_ADDRESS * ln)]
    return type("SOCKET_ADDRESS_LIST", (Structure,), {"_fields_": fields})
def win32GetLinkLocalIPv6Addresses():
    """
    Return a list of strings in colon-hex format representing all the link local
    IPv6 addresses available on the system, as reported by
    I{WSAIoctl}/C{SIO_ADDRESS_LIST_QUERY}.
    """
    # Any IPv6 socket will do; it is only used as a handle for the ioctl.
    s = socket(AF_INET6, SOCK_STREAM)
    size = 4096
    retBytes = c_int()
    for i in range(2):
        buf = create_string_buffer(size)
        ret = WSAIoctl(
            s.fileno(), SIO_ADDRESS_LIST_QUERY, 0, 0, buf, size, byref(retBytes), 0, 0
        )
        # WSAIoctl might fail with WSAEFAULT, meaning the buffer was too
        # small; retBytes then holds the size needed, so grow and retry once.
        # The actual error code is not inspected here, so if retBytes is
        # still 0 some other error presumably occurred.
        if ret and retBytes.value:
            size = retBytes.value
        else:
            break

    # If it still failed after the retry, give up; the cause is unknown.
    if ret:
        raise RuntimeError("WSAIoctl failure")

    # First read only the count, then re-cast the buffer with the correctly
    # sized Address array.
    addrList = cast(buf, POINTER(make_SAL(0)))
    addrCount = addrList[0].iAddressCount
    addrList = cast(buf, POINTER(make_SAL(addrCount)))

    addressStringBufLength = 1024
    addressStringBuf = create_unicode_buffer(addressStringBufLength)

    retList = []
    for i in range(addrList[0].iAddressCount):
        # retBytes doubles as the in/out length argument for each call.
        retBytes.value = addressStringBufLength
        address = addrList[0].Address[i]
        ret = WSAAddressToString(
            address.lpSockaddr,
            address.iSockaddrLength,
            0,
            addressStringBuf,
            byref(retBytes),
        )
        if ret:
            raise RuntimeError("WSAAddressToString failure")
        retList.append(wstring_at(addressStringBuf))
    # Link-local addresses are rendered with a "%<interface>" scope suffix;
    # only those are wanted.
    return [addr for addr in retList if "%" in addr]

View File

@@ -0,0 +1,594 @@
# -*- test-case-name: twisted.internet.test.test_tcp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Various helpers for tests for connection-oriented transports.
"""
import socket
from gc import collect
from typing import Optional
from weakref import ref
from zope.interface.verify import verifyObject
from twisted.internet.defer import Deferred, gatherResults
from twisted.internet.interfaces import IConnector, IReactorFDSet
from twisted.internet.protocol import ClientFactory, Protocol, ServerFactory
from twisted.internet.test.reactormixins import needsRunningReactor
from twisted.python import context, log
from twisted.python.failure import Failure
from twisted.python.log import ILogContext, err, msg
from twisted.python.runtime import platform
from twisted.test.test_tcp import ClosingProtocol
from twisted.trial.unittest import SkipTest
def findFreePort(interface="127.0.0.1", family=socket.AF_INET, type=socket.SOCK_STREAM):
    """
    Ask the platform to allocate a free port on the specified interface, then
    release the socket and return the address which was allocated.

    @param interface: The local address to try to bind the port on.
    @type interface: C{str}

    @param type: The socket type which will use the resulting port.

    @return: A two-tuple of address and port, like that returned by
        L{socket.getsockname}.
    """
    target = socket.getaddrinfo(interface, 0)[0][4]
    with socket.socket(family, type) as probe:
        probe.bind(target)
        bound = probe.getsockname()
        if family != socket.AF_INET6:
            return bound
        # For IPv6, normalise the host portion to its numeric form.
        host = socket.getnameinfo(
            bound, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV
        )[0]
        return (host, bound[1])
class ConnectableProtocol(Protocol):
    """
    A protocol to be used with L{runProtocolsWithReactor}.

    The protocol and its pair should eventually disconnect from each other.

    @ivar reactor: The reactor used in this test.
    @ivar disconnectReason: The L{Failure} passed to C{connectionLost}.
    @ivar _done: A L{Deferred} which will be fired when the connection is
        lost.
    """

    # None until connectionLost stores the Failure it received.
    disconnectReason = None

    def _setAttributes(self, reactor, done):
        """
        Set attributes on the protocol that are known only externally; this
        will be called by L{runProtocolsWithReactor} when this protocol is
        instantiated.

        @param reactor: The reactor used in this test.
        @param done: A L{Deferred} which will be fired when the connection is
            lost.
        """
        self.reactor = reactor
        self._done = done

    def connectionLost(self, reason):
        self.disconnectReason = reason
        self._done.callback(None)
        # Drop the Deferred so a stray second connectionLost cannot re-fire it.
        del self._done
class EndpointCreator:
    """
    Create client and server endpoints that know how to connect to each other.

    Subclasses implement both methods for a particular transport type.
    """

    def server(self, reactor):
        """
        Return an object providing C{IStreamServerEndpoint} for use in creating
        a server to use to establish the connection type to be tested.
        """
        raise NotImplementedError()

    def client(self, reactor, serverAddress):
        """
        Return an object providing C{IStreamClientEndpoint} for use in creating
        a client to use to establish the connection type to be tested.
        """
        raise NotImplementedError()
class _SingleProtocolFactory(ClientFactory):
    """
    Factory to be used by L{runProtocolsWithReactor}.

    It always returns the same protocol (i.e. is intended for only a single
    connection).
    """

    def __init__(self, protocol):
        # The single pre-built protocol instance to hand out.
        self._protocol = protocol

    def buildProtocol(self, addr):
        return self._protocol
def runProtocolsWithReactor(
    reactorBuilder, serverProtocol, clientProtocol, endpointCreator
):
    """
    Connect two protocols using endpoints and a new reactor instance.

    A new reactor will be created and run, with the client and server protocol
    instances connected to each other using the given endpoint creator. The
    protocols should run through some set of tests, then disconnect; when both
    have disconnected the reactor will be stopped and the function will
    return.

    @param reactorBuilder: A L{ReactorBuilder} instance.
    @param serverProtocol: A L{ConnectableProtocol} that will be the server.
    @param clientProtocol: A L{ConnectableProtocol} that will be the client.
    @param endpointCreator: An instance of L{EndpointCreator}.

    @return: The reactor run by this test.
    """
    reactor = reactorBuilder.buildReactor()
    # Give each protocol its reactor and a Deferred that fires on disconnect.
    serverProtocol._setAttributes(reactor, Deferred())
    clientProtocol._setAttributes(reactor, Deferred())
    serverFactory = _SingleProtocolFactory(serverProtocol)
    clientFactory = _SingleProtocolFactory(clientProtocol)

    # Listen on a port:
    serverEndpoint = endpointCreator.server(reactor)
    d = serverEndpoint.listen(serverFactory)

    # Connect to the port:
    def gotPort(p):
        clientEndpoint = endpointCreator.client(reactor, p.getHost())
        return clientEndpoint.connect(clientFactory)

    d.addCallback(gotPort)

    # Stop reactor when both connections are lost:
    def failed(result):
        log.err(result, "Connection setup failed.")

    disconnected = gatherResults([serverProtocol._done, clientProtocol._done])
    d.addCallback(lambda _: disconnected)
    d.addErrback(failed)
    d.addCallback(lambda _: needsRunningReactor(reactor, reactor.stop))

    reactorBuilder.runReactor(reactor)
    return reactor
def _getWriters(reactor):
    """
    Like L{IReactorFDSet.getWriters}, but additionally supporting the IOCP
    reactor, which does not provide that interface.
    """
    if IReactorFDSet.providedBy(reactor):
        return reactor.getWriters()
    if "IOCP" in reactor.__class__.__name__:
        return reactor.handles
    # No supported way to inspect this reactor's writers.
    raise Exception(f"Cannot find writers on {reactor!r}")
class _AcceptOneClient(ServerFactory):
    """
    This factory fires a L{Deferred} with a protocol instance shortly after it
    is constructed (hopefully long enough afterwards so that it has been
    connected to a transport).

    @ivar reactor: The reactor used to schedule the I{shortly}.

    @ivar result: A L{Deferred} which will be fired with the protocol instance.
    """

    def __init__(self, reactor, result):
        self.reactor = reactor
        self.result = result

    def buildProtocol(self, addr):
        protocol = ServerFactory.buildProtocol(self, addr)
        # Fire on the next reactor iteration so the transport is attached.
        self.reactor.callLater(0, self.result.callback, protocol)
        return protocol
class _SimplePullProducer:
    """
    A pull producer which writes one byte whenever it is resumed.  For use by
    C{test_unregisterProducerAfterDisconnect}.
    """

    def __init__(self, consumer):
        self.consumer = consumer

    def stopProducing(self):
        # Nothing to tear down; production happens only in resumeProducing.
        pass

    def resumeProducing(self):
        log.msg("Producer.resumeProducing")
        self.consumer.write(b"x")
class Stop(ClientFactory):
    """
    A client factory which stops a reactor when a connection attempt fails.
    """

    # Holds the Failure from the failed connection attempt, if any.
    failReason = None

    def __init__(self, reactor):
        self.reactor = reactor

    def clientConnectionFailed(self, connector, reason):
        self.failReason = reason
        msg(f"Stop(CF) cCFailed: {reason.getErrorMessage()}")
        self.reactor.stop()
class ClosingLaterProtocol(ConnectableProtocol):
    """
    ClosingLaterProtocol exchanges one byte with its peer and then disconnects
    itself.  This is mostly a work-around for the fact that connectionMade is
    called before the SSL handshake has completed.
    """

    def __init__(self, onConnectionLost):
        # Set by connectionLost with the Failure explaining the disconnect.
        self.lostConnectionReason = None
        self.onConnectionLost = onConnectionLost

    def connectionMade(self):
        msg("ClosingLaterProtocol.connectionMade")

    def dataReceived(self, bytes):
        msg(f"ClosingLaterProtocol.dataReceived {bytes!r}")
        self.transport.loseConnection()

    def connectionLost(self, reason):
        msg("ClosingLaterProtocol.connectionLost")
        self.lostConnectionReason = reason
        self.onConnectionLost.callback(self)
class ConnectionTestsMixin:
    """
    This mixin defines test methods which should apply to most L{ITransport}
    implementations.
    """

    # Subclasses must set this to an EndpointCreator pairing client and server.
    endpoints: Optional[EndpointCreator] = None

    def test_logPrefix(self):
        """
        Client and server transports implement L{ILoggingContext.logPrefix} to
        return a message reflecting the protocol they are running.
        """

        class CustomLogPrefixProtocol(ConnectableProtocol):
            def __init__(self, prefix):
                self._prefix = prefix
                self.system = None

            def connectionMade(self):
                self.transport.write(b"a")

            def logPrefix(self):
                return self._prefix

            def dataReceived(self, bytes):
                # Record the log system active while handling network events.
                self.system = context.get(ILogContext)["system"]
                self.transport.write(b"b")
                # Only close connection if both sides have received data, so
                # that both sides have system set.
                if b"b" in bytes:
                    self.transport.loseConnection()

        client = CustomLogPrefixProtocol("Custom Client")
        server = CustomLogPrefixProtocol("Custom Server")
        runProtocolsWithReactor(self, server, client, self.endpoints)

        self.assertIn("Custom Client", client.system)
        self.assertIn("Custom Server", server.system)

    def test_writeAfterDisconnect(self):
        """
        After a connection is disconnected, L{ITransport.write} and
        L{ITransport.writeSequence} are no-ops.
        """
        reactor = self.buildReactor()

        finished = []

        serverConnectionLostDeferred = Deferred()
        protocol = lambda: ClosingLaterProtocol(serverConnectionLostDeferred)
        portDeferred = self.endpoints.server(reactor).listen(
            ServerFactory.forProtocol(protocol)
        )

        def listening(port):
            msg(f"Listening on {port.getHost()!r}")
            endpoint = self.endpoints.client(reactor, port.getHost())

            lostConnectionDeferred = Deferred()
            protocol = lambda: ClosingLaterProtocol(lostConnectionDeferred)
            client = endpoint.connect(ClientFactory.forProtocol(protocol))

            def write(proto):
                msg(f"About to write to {proto!r}")
                proto.transport.write(b"x")

            client.addCallbacks(write, lostConnectionDeferred.errback)

            def disconnected(proto):
                # Writes after disconnect must be silently discarded.
                msg(f"{proto!r} disconnected")
                proto.transport.write(b"some bytes to get lost")
                proto.transport.writeSequence([b"some", b"more"])
                finished.append(True)

            lostConnectionDeferred.addCallback(disconnected)
            serverConnectionLostDeferred.addCallback(disconnected)
            return gatherResults([lostConnectionDeferred, serverConnectionLostDeferred])

        def onListen():
            portDeferred.addCallback(listening)
            portDeferred.addErrback(err)
            portDeferred.addCallback(lambda ignored: reactor.stop())

        needsRunningReactor(reactor, onListen)

        self.runReactor(reactor)
        self.assertEqual(finished, [True, True])

    def test_protocolGarbageAfterLostConnection(self):
        """
        After the connection a protocol is being used for is closed, the
        reactor discards all of its references to the protocol.
        """
        lostConnectionDeferred = Deferred()
        clientProtocol = ClosingLaterProtocol(lostConnectionDeferred)
        clientRef = ref(clientProtocol)

        reactor = self.buildReactor()
        portDeferred = self.endpoints.server(reactor).listen(
            ServerFactory.forProtocol(Protocol)
        )

        def listening(port):
            msg(f"Listening on {port.getHost()!r}")
            endpoint = self.endpoints.client(reactor, port.getHost())

            client = endpoint.connect(ClientFactory.forProtocol(lambda: clientProtocol))

            def disconnect(proto):
                msg(f"About to disconnect {proto!r}")
                proto.transport.loseConnection()

            client.addCallback(disconnect)
            client.addErrback(lostConnectionDeferred.errback)
            return lostConnectionDeferred

        def onListening():
            portDeferred.addCallback(listening)
            portDeferred.addErrback(err)
            portDeferred.addBoth(lambda ignored: reactor.stop())

        needsRunningReactor(reactor, onListening)

        self.runReactor(reactor)

        # Drop the reference and get the garbage collector to tell us if there
        # are no references to the protocol instance left in the reactor.
        clientProtocol = None
        collect()
        self.assertIsNone(clientRef())
class LogObserverMixin:
    """
    Mixin for L{TestCase} subclasses which want to observe log events.
    """

    def observe(self):
        """
        Install a log observer for the duration of the test and return the
        list it appends captured events to.
        """
        captured = []
        observer = captured.append
        log.addObserver(observer)
        # Equal bound methods compare equal, so removal matches the observer
        # registered above.
        self.addCleanup(log.removeObserver, observer)
        return captured
class BrokenContextFactory:
    """
    A context factory whose C{getContext} method always fails, for exercising
    the error handling around such a failure.
    """

    # The message carried by the ValueError raised from getContext().
    message = "Some path was wrong maybe"

    def getContext(self):
        # Deliberately broken: callers are expected to handle this failure.
        raise ValueError(self.message)
class StreamClientTestsMixin:
    """
    This mixin defines tests applicable to SOCK_STREAM client implementations.

    This must be mixed in to a L{ReactorBuilder
    <twisted.internet.test.reactormixins.ReactorBuilder>} subclass, as it
    depends on several of its methods.

    Then the methods C{connect} and C{listen} must defined, defining a client
    and a server communicating with each other.
    """

    def test_interface(self):
        """
        The C{connect} method returns an object providing L{IConnector}.
        """
        reactor = self.buildReactor()
        connector = self.connect(reactor, ClientFactory())
        self.assertTrue(verifyObject(IConnector, connector))

    def test_clientConnectionFailedStopsReactor(self):
        """
        The reactor can be stopped by a client factory's
        C{clientConnectionFailed} method.
        """
        reactor = self.buildReactor()
        needsRunningReactor(reactor, lambda: self.connect(reactor, Stop(reactor)))
        self.runReactor(reactor)

    def test_connectEvent(self):
        """
        This test checks that we correctly get notifications event for a
        client.  This ought to prevent a regression under Windows using the
        GTK2 reactor.  See #3925.
        """
        reactor = self.buildReactor()

        self.listen(reactor, ServerFactory.forProtocol(Protocol))
        connected = []

        class CheckConnection(Protocol):
            def connectionMade(self):
                connected.append(self)
                reactor.stop()

        clientFactory = Stop(reactor)
        clientFactory.protocol = CheckConnection

        needsRunningReactor(reactor, lambda: self.connect(reactor, clientFactory))

        reactor.run()

        self.assertTrue(connected)

    def test_unregisterProducerAfterDisconnect(self):
        """
        If a producer is unregistered from a transport after the transport has
        been disconnected (by the peer) and after C{loseConnection} has been
        called, the transport is not re-added to the reactor as a writer as
        would be necessary if the transport were still connected.
        """
        reactor = self.buildReactor()
        # The server disconnects immediately, triggering the client-side path
        # under test.
        self.listen(reactor, ServerFactory.forProtocol(ClosingProtocol))

        finished = Deferred()
        finished.addErrback(log.err)
        finished.addCallback(lambda ign: reactor.stop())

        writing = []

        class ClientProtocol(Protocol):
            """
            Protocol to connect, register a producer, try to lose the
            connection, wait for the server to disconnect from us, and then
            unregister the producer.
            """

            def connectionMade(self):
                log.msg("ClientProtocol.connectionMade")
                self.transport.registerProducer(
                    _SimplePullProducer(self.transport), False
                )
                self.transport.loseConnection()

            def connectionLost(self, reason):
                log.msg("ClientProtocol.connectionLost")
                self.unregister()
                # Record whether unregistering re-added us as a writer.
                writing.append(self.transport in _getWriters(reactor))
                finished.callback(None)

            def unregister(self):
                log.msg("ClientProtocol unregister")
                self.transport.unregisterProducer()

        clientFactory = ClientFactory()
        clientFactory.protocol = ClientProtocol
        self.connect(reactor, clientFactory)
        self.runReactor(reactor)
        self.assertFalse(writing[0], "Transport was writing after unregisterProducer.")

    def test_disconnectWhileProducing(self):
        """
        If C{loseConnection} is called while a producer is registered with the
        transport, the connection is closed after the producer is unregistered.
        """
        reactor = self.buildReactor()

        # For some reason, pyobject/pygtk will not deliver the close
        # notification that should happen after the unregisterProducer call in
        # this test.  The selectable is in the write notification set, but no
        # notification ever arrives.  Probably for the same reason #5233 led
        # win32eventreactor to be broken.
        skippedReactors = ["Glib2Reactor", "Gtk2Reactor"]
        reactorClassName = reactor.__class__.__name__
        if reactorClassName in skippedReactors and platform.isWindows():
            raise SkipTest(
                "A pygobject/pygtk bug disables this functionality " "on Windows."
            )

        class Producer:
            def resumeProducing(self):
                log.msg("Producer.resumeProducing")

        self.listen(reactor, ServerFactory.forProtocol(Protocol))

        finished = Deferred()
        finished.addErrback(log.err)
        finished.addCallback(lambda ign: reactor.stop())

        class ClientProtocol(Protocol):
            """
            Protocol to connect, register a producer, try to lose the
            connection, unregister the producer, and wait for the connection to
            actually be lost.
            """

            def connectionMade(self):
                log.msg("ClientProtocol.connectionMade")
                self.transport.registerProducer(Producer(), False)
                self.transport.loseConnection()
                # Let the reactor tick over, in case synchronously calling
                # loseConnection and then unregisterProducer is the same as
                # synchronously calling unregisterProducer and then
                # loseConnection (as it is in several reactors).
                reactor.callLater(0, reactor.callLater, 0, self.unregister)

            def unregister(self):
                log.msg("ClientProtocol unregister")
                self.transport.unregisterProducer()
                # This should all be pretty quick.  Fail the test
                # if we don't get a connectionLost event really
                # soon.
                reactor.callLater(
                    1.0, finished.errback, Failure(Exception("Connection was not lost"))
                )

            def connectionLost(self, reason):
                log.msg("ClientProtocol.connectionLost")
                finished.callback(None)

        clientFactory = ClientFactory()
        clientFactory.protocol = ClientProtocol
        self.connect(reactor, clientFactory)
        self.runReactor(reactor)
        # If the test failed, we logged an error already and trial
        # will catch it.

View File

@@ -0,0 +1,49 @@
This is a concatenation of thing1.pem and thing2.pem.
-----BEGIN CERTIFICATE-----
MIID6DCCAtACAwtEVjANBgkqhkiG9w0BAQsFADCBtzELMAkGA1UEBhMCVFIxDzAN
BgNVBAgMBsOHb3J1bTEUMBIGA1UEBwwLQmHFn21ha8OnxLExEjAQBgNVBAMMCWxv
Y2FsaG9zdDEcMBoGA1UECgwTVHdpc3RlZCBNYXRyaXggTGFiczEkMCIGA1UECwwb
QXV0b21hdGVkIFRlc3RpbmcgQXV0aG9yaXR5MSkwJwYJKoZIhvcNAQkBFhpzZWN1
cml0eUB0d2lzdGVkbWF0cml4LmNvbTAgFw0yMjA4MjMyMzUxMTVaGA8yMTIyMDcz
MDIzNTExNVowgbcxCzAJBgNVBAYTAlRSMQ8wDQYDVQQIDAbDh29ydW0xFDASBgNV
BAcMC0JhxZ9tYWvDp8SxMRIwEAYDVQQDDAlsb2NhbGhvc3QxHDAaBgNVBAoME1R3
aXN0ZWQgTWF0cml4IExhYnMxJDAiBgNVBAsMG0F1dG9tYXRlZCBUZXN0aW5nIEF1
dGhvcml0eTEpMCcGCSqGSIb3DQEJARYac2VjdXJpdHlAdHdpc3RlZG1hdHJpeC5j
b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDxEgrHBfUQuzIyDaZM
RqMy7h8tFCaQ4+0EshqENOjf4AMpxFeyxdHhR0IPBvMDZ7FkWg/mh8NImD2BgfhC
Z8fuWIfUCmF/sA2BInfwMwJAKy28g6wpg+ZJGpyadKq0+OrN/fmT3wsaEP/wcOuD
Pqk6wKt6Ry7eF3p7obgHHVVyku7gGQ/8bxshWNtoFT8oyCsO54VluEnL2XTBKQsS
EQQKCk0RdIAo2kCuA4AE+SxFlCBp9XiIux6a/z1rHTewuuCeM3DgxTgHDstUHMFO
YF29JlESeBvBpjiKCKrJk8K+Szhwza4eSfoB+dTenZX9Z/u370tUvppkC4Gf62lL
fqndAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAERd0Gt9An/6gKqOaEvTvHIubSIi
BCN4/udreXSH19z32DksCPW9sTG93O7PX51T2GN0FKgF2AcNSl5aNpKxQexn3uBJ
F4nxM4AGv0ltkHzeJdltyCVQyzcYOxAHAGTTNWqaWsJezXngirpvFRE15OaJcMRA
M5ygRh52YKYS+DvhaRwPs5xsTSLaJtyGYmXoXu8zTcvqVyWWdqj4PEHkV/g7OFoS
Mc+0s22i7FMvMRJozHA8hHJv4Dg6it6ifvQiZh6ihEO+kTSb1cpDfyu3Uhw50dAW
23/mit+5faDT5g6lC5AG3yU/DOWFwJqXi73YhcggqTtWBufQfavq/2QmdXw=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIID6DCCAtACAwtEVjANBgkqhkiG9w0BAQsFADCBtzELMAkGA1UEBhMCVFIxDzAN
BgNVBAgMBsOHb3J1bTEUMBIGA1UEBwwLQmHFn21ha8OnxLExEjAQBgNVBAMMCWxv
Y2FsaG9zdDEcMBoGA1UECgwTVHdpc3RlZCBNYXRyaXggTGFiczEkMCIGA1UECwwb
QXV0b21hdGVkIFRlc3RpbmcgQXV0aG9yaXR5MSkwJwYJKoZIhvcNAQkBFhpzZWN1
cml0eUB0d2lzdGVkbWF0cml4LmNvbTAgFw0yMjA4MjMyMzUxMzdaGA8yMTIyMDcz
MDIzNTEzN1owgbcxCzAJBgNVBAYTAlRSMQ8wDQYDVQQIDAbDh29ydW0xFDASBgNV
BAcMC0JhxZ9tYWvDp8SxMRIwEAYDVQQDDAlsb2NhbGhvc3QxHDAaBgNVBAoME1R3
aXN0ZWQgTWF0cml4IExhYnMxJDAiBgNVBAsMG0F1dG9tYXRlZCBUZXN0aW5nIEF1
dGhvcml0eTEpMCcGCSqGSIb3DQEJARYac2VjdXJpdHlAdHdpc3RlZG1hdHJpeC5j
b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCB/ICkiDmAALdHzezV
fxg6/azHX9eymmBS2PEizw0QOkt+VriWu5Ik0KHDBP9NWuLiQF3G1Zch6YVkOfPU
vbvfCv4GbFdssnoVPX/0VOT/YEsEHtyCjgGo22FAE/vd4EcnnJx017B/5mzxLaf6
u9XwOZaIojw6I20iaKDjpfyvVBJA5gXH1YGrBYRLIqGaS4lbZcTy6ZoqanYMOmJn
kd3IBOJOaFwfazROVvaVkQQlP9SgAKwaEcEF8Hk/E4N5nVWS8QnN/PWhzouJSAtQ
WzXgLTZrUdDItBxlG+IeVSWdYUoBPFruZ33tdZta7zJ2FrrkxX27azd+nCIC6NLQ
h1DfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAGmx33QaOIw+dTzdBmPGZy5kZv6Y
a1z/rfyBtnR37iAZ+5rhJZkcQSLyUY+kiuEPxTouh3Amx8EOVccerfZvzCFAb21L
TOeSoeJ1TSn2ppIBqzjjdJ/JHcQTMEB/5D8S069Rp/D6wbgCbmJ2CJcU+OYY+W7z
fT6fEy7MmNXC3RwoD5Vc0RaS4fDSCMOG/bL/rylQoimmvm4rQAmZJK2PuKvqtkzc
Fs9dq4VJ3Eba1/qA2000CtzHHBYMalL9+EQxwjy0QmVL08usUSMiRPfySCUJBEYA
xZmEEvyHkD7dFVngJvjW64pFg7d/mV0e8dQv1QURoCDb7XOuCWY1vltp7iE=
-----END CERTIFICATE-----

View File

@@ -0,0 +1 @@
This file is not a certificate; it is present to make sure that it will be skipped.

View File

@@ -0,0 +1,27 @@
This certificate, and thing2.pem, are generated by re-running
twisted/test/thing1.pem and copying the certificate out (without its private
key) a few times.
-----BEGIN CERTIFICATE-----
MIID6DCCAtACAwtEVjANBgkqhkiG9w0BAQsFADCBtzELMAkGA1UEBhMCVFIxDzAN
BgNVBAgMBsOHb3J1bTEUMBIGA1UEBwwLQmHFn21ha8OnxLExEjAQBgNVBAMMCWxv
Y2FsaG9zdDEcMBoGA1UECgwTVHdpc3RlZCBNYXRyaXggTGFiczEkMCIGA1UECwwb
QXV0b21hdGVkIFRlc3RpbmcgQXV0aG9yaXR5MSkwJwYJKoZIhvcNAQkBFhpzZWN1
cml0eUB0d2lzdGVkbWF0cml4LmNvbTAgFw0yMjA4MjMyMzUxMTVaGA8yMTIyMDcz
MDIzNTExNVowgbcxCzAJBgNVBAYTAlRSMQ8wDQYDVQQIDAbDh29ydW0xFDASBgNV
BAcMC0JhxZ9tYWvDp8SxMRIwEAYDVQQDDAlsb2NhbGhvc3QxHDAaBgNVBAoME1R3
aXN0ZWQgTWF0cml4IExhYnMxJDAiBgNVBAsMG0F1dG9tYXRlZCBUZXN0aW5nIEF1
dGhvcml0eTEpMCcGCSqGSIb3DQEJARYac2VjdXJpdHlAdHdpc3RlZG1hdHJpeC5j
b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDxEgrHBfUQuzIyDaZM
RqMy7h8tFCaQ4+0EshqENOjf4AMpxFeyxdHhR0IPBvMDZ7FkWg/mh8NImD2BgfhC
Z8fuWIfUCmF/sA2BInfwMwJAKy28g6wpg+ZJGpyadKq0+OrN/fmT3wsaEP/wcOuD
Pqk6wKt6Ry7eF3p7obgHHVVyku7gGQ/8bxshWNtoFT8oyCsO54VluEnL2XTBKQsS
EQQKCk0RdIAo2kCuA4AE+SxFlCBp9XiIux6a/z1rHTewuuCeM3DgxTgHDstUHMFO
YF29JlESeBvBpjiKCKrJk8K+Szhwza4eSfoB+dTenZX9Z/u370tUvppkC4Gf62lL
fqndAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAERd0Gt9An/6gKqOaEvTvHIubSIi
BCN4/udreXSH19z32DksCPW9sTG93O7PX51T2GN0FKgF2AcNSl5aNpKxQexn3uBJ
F4nxM4AGv0ltkHzeJdltyCVQyzcYOxAHAGTTNWqaWsJezXngirpvFRE15OaJcMRA
M5ygRh52YKYS+DvhaRwPs5xsTSLaJtyGYmXoXu8zTcvqVyWWdqj4PEHkV/g7OFoS
Mc+0s22i7FMvMRJozHA8hHJv4Dg6it6ifvQiZh6ihEO+kTSb1cpDfyu3Uhw50dAW
23/mit+5faDT5g6lC5AG3yU/DOWFwJqXi73YhcggqTtWBufQfavq/2QmdXw=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,23 @@
-----BEGIN CERTIFICATE-----
MIID6DCCAtACAwtEVjANBgkqhkiG9w0BAQsFADCBtzELMAkGA1UEBhMCVFIxDzAN
BgNVBAgMBsOHb3J1bTEUMBIGA1UEBwwLQmHFn21ha8OnxLExEjAQBgNVBAMMCWxv
Y2FsaG9zdDEcMBoGA1UECgwTVHdpc3RlZCBNYXRyaXggTGFiczEkMCIGA1UECwwb
QXV0b21hdGVkIFRlc3RpbmcgQXV0aG9yaXR5MSkwJwYJKoZIhvcNAQkBFhpzZWN1
cml0eUB0d2lzdGVkbWF0cml4LmNvbTAgFw0yMjA4MjMyMzUxMzdaGA8yMTIyMDcz
MDIzNTEzN1owgbcxCzAJBgNVBAYTAlRSMQ8wDQYDVQQIDAbDh29ydW0xFDASBgNV
BAcMC0JhxZ9tYWvDp8SxMRIwEAYDVQQDDAlsb2NhbGhvc3QxHDAaBgNVBAoME1R3
aXN0ZWQgTWF0cml4IExhYnMxJDAiBgNVBAsMG0F1dG9tYXRlZCBUZXN0aW5nIEF1
dGhvcml0eTEpMCcGCSqGSIb3DQEJARYac2VjdXJpdHlAdHdpc3RlZG1hdHJpeC5j
b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCB/ICkiDmAALdHzezV
fxg6/azHX9eymmBS2PEizw0QOkt+VriWu5Ik0KHDBP9NWuLiQF3G1Zch6YVkOfPU
vbvfCv4GbFdssnoVPX/0VOT/YEsEHtyCjgGo22FAE/vd4EcnnJx017B/5mzxLaf6
u9XwOZaIojw6I20iaKDjpfyvVBJA5gXH1YGrBYRLIqGaS4lbZcTy6ZoqanYMOmJn
kd3IBOJOaFwfazROVvaVkQQlP9SgAKwaEcEF8Hk/E4N5nVWS8QnN/PWhzouJSAtQ
WzXgLTZrUdDItBxlG+IeVSWdYUoBPFruZ33tdZta7zJ2FrrkxX27azd+nCIC6NLQ
h1DfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAGmx33QaOIw+dTzdBmPGZy5kZv6Y
a1z/rfyBtnR37iAZ+5rhJZkcQSLyUY+kiuEPxTouh3Amx8EOVccerfZvzCFAb21L
TOeSoeJ1TSn2ppIBqzjjdJ/JHcQTMEB/5D8S069Rp/D6wbgCbmJ2CJcU+OYY+W7z
fT6fEy7MmNXC3RwoD5Vc0RaS4fDSCMOG/bL/rylQoimmvm4rQAmZJK2PuKvqtkzc
Fs9dq4VJ3Eba1/qA2000CtzHHBYMalL9+EQxwjy0QmVL08usUSMiRPfySCUJBEYA
xZmEEvyHkD7dFVngJvjW64pFg7d/mV0e8dQv1QURoCDb7XOuCWY1vltp7iE=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,23 @@
-----BEGIN CERTIFICATE-----
MIID6DCCAtACAwtEVjANBgkqhkiG9w0BAQsFADCBtzELMAkGA1UEBhMCVFIxDzAN
BgNVBAgMBsOHb3J1bTEUMBIGA1UEBwwLQmHFn21ha8OnxLExEjAQBgNVBAMMCWxv
Y2FsaG9zdDEcMBoGA1UECgwTVHdpc3RlZCBNYXRyaXggTGFiczEkMCIGA1UECwwb
QXV0b21hdGVkIFRlc3RpbmcgQXV0aG9yaXR5MSkwJwYJKoZIhvcNAQkBFhpzZWN1
cml0eUB0d2lzdGVkbWF0cml4LmNvbTAgFw0yMjA4MjMyMzUxMzdaGA8yMTIyMDcz
MDIzNTEzN1owgbcxCzAJBgNVBAYTAlRSMQ8wDQYDVQQIDAbDh29ydW0xFDASBgNV
BAcMC0JhxZ9tYWvDp8SxMRIwEAYDVQQDDAlsb2NhbGhvc3QxHDAaBgNVBAoME1R3
aXN0ZWQgTWF0cml4IExhYnMxJDAiBgNVBAsMG0F1dG9tYXRlZCBUZXN0aW5nIEF1
dGhvcml0eTEpMCcGCSqGSIb3DQEJARYac2VjdXJpdHlAdHdpc3RlZG1hdHJpeC5j
b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCB/ICkiDmAALdHzezV
fxg6/azHX9eymmBS2PEizw0QOkt+VriWu5Ik0KHDBP9NWuLiQF3G1Zch6YVkOfPU
vbvfCv4GbFdssnoVPX/0VOT/YEsEHtyCjgGo22FAE/vd4EcnnJx017B/5mzxLaf6
u9XwOZaIojw6I20iaKDjpfyvVBJA5gXH1YGrBYRLIqGaS4lbZcTy6ZoqanYMOmJn
kd3IBOJOaFwfazROVvaVkQQlP9SgAKwaEcEF8Hk/E4N5nVWS8QnN/PWhzouJSAtQ
WzXgLTZrUdDItBxlG+IeVSWdYUoBPFruZ33tdZta7zJ2FrrkxX27azd+nCIC6NLQ
h1DfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAGmx33QaOIw+dTzdBmPGZy5kZv6Y
a1z/rfyBtnR37iAZ+5rhJZkcQSLyUY+kiuEPxTouh3Amx8EOVccerfZvzCFAb21L
TOeSoeJ1TSn2ppIBqzjjdJ/JHcQTMEB/5D8S069Rp/D6wbgCbmJ2CJcU+OYY+W7z
fT6fEy7MmNXC3RwoD5Vc0RaS4fDSCMOG/bL/rylQoimmvm4rQAmZJK2PuKvqtkzc
Fs9dq4VJ3Eba1/qA2000CtzHHBYMalL9+EQxwjy0QmVL08usUSMiRPfySCUJBEYA
xZmEEvyHkD7dFVngJvjW64pFg7d/mV0e8dQv1QURoCDb7XOuCWY1vltp7iE=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,63 @@
# -*- test-case-name: twisted.internet.test.test_endpoints -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Fake client and server endpoint string parser plugins for testing purposes.
"""
from zope.interface.declarations import implementer
from twisted.internet.interfaces import (
IStreamClientEndpoint,
IStreamClientEndpointStringParserWithReactor,
IStreamServerEndpoint,
IStreamServerEndpointStringParser,
)
from twisted.plugin import IPlugin
@implementer(IPlugin)
class PluginBase:
    """
    Base class for the fake endpoint-string parser plugins in this module;
    stores the endpoint description prefix the parser is registered under.
    """

    def __init__(self, pfx):
        # pfx: the endpoint-string prefix (e.g. "fake") this parser claims.
        self.prefix = pfx
@implementer(IStreamClientEndpointStringParserWithReactor)
class FakeClientParserWithReactor(PluginBase):
    """
    A fake client endpoint string parser which records every argument it is
    invoked with (including the reactor) by wrapping them in a StreamClient.
    """

    def parseStreamClient(self, *a, **kw):
        # Return the captured arguments so tests can inspect what the
        # endpoint machinery passed in.
        return StreamClient(self, a, kw)
@implementer(IStreamServerEndpointStringParser)
class FakeParser(PluginBase):
    """
    A fake server endpoint string parser which records every argument it is
    invoked with by wrapping them in a StreamServer.
    """

    def parseStreamServer(self, *a, **kw):
        # Return the captured arguments so tests can inspect what the
        # endpoint machinery passed in.
        return StreamServer(self, a, kw)
class EndpointBase:
    """
    Common state for the fake endpoints: remembers which parser produced the
    endpoint and the exact positional and keyword arguments it was built
    from, so tests can assert on them later.
    """

    def __init__(self, parser, args, kwargs):
        # Record everything verbatim; nothing is interpreted here.
        self.parser, self.args, self.kwargs = parser, args, kwargs
@implementer(IStreamClientEndpoint)
class StreamClient(EndpointBase):
    """
    A fake client endpoint; connect() intentionally does nothing.
    """

    def connect(self, protocolFactory=None):
        # IStreamClientEndpoint.connect
        pass
@implementer(IStreamServerEndpoint)
class StreamServer(EndpointBase):
    """
    A fake server endpoint; listen() intentionally does nothing.
    """

    def listen(self, protocolFactory=None):
        # IStreamServerEndpoint.listen (the original comment said
        # IStreamClientEndpoint, an apparent copy/paste slip).
        pass
# Instantiate plugin interface providers to register them.  The constructor
# argument is the endpoint-string prefix each parser responds to.
fake = FakeParser("fake")
fakeClientWithReactor = FakeClientParserWithReactor("crfake")
fakeClientWithReactorAndPreference = FakeClientParserWithReactor("cpfake")

View File

@@ -0,0 +1,61 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Testing helpers related to the module system.
"""
__all__ = ["NoReactor", "AlternateReactor"]
import sys
import twisted.internet
from twisted.test.test_twisted import SetAsideModule
class NoReactor(SetAsideModule):
    """
    Context manager that uninstalls the reactor, if any, and then restores it
    afterwards.
    """

    def __init__(self):
        # Set aside exactly the reactor module entry in sys.modules.
        SetAsideModule.__init__(self, "twisted.internet.reactor")

    def __enter__(self):
        SetAsideModule.__enter__(self)
        # SetAsideModule removed the sys.modules entry; if a reactor had been
        # installed, also drop the attribute from the twisted.internet
        # package so nothing can reach the old reactor.
        if "twisted.internet.reactor" in self.modules:
            del twisted.internet.reactor

    def __exit__(self, excType, excValue, traceback):
        SetAsideModule.__exit__(self, excType, excValue, traceback)
        # Clean up 'reactor' attribute that may have been set on
        # twisted.internet:
        reactor = self.modules.get("twisted.internet.reactor", None)
        if reactor is not None:
            twisted.internet.reactor = reactor
        else:
            # No reactor was installed beforehand; make sure none is left
            # behind (one may have been installed inside the context).
            try:
                del twisted.internet.reactor
            except AttributeError:
                pass
class AlternateReactor(NoReactor):
    """
    A context manager which temporarily installs a different object as the
    global reactor.
    """

    def __init__(self, reactor):
        """
        @param reactor: Any object to install as the global reactor.
        """
        NoReactor.__init__(self)
        self.alternate = reactor

    def __enter__(self):
        NoReactor.__enter__(self)
        # Publish the alternate both as the package attribute and the
        # sys.modules entry, mirroring normal reactor installation.
        twisted.internet.reactor = self.alternate
        sys.modules["twisted.internet.reactor"] = self.alternate

View File

@@ -0,0 +1,22 @@
import os
import sys
try:
    # On Windows, stdout is not opened in binary mode by default,
    # so newline characters are munged on writing, interfering with
    # the tests.
    import msvcrt

    msvcrt.setmode(  # type:ignore[attr-defined]
        sys.stdout.fileno(), os.O_BINARY
    )
except ImportError:
    # Not on Windows: msvcrt does not exist and no mode change is needed.
    pass

# Loop over each of the arguments given and print it to stdout
for arg in sys.argv[1:]:
    # NUL-terminate each argument so the reading side can split the stream
    # unambiguously, even if arguments contain newlines.
    res = arg + chr(0)
    sys.stdout.buffer.write(res.encode(sys.getfilesystemencoding(), "surrogateescape"))
sys.stdout.flush()

View File

@@ -0,0 +1,8 @@
import os
import sys
# Read file descriptor numbers from stdin, one per line, and close each one.
# An empty line (or EOF) terminates the helper.
while True:
    request = sys.stdin.readline().strip()
    if not request:
        break
    os.close(int(request))

View File

@@ -0,0 +1,25 @@
import sys

# Override theSystemPath so it throws KeyError on gi.pygtkcompat:
from twisted.python import modules
from twisted.python.reflect import requireModule

modules.theSystemPath = modules.PythonPath([], moduleDict={})

# Now, when we import gireactor it shouldn't use pygtkcompat, and should
# instead prevent gobject from being importable:
gireactor = requireModule("twisted.internet.gireactor")

for name in gireactor._PYGTK_MODULES:
    # gireactor blocks the legacy pygtk modules by mapping each name to None
    # in sys.modules; any other value means the blocking did not happen.
    if sys.modules[name] is not None:
        # Fix: report the module actually being checked.  The original
        # interpolated sys.modules["gobject"] here, so the diagnostic showed
        # the wrong module's value for every other name in the loop.
        sys.stdout.write(
            "failure, sys.modules[%r] is %r, instead of None"
            % (name, sys.modules[name])
        )
        sys.exit(0)

try:
    import gobject
except ImportError:
    # The import was successfully blocked.
    sys.stdout.write("success")
else:
    sys.stdout.write(f"failure: {gobject.__path__} was imported")

View File

@@ -0,0 +1,46 @@
# A program which exits after starting a child which inherits its
# stdin/stdout/stderr and keeps them open until stdin is closed.
import os
import sys
def grandchild() -> None:
    # Announce that we are alive, then block on stdin, holding the inherited
    # stdio handles open until the test closes our stdin.
    sys.stdout.write("grandchild started")
    sys.stdout.flush()
    sys.stdin.read()
def main() -> None:
    """
    Dispatch on sys.argv: when run as "child", spawn a grandchild (via
    CreateProcess on Windows, fork() elsewhere) which inherits our standard
    streams, then exit; otherwise act as the grandchild itself.
    """
    if sys.argv[1] == "child":
        if sys.argv[2] == "windows":
            import win32api as api
            import win32process as proc

            # Hand our own standard handles to the grandchild explicitly so
            # it inherits them.
            info = proc.STARTUPINFO()
            info.hStdInput = api.GetStdHandle(api.STD_INPUT_HANDLE)
            info.hStdOutput = api.GetStdHandle(api.STD_OUTPUT_HANDLE)
            info.hStdError = api.GetStdHandle(api.STD_ERROR_HANDLE)
            python = sys.executable
            scriptDir = os.path.dirname(__file__)
            scriptName = os.path.basename(__file__)
            proc.CreateProcess(
                None,
                " ".join((python, scriptName, "grandchild")),
                None,
                None,
                1,
                0,
                os.environ,
                scriptDir,
                info,
            )
        else:
            # POSIX: the forked child becomes the grandchild; the parent
            # falls off the end of main() and exits, as intended.
            if os.fork() == 0:
                grandchild()
    else:
        grandchild()
# Script entry point.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,418 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utilities for unit testing reactor implementations.
The main feature of this module is L{ReactorBuilder}, a base class for use when
writing interface/blackbox tests for reactor implementations. Test case classes
for reactor features should subclass L{ReactorBuilder} instead of
L{SynchronousTestCase}. All of the features of L{SynchronousTestCase} will be
available. Additionally, the tests will automatically be applied to all
available reactor implementations.
"""
__all__ = ["TestTimeoutError", "ReactorBuilder", "needsRunningReactor"]
import os
import signal
import time
from typing import TYPE_CHECKING, Callable, Dict, Optional, Sequence, Type, Union, cast
from zope.interface import Interface
from twisted.python import log
from twisted.python.deprecate import _fullyQualifiedName as fullyQualifiedName
from twisted.python.failure import Failure
from twisted.python.reflect import namedAny
from twisted.python.runtime import platform
from twisted.trial.unittest import SkipTest, SynchronousTestCase
from twisted.trial.util import DEFAULT_TIMEOUT_DURATION, acquireAttribute
if TYPE_CHECKING:
# Only bring in this name to support the type annotation below. We don't
# really want to import a reactor module this early at runtime.
from twisted.internet import asyncioreactor
# Access private APIs.
try:
    from twisted.internet import process as _process
except ImportError:
    # The process module could not be imported (presumably on platforms
    # without POSIX process support -- TODO confirm); tearDown checks for
    # None before reaping.
    process = None
else:
    process = _process
class TestTimeoutError(Exception):
    """
    The reactor was still running after the timeout period elapsed in
    L{ReactorBuilder.runReactor}.
    """
def needsRunningReactor(reactor, thunk):
    """
    Schedule C{thunk} to run once C{reactor} has started.

    Many tests in this package must stop the reactor when they are done, and
    C{reactor.stop()} raises an exception unless the reactor is actually
    running.  If the L{Deferred} returned by an API under test fires
    synchronously (as an endpoint's C{connect()} to a local interface
    address may), the test could otherwise try to stop a reactor that has
    not started yet.  Routing completion through
    L{twisted.internet.interfaces.IReactorCore.callWhenRunning} avoids that
    race, and this wrapper exists so the explanation lives in one place
    rather than being repeated as a comment at every call site.

    @param reactor: the L{twisted.internet.interfaces.IReactorCore} under test

    @param thunk: a 0-argument callable, which eventually finishes the test in
        question, probably in a L{Deferred} callback.
    """
    reactor.callWhenRunning(thunk)
def stopOnError(case, reactor, publisher=None):
    """
    Stop the reactor as soon as any error is logged on the given publisher.

    This is beneficial for tests which will wait for a L{Deferred} to fire
    before completing (by passing or failing).  Certain implementation bugs
    may prevent the L{Deferred} from firing with any result at all (consider
    a protocol's C{dataReceived} method that raises an exception: this
    exception is logged but it won't ever cause a L{Deferred} to fire).  In
    that case the test would have to complete by timing out which is a much
    less desirable outcome than completing as soon as the unexpected error
    is encountered.

    @param case: A L{SynchronousTestCase} to use to clean up the necessary
        log observer when the test is over.

    @param reactor: The reactor to stop.

    @param publisher: A L{LogPublisher} to watch for errors.  If L{None},
        the global log publisher will be watched.
    """
    if publisher is None:
        from twisted.python import log as publisher
    # One-element sentinel list: popping it ensures reactor.stop() is only
    # called for the first logged error, not for every subsequent one.
    running = [None]

    def stopIfError(event):
        if running and event.get("isError"):
            running.pop()
            reactor.stop()

    publisher.addObserver(stopIfError)
    case.addCleanup(publisher.removeObserver, stopIfError)
class ReactorBuilder:
    """
    L{SynchronousTestCase} mixin which provides a reactor-creation API. This
    mixin defines C{setUp} and C{tearDown}, so mix it in before
    L{SynchronousTestCase} or call its methods from the overridden ones in the
    subclass.

    @cvar skippedReactors: A dict mapping FQPN strings of reactors for
        which the tests defined by this class will be skipped to strings
        giving the skip message.

    @cvar requiredInterfaces: A C{list} of interfaces which the reactor must
        provide or these tests will be skipped.  The default, L{None}, means
        that no interfaces are required.

    @ivar reactorFactory: A no-argument callable which returns the reactor to
        use for testing.

    @ivar originalHandler: The SIGCHLD handler which was installed when setUp
        ran and which will be re-installed when tearDown runs.

    @ivar _reactors: A list of FQPN strings giving the reactors for which
        L{SynchronousTestCase}s will be created.
    """

    _reactors = [
        # Select works everywhere
        "twisted.internet.selectreactor.SelectReactor",
    ]

    if platform.isWindows():
        # PortableGtkReactor is only really interesting on Windows,
        # but not really Windows specific; if you want you can
        # temporarily move this up to the all-platforms list to test
        # it on other platforms.  It's not there in general because
        # it's not _really_ worth it to support on other platforms,
        # since no one really wants to use it on other platforms.
        _reactors.extend(
            [
                "twisted.internet.gireactor.PortableGIReactor",
                "twisted.internet.win32eventreactor.Win32Reactor",
                "twisted.internet.iocpreactor.reactor.IOCPReactor",
            ]
        )
    else:
        _reactors.extend(
            [
                "twisted.internet.gireactor.GIReactor",
            ]
        )

    # These two are available on every platform.
    _reactors.append("twisted.internet.test.reactormixins.AsyncioSelectorReactor")
    _reactors.append("twisted.internet._threadedselect.ThreadedSelectReactor")

    if platform.isMacOSX():
        _reactors.append("twisted.internet.cfreactor.CFReactor")
    else:
        _reactors.extend(
            [
                "twisted.internet.pollreactor.PollReactor",
                "twisted.internet.epollreactor.EPollReactor",
            ]
        )

        if not platform.isLinux():
            # Presumably Linux is not going to start supporting kqueue, so
            # skip even trying this configuration.
            _reactors.extend(
                [
                    # Support KQueue on non-OS-X POSIX platforms for now.
                    "twisted.internet.kqreactor.KQueueReactor",
                ]
            )

    reactorFactory: Optional[Callable[[], object]] = None
    originalHandler = None
    requiredInterfaces: Optional[Sequence[Type[Interface]]] = None
    skippedReactors: Dict[str, str] = {}

    def setUp(self):
        """
        Clear the SIGCHLD handler, if there is one, to ensure an environment
        like the one which exists prior to a call to L{reactor.run}.
        """
        if not platform.isWindows():
            self.originalHandler = signal.signal(signal.SIGCHLD, signal.SIG_DFL)

    def tearDown(self):
        """
        Restore the original SIGCHLD handler and reap processes as long as
        there seem to be any remaining.
        """
        if self.originalHandler is not None:
            signal.signal(signal.SIGCHLD, self.originalHandler)
        if process is not None:
            begin = time.time()
            while process.reapProcessHandlers:
                log.msg(
                    "ReactorBuilder.tearDown reaping some processes %r"
                    % (process.reapProcessHandlers,)
                )
                process.reapAllProcesses()

                # The process should exit on its own.  However, if it
                # doesn't, we're stuck in this loop forever.  To avoid
                # hanging the test suite, eventually give the process some
                # help exiting and move on.
                time.sleep(0.001)
                if time.time() - begin > 60:
                    for pid in process.reapProcessHandlers:
                        os.kill(pid, signal.SIGKILL)
                    raise Exception(
                        "Timeout waiting for child processes to exit: %r"
                        % (process.reapProcessHandlers,)
                    )

    def _unbuildReactor(self, reactor):
        """
        Clean up any resources which may have been allocated for the given
        reactor by its creation or by a test which used it.
        """
        # Chris says:
        #
        # XXX These explicit calls to clean up the waker (and any other
        # internal readers) should become obsolete when bug #3063 is
        # fixed. -radix, 2008-02-29. Fortunately it should probably cause an
        # error when bug #3063 is fixed, so it should be removed in the same
        # branch that fixes it.
        #
        # -exarkun
        reactor._uninstallHandler()
        if getattr(reactor, "_internalReaders", None) is not None:
            for reader in reactor._internalReaders:
                reactor.removeReader(reader)
                reader.connectionLost(None)
            reactor._internalReaders.clear()

        # Here's an extra thing unrelated to wakers but necessary for
        # cleaning up after the reactors we make. -exarkun
        reactor.disconnectAll()

        # It would also be bad if any timed calls left over were allowed to
        # run.
        calls = reactor.getDelayedCalls()
        for c in calls:
            c.cancel()

        # Restore the original reactor state: buildReactor() stashed the
        # global reactor's __dict__/__class__ on the test reactor before
        # swapping them out, and this puts them back.
        from twisted.internet import reactor as globalReactor

        globalReactor.__dict__ = reactor._originalReactorDict
        globalReactor.__class__ = reactor._originalReactorClass

    def buildReactor(self):
        """
        Create and return a reactor using C{self.reactorFactory}.

        Also temporarily makes C{twisted.internet.reactor} behave as the new
        reactor by swapping in its C{__dict__} and C{__class__}; this is
        undone by L{_unbuildReactor}, which is registered as a cleanup.
        """
        try:
            from twisted.internet import reactor as globalReactor
            from twisted.internet.cfreactor import CFReactor
        except ImportError:
            # NOTE(review): if this combined import fails (e.g. cfreactor is
            # unavailable), globalReactor is left unbound and the attribute
            # accesses below raise NameError, which the broad except turns
            # into a SkipTest for every test -- confirm this is intended.
            pass
        else:
            if (
                isinstance(globalReactor, CFReactor)
                and self.reactorFactory is CFReactor
            ):
                raise SkipTest(
                    "CFReactor uses APIs which manipulate global state, "
                    "so it's not safe to run its own reactor-builder tests "
                    "under itself"
                )
        try:
            assert self.reactorFactory is not None
            reactor = self.reactorFactory()
            # Stash the global reactor's identity so _unbuildReactor can
            # restore it later.
            reactor._originalReactorDict = globalReactor.__dict__
            reactor._originalReactorClass = globalReactor.__class__
            # Make twisted.internet.reactor point to the new reactor,
            # temporarily; this is undone in unbuildReactor().
            globalReactor.__dict__ = reactor.__dict__
            globalReactor.__class__ = reactor.__class__
        except BaseException:
            # Unfortunately, not all errors which result in a reactor
            # being unusable are detectable without actually
            # instantiating the reactor.  So we catch some more here
            # and skip the test if necessary.  We also log it to aid
            # with debugging, but flush the logged error so the test
            # doesn't fail.
            log.err(None, "Failed to install reactor")
            self.flushLoggedErrors()
            raise SkipTest(Failure().getErrorMessage())
        else:
            if self.requiredInterfaces is not None:
                missing = [
                    required
                    for required in self.requiredInterfaces
                    if not required.providedBy(reactor)
                ]
                if missing:
                    self._unbuildReactor(reactor)
                    raise SkipTest(
                        "%s does not provide %s"
                        % (
                            fullyQualifiedName(reactor.__class__),
                            ",".join([fullyQualifiedName(x) for x in missing]),
                        )
                    )
        self.addCleanup(self._unbuildReactor, reactor)
        return reactor

    def getTimeout(self):
        """
        Determine how long to run the test before considering it failed.

        @return: A C{int} or C{float} giving a number of seconds.
        """
        return acquireAttribute(self._parents, "timeout", DEFAULT_TIMEOUT_DURATION)

    def runReactor(self, reactor, timeout=None):
        """
        Run the reactor for at most the given amount of time.

        @param reactor: The reactor to run.

        @type timeout: C{int} or C{float}
        @param timeout: The maximum amount of time, specified in seconds, to
            allow the reactor to run.  If the reactor is still running after
            this much time has elapsed, it will be stopped and an exception
            raised.  If L{None}, the default test method timeout imposed by
            Trial will be used.  This depends on the L{IReactorTime}
            implementation of C{reactor} for correct operation.

        @raise TestTimeoutError: If the reactor is still running after
            C{timeout} seconds.
        """
        if timeout is None:
            timeout = self.getTimeout()

        timedOut = []

        def stop():
            # Record that the deadline fired before stopping, so we can
            # distinguish a timeout from a normal stop afterwards.
            timedOut.append(None)
            reactor.stop()

        timedOutCall = reactor.callLater(timeout, stop)
        reactor.run()
        if timedOut:
            raise TestTimeoutError(f"reactor still running after {timeout} seconds")
        else:
            timedOutCall.cancel()

    @classmethod
    def makeTestCaseClasses(
        cls: Type["ReactorBuilder"],
    ) -> Dict[str, Union[Type["ReactorBuilder"], Type[SynchronousTestCase]]]:
        """
        Create a L{SynchronousTestCase} subclass which mixes in C{cls} for
        each known reactor and return a dict mapping their names to them.
        """
        classes: Dict[
            str, Union[Type["ReactorBuilder"], Type[SynchronousTestCase]]
        ] = {}
        for reactor in cls._reactors:
            shortReactorName = reactor.split(".")[-1]
            name = (cls.__name__ + "." + shortReactorName + "Tests").replace(".", "_")

            class testcase(cls, SynchronousTestCase):  # type: ignore[valid-type,misc]
                __module__ = cls.__module__
                # A skip attribute set in the class body makes trial skip
                # every test in the generated class.
                if reactor in cls.skippedReactors:
                    skip = cls.skippedReactors[reactor]
                try:
                    reactorFactory = namedAny(reactor)
                except BaseException:
                    skip = Failure().getErrorMessage()

            testcase.__name__ = name
            # NOTE(review): split() with no argument splits on whitespace,
            # not "."; for a dotted qualname this drops nothing -- confirm
            # whether split(".") was intended here.
            testcase.__qualname__ = ".".join(cls.__qualname__.split()[0:-1] + [name])
            classes[testcase.__name__] = testcase
        return classes
def asyncioSelectorReactor(self: object) -> "asyncioreactor.AsyncioSelectorReactor":
    """
    Make a new asyncio reactor associated with a new event loop.

    The test suite prefers this constructor because having a new event loop
    for each reactor provides better test isolation.  The real constructor
    prefers to re-use (or create) a global loop because of how this
    interacts with other asyncio-based libraries and applications (though
    maybe it shouldn't).

    @param self: The L{ReactorBuilder} subclass this is being called on.  We
        don't use this parameter but we get called with it anyway.
    """
    from asyncio import get_event_loop, new_event_loop, set_event_loop

    from twisted.internet import asyncioreactor

    # Even though we don't use `self`, it is a test case and we register the
    # loop-restoring cleanup on it.
    asTestCase = cast(SynchronousTestCase, self)
    originalLoop = get_event_loop()
    loop = new_event_loop()
    set_event_loop(loop)

    @asTestCase.addCleanup
    def cleanUp():
        # Close the per-test loop and restore the one we displaced.
        loop.close()
        set_event_loop(originalLoop)

    return asyncioreactor.AsyncioSelectorReactor(loop)
# Give it an alias that makes the names of the generated test classes fit the
# pattern.  ReactorBuilder._reactors refers to this name by FQPN.
AsyncioSelectorReactor = asyncioSelectorReactor

View File

@@ -0,0 +1,66 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.abstract}, a collection of APIs for implementing
reactors.
"""
from __future__ import annotations
from twisted.internet.abstract import isIPv6Address
from twisted.trial.unittest import SynchronousTestCase
class IPv6AddressTests(SynchronousTestCase):
    """
    Tests for L{isIPv6Address}, which determines whether a particular string
    is an IPv6 address literal.
    """

    def test_empty(self) -> None:
        """
        The empty string is not an IPv6 address literal.
        """
        self.assertFalse(isIPv6Address(""))

    def test_colon(self) -> None:
        """
        A single C{":"} is not an IPv6 address literal.
        """
        self.assertFalse(isIPv6Address(":"))

    def test_loopback(self) -> None:
        """
        C{"::1"} is the IPv6 loopback address literal.
        """
        self.assertTrue(isIPv6Address("::1"))

    def test_scopeID(self) -> None:
        """
        An otherwise valid IPv6 address literal may also include a C{"%"}
        followed by an arbitrary scope identifier.
        """
        for literal in ("fe80::1%eth0", "fe80::2%1", "fe80::3%en2"):
            self.assertTrue(isIPv6Address(literal))

    def test_invalidWithScopeID(self) -> None:
        """
        An otherwise invalid IPv6 address literal is still invalid with a
        trailing scope identifier.
        """
        for literal in ("%eth0", ":%eth0", "hello%eth0"):
            self.assertFalse(isIPv6Address(literal))

    def test_unicodeAndBytes(self) -> None:
        """
        L{isIPv6Address} evaluates ASCII-encoded bytes as well as text.
        """
        # the type annotation only supports str, but bytes is supported at
        # runtime
        self.assertTrue(isIPv6Address(b"fe80::2%1"))  # type: ignore[arg-type]
        self.assertTrue(isIPv6Address("fe80::2%1"))
        self.assertFalse(isIPv6Address("\u4321"))
        self.assertFalse(isIPv6Address("hello%eth0"))
        self.assertFalse(isIPv6Address(b"hello%eth0"))  # type: ignore[arg-type]

View File

@@ -0,0 +1,249 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import os
import socket
from unittest import skipIf
from twisted.internet.address import (
HostnameAddress,
IPv4Address,
IPv6Address,
UNIXAddress,
)
from twisted.python.compat import nativeString
from twisted.python.runtime import platform
from twisted.trial.unittest import SynchronousTestCase, TestCase
symlinkSkip = not platform._supportsSymlinks()
try:
socket.AF_UNIX
except AttributeError:
unixSkip = True
else:
unixSkip = False
class AddressTestCaseMixin:
    """
    Shared assertions for address classes.  Subclasses must provide
    C{buildAddress} (a new instance of the address under test) and
    C{buildDifferentAddress} (an instance that must compare unequal to it).
    """

    def test_addressComparison(self):
        """
        Two different address instances, sharing the same properties are
        considered equal by C{==} and not considered not equal by C{!=}.

        Note: When applied via UNIXAddress class, this uses the same
        filename for both objects being compared.
        """
        self.assertTrue(self.buildAddress() == self.buildAddress())
        self.assertFalse(self.buildAddress() != self.buildAddress())

    def test_hash(self):
        """
        C{__hash__} can be used to get a hash of an address, allowing
        addresses to be used as keys in dictionaries, for instance.
        """
        addr = self.buildAddress()
        d = {addr: True}
        # A fresh-but-equal instance must hash to the same dictionary slot.
        self.assertTrue(d[self.buildAddress()])

    def test_differentNamesComparison(self):
        """
        Check that comparison operators work correctly on address objects
        when a different name is passed in
        """
        # Exercise both operand orders for == and !=.
        self.assertFalse(self.buildAddress() == self.buildDifferentAddress())
        self.assertFalse(self.buildDifferentAddress() == self.buildAddress())
        self.assertTrue(self.buildAddress() != self.buildDifferentAddress())
        self.assertTrue(self.buildDifferentAddress() != self.buildAddress())

    def assertDeprecations(self, testMethod, message):
        """
        Assert that the a DeprecationWarning with the given message was
        emitted against the given method.
        """
        warnings = self.flushWarnings([testMethod])
        self.assertEqual(warnings[0]["category"], DeprecationWarning)
        self.assertEqual(warnings[0]["message"], message)
        self.assertEqual(len(warnings), 1)
class IPv4AddressTestCaseMixin(AddressTestCaseMixin):
    """
    Mixin fixing the (attribute name, format) spec shared by the TCP and
    UDP IPv4 address test cases.
    """

    addressArgSpec = (("type", "%s"), ("host", "%r"), ("port", "%d"))
class HostnameAddressTests(TestCase, AddressTestCaseMixin):
    """
    Test case for L{HostnameAddress}.
    """

    addressArgSpec = (("hostname", "%s"), ("port", "%d"))

    def buildAddress(self):
        """
        Create an arbitrary new L{HostnameAddress} instance.

        @return: A L{HostnameAddress} instance.
        """
        return HostnameAddress(b"example.com", 0)

    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a different hostname.

        @return: A L{HostnameAddress} instance.
        """
        return HostnameAddress(b"example.net", 0)
class IPv4AddressTCPTests(SynchronousTestCase, IPv4AddressTestCaseMixin):
    """
    Mixin-driven tests for TCP-type L{IPv4Address} instances.
    """

    def buildAddress(self):
        """
        Create an arbitrary new L{IPv4Address} instance with a C{"TCP"}
        type.  A new instance is created for each call, but always for the
        same address.
        """
        return IPv4Address("TCP", "127.0.0.1", 0)

    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a different fixed address.
        """
        return IPv4Address("TCP", "127.0.0.2", 0)
class IPv4AddressUDPTests(SynchronousTestCase, IPv4AddressTestCaseMixin):
    """
    Mixin-driven tests for UDP-type L{IPv4Address} instances.
    """

    def buildAddress(self):
        """
        Create an arbitrary new L{IPv4Address} instance with a C{"UDP"}
        type.  A new instance is created for each call, but always for the
        same address.
        """
        return IPv4Address("UDP", "127.0.0.1", 0)

    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a different fixed address.
        """
        return IPv4Address("UDP", "127.0.0.2", 0)
class IPv6AddressTests(SynchronousTestCase, AddressTestCaseMixin):
    """
    Mixin-driven tests for L{IPv6Address} instances.
    """

    addressArgSpec = (("type", "%s"), ("host", "%r"), ("port", "%d"))

    def buildAddress(self):
        """
        Create an arbitrary new L{IPv6Address} instance with a C{"TCP"}
        type.  A new instance is created for each call, but always for the
        same address.
        """
        return IPv6Address("TCP", "::1", 0)

    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a different fixed address.
        """
        return IPv6Address("TCP", "::2", 0)
@skipIf(unixSkip, "Platform doesn't support UNIX sockets.")
class UNIXAddressTests(SynchronousTestCase):
    """
    Tests for L{UNIXAddress} backed by real temporary paths.
    """

    addressArgSpec = (("name", "%r"),)

    def setUp(self):
        # Two distinct temporary paths: the "same" address and a different
        # one for inequality tests.
        self._socketAddress = self.mktemp()
        self._otherAddress = self.mktemp()

    def buildAddress(self):
        """
        Create an arbitrary new L{UNIXAddress} instance.  A new instance is
        created for each call, but always for the same address.
        """
        return UNIXAddress(self._socketAddress)

    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a different fixed address.
        """
        return UNIXAddress(self._otherAddress)

    def test_repr(self):
        """
        The repr of L{UNIXAddress} returns with the filename that the
        L{UNIXAddress} is for.
        """
        self.assertEqual(
            repr(self.buildAddress()),
            "UNIXAddress('%s')" % (nativeString(self._socketAddress)),
        )

    @skipIf(symlinkSkip, "Platform does not support symlinks")
    def test_comparisonOfLinkedFiles(self):
        """
        UNIXAddress objects compare as equal if they link to the same file.
        """
        linkName = self.mktemp()
        # The file is created so the symlink has a real target.
        # NOTE(review): binding the file object to self.fd looks accidental;
        # a plain local variable would do -- confirm.
        with open(self._socketAddress, "w") as self.fd:
            os.symlink(os.path.abspath(self._socketAddress), linkName)
            self.assertEqual(UNIXAddress(self._socketAddress), UNIXAddress(linkName))
            self.assertEqual(UNIXAddress(linkName), UNIXAddress(self._socketAddress))

    @skipIf(symlinkSkip, "Platform does not support symlinks")
    def test_hashOfLinkedFiles(self):
        """
        UNIXAddress Objects that compare as equal have the same hash value.
        """
        linkName = self.mktemp()
        with open(self._socketAddress, "w") as self.fd:
            os.symlink(os.path.abspath(self._socketAddress), linkName)
            self.assertEqual(
                hash(UNIXAddress(self._socketAddress)), hash(UNIXAddress(linkName))
            )
@skipIf(unixSkip, "platform doesn't support UNIX sockets.")
class EmptyUNIXAddressTests(SynchronousTestCase, AddressTestCaseMixin):
    """
    Tests for L{UNIXAddress} operations involving a L{None} address.
    """

    addressArgSpec = (("name", "%r"),)

    def setUp(self):
        self._socketAddress = self.mktemp()

    def buildAddress(self):
        """
        Create an arbitrary new L{UNIXAddress} instance.  A new instance is
        created for each call, but always for the same address.  This builds
        it with a fixed address of L{None}.
        """
        return UNIXAddress(None)

    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a random temporary directory.
        """
        return UNIXAddress(self._socketAddress)

    @skipIf(symlinkSkip, "Platform does not support symlinks")
    def test_comparisonOfLinkedFiles(self):
        """
        A UNIXAddress referring to a L{None} address does not
        compare equal to a UNIXAddress referring to a symlink.
        """
        linkName = self.mktemp()
        with open(self._socketAddress, "w") as self.fd:
            os.symlink(os.path.abspath(self._socketAddress), linkName)
            self.assertNotEqual(UNIXAddress(self._socketAddress), UNIXAddress(None))
            self.assertNotEqual(UNIXAddress(None), UNIXAddress(self._socketAddress))

    def test_emptyHash(self):
        """
        C{__hash__} can be used to get a hash of an address, even one
        referring to L{None} rather than a real path.
        """
        addr = self.buildAddress()
        d = {addr: True}
        self.assertTrue(d[self.buildAddress()])

View File

@@ -0,0 +1,296 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.asyncioreactor}.
"""
import gc
import sys
from asyncio import (
AbstractEventLoop,
AbstractEventLoopPolicy,
DefaultEventLoopPolicy,
Future,
SelectorEventLoop,
get_event_loop,
get_event_loop_policy,
set_event_loop,
set_event_loop_policy,
)
from unittest import skipIf
from twisted.internet.asyncioreactor import AsyncioSelectorReactor
from twisted.python.runtime import platform
from twisted.trial.unittest import SynchronousTestCase
from .reactormixins import ReactorBuilder
# Feature-detect the Windows-only asyncio event loop policies.  On any other
# platform (or if the names are missing) both flags simply stay False.
hasWindowsProactorEventLoopPolicy = False
hasWindowsSelectorEventLoopPolicy = False
if sys.platform.startswith("win32"):
    try:
        from asyncio import (
            WindowsProactorEventLoopPolicy,
            WindowsSelectorEventLoopPolicy,
        )
    except ImportError:
        pass
    else:
        hasWindowsProactorEventLoopPolicy = True
        hasWindowsSelectorEventLoopPolicy = True

# Probe what kind of loop the default policy builds on this platform, then
# close the probe loop immediately; only the boolean is needed by the tests.
_defaultEventLoop = DefaultEventLoopPolicy().new_event_loop()
_defaultEventLoopIsSelector = isinstance(_defaultEventLoop, SelectorEventLoop)
_defaultEventLoop.close()
class AsyncioSelectorReactorTests(ReactorBuilder, SynchronousTestCase):
    """
    L{AsyncioSelectorReactor} tests.
    """

    def assertReactorWorksWithAsyncioFuture(self, reactor):
        """
        Ensure that C{reactor} has an event loop that works
        properly with L{asyncio.Future}.
        """
        future = Future()
        result = []

        def completed(future):
            result.append(future.result())
            reactor.stop()

        future.add_done_callback(completed)
        future.set_result(True)

        # add_done_callback schedules the callback on the loop, so nothing
        # can have run until the reactor actually spins below.
        self.assertEqual(result, [])
        self.runReactor(reactor, timeout=1)
        self.assertEqual(result, [True])

    def newLoop(self, policy: AbstractEventLoopPolicy) -> AbstractEventLoop:
        """
        Make a new asyncio loop from a policy for use with a reactor, and add
        appropriate cleanup to restore any global state.
        """
        existingLoop = get_event_loop()
        existingPolicy = get_event_loop_policy()
        result = policy.new_event_loop()

        @self.addCleanup
        def cleanUp():
            # Close the new loop and put back whatever global loop/policy
            # were in place before the test ran.
            result.close()
            set_event_loop(existingLoop)
            set_event_loop_policy(existingPolicy)

        return result

    @skipIf(
        not _defaultEventLoopIsSelector,
        "default event loop: {}\nis not of type SelectorEventLoop "
        "on Python {}.{} ({})".format(
            type(_defaultEventLoop),
            sys.version_info.major,
            sys.version_info.minor,
            platform.getType(),
        ),
    )
    def test_defaultSelectorEventLoopFromGlobalPolicy(self):
        """
        L{AsyncioSelectorReactor} wraps the global policy's event loop
        by default. This ensures that L{asyncio.Future}s and
        coroutines created by library code that uses
        L{asyncio.get_event_loop} are bound to the same loop.
        """
        reactor = AsyncioSelectorReactor()
        self.assertReactorWorksWithAsyncioFuture(reactor)

    @skipIf(
        not _defaultEventLoopIsSelector,
        "default event loop: {}\nis not of type SelectorEventLoop "
        "on Python {}.{} ({})".format(
            type(_defaultEventLoop),
            sys.version_info.major,
            sys.version_info.minor,
            platform.getType(),
        ),
    )
    def test_newSelectorEventLoopFromDefaultEventLoopPolicy(self):
        """
        If we use the L{asyncio.DefaultEventLoopPolicy} to create a new event
        loop, and then pass that event loop to a new
        L{AsyncioSelectorReactor}, this reactor should work properly with
        L{asyncio.Future}.
        """
        event_loop = self.newLoop(DefaultEventLoopPolicy())
        reactor = AsyncioSelectorReactor(event_loop)
        set_event_loop(event_loop)
        self.assertReactorWorksWithAsyncioFuture(reactor)

    @skipIf(
        _defaultEventLoopIsSelector,
        "default event loop: {}\nis of type SelectorEventLoop "
        "on Python {}.{} ({})".format(
            type(_defaultEventLoop),
            sys.version_info.major,
            sys.version_info.minor,
            platform.getType(),
        ),
    )
    def test_defaultNotASelectorEventLoopFromGlobalPolicy(self):
        """
        On Windows Python 3.5 to 3.7, L{get_event_loop()} returns a
        L{WindowsSelectorEventLoop} by default.
        On Windows Python 3.8+, L{get_event_loop()} returns a
        L{WindowsProactorEventLoop} by default.
        L{AsyncioSelectorReactor} should raise a
        L{TypeError} if the default event loop is not a
        L{WindowsSelectorEventLoop}.
        """
        self.assertRaises(TypeError, AsyncioSelectorReactor)

    @skipIf(
        not hasWindowsProactorEventLoopPolicy, "WindowsProactorEventLoop not available"
    )
    def test_WindowsProactorEventLoop(self):
        """
        L{AsyncioSelectorReactor} will raise a L{TypeError}
        if instantiated with a L{asyncio.WindowsProactorEventLoop}
        """
        event_loop = self.newLoop(WindowsProactorEventLoopPolicy())
        self.assertRaises(TypeError, AsyncioSelectorReactor, event_loop)

    @skipIf(
        not hasWindowsSelectorEventLoopPolicy,
        "WindowsSelectorEventLoop only on Windows",
    )
    def test_WindowsSelectorEventLoop(self):
        """
        L{WindowsSelectorEventLoop} works with L{AsyncioSelectorReactor}
        """
        event_loop = self.newLoop(WindowsSelectorEventLoopPolicy())
        reactor = AsyncioSelectorReactor(event_loop)
        set_event_loop(event_loop)
        self.assertReactorWorksWithAsyncioFuture(reactor)

    @skipIf(
        not hasWindowsProactorEventLoopPolicy,
        "WindowsProactorEventLoopPolicy only on Windows",
    )
    def test_WindowsProactorEventLoopPolicy(self):
        """
        L{AsyncioSelectorReactor} will raise a L{TypeError}
        if L{asyncio.WindowsProactorEventLoopPolicy} is default.
        """
        set_event_loop_policy(WindowsProactorEventLoopPolicy())
        # Passing None restores the default policy on next use.
        self.addCleanup(lambda: set_event_loop_policy(None))
        with self.assertRaises(TypeError):
            AsyncioSelectorReactor()

    @skipIf(
        not hasWindowsSelectorEventLoopPolicy,
        "WindowsSelectorEventLoopPolicy only on Windows",
    )
    def test_WindowsSelectorEventLoopPolicy(self):
        """
        L{AsyncioSelectorReactor} will work if
        L{asyncio.WindowsSelectorEventLoopPolicy} is default.
        """
        set_event_loop_policy(WindowsSelectorEventLoopPolicy())
        self.addCleanup(lambda: set_event_loop_policy(None))
        reactor = AsyncioSelectorReactor()
        self.assertReactorWorksWithAsyncioFuture(reactor)

    def test_seconds(self):
        """L{seconds} should return a plausible epoch time."""
        # On Windows a selector-based policy must be installed first, since
        # AsyncioSelectorReactor rejects proactor loops.
        if hasWindowsSelectorEventLoopPolicy:
            set_event_loop_policy(WindowsSelectorEventLoopPolicy())
            self.addCleanup(lambda: set_event_loop_policy(None))
        reactor = AsyncioSelectorReactor()
        result = reactor.seconds()
        # greater than 2020-01-01
        self.assertGreater(result, 1577836800)
        # less than 2120-01-01
        self.assertLess(result, 4733510400)

    def test_delayedCallResetToLater(self):
        """
        L{DelayedCall.reset()} properly reschedules timer to later time
        """
        if hasWindowsSelectorEventLoopPolicy:
            set_event_loop_policy(WindowsSelectorEventLoopPolicy())
            self.addCleanup(lambda: set_event_loop_policy(None))
        reactor = AsyncioSelectorReactor()

        timer_called_at = [None]

        def on_timer():
            timer_called_at[0] = reactor.seconds()

        start_time = reactor.seconds()
        # Schedule immediately, then push the timer back to +0.5s; the
        # callback must observe the later deadline.
        dc = reactor.callLater(0, on_timer)
        dc.reset(0.5)
        reactor.callLater(1, reactor.stop)
        reactor.run()

        self.assertIsNotNone(timer_called_at[0])
        self.assertGreater(timer_called_at[0] - start_time, 0.4)

    def test_delayedCallResetToEarlier(self):
        """
        L{DelayedCall.reset()} properly reschedules timer to earlier time
        """
        if hasWindowsSelectorEventLoopPolicy:
            set_event_loop_policy(WindowsSelectorEventLoopPolicy())
        reactor = AsyncioSelectorReactor()

        timer_called_at = [None]

        def on_timer():
            timer_called_at[0] = reactor.seconds()

        start_time = reactor.seconds()
        # Schedule at +0.5s, then pull the timer forward to run immediately.
        dc = reactor.callLater(0.5, on_timer)
        dc.reset(0)
        reactor.callLater(1, reactor.stop)

        import io
        from contextlib import redirect_stderr

        # Capture stderr to assert the reset produced no asyncio warnings
        # or tracebacks while the reactor ran.
        stderr = io.StringIO()
        with redirect_stderr(stderr):
            reactor.run()

        self.assertEqual(stderr.getvalue(), "")
        self.assertIsNotNone(timer_called_at[0])
        self.assertLess(timer_called_at[0] - start_time, 0.4)

        # NOTE(review): unlike test_delayedCallResetToLater, the policy is
        # restored manually here instead of via addCleanup, so a failing
        # assertion above would leak the policy change — confirm intentional.
        if hasWindowsSelectorEventLoopPolicy:
            set_event_loop_policy(None)

    def test_noCycleReferencesInCallLater(self):
        """
        L{AsyncioSelectorReactor.callLater()} doesn't leave cyclic references
        """
        if hasWindowsSelectorEventLoopPolicy:
            set_event_loop_policy(WindowsSelectorEventLoopPolicy())
        # Disable the collector so any garbage created by callLater stays
        # visible to the object-count comparison below.
        gc_was_enabled = gc.isenabled()
        gc.disable()
        try:
            objects_before = len(gc.get_objects())
            timer_count = 1000
            reactor = AsyncioSelectorReactor()
            for _ in range(timer_count):
                reactor.callLater(0, lambda: None)
            reactor.runUntilCurrent()
            objects_after = len(gc.get_objects())
            # Less than one leftover object per scheduled call means no
            # per-call cyclic garbage accumulated.
            self.assertLess((objects_after - objects_before) / timer_count, 1)
        finally:
            if gc_was_enabled:
                gc.enable()
            if hasWindowsSelectorEventLoopPolicy:
                set_event_loop_policy(None)

View File

@@ -0,0 +1,472 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.base}.
"""
import socket
from queue import Queue
from typing import Callable
from unittest import skipIf
from zope.interface import implementer
from typing_extensions import ParamSpec
from twisted.internet._resolver import FirstOneWins
from twisted.internet.base import DelayedCall, ReactorBase, ThreadedResolver
from twisted.internet.defer import Deferred
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import IReactorThreads, IReactorTime, IResolverSimple
from twisted.internet.task import Clock
from twisted.python.threadpool import ThreadPool
from twisted.trial.unittest import SkipTest, TestCase
try:
import signal as _signal
except ImportError:
signal = None
else:
signal = _signal
_P = ParamSpec("_P")
@implementer(IReactorTime, IReactorThreads)
class FakeReactor:
    """
    A fake reactor implementation which just supports enough reactor APIs for
    L{ThreadedResolver}.
    """

    def __init__(self):
        self._clock = Clock()
        # Delegate callLater straight to the deterministic Clock so tests can
        # advance time explicitly.
        self.callLater = self._clock.callLater
        self._threadpool = ThreadPool()
        self._threadpool.start()
        self.getThreadPool = lambda: self._threadpool
        # Calls made via callFromThread are queued here and run one at a time
        # by the test through _runThreadCalls.
        self._threadCalls = Queue()

    def callFromThread(
        self, callable: Callable[_P, object], *args: _P.args, **kwargs: _P.kwargs
    ) -> None:
        # Record the call instead of dispatching it; see _runThreadCalls.
        self._threadCalls.put((callable, args, kwargs))

    def _runThreadCalls(self):
        # Run exactly one queued callFromThread call; blocks until one is
        # available.
        f, args, kwargs = self._threadCalls.get()
        f(*args, **kwargs)

    def _stop(self):
        # Shut down the real ThreadPool started in __init__.
        self._threadpool.stop()

    def getDelayedCalls(self):
        # IReactorTime.getDelayedCalls — interface stub, intentionally empty.
        pass

    def seconds(self) -> float:  # type: ignore[empty-body]
        # IReactorTime.seconds — interface stub, intentionally empty.
        pass

    def callInThread(
        self, callable: Callable[_P, object], *args: _P.args, **kwargs: _P.kwargs
    ) -> None:
        # IReactorInThreads.callInThread — interface stub, intentionally empty.
        pass

    def suggestThreadPoolSize(self, size):
        # IReactorThreads.suggestThreadPoolSize — interface stub.
        pass
class ThreadedResolverTests(TestCase):
    """
    Tests for L{ThreadedResolver}.
    """

    def test_success(self):
        """
        L{ThreadedResolver.getHostByName} returns a L{Deferred} which fires
        with the value returned by the call to L{socket.gethostbyname} in the
        threadpool of the reactor passed to L{ThreadedResolver.__init__}.
        """
        ip = "10.0.0.17"
        name = "foo.bar.example.com"
        timeout = 30

        reactor = FakeReactor()
        self.addCleanup(reactor._stop)

        lookedUp = []
        resolvedTo = []

        def fakeGetHostByName(name):
            lookedUp.append(name)
            return ip

        self.patch(socket, "gethostbyname", fakeGetHostByName)

        resolver = ThreadedResolver(reactor)
        d = resolver.getHostByName(name, (timeout,))
        d.addCallback(resolvedTo.append)

        # The lookup runs via FakeReactor's queued thread calls; drive it
        # explicitly.
        reactor._runThreadCalls()

        self.assertEqual(lookedUp, [name])
        self.assertEqual(resolvedTo, [ip])

        # Make sure that any timeout-related stuff gets cleaned up.
        reactor._clock.advance(timeout + 1)
        self.assertEqual(reactor._clock.calls, [])

    def test_failure(self):
        """
        L{ThreadedResolver.getHostByName} returns a L{Deferred} which fires a
        L{Failure} if the call to L{socket.gethostbyname} raises an exception.
        """
        timeout = 30
        reactor = FakeReactor()
        self.addCleanup(reactor._stop)

        def fakeGetHostByName(name):
            raise OSError("ENOBUFS (this is a funny joke)")

        self.patch(socket, "gethostbyname", fakeGetHostByName)

        failedWith = []
        resolver = ThreadedResolver(reactor)
        d = resolver.getHostByName("some.name", (timeout,))
        # The OSError raised above must surface to callers as DNSLookupError.
        self.assertFailure(d, DNSLookupError)
        d.addCallback(failedWith.append)

        reactor._runThreadCalls()
        self.assertEqual(len(failedWith), 1)

        # Make sure that any timeout-related stuff gets cleaned up.
        reactor._clock.advance(timeout + 1)
        self.assertEqual(reactor._clock.calls, [])

    def test_timeout(self):
        """
        If L{socket.gethostbyname} does not complete before the specified
        timeout elapsed, the L{Deferred} returned by
        L{ThreadedResolver.getHostByName} fails with L{DNSLookupError}.
        """
        timeout = 10
        reactor = FakeReactor()
        self.addCleanup(reactor._stop)

        result = Queue()

        def fakeGetHostByName(name):
            # Blocks on the queue, simulating a lookup that hangs until the
            # test chooses to complete it.
            raise result.get()

        self.patch(socket, "gethostbyname", fakeGetHostByName)

        failedWith = []
        resolver = ThreadedResolver(reactor)
        d = resolver.getHostByName("some.name", (timeout,))
        self.assertFailure(d, DNSLookupError)
        d.addCallback(failedWith.append)

        # Just before the timeout nothing has failed yet; one more second
        # pushes it over.
        reactor._clock.advance(timeout - 1)
        self.assertEqual(failedWith, [])
        reactor._clock.advance(1)
        self.assertEqual(len(failedWith), 1)

        # Eventually the socket.gethostbyname does finish - in this case, with
        # an exception. Nobody cares, though.
        result.put(IOError("The I/O was errorful"))

    def test_resolverGivenStr(self):
        """
        L{ThreadedResolver.getHostByName} is passed L{str}, encoded using IDNA
        if required.
        """
        calls = []

        @implementer(IResolverSimple)
        class FakeResolver:
            def getHostByName(self, name, timeouts=()):
                calls.append(name)
                return Deferred()

        class JustEnoughReactor(ReactorBase):
            def installWaker(self):
                pass

        fake = FakeResolver()
        reactor = JustEnoughReactor()
        reactor.installResolver(fake)

        rec = FirstOneWins(Deferred())
        # Plain ASCII names pass through unchanged; non-ASCII names must
        # arrive IDNA-encoded (xn--...), and already-encoded names untouched.
        reactor.nameResolver.resolveHostName(rec, "example.example")
        reactor.nameResolver.resolveHostName(rec, "example.example")
        reactor.nameResolver.resolveHostName(rec, "v\xe4\xe4ntynyt.example")
        reactor.nameResolver.resolveHostName(rec, "\u0440\u0444.example")
        reactor.nameResolver.resolveHostName(rec, "xn----7sbb4ac0ad0be6cf.xn--p1ai")

        self.assertEqual(len(calls), 5)
        self.assertEqual(list(map(type, calls)), [str] * 5)
        self.assertEqual("example.example", calls[0])
        self.assertEqual("example.example", calls[1])
        self.assertEqual("xn--vntynyt-5waa.example", calls[2])
        self.assertEqual("xn--p1ai.example", calls[3])
        self.assertEqual("xn----7sbb4ac0ad0be6cf.xn--p1ai", calls[4])
def nothing():
    """
    A do-nothing function used by L{DelayedCallTests.test_str}; it exists
    only so its name shows up in L{DelayedCall} string representations.
    """
    return None
class DelayedCallMixin:
    """
    L{DelayedCall}
    """

    def _getDelayedCallAt(self, time):
        """
        Get a L{DelayedCall} instance at a given C{time}.

        @param time: The absolute time at which the returned L{DelayedCall}
            will be scheduled.
        """

        def noop(call):
            # Stands in for both the cancel and reset callbacks, which are
            # irrelevant to these comparison tests.
            pass

        return DelayedCall(time, lambda: None, (), {}, noop, noop, None)

    def setUp(self):
        """
        Create two L{DelayedCall} instances scheduled to run at different
        times.
        """
        self.zero = self._getDelayedCallAt(0)
        self.one = self._getDelayedCallAt(1)

    def test_str(self):
        """
        The string representation of a L{DelayedCall} instance, as returned by
        L{str}, includes the unsigned id of the instance, as well as its state,
        the function to be called, and the function arguments.
        """
        # Scheduled at 12 with "now" at 1.5, hence the [10.5s] remaining.
        dc = DelayedCall(12, nothing, (3,), {"A": 5}, None, None, lambda: 1.5)
        self.assertEqual(
            str(dc),
            "<DelayedCall 0x%x [10.5s] called=0 cancelled=0 nothing(3, A=5)>"
            % (id(dc),),
        )

    def test_repr(self):
        """
        The string representation of a L{DelayedCall} instance, as returned by
        {repr}, is identical to that returned by L{str}.
        """
        dc = DelayedCall(13, nothing, (6,), {"A": 9}, None, None, lambda: 1.6)
        self.assertEqual(str(dc), repr(dc))

    def test_lt(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a < b} is true
        if and only if C{a} is scheduled to run before C{b}.
        """
        zero, one = self.zero, self.one
        self.assertTrue(zero < one)
        self.assertFalse(one < zero)
        self.assertFalse(zero < zero)
        self.assertFalse(one < one)

    def test_le(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a <= b} is true
        if and only if C{a} is scheduled to run before C{b} or at the same
        time as C{b}.
        """
        zero, one = self.zero, self.one
        self.assertTrue(zero <= one)
        self.assertFalse(one <= zero)
        self.assertTrue(zero <= zero)
        self.assertTrue(one <= one)

    def test_gt(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a > b} is true
        if and only if C{a} is scheduled to run after C{b}.
        """
        zero, one = self.zero, self.one
        self.assertTrue(one > zero)
        self.assertFalse(zero > one)
        self.assertFalse(zero > zero)
        self.assertFalse(one > one)

    def test_ge(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a >= b} is true
        if and only if C{a} is scheduled to run after C{b} or at the same
        time as C{b}.
        """
        zero, one = self.zero, self.one
        self.assertTrue(one >= zero)
        self.assertFalse(zero >= one)
        self.assertTrue(zero >= zero)
        self.assertTrue(one >= one)

    def test_eq(self):
        """
        A L{DelayedCall} instance is only equal to itself.
        """
        # Explicitly use == here, instead of assertEqual, to be more
        # confident __eq__ is being tested.
        self.assertFalse(self.zero == self.one)
        self.assertTrue(self.zero == self.zero)
        self.assertTrue(self.one == self.one)

    def test_ne(self):
        """
        A L{DelayedCall} instance is not equal to any other object.
        """
        # Explicitly use != here, instead of assertEqual, to be more
        # confident __ne__ is being tested.
        self.assertTrue(self.zero != self.one)
        self.assertFalse(self.zero != self.zero)
        self.assertFalse(self.one != self.one)
class DelayedCallNoDebugTests(DelayedCallMixin, TestCase):
    """
    L{DelayedCall}
    """

    def setUp(self):
        """
        Turn debug off.
        """
        self.patch(DelayedCall, "debug", False)
        DelayedCallMixin.setUp(self)

    def test_str(self):
        """
        The string representation of a L{DelayedCall} instance, as returned by
        L{str}, includes the unsigned id of the instance, as well as its state,
        the function to be called, and the function arguments.
        """
        dc = DelayedCall(12, nothing, (3,), {"A": 5}, None, None, lambda: 1.5)
        # With debug off, no creation traceback is appended to the repr.
        expected = (
            "<DelayedCall 0x{:x} [10.5s] called=0 cancelled=0 "
            "nothing(3, A=5)>".format(id(dc))
        )
        self.assertEqual(str(dc), expected)

    def test_switchToDebug(self):
        """
        If L{DelayedCall.debug} changes from C{0} to C{1} between
        L{DelayedCall.__init__} and L{DelayedCall.__repr__} then
        L{DelayedCall.__repr__} returns a string that does not include the
        creator stack.
        """
        # No traceback was captured at creation (debug was off), so none can
        # be shown even after switching the flag on.
        dc = DelayedCall(3, lambda: None, (), {}, nothing, nothing, lambda: 2)
        dc.debug = 1
        self.assertNotIn("traceback at creation", repr(dc))
class DelayedCallDebugTests(DelayedCallMixin, TestCase):
    """
    L{DelayedCall}
    """

    def setUp(self):
        """
        Turn debug on.
        """
        self.patch(DelayedCall, "debug", True)
        DelayedCallMixin.setUp(self)

    def test_str(self):
        """
        The string representation of a L{DelayedCall} instance, as returned by
        L{str}, includes the unsigned id of the instance, as well as its state,
        the function to be called, and the function arguments.
        """
        dc = DelayedCall(12, nothing, (3,), {"A": 5}, None, None, lambda: 1.5)
        # With debug on, the repr additionally carries the creation traceback,
        # so match with a regex instead of exact equality.
        expectedRegexp = (
            "<DelayedCall 0x{:x} \\[10.5s\\] called=0 cancelled=0 "
            "nothing\\(3, A=5\\)\n\n"
            "traceback at creation:".format(id(dc))
        )
        self.assertRegex(str(dc), expectedRegexp)

    def test_switchFromDebug(self):
        """
        If L{DelayedCall.debug} changes from C{1} to C{0} between
        L{DelayedCall.__init__} and L{DelayedCall.__repr__} then
        L{DelayedCall.__repr__} returns a string that includes the creator
        stack (we captured it, we might as well display it).
        """
        dc = DelayedCall(3, lambda: None, (), {}, nothing, nothing, lambda: 2)
        dc.debug = 0
        self.assertIn("traceback at creation", repr(dc))
class TestSpySignalCapturingReactor(ReactorBase):
    """
    Subclass of ReactorBase to capture signals delivered to the
    reactor for inspection.
    """

    def installWaker(self):
        """
        Required method, unused.

        ReactorBase expects subclasses to provide a waker; these tests never
        run the reactor, so an empty implementation suffices.
        """
@skipIf(not signal, "signal module not available")
class ReactorBaseSignalTests(TestCase):
    """
    Tests to exercise ReactorBase's signal exit reporting path.

    Each test delivers a fake signal to the corresponding ReactorBase
    handler and checks that the signal number is recorded in
    C{_exitSignal}.
    """

    def test_exitSignalDefaultsToNone(self):
        """
        The default value of the _exitSignal attribute is None.
        """
        reactor = TestSpySignalCapturingReactor()
        self.assertIs(None, reactor._exitSignal)

    def test_captureSIGINT(self):
        """
        ReactorBase's SIGINT handler saves the value of SIGINT to the
        _exitSignal attribute.
        """
        reactor = TestSpySignalCapturingReactor()
        # Second argument is the interrupted stack frame; unused here.
        reactor.sigInt(signal.SIGINT, None)
        # assertEqual, not the deprecated assertEquals alias (removed from
        # unittest in Python 3.12), for consistency with the rest of the file.
        self.assertEqual(signal.SIGINT, reactor._exitSignal)

    def test_captureSIGTERM(self):
        """
        ReactorBase's SIGTERM handler saves the value of SIGTERM to the
        _exitSignal attribute.
        """
        reactor = TestSpySignalCapturingReactor()
        reactor.sigTerm(signal.SIGTERM, None)
        self.assertEqual(signal.SIGTERM, reactor._exitSignal)

    def test_captureSIGBREAK(self):
        """
        ReactorBase's SIGBREAK handler saves the value of SIGBREAK to the
        _exitSignal attribute.
        """
        # SIGBREAK only exists on Windows; skip elsewhere.
        if not hasattr(signal, "SIGBREAK"):
            raise SkipTest("signal module does not have SIGBREAK")
        reactor = TestSpySignalCapturingReactor()
        reactor.sigBreak(signal.SIGBREAK, None)
        self.assertEqual(signal.SIGBREAK, reactor._exitSignal)

View File

@@ -0,0 +1,75 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._baseprocess} which implements process-related
functionality that is useful in all platforms supporting L{IReactorProcess}.
"""
from twisted.internet._baseprocess import BaseProcess
from twisted.python.deprecate import getWarningMethod, setWarningMethod
from twisted.trial.unittest import TestCase
class BaseProcessTests(TestCase):
    """
    Tests for L{BaseProcess}, a parent class for other classes which represent
    processes which implements functionality common to many different process
    implementations.
    """

    def test_callProcessExited(self):
        """
        L{BaseProcess._callProcessExited} calls the C{processExited} method of
        its C{proto} attribute and passes it a L{Failure} wrapping the given
        exception.
        """

        class FakeProto:
            reason = None

            def processExited(self, reason):
                self.reason = reason

        reason = RuntimeError("fake reason")
        process = BaseProcess(FakeProto())
        process._callProcessExited(reason)
        # processExited received a Failure; trap() raises unless the Failure
        # wraps RuntimeError, and the wrapped value is the original exception.
        process.proto.reason.trap(RuntimeError)
        self.assertIs(reason, process.proto.reason.value)

    def test_callProcessExitedMissing(self):
        """
        L{BaseProcess._callProcessExited} emits a L{DeprecationWarning} if the
        object referred to by its C{proto} attribute has no C{processExited}
        method.
        """

        class FakeProto:
            pass

        reason = object()
        process = BaseProcess(FakeProto())

        # Intercept the warning machinery so the emitted warning's message,
        # category, and stacklevel can be asserted directly.
        self.addCleanup(setWarningMethod, getWarningMethod())
        warnings = []

        def collect(message, category, stacklevel):
            warnings.append((message, category, stacklevel))

        setWarningMethod(collect)

        process._callProcessExited(reason)

        [(message, category, stacklevel)] = warnings
        self.assertEqual(
            message,
            "Since Twisted 8.2, IProcessProtocol.processExited is required. "
            "%s.%s must implement it." % (FakeProto.__module__, FakeProto.__name__),
        )
        self.assertIs(category, DeprecationWarning)
        # The stacklevel doesn't really make sense for this kind of
        # deprecation. Requiring it to be 0 will at least avoid pointing to
        # any part of Twisted or a random part of the application's code, which
        # I think would be more misleading than having it point inside the
        # warning system itself. -exarkun
        self.assertEqual(stacklevel, 0)

View File

@@ -0,0 +1,101 @@
from typing import TYPE_CHECKING, List
from twisted.trial.unittest import SynchronousTestCase
from .reactormixins import ReactorBuilder
if TYPE_CHECKING:
    # Statically, pretend the test class subclasses SynchronousTestCase so
    # type checkers see the TestCase API on CoreFoundationSpecificTests.
    fakeBase = SynchronousTestCase
else:
    # At runtime use a plain object base; the real TestCase base is
    # presumably supplied via ReactorBuilder.makeTestCaseClasses() at the
    # bottom of the file — TODO confirm.
    fakeBase = object
def noop() -> None:
    """
    A callable that does nothing at all; used as a stand-in callback by the
    tests in this module.
    """
    return None


# Invoke it once at import time so coverage sees the body executed; the
# tests below only schedule it and never actually run it.
noop()
class CoreFoundationSpecificTests(ReactorBuilder, fakeBase):
    """
    Tests for platform interactions of the CoreFoundation-based reactor.
    """

    _reactors = ["twisted.internet.cfreactor.CFReactor"]

    def test_whiteboxStopSimulating(self) -> None:
        """
        CFReactor's simulation timer is None after CFReactor crashes.
        """
        r = self.buildReactor()
        r.callLater(0, r.crash)
        # A far-future call ensures a simulation timer exists before crash.
        r.callLater(100, noop)
        self.runReactor(r)
        self.assertIs(r._currentSimulator, None)

    def test_callLaterLeakage(self) -> None:
        """
        callLater should not leak global state into CoreFoundation which will
        be invoked by a different reactor running the main loop.

        @note: this test may actually be usable for other reactors as well, so
            we may wish to promote it to ensure this invariant across other
            foreign-main-loop reactors.
        """
        r = self.buildReactor()
        delayed = r.callLater(0, noop)
        r2 = self.buildReactor()

        def stopBlocking() -> None:
            r2.callLater(0, r2stop)

        def r2stop() -> None:
            r2.stop()

        r2.callLater(0, stopBlocking)
        self.runReactor(r2)

        # r never ran, so its delayed call must still be pending: running r2
        # must not have stolen or fired it.
        self.assertEqual(r.getDelayedCalls(), [delayed])

    def test_whiteboxIterate(self) -> None:
        """
        C{.iterate()} should remove the CFTimer that will run Twisted's
        callLaters from the loop, even if one is still pending. We test this
        state indirectly with a white-box assertion by verifying the
        C{_currentSimulator} is set to C{None}, since CoreFoundation does not
        allow us to enumerate all active timers or sources.
        """
        r = self.buildReactor()
        x: List[int] = []
        r.callLater(0, x.append, 1)
        delayed = r.callLater(100, noop)
        r.iterate()
        self.assertIs(r._currentSimulator, None)
        # The immediate call ran; the far-future one is still scheduled.
        self.assertEqual(r.getDelayedCalls(), [delayed])
        self.assertEqual(x, [1])

    def test_noTimers(self) -> None:
        """
        The loop can wake up just fine even if there are no timers in it.
        """
        r = self.buildReactor()
        stopped = []

        def doStop() -> None:
            r.stop()
            stopped.append("yes")

        def sleepThenStop() -> None:
            # Wake the main loop from a worker thread, with no timers pending.
            r.callFromThread(doStop)

        r.callLater(0, r.callInThread, sleepThenStop)
        # Can't use runReactor here because it does a callLater. This is
        # therefore a somewhat risky test: inherently, this is the "no timed
        # events anywhere in the reactor" test case and so we can't have a
        # timeout for it.
        r.run()
        self.assertEqual(stopped, ["yes"])
# Publish the generated per-reactor TestCase classes at module level so the
# test runner discovers them (presumably one class per entry in _reactors —
# see ReactorBuilder.makeTestCaseClasses).
globals().update(CoreFoundationSpecificTests.makeTestCaseClasses())

View File

@@ -0,0 +1,316 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorCore}.
"""
import signal
import time
from types import FrameType
from typing import Callable, List, Optional, Tuple, Union, cast
from twisted.internet.abstract import FileDescriptor
from twisted.internet.defer import Deferred
from twisted.internet.error import ReactorAlreadyRunning, ReactorNotRestartable
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.python.failure import Failure
from twisted.trial.unittest import SynchronousTestCase
class SystemEventTestsBuilder(ReactorBuilder):
"""
Builder defining tests relating to L{IReactorCore.addSystemEventTrigger}
and L{IReactorCore.fireSystemEvent}.
"""
def test_stopWhenNotStarted(self) -> None:
"""
C{reactor.stop()} raises L{RuntimeError} when called when the reactor
has not been started.
"""
reactor = self.buildReactor()
cast(SynchronousTestCase, self).assertRaises(RuntimeError, reactor.stop)
def test_stopWhenAlreadyStopped(self) -> None:
"""
C{reactor.stop()} raises L{RuntimeError} when called after the reactor
has been stopped.
"""
reactor = self.buildReactor()
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
cast(SynchronousTestCase, self).assertRaises(RuntimeError, reactor.stop)
def test_callWhenRunningOrder(self) -> None:
"""
Functions are run in the order that they were passed to
L{reactor.callWhenRunning}.
"""
reactor = self.buildReactor()
events: List[str] = []
reactor.callWhenRunning(events.append, "first")
reactor.callWhenRunning(events.append, "second")
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
cast(SynchronousTestCase, self).assertEqual(events, ["first", "second"])
def test_runningForStartupEvents(self) -> None:
"""
The reactor is not running when C{"before"} C{"startup"} triggers are
called and is running when C{"during"} and C{"after"} C{"startup"}
triggers are called.
"""
reactor = self.buildReactor()
state = {}
def beforeStartup() -> None:
state["before"] = reactor.running
def duringStartup() -> None:
state["during"] = reactor.running
def afterStartup() -> None:
state["after"] = reactor.running
testCase = cast(SynchronousTestCase, self)
reactor.addSystemEventTrigger("before", "startup", beforeStartup)
reactor.addSystemEventTrigger("during", "startup", duringStartup)
reactor.addSystemEventTrigger("after", "startup", afterStartup)
reactor.callWhenRunning(reactor.stop)
testCase.assertEqual(state, {})
self.runReactor(reactor)
testCase.assertEqual(state, {"before": False, "during": True, "after": True})
def test_signalHandlersInstalledDuringStartup(self) -> None:
"""
Signal handlers are installed in responsed to the C{"during"}
C{"startup"}.
"""
reactor = self.buildReactor()
phase: Optional[str] = None
def beforeStartup() -> None:
nonlocal phase
phase = "before"
def afterStartup() -> None:
nonlocal phase
phase = "after"
reactor.addSystemEventTrigger("before", "startup", beforeStartup)
reactor.addSystemEventTrigger("after", "startup", afterStartup)
sawPhase = []
def fakeSignal(signum: int, action: Callable[[int, FrameType], None]) -> None:
sawPhase.append(phase)
testCase = cast(SynchronousTestCase, self)
testCase.patch(signal, "signal", fakeSignal)
reactor.callWhenRunning(reactor.stop)
testCase.assertIsNone(phase)
testCase.assertEqual(sawPhase, [])
self.runReactor(reactor)
testCase.assertIn("before", sawPhase)
testCase.assertEqual(phase, "after")
def test_stopShutDownEvents(self) -> None:
"""
C{reactor.stop()} fires all three phases of shutdown event triggers
before it makes C{reactor.run()} return.
"""
reactor = self.buildReactor()
events = []
reactor.addSystemEventTrigger(
"before", "shutdown", lambda: events.append(("before", "shutdown"))
)
reactor.addSystemEventTrigger(
"during", "shutdown", lambda: events.append(("during", "shutdown"))
)
reactor.addSystemEventTrigger(
"after", "shutdown", lambda: events.append(("after", "shutdown"))
)
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
cast(SynchronousTestCase, self).assertEqual(
events,
[("before", "shutdown"), ("during", "shutdown"), ("after", "shutdown")],
)
def test_shutdownFiresTriggersAsynchronously(self) -> None:
"""
C{"before"} C{"shutdown"} triggers are not run synchronously from
L{reactor.stop}.
"""
reactor = self.buildReactor()
events: List[str] = []
reactor.addSystemEventTrigger(
"before", "shutdown", events.append, "before shutdown"
)
def stopIt() -> None:
reactor.stop()
events.append("stopped")
testCase = cast(SynchronousTestCase, self)
reactor.callWhenRunning(stopIt)
testCase.assertEqual(events, [])
self.runReactor(reactor)
testCase.assertEqual(events, ["stopped", "before shutdown"])
def test_shutdownDisconnectsCleanly(self) -> None:
"""
A L{IFileDescriptor.connectionLost} implementation which raises an
exception does not prevent the remaining L{IFileDescriptor}s from
having their C{connectionLost} method called.
"""
lostOK = [False]
# Subclass FileDescriptor to get logPrefix
class ProblematicFileDescriptor(FileDescriptor):
def connectionLost(self, reason: Failure) -> None:
raise RuntimeError("simulated connectionLost error")
class OKFileDescriptor(FileDescriptor):
def connectionLost(self, reason: Failure) -> None:
lostOK[0] = True
testCase = cast(SynchronousTestCase, self)
reactor = self.buildReactor()
# Unfortunately, it is necessary to patch removeAll to directly control
# the order of the returned values. The test is only valid if
# ProblematicFileDescriptor comes first. Also, return these
# descriptors only the first time removeAll is called so that if it is
# called again the file descriptors aren't re-disconnected.
fds = iter([ProblematicFileDescriptor(), OKFileDescriptor()])
reactor.removeAll = lambda: fds
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
testCase.assertEqual(len(testCase.flushLoggedErrors(RuntimeError)), 1)
testCase.assertTrue(lostOK[0])
def test_multipleRun(self) -> None:
"""
C{reactor.run()} raises L{ReactorAlreadyRunning} when called when
the reactor is already running.
"""
events: List[str] = []
testCase = cast(SynchronousTestCase, self)
def reentrantRun() -> None:
testCase.assertRaises(ReactorAlreadyRunning, reactor.run)
events.append("tested")
reactor = self.buildReactor()
reactor.callWhenRunning(reentrantRun)
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
testCase.assertEqual(events, ["tested"])
def test_runWithAsynchronousBeforeStartupTrigger(self) -> None:
"""
When there is a C{'before'} C{'startup'} trigger which returns an
unfired L{Deferred}, C{reactor.run()} starts the reactor and does not
return until after C{reactor.stop()} is called
"""
events = []
def trigger() -> Deferred[object]:
events.append("trigger")
d: Deferred[object] = Deferred()
d.addCallback(callback)
reactor.callLater(0, d.callback, None)
return d
def callback(ignored: object) -> None:
events.append("callback")
reactor.stop()
reactor = self.buildReactor()
reactor.addSystemEventTrigger("before", "startup", trigger)
self.runReactor(reactor)
cast(SynchronousTestCase, self).assertEqual(events, ["trigger", "callback"])
def test_iterate(self) -> None:
    """
    C{reactor.iterate()} does not block.
    """
    reactor = self.buildReactor()
    delayed = reactor.callLater(5, reactor.crash)
    before = time.time()
    reactor.iterate(0)  # A zero timeout must return promptly, not block.
    duration = time.time() - before
    cast(SynchronousTestCase, self).assertTrue(duration < 2)
    delayed.cancel()
def test_crash(self) -> None:
    """
    C{reactor.crash()} stops the reactor and does not fire shutdown
    triggers.
    """
    reactor = self.buildReactor()
    events = []
    reactor.addSystemEventTrigger(
        "before", "shutdown", lambda: events.append(("before", "shutdown"))
    )
    # Schedule crash() via a timed call so it happens while the loop is
    # running; unlike stop(), crash() must bypass shutdown triggers.
    reactor.callWhenRunning(reactor.callLater, 0, reactor.crash)
    self.runReactor(reactor)
    testCase = cast(SynchronousTestCase, self)
    testCase.assertFalse(reactor.running)
    testCase.assertFalse(
        events, "Shutdown triggers invoked but they should not have been."
    )
def test_runAfterCrash(self) -> None:
    """
    C{reactor.run()} restarts the reactor after it has been stopped by
    C{reactor.crash()}.
    """
    events: List[Union[str, Tuple[str, bool]]] = []

    def crash() -> None:
        events.append("crash")
        reactor.crash()

    reactor = self.buildReactor()
    reactor.callWhenRunning(crash)
    self.runReactor(reactor)

    def stop() -> None:
        # Capture the running flag to prove the second run() truly started.
        events.append(("stop", reactor.running))
        reactor.stop()

    reactor.callWhenRunning(stop)
    self.runReactor(reactor)
    cast(SynchronousTestCase, self).assertEqual(events, ["crash", ("stop", True)])
def test_runAfterStop(self) -> None:
    """
    C{reactor.run()} raises L{ReactorNotRestartable} when called after the
    reactor has previously been run and stopped.
    """
    events: List[str] = []
    testCase = cast(SynchronousTestCase, self)

    def restart() -> None:
        # By the "after shutdown" phase the reactor has fully stopped, so a
        # second run() must be refused.
        testCase.assertRaises(ReactorNotRestartable, reactor.run)
        events.append("tested")

    reactor = self.buildReactor()
    reactor.callWhenRunning(reactor.stop)
    reactor.addSystemEventTrigger("after", "shutdown", restart)
    self.runReactor(reactor)
    testCase.assertEqual(events, ["tested"])
# Generate concrete TestCase subclasses (one per installed reactor) from the
# builder and expose them at module level so the test runner discovers them.
globals().update(SystemEventTestsBuilder.makeTestCaseClasses())

View File

@@ -0,0 +1,114 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.default}.
"""
from __future__ import annotations
import select
import sys
from typing import Callable
from twisted.internet import default
from twisted.internet.default import _getInstallFunction, install
from twisted.internet.interfaces import IReactorCore
from twisted.internet.test.test_main import NoReactor
from twisted.python.reflect import requireModule
from twisted.python.runtime import Platform
from twisted.trial.unittest import SynchronousTestCase
# Canned Platform descriptions used to exercise _getInstallFunction without
# depending on the platform the tests actually run on.
unix = Platform("posix", "other")
linux = Platform("posix", "linux2")
windows = Platform("nt", "win32")
osx = Platform("posix", "darwin")
class PollReactorTests(SynchronousTestCase):
    """
    Tests for the cases of L{twisted.internet.default._getInstallFunction}
    in which it picks the poll(2) or epoll(7)-based reactors.
    """

    def assertIsPoll(self, install: Callable[..., object]) -> None:
        """
        Assert the given function will install the poll() reactor, or select()
        if poll() is unavailable.
        """
        if hasattr(select, "poll"):
            self.assertEqual(install.__module__, "twisted.internet.pollreactor")
        else:
            # Platforms lacking select.poll fall back to the select reactor.
            self.assertEqual(install.__module__, "twisted.internet.selectreactor")

    def test_unix(self) -> None:
        """
        L{_getInstallFunction} chooses the poll reactor on arbitrary Unix
        platforms, falling back to select(2) if it is unavailable.
        """
        install = _getInstallFunction(unix)
        self.assertIsPoll(install)

    def test_linux(self) -> None:
        """
        L{_getInstallFunction} chooses the epoll reactor on Linux, or poll if
        epoll is unavailable.
        """
        install = _getInstallFunction(linux)
        if requireModule("twisted.internet.epollreactor") is None:
            self.assertIsPoll(install)
        else:
            self.assertEqual(install.__module__, "twisted.internet.epollreactor")
class SelectReactorTests(SynchronousTestCase):
    """
    Tests for the cases of L{twisted.internet.default._getInstallFunction}
    in which it picks the select(2)-based reactor.
    """

    def _assertSelectInstaller(self, platform: Platform) -> None:
        # Shared check: the chosen installer must come from selectreactor.
        chosen = _getInstallFunction(platform)
        self.assertEqual(chosen.__module__, "twisted.internet.selectreactor")

    def test_osx(self) -> None:
        """
        L{_getInstallFunction} chooses the select reactor on macOS.
        """
        self._assertSelectInstaller(osx)

    def test_windows(self) -> None:
        """
        L{_getInstallFunction} chooses the select reactor on Windows.
        """
        self._assertSelectInstaller(windows)
class InstallationTests(SynchronousTestCase):
    """
    Tests for actual installation of the reactor.
    """

    def test_install(self) -> None:
        """
        L{install} installs a reactor.
        """
        with NoReactor():
            install()
            # Installation is observable as the reactor module being present.
            self.assertIn("twisted.internet.reactor", sys.modules)

    def test_reactor(self) -> None:
        """
        Importing L{twisted.internet.reactor} installs the default reactor if
        none is installed.
        """
        installed: list[bool] = []

        def installer() -> object:
            # Record that installation was triggered, then defer to the
            # real install().
            installed.append(True)
            return install()

        self.patch(default, "install", installer)
        with NoReactor():
            from twisted.internet import reactor

            self.assertTrue(IReactorCore.providedBy(reactor))
        self.assertEqual(installed, [True])

View File

@@ -0,0 +1,241 @@
# -*- test-case-name: twisted.internet.test.test_defer_await -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for C{await} support in Deferreds.
"""
import types
from typing_extensions import NoReturn
from twisted.internet.defer import (
Deferred,
ensureDeferred,
fail,
maybeDeferred,
succeed,
)
from twisted.internet.task import Clock
from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase
class SampleException(Exception):
    """
    A distinct exception type deliberately raised by these tests so that
    assertions can match it exactly.
    """
class AwaitTests(TestCase):
    """
    Tests for using Deferreds in conjunction with PEP-492.
    """

    def test_awaitReturnsIterable(self) -> None:
        """
        C{Deferred.__await__} returns an iterable.
        """
        d: Deferred[None] = Deferred()
        awaitedDeferred = d.__await__()
        # An awaitable's __await__ result must be its own iterator.
        self.assertEqual(awaitedDeferred, iter(awaitedDeferred))

    def test_deferredFromCoroutine(self) -> None:
        """
        L{Deferred.fromCoroutine} will turn a coroutine into a L{Deferred}.
        """

        async def run() -> str:
            d = succeed("bar")
            await d
            res = await run2()
            return res

        async def run2() -> str:
            d = succeed("foo")
            res = await d
            return res

        # It's a coroutine...
        r = run()
        self.assertIsInstance(r, types.CoroutineType)

        # Now it's a Deferred.
        d = Deferred.fromCoroutine(r)
        self.assertIsInstance(d, Deferred)

        # The Deferred has the result we want.
        res = self.successResultOf(d)
        self.assertEqual(res, "foo")

    def test_basic(self) -> None:
        """
        L{Deferred.fromCoroutine} allows a function to C{await} on a
        L{Deferred}.
        """

        async def run() -> str:
            d = succeed("foo")
            res = await d
            return res

        d = Deferred.fromCoroutine(run())
        res = self.successResultOf(d)
        self.assertEqual(res, "foo")

    def test_basicEnsureDeferred(self) -> None:
        """
        L{ensureDeferred} allows a function to C{await} on a L{Deferred}.
        """

        async def run() -> str:
            d = succeed("foo")
            res = await d
            return res

        d = ensureDeferred(run())
        res = self.successResultOf(d)
        self.assertEqual(res, "foo")

    def test_exception(self) -> None:
        """
        An exception in a coroutine scheduled with L{Deferred.fromCoroutine}
        will cause the returned L{Deferred} to fire with a failure.
        """

        async def run() -> NoReturn:
            d = succeed("foo")
            await d
            raise ValueError("Oh no!")

        d = Deferred.fromCoroutine(run())
        res = self.failureResultOf(d)
        self.assertEqual(type(res.value), ValueError)
        self.assertEqual(res.value.args, ("Oh no!",))

    def test_synchronousDeferredFailureTraceback(self) -> None:
        """
        When a Deferred is awaited upon that has already failed with a Failure
        that has a traceback, both the place that the synchronous traceback
        comes from and the awaiting line are shown in the traceback.
        """

        def raises() -> None:
            raise SampleException()

        it = maybeDeferred(raises)

        async def doomed() -> None:
            return await it

        failure = self.failureResultOf(Deferred.fromCoroutine(doomed()))
        # Both frames -- the awaiting coroutine and the original raiser --
        # must appear in the merged traceback.
        self.assertIn(", in doomed\n", failure.getTraceback())
        self.assertIn(", in raises\n", failure.getTraceback())

    def test_asyncDeferredFailureTraceback(self) -> None:
        """
        When a Deferred is awaited upon that later fails with a Failure that
        has a traceback, both the place that the synchronous traceback comes
        from and the awaiting line are shown in the traceback.
        """

        def returnsFailure() -> Failure:
            try:
                raise SampleException()
            except SampleException:
                return Failure()

        it: Deferred[None] = Deferred()

        async def doomed() -> None:
            return await it

        # Start the coroutine first; it suspends on the unfired Deferred.
        started = Deferred.fromCoroutine(doomed())
        self.assertNoResult(started)
        it.errback(returnsFailure())
        failure = self.failureResultOf(started)
        self.assertIn(", in doomed\n", failure.getTraceback())
        self.assertIn(", in returnsFailure\n", failure.getTraceback())

    def test_twoDeep(self) -> None:
        """
        A coroutine scheduled with L{Deferred.fromCoroutine} that awaits a
        L{Deferred} suspends its execution until the inner L{Deferred} fires.
        """
        reactor = Clock()
        sections = []

        async def runone() -> str:
            sections.append(2)
            d: Deferred[int] = Deferred()
            reactor.callLater(1, d.callback, 2)
            await d
            sections.append(3)
            return "Yay!"

        async def run() -> str:
            sections.append(1)
            result = await runone()
            sections.append(4)
            d: Deferred[int] = Deferred()
            reactor.callLater(1, d.callback, 1)
            await d
            sections.append(5)
            return result

        d = Deferred.fromCoroutine(run())

        # Advance the clock in sub-second steps to show each await stays
        # suspended until its one-second timer actually fires.
        reactor.advance(0.9)
        self.assertEqual(sections, [1, 2])
        reactor.advance(0.1)
        self.assertEqual(sections, [1, 2, 3, 4])
        reactor.advance(0.9)
        self.assertEqual(sections, [1, 2, 3, 4])
        reactor.advance(0.1)
        self.assertEqual(sections, [1, 2, 3, 4, 5])

        res = self.successResultOf(d)
        self.assertEqual(res, "Yay!")

    def test_reraise(self) -> None:
        """
        Awaiting an already failed Deferred will raise the exception.
        """

        async def test() -> int:
            try:
                await fail(ValueError("Boom"))
            except ValueError as e:
                self.assertEqual(e.args, ("Boom",))
                return 1
            return 0

        res = self.successResultOf(Deferred.fromCoroutine(test()))
        self.assertEqual(res, 1)

    def test_chained(self) -> None:
        """
        Awaiting a paused & chained Deferred will give the result when it has
        one.
        """
        reactor = Clock()

        async def test() -> None:
            d: Deferred[None] = Deferred()
            d2: Deferred[None] = Deferred()
            # Chain d to d2: d's result becomes whatever d2 fires with.
            d.addCallback(lambda ignored: d2)
            d.callback(None)
            reactor.callLater(0, d2.callback, "bye")
            return await d

        d = Deferred.fromCoroutine(test())
        reactor.advance(0.1)
        res = self.successResultOf(d)
        self.assertEqual(res, "bye")

View File

@@ -0,0 +1,182 @@
# -*- test-case-name: twisted.internet.test.test_defer_yieldfrom -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for C{yield from} support in Deferreds.
"""
import types
from twisted.internet.defer import Deferred, ensureDeferred, fail, succeed
from twisted.internet.task import Clock
from twisted.trial.unittest import TestCase
class YieldFromTests(TestCase):
    """
    Tests for using Deferreds in conjunction with PEP-380.
    """

    def test_ensureDeferred(self) -> None:
        """
        L{ensureDeferred} will turn a coroutine into a L{Deferred}.
        """

        def run():
            d = succeed("foo")
            res = yield from d
            return res

        # It's a generator...
        r = run()
        self.assertIsInstance(r, types.GeneratorType)

        # Now it's a Deferred.
        d = ensureDeferred(r)
        self.assertIsInstance(d, Deferred)

        # The Deferred has the result we want.
        res = self.successResultOf(d)
        self.assertEqual(res, "foo")

    def test_DeferredfromCoroutine(self) -> None:
        """
        L{Deferred.fromCoroutine} will turn a coroutine into a L{Deferred}.
        """

        def run():
            d = succeed("bar")
            yield from d
            res = yield from run2()
            return res

        def run2():
            d = succeed("foo")
            res = yield from d
            return res

        # It's a generator...
        r = run()
        self.assertIsInstance(r, types.GeneratorType)

        # Now it's a Deferred.
        d = Deferred.fromCoroutine(r)
        self.assertIsInstance(d, Deferred)

        # The Deferred has the result we want.
        res = self.successResultOf(d)
        self.assertEqual(res, "foo")

    def test_basic(self) -> None:
        """
        L{Deferred.fromCoroutine} allows a function to C{yield from} a
        L{Deferred}.
        """

        def run():
            d = succeed("foo")
            res = yield from d
            return res

        d = Deferred.fromCoroutine(run())
        res = self.successResultOf(d)
        self.assertEqual(res, "foo")

    def test_exception(self) -> None:
        """
        An exception in a generator scheduled with L{Deferred.fromCoroutine}
        will cause the returned L{Deferred} to fire with a failure.
        """

        def run():
            d = succeed("foo")
            yield from d
            raise ValueError("Oh no!")

        d = Deferred.fromCoroutine(run())
        res = self.failureResultOf(d)
        self.assertEqual(type(res.value), ValueError)
        self.assertEqual(res.value.args, ("Oh no!",))

    def test_twoDeep(self) -> None:
        """
        A generator scheduled with L{Deferred.fromCoroutine} that
        C{yield from}s a L{Deferred} suspends its execution until the inner
        L{Deferred} fires.
        """
        reactor = Clock()
        sections = []

        def runone():
            sections.append(2)
            d = Deferred()
            reactor.callLater(1, d.callback, None)
            yield from d
            sections.append(3)
            return "Yay!"

        def run():
            sections.append(1)
            result = yield from runone()
            sections.append(4)
            d = Deferred()
            reactor.callLater(1, d.callback, None)
            yield from d
            sections.append(5)
            return result

        d = Deferred.fromCoroutine(run())

        # Step the clock in sub-second increments to show each yield point
        # stays suspended until its one-second timer fires.
        reactor.advance(0.9)
        self.assertEqual(sections, [1, 2])
        reactor.advance(0.1)
        self.assertEqual(sections, [1, 2, 3, 4])
        reactor.advance(0.9)
        self.assertEqual(sections, [1, 2, 3, 4])
        reactor.advance(0.1)
        self.assertEqual(sections, [1, 2, 3, 4, 5])

        res = self.successResultOf(d)
        self.assertEqual(res, "Yay!")

    def test_reraise(self) -> None:
        """
        Yielding from an already failed Deferred will raise the exception.
        """

        def test():
            try:
                yield from fail(ValueError("Boom"))
            except ValueError as e:
                self.assertEqual(e.args, ("Boom",))
                return 1
            return 0

        res = self.successResultOf(Deferred.fromCoroutine(test()))
        self.assertEqual(res, 1)

    def test_chained(self) -> None:
        """
        Yielding from a paused & chained Deferred will give the result when it
        has one.
        """
        reactor = Clock()

        def test():
            d = Deferred()
            d2 = Deferred()
            # Chain d to d2: d's result becomes whatever d2 fires with.
            d.addCallback(lambda ignored: d2)
            d.callback(None)
            reactor.callLater(0, d2.callback, "bye")
            res = yield from d
            return res

        d = Deferred.fromCoroutine(test())
        reactor.advance(0.1)
        res = self.successResultOf(d)
        self.assertEqual(res, "bye")

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,230 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.epollreactor}.
"""
from unittest import skipIf
from twisted.internet.error import ConnectionDone
from twisted.internet.posixbase import _ContinuousPolling
from twisted.internet.task import Clock
from twisted.trial.unittest import TestCase
try:
from twisted.internet import epollreactor
except ImportError:
epollreactor = None # type: ignore[assignment]
class Descriptor:
    """
    Records reads and writes, as if it were a C{FileDescriptor}.
    """

    def __init__(self):
        # Chronological log of "read" / "write" / "lost" events.
        self.events = []

    def fileno(self):
        # A fixed, plausible file descriptor number is sufficient for the
        # polling tests.
        return 1

    def doRead(self):
        self.events.append("read")

    def doWrite(self):
        self.events.append("write")

    def connectionLost(self, reason):
        # Only an orderly ConnectionDone is expected; trap() re-raises
        # anything else.
        reason.trap(ConnectionDone)
        self.events.append("lost")
@skipIf(not epollreactor, "epoll not supported in this environment.")
class ContinuousPollingTests(TestCase):
    """
    L{_ContinuousPolling} can be used to read and write from C{FileDescriptor}
    objects.
    """

    def test_addReader(self):
        """
        Adding a reader when there was previously no reader starts up a
        C{LoopingCall}.
        """
        poller = _ContinuousPolling(Clock())
        self.assertIsNone(poller._loop)
        reader = object()
        self.assertFalse(poller.isReading(reader))
        poller.addReader(reader)
        self.assertIsNotNone(poller._loop)
        self.assertTrue(poller._loop.running)
        # The loop must tick on the poller's own reactor (the Clock).
        self.assertIs(poller._loop.clock, poller._reactor)
        self.assertTrue(poller.isReading(reader))

    def test_addWriter(self):
        """
        Adding a writer when there was previously no writer starts up a
        C{LoopingCall}.
        """
        poller = _ContinuousPolling(Clock())
        self.assertIsNone(poller._loop)
        writer = object()
        self.assertFalse(poller.isWriting(writer))
        poller.addWriter(writer)
        self.assertIsNotNone(poller._loop)
        self.assertTrue(poller._loop.running)
        self.assertIs(poller._loop.clock, poller._reactor)
        self.assertTrue(poller.isWriting(writer))

    def test_removeReader(self):
        """
        Removing a reader stops the C{LoopingCall}.
        """
        poller = _ContinuousPolling(Clock())
        reader = object()
        poller.addReader(reader)
        poller.removeReader(reader)
        self.assertIsNone(poller._loop)
        # No timed calls may linger once the last descriptor is removed.
        self.assertEqual(poller._reactor.getDelayedCalls(), [])
        self.assertFalse(poller.isReading(reader))

    def test_removeWriter(self):
        """
        Removing a writer stops the C{LoopingCall}.
        """
        poller = _ContinuousPolling(Clock())
        writer = object()
        poller.addWriter(writer)
        poller.removeWriter(writer)
        self.assertIsNone(poller._loop)
        self.assertEqual(poller._reactor.getDelayedCalls(), [])
        self.assertFalse(poller.isWriting(writer))

    def test_removeUnknown(self):
        """
        Removing unknown readers and writers silently does nothing.
        """
        poller = _ContinuousPolling(Clock())
        poller.removeWriter(object())
        poller.removeReader(object())

    def test_multipleReadersAndWriters(self):
        """
        Adding multiple readers and writers results in a single
        C{LoopingCall}.
        """
        poller = _ContinuousPolling(Clock())
        writer = object()
        poller.addWriter(writer)
        self.assertIsNotNone(poller._loop)
        poller.addWriter(object())
        self.assertIsNotNone(poller._loop)
        poller.addReader(object())
        self.assertIsNotNone(poller._loop)
        poller.addReader(object())
        poller.removeWriter(writer)
        self.assertIsNotNone(poller._loop)
        self.assertTrue(poller._loop.running)
        # Exactly one scheduled call serves all remaining descriptors.
        self.assertEqual(len(poller._reactor.getDelayedCalls()), 1)

    def test_readerPolling(self):
        """
        Adding a reader causes its C{doRead} to be called every 1
        milliseconds.
        """
        # NOTE(review): these advances are 0.00001s (0.01ms) while the
        # docstring claims 1ms -- confirm the interval _ContinuousPolling
        # actually uses (compare test_writerPolling's 0.001 steps).
        reactor = Clock()
        poller = _ContinuousPolling(reactor)
        desc = Descriptor()
        poller.addReader(desc)
        self.assertEqual(desc.events, [])
        reactor.advance(0.00001)
        self.assertEqual(desc.events, ["read"])
        reactor.advance(0.00001)
        self.assertEqual(desc.events, ["read", "read"])
        reactor.advance(0.00001)
        self.assertEqual(desc.events, ["read", "read", "read"])

    def test_writerPolling(self):
        """
        Adding a writer causes its C{doWrite} to be called every 1
        milliseconds.
        """
        reactor = Clock()
        poller = _ContinuousPolling(reactor)
        desc = Descriptor()
        poller.addWriter(desc)
        self.assertEqual(desc.events, [])
        reactor.advance(0.001)
        self.assertEqual(desc.events, ["write"])
        reactor.advance(0.001)
        self.assertEqual(desc.events, ["write", "write"])
        reactor.advance(0.001)
        self.assertEqual(desc.events, ["write", "write", "write"])

    def test_connectionLostOnRead(self):
        """
        If a C{doRead} returns a value indicating disconnection,
        C{connectionLost} is called on it.
        """
        reactor = Clock()
        poller = _ContinuousPolling(reactor)
        desc = Descriptor()
        # Returning a ConnectionDone instance signals disconnection.
        desc.doRead = lambda: ConnectionDone()
        poller.addReader(desc)
        self.assertEqual(desc.events, [])
        reactor.advance(0.001)
        self.assertEqual(desc.events, ["lost"])

    def test_connectionLostOnWrite(self):
        """
        If a C{doWrite} returns a value indicating disconnection,
        C{connectionLost} is called on it.
        """
        reactor = Clock()
        poller = _ContinuousPolling(reactor)
        desc = Descriptor()
        desc.doWrite = lambda: ConnectionDone()
        poller.addWriter(desc)
        self.assertEqual(desc.events, [])
        reactor.advance(0.001)
        self.assertEqual(desc.events, ["lost"])

    def test_removeAll(self):
        """
        L{_ContinuousPolling.removeAll} removes all descriptors and returns
        the readers and writers.
        """
        poller = _ContinuousPolling(Clock())
        reader = object()
        writer = object()
        both = object()
        poller.addReader(reader)
        poller.addReader(both)
        poller.addWriter(writer)
        poller.addWriter(both)
        removed = poller.removeAll()
        self.assertEqual(poller.getReaders(), [])
        self.assertEqual(poller.getWriters(), [])
        # "both" must appear once even though it was reader and writer.
        self.assertEqual(len(removed), 3)
        self.assertEqual(set(removed), {reader, writer, both})

    def test_getReaders(self):
        """
        L{_ContinuousPolling.getReaders} returns a list of the read
        descriptors.
        """
        poller = _ContinuousPolling(Clock())
        reader = object()
        poller.addReader(reader)
        self.assertIn(reader, poller.getReaders())

    def test_getWriters(self):
        """
        L{_ContinuousPolling.getWriters} returns a list of the write
        descriptors.
        """
        poller = _ContinuousPolling(Clock())
        writer = object()
        poller.addWriter(writer)
        self.assertIn(writer, poller.getWriters())

View File

@@ -0,0 +1,40 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.error}
"""
from twisted.internet import error
from twisted.trial.unittest import SynchronousTestCase
class ConnectionAbortedTests(SynchronousTestCase):
    """
    Tests for the L{twisted.internet.error.ConnectionAborted} exception.
    """

    def test_str(self) -> None:
        """
        The default message of L{ConnectionAborted} is a sentence which points
        to L{ITCPTransport.abortConnection()}
        """
        expected = (
            "Connection was aborted locally using ITCPTransport.abortConnection."
        )
        self.assertEqual(expected, str(error.ConnectionAborted()))

    def test_strArgs(self) -> None:
        """
        Any arguments passed to L{ConnectionAborted} are included in its
        message.
        """
        expected = (
            "Connection was aborted locally using ITCPTransport.abortConnection:"
            " foo bar."
        )
        self.assertEqual(expected, str(error.ConnectionAborted("foo", "bar")))

View File

@@ -0,0 +1,422 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorFDSet}.
"""
import os
import socket
import traceback
from typing import TYPE_CHECKING
from unittest import skipIf
from zope.interface import implementer
from twisted.internet.abstract import FileDescriptor
from twisted.internet.interfaces import IReactorFDSet, IReadDescriptor
from twisted.internet.tcp import EINPROGRESS, EWOULDBLOCK
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.python.runtime import platform
from twisted.trial.unittest import SkipTest, SynchronousTestCase
if TYPE_CHECKING:
    # For the type checker only: pretend the builder mixes in
    # SynchronousTestCase so its assertion methods type-check.
    PretendTestCase = SynchronousTestCase
else:
    # At runtime the builder must not actually inherit TestCase behavior.
    PretendTestCase = object
def socketpair():
    """
    Create and return a connected TCP socket pair C{(client, server)} on
    127.0.0.1, using a throwaway listening socket.
    """
    serverSocket = socket.socket()
    serverSocket.bind(("127.0.0.1", 0))
    serverSocket.listen(1)
    try:
        client = socket.socket()
        try:
            client.setblocking(False)
            try:
                client.connect(("127.0.0.1", serverSocket.getsockname()[1]))
            except OSError as e:
                # A non-blocking connect legitimately reports "in progress";
                # anything else is a real failure.
                if e.args[0] not in (EINPROGRESS, EWOULDBLOCK):
                    raise
            server, addr = serverSocket.accept()
        except BaseException:
            # Don't leak the client socket if accept()/setup fails.
            client.close()
            raise
    finally:
        # The listening socket is only needed to mint the connection.
        serverSocket.close()
    return client, server
class ReactorFDSetTestsBuilder(ReactorBuilder, PretendTestCase):
    """
    Builder defining tests relating to L{IReactorFDSet}.
    """

    requiredInterfaces = [IReactorFDSet]

    def _connectedPair(self):
        """
        Return the two sockets which make up a new TCP connection.
        """
        client, server = socketpair()
        self.addCleanup(client.close)
        self.addCleanup(server.close)
        return client, server

    def _simpleSetup(self):
        # Build a reactor plus a FileDescriptor wired to the client end of a
        # fresh TCP connection; the server end is returned so tests can
        # drive I/O from the far side.
        reactor = self.buildReactor()
        client, server = self._connectedPair()
        fd = FileDescriptor(reactor)
        fd.fileno = client.fileno
        return reactor, fd, server

    def test_addReader(self):
        """
        C{reactor.addReader()} accepts an L{IReadDescriptor} provider and calls
        its C{doRead} method when there may be data available on its C{fileno}.
        """
        reactor, fd, server = self._simpleSetup()

        def removeAndStop():
            reactor.removeReader(fd)
            reactor.stop()

        fd.doRead = removeAndStop
        reactor.addReader(fd)
        server.sendall(b"x")

        # The reactor will only stop if it calls fd.doRead.
        self.runReactor(reactor)
        # Nothing to assert, just be glad we got this far.

    def test_removeReader(self):
        """
        L{reactor.removeReader()} accepts an L{IReadDescriptor} provider
        previously passed to C{reactor.addReader()} and causes it to no longer
        be monitored for input events.
        """
        reactor, fd, server = self._simpleSetup()

        def fail():
            self.fail("doRead should not be called")

        fd.doRead = fail
        reactor.addReader(fd)
        reactor.removeReader(fd)
        server.sendall(b"x")

        # Give the reactor two timed event passes to notice that there's I/O
        # (if it is incorrectly watching for I/O).
        reactor.callLater(0, reactor.callLater, 0, reactor.stop)
        self.runReactor(reactor)
        # Getting here means the right thing happened probably.

    def test_addWriter(self):
        """
        C{reactor.addWriter()} accepts an L{IWriteDescriptor} provider and
        calls its C{doWrite} method when it may be possible to write to its
        C{fileno}.
        """
        reactor, fd, server = self._simpleSetup()

        def removeAndStop():
            reactor.removeWriter(fd)
            reactor.stop()

        fd.doWrite = removeAndStop
        reactor.addWriter(fd)

        self.runReactor(reactor)
        # Getting here is great.

    def _getFDTest(self, kind):
        """
        Helper for getReaders and getWriters tests.
        """
        reactor = self.buildReactor()
        # Resolve getReaders/addReader/removeReader (or the Writer trio)
        # dynamically from the kind name.
        get = getattr(reactor, "get" + kind + "s")
        add = getattr(reactor, "add" + kind)
        remove = getattr(reactor, "remove" + kind)

        client, server = self._connectedPair()

        self.assertNotIn(client, get())
        self.assertNotIn(server, get())

        add(client)
        self.assertIn(client, get())
        self.assertNotIn(server, get())

        remove(client)
        self.assertNotIn(client, get())
        self.assertNotIn(server, get())

    def test_getReaders(self):
        """
        L{IReactorFDSet.getReaders} reflects the additions and removals made
        with L{IReactorFDSet.addReader} and L{IReactorFDSet.removeReader}.
        """
        self._getFDTest("Reader")

    def test_removeWriter(self):
        """
        L{reactor.removeWriter()} accepts an L{IWriteDescriptor} provider
        previously passed to C{reactor.addWriter()} and causes it to no longer
        be monitored for outputability.
        """
        reactor, fd, server = self._simpleSetup()

        def fail():
            self.fail("doWrite should not be called")

        fd.doWrite = fail
        reactor.addWriter(fd)
        reactor.removeWriter(fd)

        # Give the reactor two timed event passes to notice that there's I/O
        # (if it is incorrectly watching for I/O).
        reactor.callLater(0, reactor.callLater, 0, reactor.stop)
        self.runReactor(reactor)
        # Getting here means the right thing happened probably.

    def test_getWriters(self):
        """
        L{IReactorFDSet.getWriters} reflects the additions and removals made
        with L{IReactorFDSet.addWriter} and L{IReactorFDSet.removeWriter}.
        """
        self._getFDTest("Writer")

    def test_removeAll(self):
        """
        C{reactor.removeAll()} removes all registered L{IReadDescriptor}
        providers and all registered L{IWriteDescriptor} providers and returns
        them.
        """
        # NOTE(review): this reactor is immediately shadowed by the one
        # _simpleSetup builds on the next line -- looks redundant; confirm
        # and drop one of the two.
        reactor = self.buildReactor()
        reactor, fd, server = self._simpleSetup()

        fd.doRead = lambda: self.fail("doRead should not be called")
        fd.doWrite = lambda: self.fail("doWrite should not be called")

        server.sendall(b"x")

        reactor.addReader(fd)
        reactor.addWriter(fd)

        removed = reactor.removeAll()

        # Give the reactor two timed event passes to notice that there's I/O
        # (if it is incorrectly watching for I/O).
        reactor.callLater(0, reactor.callLater, 0, reactor.stop)
        self.runReactor(reactor)
        # Getting here means the right thing happened probably.

        self.assertEqual(removed, [fd])

    def test_removedFromReactor(self):
        """
        A descriptor's C{fileno} method should not be called after the
        descriptor has been removed from the reactor.
        """
        reactor = self.buildReactor()
        descriptor = RemovingDescriptor(reactor)
        reactor.callWhenRunning(descriptor.start)
        self.runReactor(reactor)
        # Any recorded call means fileno() ran after removal -- a bug.
        self.assertEqual(descriptor.calls, [])

    def test_negativeOneFileDescriptor(self):
        """
        If L{FileDescriptor.fileno} returns C{-1}, the descriptor is removed
        from the reactor.
        """
        reactor = self.buildReactor()

        client, server = self._connectedPair()

        class DisappearingDescriptor(FileDescriptor):
            _fileno = server.fileno()
            _received = b""

            def fileno(self):
                return self._fileno

            def doRead(self):
                # After the first read, start reporting -1 so the reactor
                # must drop this descriptor and call connectionLost.
                self._fileno = -1
                self._received += server.recv(1)
                client.send(b"y")

            def connectionLost(self, reason):
                reactor.stop()

        descriptor = DisappearingDescriptor(reactor)
        reactor.addReader(descriptor)
        client.send(b"x")
        self.runReactor(reactor)
        # Only the first byte was read; "y" arrived after removal.
        self.assertEqual(descriptor._received, b"x")

    @skipIf(platform.isWindows(), "Cannot duplicate socket filenos on Windows")
    def test_lostFileDescriptor(self):
        """
        The file descriptor underlying a FileDescriptor may be closed and
        replaced by another at some point. Bytes which arrive on the new
        descriptor must not be delivered to the FileDescriptor which was
        originally registered with the original descriptor of the same number.

        Practically speaking, this is difficult or impossible to detect. The
        implementation relies on C{fileno} raising an exception if the original
        descriptor has gone away. If C{fileno} continues to return the original
        file descriptor value, the reactor may deliver events from that
        descriptor. This is a best effort attempt to ease certain debugging
        situations. Applications should not rely on it intentionally.
        """
        reactor = self.buildReactor()

        name = reactor.__class__.__name__
        if name in (
            "EPollReactor",
            "KQueueReactor",
            "CFReactor",
            "AsyncioSelectorReactor",
        ):
            # Closing a file descriptor immediately removes it from the epoll
            # set without generating a notification. That means epollreactor
            # will not call any methods on Victim after the close, so there's
            # no chance to notice the socket is no longer valid.
            raise SkipTest(f"{name!r} cannot detect lost file descriptors")

        client, server = self._connectedPair()

        class Victim(FileDescriptor):
            """
            This L{FileDescriptor} will have its socket closed out from under it
            and another socket will take its place. It will raise a
            socket.error from C{fileno} after this happens (because socket
            objects remember whether they have been closed), so as long as the
            reactor calls the C{fileno} method the problem will be detected.
            """

            def fileno(self):
                return server.fileno()

            def doRead(self):
                raise Exception("Victim.doRead should never be called")

            def connectionLost(self, reason):
                """
                When the problem is detected, the reactor should disconnect this
                file descriptor. When that happens, stop the reactor so the
                test ends.
                """
                reactor.stop()

        reactor.addReader(Victim())

        # Arrange for the socket to be replaced at some unspecified time.
        # Significantly, this will not be while any I/O processing code is on
        # the stack. It is something that happens independently and cannot be
        # relied upon to happen at a convenient time, such as within a call to
        # doRead.
        def messItUp():
            newC, newS = self._connectedPair()
            fileno = server.fileno()
            server.close()
            # Reuse the old descriptor number for a brand-new connection.
            os.dup2(newS.fileno(), fileno)
            newC.send(b"x")

        reactor.callLater(0, messItUp)
        self.runReactor(reactor)

        # If the implementation feels like logging the exception raised by
        # MessedUp.fileno, that's fine.
        self.flushLoggedErrors(socket.error)

    def test_connectionLostOnShutdown(self):
        """
        Any file descriptors added to the reactor have their C{connectionLost}
        called when C{reactor.stop} is called.
        """
        reactor = self.buildReactor()

        class DoNothingDescriptor(FileDescriptor):
            def doRead(self):
                return None

            def doWrite(self):
                return None

        client, server = self._connectedPair()
        fd1 = DoNothingDescriptor(reactor)
        fd1.fileno = client.fileno
        fd2 = DoNothingDescriptor(reactor)
        fd2.fileno = server.fileno
        reactor.addReader(fd1)
        reactor.addWriter(fd2)

        reactor.callWhenRunning(reactor.stop)
        self.runReactor(reactor)
        self.assertTrue(fd1.disconnected)
        self.assertTrue(fd2.disconnected)
@implementer(IReadDescriptor)
class RemovingDescriptor:
    """
    A read descriptor which removes itself from the reactor as soon as it
    gets a chance to do a read and keeps track of when its own C{fileno}
    method is called.

    @ivar insideReactor: A flag which is true as long as the reactor has
        this descriptor as a reader.

    @ivar calls: A list of the bottom of the call stack for any call to
        C{fileno} when C{insideReactor} is false.
    """

    def __init__(self, reactor):
        self.reactor = reactor
        self.insideReactor = False
        self.calls = []
        self.read, self.write = socketpair()

    def start(self):
        # Register as a reader and immediately provoke a read event.
        self.insideReactor = True
        self.reactor.addReader(self)
        self.write.send(b"a")

    def logPrefix(self):
        return "foo"

    def doRead(self):
        # Deregister first, then stop; any fileno() call after this point is
        # a reactor bug and gets recorded in self.calls.
        self.reactor.removeReader(self)
        self.insideReactor = False
        self.reactor.stop()
        self.read.close()
        self.write.close()

    def fileno(self):
        if not self.insideReactor:
            # Capture the caller's stack (minus this frame) as evidence of
            # the post-removal call.
            self.calls.append(traceback.extract_stack(limit=5)[:-1])
        return self.read.fileno()

    def connectionLost(self, reason):
        # Ideally we'd close the descriptors here... but actually
        # connectionLost is never called because we remove ourselves from the
        # reactor before it stops.
        pass
# Generate per-reactor TestCase subclasses from the builder and expose them
# at module level so the test runner discovers them.
globals().update(ReactorFDSetTestsBuilder.makeTestCaseClasses())

View File

@@ -0,0 +1,94 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Whitebox tests for L{twisted.internet.abstract.FileDescriptor}.
"""
from zope.interface.verify import verifyClass
from twisted.internet.abstract import FileDescriptor
from twisted.internet.interfaces import IPushProducer
from twisted.trial.unittest import SynchronousTestCase
class MemoryFile(FileDescriptor):
    """
    A L{FileDescriptor} customization which writes to a Python list in memory
    with certain limitations.

    @ivar _written: A C{list} of C{bytes} which have been accepted as written.

    @ivar _freeSpace: A C{int} giving the number of bytes which will be
        accepted by future writes.
    """

    connected = True

    def __init__(self):
        FileDescriptor.__init__(self, reactor=object())
        self._written = []
        self._freeSpace = 0

    def startWriting(self):
        pass

    def stopWriting(self):
        pass

    def writeSomeData(self, data):
        """
        Copy at most C{self._freeSpace} bytes from C{data} into
        C{self._written}.

        @return: A C{int} indicating how many bytes were copied from C{data}.
        """
        # Accept no more than the remaining free space.
        amount = min(self._freeSpace, len(data))
        if not amount:
            return amount
        self._freeSpace -= amount
        self._written.append(data[:amount])
        return amount
class FileDescriptorTests(SynchronousTestCase):
    """
    Tests for L{FileDescriptor}.
    """

    def test_writeWithUnicodeRaisesException(self):
        """
        L{FileDescriptor.write} doesn't accept unicode data.
        """
        descriptor = FileDescriptor(reactor=object())
        self.assertRaises(TypeError, descriptor.write, "foo")

    def test_writeSequenceWithUnicodeRaisesException(self):
        """
        L{FileDescriptor.writeSequence} doesn't accept unicode data.
        """
        descriptor = FileDescriptor(reactor=object())
        # A single str mixed into an otherwise bytes sequence must be
        # rejected.
        mixedSequence = [b"foo", "bar", b"baz"]
        self.assertRaises(TypeError, descriptor.writeSequence, mixedSequence)

    def test_implementInterfaceIPushProducer(self):
        """
        L{FileDescriptor} should implement L{IPushProducer}.
        """
        providesInterface = verifyClass(IPushProducer, FileDescriptor)
        self.assertTrue(providesInterface)
class WriteDescriptorTests(SynchronousTestCase):
    """
    Tests for L{FileDescriptor}'s implementation of L{IWriteDescriptor}.
    """

    def test_kernelBufferFull(self):
        """
        When L{FileDescriptor.writeSomeData} returns C{0} to indicate no more
        data can be written immediately, L{FileDescriptor.doWrite} returns
        L{None}.
        """
        # MemoryFile starts with zero free space, so writeSomeData accepts
        # nothing and doWrite has no progress to report.
        memoryFile = MemoryFile()
        memoryFile.write(b"hello, world")
        self.assertIsNone(memoryFile.doWrite())

View File

@@ -0,0 +1,216 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
GObject Introspection reactor tests; i.e. `gireactor` module for gio/glib/gtk
integration.
"""
from __future__ import annotations
from unittest import skipIf
try:
from gi.repository import Gio
except ImportError:
giImported = False
gtkVersion = None
else:
giImported = True
# If we can import Gio, we ought to be able to import our reactor.
from os import environ
from gi import get_required_version, require_version
from twisted.internet import gireactor
def requireEach(someVersion: str) -> str:
    """
    Attempt to pin the given Gtk version via C{gi.require_version}.

    @return: C{""} on success, or the failure text so the caller can
        aggregate error messages.
    """
    try:
        require_version("Gtk", someVersion)
    except ValueError as ve:
        return str(ve)
    else:
        return ""

# Try each candidate Gtk version (overridable via the environment); collect
# any failure messages into a single diagnostic string.
errorMessage = ", ".join(
    requireEach(version)
    for version in environ.get("TWISTED_TEST_GTK_VERSION", "4.0,3.0").split(",")
)
actualVersion = get_required_version("Gtk")
# gtkVersion is the successfully pinned version, or the error text otherwise.
gtkVersion = actualVersion if actualVersion is not None else errorMessage
from twisted.internet.error import ReactorAlreadyRunning
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.trial.unittest import SkipTest, TestCase
# Skip all tests if gi is unavailable:
if not giImported:
    skip = "GObject Introspection `gi` module not importable"

# Gtk-specific tests only run when a known Gtk version (3.0 or 4.0) was
# successfully pinned above.
noGtkSkip = (gtkVersion is None) or (gtkVersion not in ("3.0", "4.0"))
noGtkMessage = f"Unknown GTK version: {repr(gtkVersion)}"
if not noGtkSkip:
    from gi.repository import Gtk
class GApplicationRegistrationTests(ReactorBuilder, TestCase):
    """
    GtkApplication and GApplication are supported by
    L{twisted.internet.gtk3reactor} and L{twisted.internet.gireactor}.

    We inherit from L{ReactorBuilder} in order to use some of its
    reactor-running infrastructure, but don't need its test-creation
    functionality.
    """

    def runReactor(  # type: ignore[override]
        self,
        app: Gio.Application,
        reactor: gireactor.GIReactor,
    ) -> None:
        """
        Register the app, run the reactor, make sure app was activated, and
        that reactor was running, and that reactor can be stopped.
        """
        if not hasattr(app, "quit"):
            raise SkipTest("Version of PyGObject is too old.")

        # Records the order of observed events; checked at the end.
        result = []

        def stop() -> None:
            result.append("stopped")
            reactor.stop()

        def activate(widget: object) -> None:
            result.append("activated")
            # Schedule the stop so activation completes before shutdown.
            reactor.callLater(0, stop)

        app.connect("activate", activate)

        # We want reactor.stop() to *always* stop the event loop, even if
        # someone has called hold() on the application and never done the
        # corresponding release() -- for more details see
        # http://developer.gnome.org/gio/unstable/GApplication.html.
        app.hold()

        reactor.registerGApplication(app)
        ReactorBuilder.runReactor(self, reactor)
        self.assertEqual(result, ["activated", "stopped"])

    def test_gApplicationActivate(self) -> None:
        """
        L{Gio.Application} instances can be registered with a gireactor.
        """
        self.reactorFactory = lambda: gireactor.GIReactor(useGtk=False)
        reactor = self.buildReactor()
        app = Gio.Application(
            application_id="com.twistedmatrix.trial.gireactor",
            flags=Gio.ApplicationFlags.FLAGS_NONE,
        )
        self.runReactor(app, reactor)

    @skipIf(noGtkSkip, noGtkMessage)
    def test_gtkAliases(self) -> None:
        """
        L{twisted.internet.gtk3reactor} is now just a set of compatibility
        aliases for L{twisted.internet.GIReactor}.
        """
        from twisted.internet.gtk3reactor import (
            Gtk3Reactor,
            PortableGtk3Reactor,
            install,
        )

        self.assertIs(Gtk3Reactor, gireactor.GIReactor)
        self.assertIs(PortableGtk3Reactor, gireactor.PortableGIReactor)
        self.assertIs(install, gireactor.install)
        # Importing the alias module must emit exactly one deprecation
        # warning.
        warnings = self.flushWarnings()
        self.assertEqual(len(warnings), 1)
        self.assertIn(
            "twisted.internet.gtk3reactor was deprecated", warnings[0]["message"]
        )

    @skipIf(noGtkSkip, noGtkMessage)
    def test_gtkApplicationActivate(self) -> None:
        """
        L{Gtk.Application} instances can be registered with a gtk3reactor.
        """
        self.reactorFactory = gireactor.GIReactor
        reactor = self.buildReactor()
        app = Gtk.Application(
            application_id="com.twistedmatrix.trial.gtk3reactor",
            flags=Gio.ApplicationFlags.FLAGS_NONE,
        )
        self.runReactor(app, reactor)

    def test_portable(self) -> None:
        """
        L{gireactor.PortableGIReactor} doesn't support application
        registration at this time.
        """
        self.reactorFactory = gireactor.PortableGIReactor
        reactor = self.buildReactor()
        app = Gio.Application(
            application_id="com.twistedmatrix.trial.gireactor",
            flags=Gio.ApplicationFlags.FLAGS_NONE,
        )
        self.assertRaises(NotImplementedError, reactor.registerGApplication, app)

    def test_noQuit(self) -> None:
        """
        Older versions of PyGObject lack C{Application.quit}, and so won't
        allow registration.
        """
        self.reactorFactory = lambda: gireactor.GIReactor(useGtk=False)
        reactor = self.buildReactor()
        # An app with no "quit" method:
        app = object()
        exc = self.assertRaises(RuntimeError, reactor.registerGApplication, app)
        self.assertTrue(exc.args[0].startswith("Application registration is not"))

    def test_cantRegisterAfterRun(self) -> None:
        """
        It is not possible to register a C{Application} after the reactor has
        already started.
        """
        self.reactorFactory = lambda: gireactor.GIReactor(useGtk=False)
        reactor = self.buildReactor()
        app = Gio.Application(
            application_id="com.twistedmatrix.trial.gireactor",
            flags=Gio.ApplicationFlags.FLAGS_NONE,
        )

        def tryRegister() -> None:
            exc = self.assertRaises(
                ReactorAlreadyRunning, reactor.registerGApplication, app
            )
            self.assertEqual(
                exc.args[0], "Can't register application after reactor was started."
            )
            reactor.stop()

        # Attempt the registration from inside the running reactor.
        reactor.callLater(0, tryRegister)
        ReactorBuilder.runReactor(self, reactor)

    def test_cantRegisterTwice(self) -> None:
        """
        It is not possible to register more than one C{Application}.
        """
        self.reactorFactory = lambda: gireactor.GIReactor(useGtk=False)
        reactor = self.buildReactor()
        app = Gio.Application(
            application_id="com.twistedmatrix.trial.gireactor",
            flags=Gio.ApplicationFlags.FLAGS_NONE,
        )
        reactor.registerGApplication(app)
        app2 = Gio.Application(
            application_id="com.twistedmatrix.trial.gireactor2",
            flags=Gio.ApplicationFlags.FLAGS_NONE,
        )
        exc = self.assertRaises(RuntimeError, reactor.registerGApplication, app2)
        self.assertEqual(
            exc.args[0], "Can't register more than one application instance."
        )

View File

@@ -0,0 +1,100 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.internet.glibbase.
"""
import sys
from twisted.internet._glibbase import ensureNotImported
from twisted.trial.unittest import TestCase
class EnsureNotImportedTests(TestCase):
    """
    L{ensureNotImported} protects against unwanted past and future imports.
    """

    def test_ensureWhenNotImported(self):
        """
        If the specified modules have never been imported, and import
        prevention is requested, L{ensureNotImported} makes sure they will not
        be imported in the future.
        """
        # Patching sys.modules isolates this test from the real interpreter
        # state.
        modules = {}
        self.patch(sys, "modules", modules)
        ensureNotImported(["m1", "m2"], "A message.", preventImports=["m1", "m2", "m3"])
        # None entries in sys.modules block future imports of those names.
        self.assertEqual(modules, {"m1": None, "m2": None, "m3": None})

    def test_ensureWhenNotImportedDontPrevent(self):
        """
        If the specified modules have never been imported, and import
        prevention is not requested, L{ensureNotImported} has no effect.
        """
        modules = {}
        self.patch(sys, "modules", modules)
        ensureNotImported(["m1", "m2"], "A message.")
        self.assertEqual(modules, {})

    def test_ensureWhenFailedToImport(self):
        """
        If the specified modules have been set to L{None} in C{sys.modules},
        L{ensureNotImported} does not complain.
        """
        modules = {"m2": None}
        self.patch(sys, "modules", modules)
        ensureNotImported(["m1", "m2"], "A message.", preventImports=["m1", "m2"])
        self.assertEqual(modules, {"m1": None, "m2": None})

    def test_ensureFailsWhenImported(self):
        """
        If one of the specified modules has been previously imported,
        L{ensureNotImported} raises an exception.
        """
        module = object()
        modules = {"m2": module}
        self.patch(sys, "modules", modules)
        e = self.assertRaises(
            ImportError,
            ensureNotImported,
            ["m1", "m2"],
            "A message.",
            preventImports=["m1", "m2"],
        )
        # The failure must leave sys.modules untouched and carry the caller's
        # message.
        self.assertEqual(modules, {"m2": module})
        self.assertEqual(e.args, ("A message.",))
try:
    from twisted.internet import gireactor as _gireactor
except ImportError:
    gireactor = None
else:
    gireactor = _gireactor

# Skip reason for tests that need the gi reactor; None means "don't skip".
missingGlibReactor = None
if gireactor is None:
    missingGlibReactor = "gi reactor not available"
class GlibReactorBaseTests(TestCase):
    """
    Tests for the private C{twisted.internet._glibbase.GlibReactorBase}
    done via the public C{twisted.internet.gireactor.PortableGIReactor}
    """

    skip = missingGlibReactor

    def test_simulate(self):
        """
        C{simulate} can be called without raising any errors when there are
        no delayed calls for the reactor and hence there is no defined sleep
        period.
        """
        portableReactor = gireactor.PortableGIReactor(useGtk=False)
        # Double check that reactor has no sleep period.
        self.assertIsNone(portableReactor.timeout())
        portableReactor.simulate()

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,569 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the inotify wrapper in L{twisted.internet.inotify}.
"""
import sys
from twisted.internet import defer, reactor
from twisted.python import filepath, runtime
from twisted.python.reflect import requireModule
from twisted.trial import unittest
if requireModule("twisted.python._inotify") is not None:
from twisted.internet import inotify
else:
inotify = None # type: ignore[assignment]
class INotifyTests(unittest.TestCase):
    """
    Define all the tests for the basic functionality exposed by
    L{inotify.INotify}.
    """

    if not runtime.platform.supportsINotify():
        skip = "This platform doesn't support INotify."

    def setUp(self):
        # A fresh watched directory and a running INotify per test; the
        # connection is torn down automatically after each test.
        self.dirname = filepath.FilePath(self.mktemp())
        self.dirname.createDirectory()
        self.inotify = inotify.INotify()
        self.inotify.startReading()
        self.addCleanup(self.inotify.loseConnection)

    def test_initializationErrors(self):
        """
        L{inotify.INotify} emits a C{RuntimeError} when initialized
        in an environment that doesn't support inotify as we expect it.

        We just try to raise an exception for every possible case in
        the for loop in L{inotify.INotify._inotify__init__}.
        """

        class FakeINotify:
            def init(self):
                raise inotify.INotifyError()

        self.patch(inotify.INotify, "_inotify", FakeINotify())
        self.assertRaises(inotify.INotifyError, inotify.INotify)

    def _notificationTest(self, mask, operation, expectedPath=None):
        """
        Test notification from some filesystem operation.

        @param mask: The event mask to use when setting up the watch.

        @param operation: A function which will be called with the
            name of a file in the watched directory and which should
            trigger the event.

        @param expectedPath: Optionally, the name of the path which is
            expected to come back in the notification event; this will
            also be passed to C{operation} (primarily useful when the
            operation is being done to the directory itself, not a
            file in it).

        @return: A L{Deferred} which fires successfully when the
            expected event has been received or fails otherwise.
        """
        if expectedPath is None:
            expectedPath = self.dirname.child("foo.bar")
        notified = defer.Deferred()

        def cbNotified(result):
            (watch, filename, events) = result
            self.assertEqual(filename.asBytesMode(), expectedPath.asBytesMode())
            self.assertTrue(events & mask)

        notified.addCallback(cbNotified)
        self.inotify.watch(
            self.dirname, mask=mask, callbacks=[lambda *args: notified.callback(args)]
        )
        operation(expectedPath)
        return notified

    def test_access(self):
        """
        Reading from a file in a monitored directory sends an
        C{inotify.IN_ACCESS} event to the callback.
        """

        def operation(path):
            path.setContent(b"foo")
            path.getContent()

        return self._notificationTest(inotify.IN_ACCESS, operation)

    def test_modify(self):
        """
        Writing to a file in a monitored directory sends an
        C{inotify.IN_MODIFY} event to the callback.
        """

        def operation(path):
            with path.open("w") as fObj:
                fObj.write(b"foo")

        return self._notificationTest(inotify.IN_MODIFY, operation)

    def test_attrib(self):
        """
        Changing the metadata of a file in a monitored directory
        sends an C{inotify.IN_ATTRIB} event to the callback.
        """

        def operation(path):
            # The second touch only updates metadata (timestamps), which is
            # what generates IN_ATTRIB.
            path.touch()
            path.touch()

        return self._notificationTest(inotify.IN_ATTRIB, operation)

    def test_closeWrite(self):
        """
        Closing a file which was open for writing in a monitored
        directory sends an C{inotify.IN_CLOSE_WRITE} event to the
        callback.
        """

        def operation(path):
            path.open("w").close()

        return self._notificationTest(inotify.IN_CLOSE_WRITE, operation)

    def test_closeNoWrite(self):
        """
        Closing a file which was open for reading but not writing in a
        monitored directory sends an C{inotify.IN_CLOSE_NOWRITE} event
        to the callback.
        """

        def operation(path):
            path.touch()
            path.open("r").close()

        return self._notificationTest(inotify.IN_CLOSE_NOWRITE, operation)

    def test_open(self):
        """
        Opening a file in a monitored directory sends an
        C{inotify.IN_OPEN} event to the callback.
        """

        def operation(path):
            path.open("w").close()

        return self._notificationTest(inotify.IN_OPEN, operation)

    def test_movedFrom(self):
        """
        Moving a file out of a monitored directory sends an
        C{inotify.IN_MOVED_FROM} event to the callback.
        """

        def operation(path):
            path.open("w").close()
            path.moveTo(filepath.FilePath(self.mktemp()))

        return self._notificationTest(inotify.IN_MOVED_FROM, operation)

    def test_movedTo(self):
        """
        Moving a file into a monitored directory sends an
        C{inotify.IN_MOVED_TO} event to the callback.
        """

        def operation(path):
            p = filepath.FilePath(self.mktemp())
            p.touch()
            p.moveTo(path)

        return self._notificationTest(inotify.IN_MOVED_TO, operation)

    def test_create(self):
        """
        Creating a file in a monitored directory sends an
        C{inotify.IN_CREATE} event to the callback.
        """

        def operation(path):
            path.open("w").close()

        return self._notificationTest(inotify.IN_CREATE, operation)

    def test_delete(self):
        """
        Deleting a file in a monitored directory sends an
        C{inotify.IN_DELETE} event to the callback.
        """

        def operation(path):
            path.touch()
            path.remove()

        return self._notificationTest(inotify.IN_DELETE, operation)

    def test_deleteSelf(self):
        """
        Deleting the monitored directory itself sends an
        C{inotify.IN_DELETE_SELF} event to the callback.
        """

        def operation(path):
            path.remove()

        return self._notificationTest(
            inotify.IN_DELETE_SELF, operation, expectedPath=self.dirname
        )

    def test_deleteSelfForced(self):
        """
        Deleting the monitored directory itself sends an
        C{inotify.IN_DELETE_SELF} event to the callback
        even if the mask isn't specified by the call to watch().
        """

        def cbNotified(result):
            (watch, filename, events) = result
            self.assertEqual(filename.asBytesMode(), self.dirname.asBytesMode())
            self.assertTrue(events & inotify.IN_DELETE_SELF)

        # Watch with an empty mask; IN_DELETE_SELF must be reported anyway.
        self.inotify.watch(
            self.dirname, mask=0x0, callbacks=[lambda *args: d.callback(args)]
        )
        d = defer.Deferred()
        d.addCallback(cbNotified)
        self.dirname.remove()
        return d

    def test_moveSelf(self):
        """
        Renaming the monitored directory itself sends an
        C{inotify.IN_MOVE_SELF} event to the callback.
        """

        def operation(path):
            path.moveTo(filepath.FilePath(self.mktemp()))

        return self._notificationTest(
            inotify.IN_MOVE_SELF, operation, expectedPath=self.dirname
        )

    def test_simpleSubdirectoryAutoAdd(self):
        """
        L{inotify.INotify} when initialized with autoAdd==True
        also adds the created subdirectories to the watchlist.
        """

        def _callback(wp, filename, mask):
            # We are notified before we actually process new
            # directories, so we need to defer this check.
            def _():
                try:
                    self.assertTrue(self.inotify._isWatched(subdir))
                    d.callback(None)
                except Exception:
                    d.errback()

            reactor.callLater(0, _)

        checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
        self.inotify.watch(
            self.dirname, mask=checkMask, autoAdd=True, callbacks=[_callback]
        )
        subdir = self.dirname.child("test")
        d = defer.Deferred()
        subdir.createDirectory()
        return d

    def test_simpleDeleteDirectory(self):
        """
        L{inotify.INotify} removes a directory from the watchlist when
        it's removed from the filesystem.
        """
        calls = []

        def _callback(wp, filename, mask):
            # We are notified before we actually process new
            # directories, so we need to defer this check.
            def _():
                try:
                    self.assertTrue(self.inotify._isWatched(subdir))
                    subdir.remove()
                except Exception:
                    d.errback()

            def _eb():
                # second call, we have just removed the subdir
                try:
                    self.assertFalse(self.inotify._isWatched(subdir))
                    d.callback(None)
                except Exception:
                    d.errback()

            if not calls:
                # first call, it's the create subdir
                calls.append(filename)
                reactor.callLater(0, _)
            else:
                reactor.callLater(0, _eb)

        checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
        self.inotify.watch(
            self.dirname, mask=checkMask, autoAdd=True, callbacks=[_callback]
        )
        subdir = self.dirname.child("test")
        d = defer.Deferred()
        subdir.createDirectory()
        return d

    def test_deleteSelfLoseConnection(self):
        """
        L{inotify.INotify} closes the file descriptor after removing a
        directory from the filesystem (and therefore from the watchlist).
        """

        def cbNotified(result):
            def _():
                try:
                    self.assertFalse(self.inotify._isWatched(self.dirname))
                    self.assertFalse(self.inotify.connected)
                    d.callback(None)
                except Exception:
                    d.errback()

            (ignored, filename, events) = result
            self.assertEqual(filename.asBytesMode(), self.dirname.asBytesMode())
            self.assertTrue(events & inotify.IN_DELETE_SELF)
            reactor.callLater(0, _)

        self.assertTrue(
            self.inotify.watch(
                self.dirname,
                mask=inotify.IN_DELETE_SELF,
                callbacks=[lambda *args: notified.callback(args)],
            )
        )

        notified = defer.Deferred()
        notified.addCallback(cbNotified)

        self.dirname.remove()
        d = defer.Deferred()
        return d

    def test_ignoreDirectory(self):
        """
        L{inotify.INotify.ignore} removes a directory from the watchlist
        """
        self.inotify.watch(self.dirname, autoAdd=True)
        self.assertTrue(self.inotify._isWatched(self.dirname))
        self.inotify.ignore(self.dirname)
        self.assertFalse(self.inotify._isWatched(self.dirname))

    def test_humanReadableMask(self):
        """
        L{inotify.humanReadableMask} translates all the possible event
        masks to a human readable string.
        """
        for mask, value in inotify._FLAG_TO_HUMAN:
            self.assertEqual(inotify.humanReadableMask(mask)[0], value)

        # A combined mask translates to the corresponding set of names.
        checkMask = inotify.IN_CLOSE_WRITE | inotify.IN_ACCESS | inotify.IN_OPEN
        self.assertEqual(
            set(inotify.humanReadableMask(checkMask)),
            {"close_write", "access", "open"},
        )

    def test_recursiveWatch(self):
        """
        L{inotify.INotify.watch} with recursive==True will add all the
        subdirectories under the given path to the watchlist.
        """
        subdir = self.dirname.child("test")
        subdir2 = subdir.child("test2")
        subdir3 = subdir2.child("test3")
        subdir3.makedirs()
        dirs = [subdir, subdir2, subdir3]
        self.inotify.watch(self.dirname, recursive=True)
        # let's even call this twice so that we test that nothing breaks
        self.inotify.watch(self.dirname, recursive=True)
        for d in dirs:
            self.assertTrue(self.inotify._isWatched(d))

    def test_connectionLostError(self):
        """
        L{inotify.INotify.connectionLost} if there's a problem while closing
        the fd shouldn't raise the exception but should log the error
        """
        import os

        in_ = inotify.INotify()
        # Close the fd out from under the INotify so its own close fails.
        os.close(in_._fd)
        in_.loseConnection()
        self.flushLoggedErrors()

    def test_noAutoAddSubdirectory(self):
        """
        L{inotify.INotify.watch} with autoAdd==False will stop inotify
        from watching subdirectories created under the watched one.
        """

        def _callback(wp, fp, mask):
            # We are notified before we actually process new
            # directories, so we need to defer this check.
            def _():
                try:
                    self.assertFalse(self.inotify._isWatched(subdir))
                    d.callback(None)
                except Exception:
                    d.errback()

            reactor.callLater(0, _)

        checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
        self.inotify.watch(
            self.dirname, mask=checkMask, autoAdd=False, callbacks=[_callback]
        )
        subdir = self.dirname.child("test")
        d = defer.Deferred()
        subdir.createDirectory()
        return d

    def test_seriesOfWatchAndIgnore(self):
        """
        L{inotify.INotify} will watch a filepath for events even if the same
        path is repeatedly added/removed/re-added to the watchpoints.
        """
        expectedPath = self.dirname.child("foo.bar2")
        expectedPath.touch()

        notified = defer.Deferred()

        def cbNotified(result):
            (ignored, filename, events) = result
            self.assertEqual(filename.asBytesMode(), expectedPath.asBytesMode())
            self.assertTrue(events & inotify.IN_DELETE_SELF)

        def callIt(*args):
            notified.callback(args)

        # Watch, ignore, watch again to get into the state being tested.
        self.assertTrue(self.inotify.watch(expectedPath, callbacks=[callIt]))
        self.inotify.ignore(expectedPath)
        self.assertTrue(
            self.inotify.watch(
                expectedPath, mask=inotify.IN_DELETE_SELF, callbacks=[callIt]
            )
        )

        notified.addCallback(cbNotified)

        # Apparently in kernel version < 2.6.25, inotify has a bug in the way
        # similar events are coalesced. So, be sure to generate a different
        # event here than the touch() at the top of this method might have
        # generated.
        expectedPath.remove()

        return notified

    def test_ignoreFilePath(self):
        """
        L{inotify.INotify} will ignore a filepath after it has been removed from
        the watch list.
        """
        expectedPath = self.dirname.child("foo.bar2")
        expectedPath.touch()
        expectedPath2 = self.dirname.child("foo.bar3")
        expectedPath2.touch()

        notified = defer.Deferred()

        def cbNotified(result):
            (ignored, filename, events) = result
            # Only the still-watched path may produce the notification.
            self.assertEqual(filename.asBytesMode(), expectedPath2.asBytesMode())
            self.assertTrue(events & inotify.IN_DELETE_SELF)

        def callIt(*args):
            notified.callback(args)

        self.assertTrue(
            self.inotify.watch(expectedPath, inotify.IN_DELETE_SELF, callbacks=[callIt])
        )
        notified.addCallback(cbNotified)

        self.assertTrue(
            self.inotify.watch(
                expectedPath2, inotify.IN_DELETE_SELF, callbacks=[callIt]
            )
        )

        self.inotify.ignore(expectedPath)

        expectedPath.remove()
        expectedPath2.remove()

        return notified

    def test_ignoreNonWatchedFile(self):
        """
        L{inotify.INotify} will raise KeyError if a non-watched filepath is
        ignored.
        """
        expectedPath = self.dirname.child("foo.ignored")
        expectedPath.touch()

        self.assertRaises(KeyError, self.inotify.ignore, expectedPath)

    def test_complexSubdirectoryAutoAdd(self):
        """
        L{inotify.INotify} with autoAdd==True for a watched path
        generates events for every file or directory already present
        in a newly created subdirectory under the watched one.

        This tests that we solve a race condition in inotify even though
        we may generate duplicate events.
        """
        calls = set()

        def _callback(wp, filename, mask):
            calls.add(filename)
            # 6 == 3 directories + 3 files created below.
            if len(calls) == 6:
                try:
                    self.assertTrue(self.inotify._isWatched(subdir))
                    self.assertTrue(self.inotify._isWatched(subdir2))
                    self.assertTrue(self.inotify._isWatched(subdir3))
                    created = someFiles + [subdir, subdir2, subdir3]
                    created = {f.asBytesMode() for f in created}
                    self.assertEqual(len(calls), len(created))
                    self.assertEqual(calls, created)
                except Exception:
                    d.errback()
                else:
                    d.callback(None)

        checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
        self.inotify.watch(
            self.dirname, mask=checkMask, autoAdd=True, callbacks=[_callback]
        )
        subdir = self.dirname.child("test")
        subdir2 = subdir.child("test2")
        subdir3 = subdir2.child("test3")
        d = defer.Deferred()
        subdir3.makedirs()

        someFiles = [
            subdir.child("file1.dat"),
            subdir2.child("file2.dat"),
            subdir3.child("file3.dat"),
        ]
        # Add some files in pretty much all the directories so that we
        # see that we process all of them.
        for i, filename in enumerate(someFiles):
            filename.setContent(filename.path.encode(sys.getfilesystemencoding()))
        return d

View File

@@ -0,0 +1,212 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.iocpreactor}.
"""
import errno
import sys
import time
from array import array
from socket import AF_INET, AF_INET6, SOCK_STREAM, SOL_SOCKET, socket
from struct import pack
from unittest import skipIf
from zope.interface.verify import verifyClass
from twisted.internet.interfaces import IPushProducer
from twisted.python.log import msg
from twisted.trial.unittest import TestCase
try:
    from twisted.internet.iocpreactor import iocpsupport as _iocp, tcp, udp
    from twisted.internet.iocpreactor.abstract import FileHandle
    from twisted.internet.iocpreactor.const import SO_UPDATE_ACCEPT_CONTEXT
    from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
    from twisted.internet.iocpreactor.reactor import (
        EVENTS_PER_LOOP,
        KEY_NORMAL,
        IOCPReactor,
    )
except ImportError:
    if sys.platform == "win32":
        # On Windows the IOCP reactor must be importable; a failure there is
        # a real error, not a reason to skip.
        raise
    skip = "This test only applies to IOCPReactor"

# Probe IPv6 support once at import time so individual tests can skip.
try:
    socket(AF_INET6, SOCK_STREAM).close()
except OSError as e:
    ipv6Skip = True
    ipv6SkipReason = str(e)
else:
    ipv6Skip = False
    ipv6SkipReason = ""
class SupportTests(TestCase):
    """
    Tests for L{twisted.internet.iocpreactor.iocpsupport}, low-level reactor
    implementation helpers.
    """

    def _acceptAddressTest(self, family, localhost):
        """
        Create a C{SOCK_STREAM} connection to localhost using a socket with an
        address family of C{family} and assert that the result of
        L{iocpsupport.get_accept_addrs} is consistent with the result of
        C{socket.getsockname} and C{socket.getpeername}.

        A port starts listening (is bound) at the low-level socket without
        calling accept() yet.
        A client is then connected.
        After the client is connected IOCP accept() is called, which is the
        target of these tests.

        Most of the time, the socket is ready instantly, but sometimes
        the socket is not ready right away after calling IOCP accept().
        It should not take more than 5 seconds for a socket to be ready, as
        the client connection is already made over the loopback interface.

        These are flaky tests.
        Tweak the failure rate by changing the number of retries and the
        wait/sleep between retries.
        If you will need to update the retries to wait more than 5 seconds
        for the port to be available, then there might be a bug in the code
        and not the test (or a very, very busy VM running the tests).
        """
        msg(f"family = {family!r}")
        port = socket(family, SOCK_STREAM)
        self.addCleanup(port.close)
        port.bind(("", 0))
        port.listen(1)
        client = socket(family, SOCK_STREAM)
        self.addCleanup(client.close)
        client.setblocking(False)
        try:
            client.connect((localhost, port.getsockname()[1]))
        except OSError as e:
            # Non-blocking connect is expected to be in progress here.
            self.assertIn(e.errno, (errno.EINPROGRESS, errno.EWOULDBLOCK))
        server = socket(family, SOCK_STREAM)
        self.addCleanup(server.close)
        buff = array("B", b"\0" * 256)
        self.assertEqual(0, _iocp.accept(port.fileno(), server.fileno(), buff, None))

        for attemptsRemaining in reversed(range(5)):
            # Calling setsockopt after _iocp.accept might fail for both IPv4
            # and IPV6 with "[Errno 10057] A request to send or receive ..."
            # This is when ERROR_IO_PENDING is returned and means that the
            # socket is not yet ready and accept will be handled via the
            # callback event.
            # For this test, with the purpose of keeping the test simple,
            # we don't implement the event callback.
            # The event callback functionality is tested via the high level
            # tests for general reactor API.
            # We retry multiple times to cover.
            try:
                server.setsockopt(
                    SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, pack("P", port.fileno())
                )
                break
            except OSError as socketError:
                # getattr is used below to make mypy happy.
                if socketError.errno != getattr(errno, "WSAENOTCONN"):
                    # This is not the expected error so re-raise the error
                    # without retrying.
                    raise
                # The socket is not yet ready to accept connections,
                # setsockopt fails.
                if attemptsRemaining == 0:
                    # We ran out of retries.
                    raise
                # Without a sleep here even retrying 20 times will fail.
                # This should allow other threads to execute and hopefully
                # with the next try setsockopt will succeed.
                time.sleep(0.2)

        self.assertEqual(
            (family, client.getpeername()[:2], client.getsockname()[:2]),
            _iocp.get_accept_addrs(server.fileno(), buff),
        )

    def test_ipv4AcceptAddress(self):
        """
        L{iocpsupport.get_accept_addrs} returns a three-tuple of address
        information about the socket associated with the file descriptor passed
        to it. For a connection using IPv4:

          - the first element is C{AF_INET}
          - the second element is a two-tuple of a dotted decimal notation IPv4
            address and a port number giving the peer address of the connection
          - the third element is the same type giving the host address of the
            connection
        """
        self._acceptAddressTest(AF_INET, "127.0.0.1")

    @skipIf(ipv6Skip, ipv6SkipReason)
    def test_ipv6AcceptAddress(self):
        """
        Like L{test_ipv4AcceptAddress}, but for IPv6 connections.

        In this case:

          - the first element is C{AF_INET6}
          - the second element is a two-tuple of a hexadecimal IPv6 address
            literal and a port number giving the peer address of the connection
          - the third element is the same type giving the host address of the
            connection
        """
        self._acceptAddressTest(AF_INET6, "::1")
class IOCPReactorTests(TestCase):
    """
    Whitebox tests for L{IOCPReactor} behavior and interface conformance.
    """

    def test_noPendingTimerEvents(self):
        """
        Test reactor behavior (doIteration) when there are no pending time
        events.
        """
        ir = IOCPReactor()
        ir.wakeUp()
        self.assertFalse(ir.doIteration(None))

    def test_reactorInterfaces(self):
        """
        Verify that IOCP socket-representing classes implement IReadWriteHandle
        """
        self.assertTrue(verifyClass(IReadWriteHandle, tcp.Connection))
        self.assertTrue(verifyClass(IReadWriteHandle, udp.Port))

    def test_fileHandleInterfaces(self):
        """
        Verify that L{FileHandle} implements L{IPushProducer}.
        """
        self.assertTrue(verifyClass(IPushProducer, FileHandle))

    def test_maxEventsPerIteration(self):
        """
        Verify that we don't lose an event when more than EVENTS_PER_LOOP
        events occur in the same reactor iteration
        """

        class FakeFD:
            counter = 0

            def logPrefix(self):
                return "FakeFD"

            def cb(self, rc, bytes, evt):
                self.counter += 1

        ir = IOCPReactor()
        fd = FakeFD()
        event = _iocp.Event(fd.cb, fd)
        # Post one more event than a single iteration is allowed to process.
        for _ in range(EVENTS_PER_LOOP + 1):
            ir.port.postEvent(0, KEY_NORMAL, event)

        # First iteration handles exactly EVENTS_PER_LOOP events; the extra
        # one must survive to the next iteration rather than being dropped.
        ir.doIteration(None)
        self.assertEqual(fd.counter, EVENTS_PER_LOOP)
        ir.doIteration(0)
        self.assertEqual(fd.counter, EVENTS_PER_LOOP + 1)

View File

@@ -0,0 +1,73 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.kqueuereactor}.
"""
from __future__ import annotations
import errno
from zope.interface import implementer
from twisted.trial.unittest import TestCase
try:
from twisted.internet.kqreactor import KQueueReactor, _IKQueue
kqueueSkip = None
except ImportError:
kqueueSkip = "KQueue not available."
def _fakeKEvent(*args: object, **kwargs: object) -> None:
"""
Do nothing.
"""
def makeFakeKQueue(testKQueue: object, testKEvent: object) -> _IKQueue:
    """
    Create a fake that implements L{_IKQueue}.

    @param testKQueue: Something that acts like L{select.kqueue}.
    @param testKEvent: Something that acts like L{select.kevent}.
    @return: An implementation of L{_IKQueue} that includes C{testKQueue} and
        C{testKEvent}.
    """

    @implementer(_IKQueue)
    class FakeKQueue:
        # Expose the supplied stand-ins under the attribute names the
        # reactor looks up.
        kqueue = testKQueue
        kevent = testKEvent

    return FakeKQueue()
class KQueueTests(TestCase):
    """
    These are tests for L{KQueueReactor}'s implementation, not its real world
    behaviour. For that, look at
    L{twisted.internet.test.reactormixins.ReactorBuilder}.
    """

    skip = kqueueSkip

    def test_EINTR(self) -> None:
        """
        L{KQueueReactor} handles L{errno.EINTR} in C{doKEvent} by returning.
        """

        class InterruptedKQueue:
            """
            A stand-in kqueue whose C{control} always fails with C{EINTR},
            as a real kqueue does when a signal interrupts it.
            """

            def control(self, *args: object, **kwargs: object) -> None:
                raise OSError(errno.EINTR, "Interrupted")

        reactor = KQueueReactor(makeFakeKQueue(InterruptedKQueue, _fakeKEvent))
        # doKEvent must swallow the EINTR and return cleanly -- it should
        # neither propagate the OSError nor stumble over the deliberately
        # incomplete fake.
        reactor.doKEvent(0)

View File

@@ -0,0 +1,45 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.main}.
"""
from twisted.internet.error import ReactorAlreadyInstalledError
from twisted.internet.main import installReactor
from twisted.internet.test.modulehelpers import NoReactor
from twisted.trial import unittest
class InstallReactorTests(unittest.SynchronousTestCase):
    """
    Tests for L{installReactor}.
    """

    def test_installReactor(self) -> None:
        """
        L{installReactor} installs a new reactor if none is present.
        """
        with NoReactor():
            candidate = object()
            installReactor(candidate)
            # Importing twisted.internet.reactor must now yield exactly the
            # object we installed.
            from twisted.internet import reactor

            self.assertIs(candidate, reactor)

    def test_alreadyInstalled(self) -> None:
        """
        If a reactor is already installed, L{installReactor} raises
        L{ReactorAlreadyInstalledError}.
        """
        with NoReactor():
            installReactor(object())
            self.assertRaises(
                ReactorAlreadyInstalledError, installReactor, object()
            )

    def test_errorIsAnAssertionError(self) -> None:
        """
        For backwards compatibility, L{ReactorAlreadyInstalledError} is an
        L{AssertionError}.
        """
        self.assertTrue(issubclass(ReactorAlreadyInstalledError, AssertionError))

View File

@@ -0,0 +1,202 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._newtls}.
"""
from twisted.internet import interfaces
from twisted.internet.test.connectionmixins import (
ConnectableProtocol,
runProtocolsWithReactor,
)
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.test.test_tcp import TCPCreator
from twisted.internet.test.test_tls import (
ContextGeneratingMixin,
SSLCreator,
StartTLSClientCreator,
TLSMixin,
)
from twisted.trial import unittest
try:
from twisted.internet import _newtls as __newtls
from twisted.protocols import tls
except ImportError:
_newtls = None
else:
_newtls = __newtls
from zope.interface import implementer
class BypassTLSTests(unittest.TestCase):
    """
    Tests for the L{_newtls._BypassTLS} class.
    """

    if not _newtls:
        skip = "Couldn't import _newtls, perhaps pyOpenSSL is old or missing"

    def test_loseConnectionPassThrough(self):
        """
        C{_BypassTLS.loseConnection} calls C{loseConnection} on the base
        class, while preserving any default argument in the base class'
        C{loseConnection} implementation.
        """
        sentinel = object()
        seen = []

        class FakeTransport:
            def loseConnection(self, _connDone=sentinel):
                seen.append(_connDone)

        bypass = _newtls._BypassTLS(FakeTransport, FakeTransport())
        # With no argument, FakeTransport's own default must be used:
        bypass.loseConnection()
        self.assertEqual(seen, [sentinel])
        # An explicit argument is passed straight through:
        explicit = object()
        bypass.loseConnection(explicit)
        self.assertEqual(seen, [sentinel, explicit])
class FakeProducer:
    """
    An inert producer: every producer-interface method is a no-op.
    """

    def pauseProducing(self):
        """Ignore the pause request."""

    def resumeProducing(self):
        """Ignore the resume request."""

    def stopProducing(self):
        """Ignore the stop request."""
@implementer(interfaces.IHandshakeListener)
class ProducerProtocol(ConnectableProtocol):
    """
    Register a producer, unregister it, and verify the producer hooks up to
    innards of C{TLSMemoryBIOProtocol}.
    """

    def __init__(self, producer, result):
        # producer: the object to register on the TLS transport.
        # result: shared list into which the internal producer references
        # observed after register/unregister are appended, for the test to
        # assert on later.
        self.producer = producer
        self.result = result

    def handshakeCompleted(self):
        """
        After the TLS handshake, register C{self.producer}, record what the
        underlying C{TLSMemoryBIOProtocol} holds, unregister it, record
        again, and drop the connection.
        """
        if not isinstance(self.transport.protocol, tls.BufferingTLSTransport):
            # Either the test or the code have a bug...
            raise RuntimeError("TLSMemoryBIOProtocol not hooked up.")
        self.transport.registerProducer(self.producer, True)
        # The producer was registered with the TLSMemoryBIOProtocol:
        self.result.append(self.transport.protocol._producer._producer)
        self.transport.unregisterProducer()
        # The producer was unregistered from the TLSMemoryBIOProtocol:
        self.result.append(self.transport.protocol._producer)
        self.transport.loseConnection()
class ProducerTestsMixin(ReactorBuilder, TLSMixin, ContextGeneratingMixin):
    """
    Test the new TLS code integrates C{TLSMemoryBIOProtocol} correctly.
    """

    if not _newtls:
        skip = "Could not import twisted.internet._newtls"

    def test_producerSSLFromStart(self):
        """
        C{registerProducer} and C{unregisterProducer} on TLS transports
        created as SSL from the get go are passed to the
        C{TLSMemoryBIOProtocol}, not the underlying transport directly.
        """
        result = []
        producer = FakeProducer()
        runProtocolsWithReactor(
            self,
            ConnectableProtocol(),
            ProducerProtocol(producer, result),
            SSLCreator(),
        )
        # ProducerProtocol appends the registered producer and then the
        # (None) post-unregister state.
        self.assertEqual(result, [producer, None])

    def test_producerAfterStartTLS(self):
        """
        C{registerProducer} and C{unregisterProducer} on TLS transports
        created by C{startTLS} are passed to the C{TLSMemoryBIOProtocol}, not
        the underlying transport directly.
        """
        result = []
        producer = FakeProducer()
        runProtocolsWithReactor(
            self,
            ConnectableProtocol(),
            ProducerProtocol(producer, result),
            StartTLSClientCreator(),
        )
        self.assertEqual(result, [producer, None])

    def startTLSAfterRegisterProducer(self, streaming):
        """
        When a producer is registered, and then startTLS is called,
        the producer is re-registered with the C{TLSMemoryBIOProtocol}.

        @param streaming: Whether to register a streaming (push) producer;
            a non-streaming one is additionally wrapped in C{_PullToPush}.
        """
        clientContext = self.getClientContext()
        serverContext = self.getServerContext()
        result = []
        producer = FakeProducer()

        class RegisterTLSProtocol(ConnectableProtocol):
            def connectionMade(self):
                self.transport.registerProducer(producer, streaming)
                self.transport.startTLS(serverContext)
                # Store TLSMemoryBIOProtocol and underlying transport producer
                # status:
                if streaming:
                    # _ProducerMembrane -> producer:
                    result.append(self.transport.protocol._producer._producer)
                    result.append(self.transport.producer._producer)
                else:
                    # _ProducerMembrane -> _PullToPush -> producer:
                    result.append(self.transport.protocol._producer._producer._producer)
                    result.append(self.transport.producer._producer._producer)
                self.transport.unregisterProducer()
                self.transport.loseConnection()

        class StartTLSProtocol(ConnectableProtocol):
            def connectionMade(self):
                self.transport.startTLS(clientContext)

        runProtocolsWithReactor(
            self, RegisterTLSProtocol(), StartTLSProtocol(), TCPCreator()
        )
        # Both the TLS protocol and the underlying transport must end up
        # referring to the very same producer object.
        self.assertEqual(result, [producer, producer])

    def test_startTLSAfterRegisterProducerStreaming(self):
        """
        When a streaming producer is registered, and then startTLS is called,
        the producer is re-registered with the C{TLSMemoryBIOProtocol}.
        """
        self.startTLSAfterRegisterProducer(True)

    def test_startTLSAfterRegisterProducerNonStreaming(self):
        """
        When a non-streaming producer is registered, and then startTLS is
        called, the producer is re-registered with the
        C{TLSMemoryBIOProtocol}.
        """
        self.startTLSAfterRegisterProducer(False)


# Generate the per-reactor TestCase subclasses from the mixin and publish
# them at module level so trial can discover them.
globals().update(ProducerTestsMixin.makeTestCaseClasses())

View File

@@ -0,0 +1,41 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._pollingfile}.
"""
from unittest import skipIf
from twisted.python.runtime import platform
from twisted.trial.unittest import TestCase
if platform.isWindows():
from twisted.internet import _pollingfile
else:
_pollingfile = None # type: ignore[assignment]
@skipIf(_pollingfile is None, "Test will run only on Windows.")
class PollableWritePipeTests(TestCase):
    """
    Tests for L{_pollingfile._PollableWritePipe}.
    """

    def test_writeUnicode(self) -> None:
        """
        L{_pollingfile._PollableWritePipe.write} raises a C{TypeError} if an
        attempt is made to append unicode data to the output buffer.
        """
        pipe = _pollingfile._PollableWritePipe(1, lambda: None)
        self.assertRaises(TypeError, pipe.write, "test")

    def test_writeSequenceUnicode(self) -> None:
        """
        L{_pollingfile._PollableWritePipe.writeSequence} raises a C{TypeError}
        if unicode data is part of the data sequence to be appended to the
        output buffer.
        """
        pipe = _pollingfile._PollableWritePipe(1, lambda: None)
        # Both list and tuple sequences must be rejected.
        for sequence in (["test"], ("test",)):
            self.assertRaises(TypeError, pipe.writeSequence, sequence)
View File

@@ -0,0 +1,348 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.posixbase} and supporting code.
"""
import os
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IReadDescriptor
from twisted.internet.posixbase import PosixReactorBase, _Waker
from twisted.internet.protocol import ServerFactory
from twisted.python.runtime import platform
from twisted.trial.unittest import TestCase
skipSockets = None
try:
from twisted.internet import unix
from twisted.test.test_unix import ClientProto
except ImportError:
skipSockets = "Platform does not support AF_UNIX sockets"
from twisted.internet import reactor
from twisted.internet.tcp import Port
class WarningCheckerTestCase(TestCase):
    """
    A test case that will make sure that no warnings are left unchecked at the end of a test run.
    """

    def _onWindowsCI(self):
        """
        Return C{True} when running on a Windows continuous-integration
        machine, where warning hygiene is currently not enforced.
        """
        return os.environ.get("CI", "").lower() == "true" and platform.isWindows()

    def setUp(self):
        super().setUp()
        # FIXME:
        # https://twistedmatrix.com/trac/ticket/10332
        # Windows CI currently enters tests with stray warnings pending, so
        # discard them there to start each test clean. Local Windows runs
        # keep the warnings visible so the underlying issues can be fixed.
        if self._onWindowsCI():
            self.flushWarnings()

    def tearDown(self):
        try:
            super().tearDown()
        finally:
            leftovers = self.flushWarnings()
            if self._onWindowsCI():
                # FIXME:
                # https://twistedmatrix.com/trac/ticket/10332
                # The Windows suite is not yet warning-clean and stray delayed
                # calls can even attribute warnings to the wrong test, so the
                # check is skipped there for now. If you care about Twisted on
                # Windows, enable this check and hunt down the offenders.
                return
            self.assertEqual(
                len(leftovers),
                0,
                f"Warnings found at the end of the test:\n{leftovers}",
            )
class TrivialReactor(PosixReactorBase):
    """
    A L{PosixReactorBase} with just enough reader/writer bookkeeping to be
    instantiated: descriptors are tracked in plain dicts and never actually
    monitored.
    """

    def __init__(self):
        # The dicts must exist before base-class initialization, which
        # registers the waker via addReader.
        self._readers = {}
        self._writers = {}
        super().__init__()

    def addReader(self, reader):
        """Record C{reader} as active."""
        self._readers[reader] = True

    def removeReader(self, reader):
        """Forget C{reader}."""
        del self._readers[reader]

    def addWriter(self, writer):
        """Record C{writer} as active."""
        self._writers[writer] = True

    def removeWriter(self, writer):
        """Forget C{writer}."""
        del self._writers[writer]
class PosixReactorBaseTests(WarningCheckerTestCase):
    """
    Tests for L{PosixReactorBase}.
    """

    def _checkWaker(self, reactor):
        """
        Assert that C{reactor} carries a L{_Waker} registered both as an
        internal reader and as an ordinary reader.
        """
        self.assertIsInstance(reactor.waker, _Waker)
        self.assertIn(reactor.waker, reactor._internalReaders)
        self.assertIn(reactor.waker, reactor._readers)

    def test_wakerIsInternalReader(self):
        """
        When L{PosixReactorBase} is instantiated, it creates a waker and adds
        it to its internal readers set.
        """
        self._checkWaker(TrivialReactor())

    def test_removeAllSkipsInternalReaders(self):
        """
        Any L{IReadDescriptor}s in L{PosixReactorBase._internalReaders} are
        left alone by L{PosixReactorBase._removeAll}.
        """
        reactor = TrivialReactor()
        protected = object()
        reactor._internalReaders.add(protected)
        reactor.addReader(protected)
        reactor._removeAll(reactor._readers, reactor._writers)
        # The waker and our extra internal reader both survive removal.
        self._checkWaker(reactor)
        self.assertIn(protected, reactor._internalReaders)
        self.assertIn(protected, reactor._readers)

    def test_removeAllReturnsRemovedDescriptors(self):
        """
        L{PosixReactorBase._removeAll} returns a list of removed
        L{IReadDescriptor} and L{IWriteDescriptor} objects.
        """
        reactor = TrivialReactor()
        aReader = object()
        aWriter = object()
        reactor.addReader(aReader)
        reactor.addWriter(aWriter)
        removed = reactor._removeAll(reactor._readers, reactor._writers)
        self.assertEqual({aReader, aWriter}, set(removed))
        self.assertNotIn(aReader, reactor._readers)
        self.assertNotIn(aWriter, reactor._writers)
class TCPPortTests(WarningCheckerTestCase):
    """
    Tests for L{twisted.internet.tcp.Port}.
    """

    if not isinstance(reactor, PosixReactorBase):
        skip = "Non-posixbase reactor"

    def test_connectionLostFailed(self):
        """
        L{Port.stopListening} returns a L{Deferred} which errbacks if
        L{Port.connectionLost} raises an exception.
        """

        def failingConnectionLost(reason):
            # Simulate a connectionLost implementation that blows up.
            raise ZeroDivisionError()

        listener = Port(12345, ServerFactory())
        listener.connected = True
        listener.connectionLost = failingConnectionLost
        return self.assertFailure(listener.stopListening(), ZeroDivisionError)
class TimeoutReportReactor(PosixReactorBase):
    """
    A reactor which is just barely runnable and which cannot monitor any
    readers or writers, and which fires a L{Deferred} with the timeout
    passed to its C{doIteration} method as soon as that method is invoked.
    """

    def __init__(self):
        super().__init__()
        # Fired (once) with the timeout doIteration was called with.
        self.iterationTimeout = Deferred()
        # Deterministic fake clock value; tests advance it directly.
        self.now = 100

    def addReader(self, reader: IReadDescriptor) -> None:
        """
        Ignore the reader. This is necessary because the waker will be
        added. However, we won't actually monitor it for any events.
        """

    def removeReader(self, reader: IReadDescriptor) -> None:
        """
        See L{addReader}.
        """

    def removeAll(self):
        """
        There are no readers or writers, so there is nothing to remove.
        This will be called when the reactor stops, though, so it must be
        implemented.
        """
        return []

    def seconds(self):
        """
        Override the real clock with a deterministic one that can be easily
        controlled in a unit test.
        """
        return self.now

    def doIteration(self, timeout):
        pending, self.iterationTimeout = self.iterationTimeout, None
        if pending is not None:
            # Report the timeout only on the first iteration.
            pending.callback(timeout)
class IterationTimeoutTests(WarningCheckerTestCase):
    """
    Tests for the timeout argument L{PosixReactorBase.run} calls
    L{PosixReactorBase.doIteration} with in the presence of various delayed
    calls.
    """

    def _checkIterationTimeout(self, reactor):
        """
        Run C{reactor} until its first C{doIteration} call and return the
        timeout value that call received.
        """
        captured = []

        def record(value):
            captured.append(value)
            reactor.stop()

        reactor.iterationTimeout.addCallback(record)
        reactor.run()
        return captured[0]

    def test_noCalls(self):
        """
        If there are no delayed calls, C{doIteration} is called with a
        timeout of L{None}.
        """
        fake = TimeoutReportReactor()
        self.assertIsNone(self._checkIterationTimeout(fake))

    def test_delayedCall(self):
        """
        If there is a delayed call, C{doIteration} is called with a timeout
        which is the difference between the current time and the time at
        which that call is to run.
        """
        fake = TimeoutReportReactor()
        fake.callLater(100, lambda: None)
        self.assertEqual(100, self._checkIterationTimeout(fake))

    def test_timePasses(self):
        """
        If a delayed call is scheduled and then some time passes, the
        timeout passed to C{doIteration} is reduced by the amount of time
        which passed.
        """
        fake = TimeoutReportReactor()
        fake.callLater(100, lambda: None)
        fake.now += 25
        self.assertEqual(75, self._checkIterationTimeout(fake))

    def test_multipleDelayedCalls(self):
        """
        If there are several delayed calls, C{doIteration} is called with a
        timeout which is the difference between the current time and the
        time at which the earlier of the two calls is to run.
        """
        fake = TimeoutReportReactor()
        for delay in (50, 10, 100):
            fake.callLater(delay, lambda: None)
        # The soonest call (10 seconds away) determines the timeout.
        self.assertEqual(10, self._checkIterationTimeout(fake))

    def test_resetDelayedCall(self):
        """
        If a delayed call is reset, the timeout passed to C{doIteration} is
        based on the interval between the time when reset is called and the
        new delay of the call.
        """
        fake = TimeoutReportReactor()
        pending = fake.callLater(50, lambda: None)
        fake.now += 25
        pending.reset(15)
        self.assertEqual(15, self._checkIterationTimeout(fake))

    def test_delayDelayedCall(self):
        """
        If a delayed call is re-delayed, the timeout passed to
        C{doIteration} is based on the remaining time before the call would
        have been made and the additional amount of time passed to the delay
        method.
        """
        fake = TimeoutReportReactor()
        pending = fake.callLater(50, lambda: None)
        fake.now += 10
        pending.delay(20)
        # 40 seconds remained, plus the extra 20-second delay.
        self.assertEqual(60, self._checkIterationTimeout(fake))

    def test_cancelDelayedCall(self):
        """
        If the only delayed call is canceled, L{None} is the timeout passed
        to C{doIteration}.
        """
        fake = TimeoutReportReactor()
        pending = fake.callLater(50, lambda: None)
        pending.cancel()
        self.assertIsNone(self._checkIterationTimeout(fake))
class ConnectedDatagramPortTests(WarningCheckerTestCase):
    """
    Test connected datagram UNIX sockets.
    """

    if skipSockets is not None:
        skip = skipSockets

    def test_connectionFailedDoesntCallLoseConnection(self):
        """
        L{ConnectedDatagramPort} does not call the deprecated C{loseConnection}
        in L{ConnectedDatagramPort.connectionFailed}.
        """
        port = unix.ConnectedDatagramPort(None, ClientProto())
        # Fail loudly if the deprecated method is ever invoked.
        port.loseConnection = lambda: self.fail(
            "loseConnection is deprecated and should not get called."
        )
        port.connectionFailed("goodbye")

    def test_connectionFailedCallsStopListening(self):
        """
        L{ConnectedDatagramPort} calls L{ConnectedDatagramPort.stopListening}
        instead of the deprecated C{loseConnection} in
        L{ConnectedDatagramPort.connectionFailed}.
        """
        invocations = []
        port = unix.ConnectedDatagramPort(None, ClientProto())
        # Record the stopListening call instead of performing any cleanup.
        port.stopListening = lambda: invocations.append(True)
        port.connectionFailed("goodbye")
        self.assertTrue(invocations)
class WakerTests(WarningCheckerTestCase):
    def test_noWakerConstructionWarnings(self):
        """
        No warnings are generated when constructing the waker.
        """
        waker = _Waker()
        constructionWarnings = self.flushWarnings()
        self.assertEqual(len(constructionWarnings), 0, constructionWarnings)
        # Explicitly close the waker so the test leaves no open descriptors
        # behind, and verify the teardown is warning-free as well.
        waker.connectionLost(None)
        teardownWarnings = self.flushWarnings()
        self.assertEqual(len(teardownWarnings), 0, teardownWarnings)

View File

@@ -0,0 +1,349 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for POSIX-based L{IReactorProcess} implementations.
"""
import errno
import os
import sys
from typing import Optional
platformSkip: Optional[str]
try:
import fcntl
except ImportError:
platformSkip = "non-POSIX platform"
else:
from twisted.internet import process
platformSkip = None
from twisted.trial.unittest import TestCase
class FakeFile:
    """
    A dummy file object which records when it is closed.

    Closing (directly or by leaving a C{with} block) removes this file's
    descriptor from the owning test case's C{_files} list.
    """

    def __init__(self, testcase, fd):
        self.testcase = testcase
        self.fd = fd

    def close(self):
        """
        Report the close by dropping our descriptor from the test case's
        open-descriptor list.
        """
        self.testcase._files.remove(self.fd)

    def __enter__(self):
        return self

    def __exit__(self, *excinfo):
        self.close()
class FakeResourceModule:
    """
    Fake version of L{resource} which hard-codes a particular rlimit for maximum
    open files.

    @ivar _limit: The value to return for the hard limit of number of open files.
    """

    RLIMIT_NOFILE = 1

    def __init__(self, limit):
        self._limit = limit

    def getrlimit(self, no):
        """
        A fake of L{resource.getrlimit} which returns a pre-determined result.
        """
        if no != self.RLIMIT_NOFILE:
            # Any other resource gets an arbitrary (soft, hard) pair.
            return [123, 456]
        return [0, self._limit]
class FDDetectorTests(TestCase):
    """
    Tests for _FDDetector class in twisted.internet.process, which detects
    which function to drop in place for the _listOpenFDs method.

    @ivar devfs: A flag indicating whether the filesystem fake will indicate
        that /dev/fd exists.

    @ivar accurateDevFDResults: A flag indicating whether the /dev/fd fake
        returns accurate open file information.

    @ivar procfs: A flag indicating whether the filesystem fake will indicate
        that /proc/<pid>/fd exists.
    """

    skip = platformSkip

    # Default fake-filesystem configuration: neither /dev/fd nor
    # /proc/<pid>/fd exists until a test enables them.
    devfs = False
    accurateDevFDResults = False
    procfs = False

    def getpid(self):
        """
        Fake os.getpid, always return the same thing
        """
        return 123

    def listdir(self, arg):
        """
        Fake os.listdir, depending on what mode we're in to simulate behaviour.

        @param arg: the directory to list
        """
        # NOTE(review): unlike os.listdir, the "accurate" result is a lazy
        # map object rather than a list; the detector appears to consume it
        # only once -- confirm it is never iterated twice.
        accurate = map(str, self._files)
        if self.procfs and arg == ("/proc/%d/fd" % (self.getpid(),)):
            return accurate
        if self.devfs and arg == "/dev/fd":
            if self.accurateDevFDResults:
                return accurate
            # An inaccurate /dev/fd only ever reports stdin/stdout/stderr.
            return ["0", "1", "2"]
        # Any other path "does not exist" as far as the fake is concerned.
        raise OSError()

    def openfile(self, fname, mode):
        """
        This is a mock for L{open}. It keeps track of opened files so extra
        descriptors can be returned from the mock for L{os.listdir} when used on
        one of the list-of-filedescriptors directories.

        A L{FakeFile} is returned which can be closed to remove the new
        descriptor from the open list.
        """
        # Find the smallest unused file descriptor and give it to the new file.
        f = FakeFile(self, min(set(range(1024)) - set(self._files)))
        self._files.append(f.fd)
        return f

    def hideResourceModule(self):
        """
        Make the L{resource} module unimportable for the remainder of the
        current test method.
        """
        # Setting a sys.modules entry to None makes "import resource" raise
        # ImportError without touching the filesystem.
        sys.modules["resource"] = None

    def revealResourceModule(self, limit):
        """
        Make a L{FakeResourceModule} instance importable at the L{resource}
        name.

        @param limit: The value which will be returned for the hard limit of
            number of open files by the fake resource module's C{getrlimit}
            function.
        """
        sys.modules["resource"] = FakeResourceModule(limit)

    def replaceResourceModule(self, value):
        """
        Restore the original resource module to L{sys.modules}.
        """
        if value is None:
            # There was no real module to restore; just drop our fake.
            try:
                del sys.modules["resource"]
            except KeyError:
                pass
        else:
            sys.modules["resource"] = value

    def setUp(self):
        """
        Set up the tests, giving ourselves a detector object to play with and
        setting up its testable knobs to refer to our mocked versions.
        """
        self.detector = process._FDDetector()
        self.detector.listdir = self.listdir
        self.detector.getpid = self.getpid
        self.detector.openfile = self.openfile
        # Start with the standard descriptors "open".
        self._files = [0, 1, 2]
        # Whatever sys.modules["resource"] holds now must come back afterwards.
        self.addCleanup(self.replaceResourceModule, sys.modules.get("resource"))

    def test_selectFirstWorking(self):
        """
        L{FDDetector._getImplementation} returns the first method from its
        C{_implementations} list which returns results which reflect a newly
        opened file descriptor.
        """

        def failWithException():
            raise ValueError("This does not work")

        def failWithWrongResults():
            # Static results that cannot reflect a newly opened descriptor.
            return [0, 1, 2]

        def correct():
            return self._files[:]

        self.detector._implementations = [
            failWithException,
            failWithWrongResults,
            correct,
        ]
        self.assertIs(correct, self.detector._getImplementation())

    def test_selectLast(self):
        """
        L{FDDetector._getImplementation} returns the last method from its
        C{_implementations} list if none of the implementations manage to return
        results which reflect a newly opened file descriptor.
        """

        def failWithWrongResults():
            return [3, 5, 9]

        def failWithOtherWrongResults():
            return [0, 1, 2]

        self.detector._implementations = [
            failWithWrongResults,
            failWithOtherWrongResults,
        ]
        self.assertIs(failWithOtherWrongResults, self.detector._getImplementation())

    def test_identityOfListOpenFDsChanges(self):
        """
        Check that the identity of _listOpenFDs changes after running
        _listOpenFDs the first time, but not after the second time it's run.

        In other words, check that the monkey patching actually works.
        """
        # Create a new instance
        detector = process._FDDetector()

        first = detector._listOpenFDs.__name__
        detector._listOpenFDs()
        second = detector._listOpenFDs.__name__
        detector._listOpenFDs()
        third = detector._listOpenFDs.__name__

        # The first call replaces the dispatcher with a concrete
        # implementation; later calls keep that implementation.
        self.assertNotEqual(first, second)
        self.assertEqual(second, third)

    def test_devFDImplementation(self):
        """
        L{_FDDetector._devFDImplementation} raises L{OSError} if there is no
        I{/dev/fd} directory, otherwise it returns the basenames of its children
        interpreted as integers.
        """
        self.devfs = False
        self.assertRaises(OSError, self.detector._devFDImplementation)
        self.devfs = True
        self.accurateDevFDResults = False
        self.assertEqual([0, 1, 2], self.detector._devFDImplementation())

    def test_procFDImplementation(self):
        """
        L{_FDDetector._procFDImplementation} raises L{OSError} if there is no
        I{/proc/<pid>/fd} directory, otherwise it returns the basenames of its
        children interpreted as integers.
        """
        self.procfs = False
        self.assertRaises(OSError, self.detector._procFDImplementation)
        self.procfs = True
        self.assertEqual([0, 1, 2], self.detector._procFDImplementation())

    def test_resourceFDImplementation(self):
        """
        L{_FDDetector._fallbackFDImplementation} uses the L{resource} module if
        it is available, returning a range of integers from 0 to the
        minimum of C{1024} and the hard I{NOFILE} limit.
        """
        # When the resource module is here, use its value.
        self.revealResourceModule(512)
        self.assertEqual(
            list(range(512)), list(self.detector._fallbackFDImplementation())
        )

        # But limit its value to the arbitrarily selected value 1024.
        self.revealResourceModule(2048)
        self.assertEqual(
            list(range(1024)), list(self.detector._fallbackFDImplementation())
        )

    def test_fallbackFDImplementation(self):
        """
        L{_FDDetector._fallbackFDImplementation}, the implementation of last
        resort, succeeds with a fixed range of integers from 0 to 1024 when the
        L{resource} module is not importable.
        """
        self.hideResourceModule()
        self.assertEqual(
            list(range(1024)), list(self.detector._fallbackFDImplementation())
        )
class FileDescriptorTests(TestCase):
    """
    Tests for L{twisted.internet.process._listOpenFDs}
    """

    skip = platformSkip

    def test_openFDs(self):
        """
        File descriptors returned by L{_listOpenFDs} are mostly open.

        This test assumes that zero-length writes fail with EBADF on closed
        file descriptors.
        """
        for fd in process._listOpenFDs():
            try:
                # F_GETFL succeeds on any open descriptor; a closed one
                # must fail with exactly EBADF.
                fcntl.fcntl(fd, fcntl.F_GETFL)
            except OSError as err:
                self.assertEqual(
                    errno.EBADF,
                    err.errno,
                    "fcntl(%d, F_GETFL) failed with unexpected errno %d"
                    % (fd, err.errno),
                )

    def test_expectedFDs(self):
        """
        L{_listOpenFDs} lists expected file descriptors.
        """
        # This is a tricky test. A priori, there is no way to know what file
        # descriptors are open now, so there is no way to know what _listOpenFDs
        # should return. Work around this by creating some new file descriptors
        # which we can know the state of and then just making assertions about
        # their presence or absence in the result.

        # Expect a file we just opened to be listed.
        f = open(os.devnull)
        openfds = process._listOpenFDs()
        self.assertIn(f.fileno(), openfds)

        # Expect a file we just closed not to be listed - with a caveat. The
        # implementation may need to open a file to discover the result. That
        # open file descriptor will be allocated the same number as the one we
        # just closed. So, instead, create a hole in the file descriptor space
        # to catch that internal descriptor and make the assertion about a
        # different closed file descriptor.

        # This gets allocated a file descriptor larger than f's, since nothing
        # has been closed since we opened f.
        fd = os.dup(f.fileno())

        # But sanity check that; if it fails the test is invalid.
        self.assertTrue(
            fd > f.fileno(),
            "Expected duplicate file descriptor to be greater than original",
        )

        try:
            # Get rid of the original, creating the hole. The copy should still
            # be open, of course.
            f.close()
            self.assertIn(fd, process._listOpenFDs())
        finally:
            # Get rid of the copy now
            os.close(fd)
        # And it should not appear in the result.
        self.assertNotIn(fd, process._listOpenFDs())

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More