munet: add Commander.async_spawn() #58
base: main
@@ -603,6 +603,41 @@
        p = pexpect.spawn(actual_cmd[0], actual_cmd[1:], echo=echo, **defaults)
        return p, actual_cmd

    def _spawn_with_logging(
        self,
        cmd,
        use_pty=False,
        logfile=None,
        logfile_read=None,
        logfile_send=None,
        **kwargs,
    ):
        """Create a spawned process with logging to files configured."""
        if is_file_like(cmd):
            assert not use_pty
            ac = "*socket*"
            p = self._fdspawn(cmd, **kwargs)
        else:
            p, ac = self._spawn(cmd, use_pty=use_pty, **kwargs)

        if logfile:
            p.logfile = logfile
        if logfile_read:
            p.logfile_read = logfile_read
        if logfile_send:
            p.logfile_send = logfile_send

        # for spawned shells (i.e., a direct command and not a console)
        # this is wrong and will cause 2 prompts
        if not use_pty:
            # This isn't very nice looking
            p.echo = False
            if not is_file_like(cmd):
                p.isalive = lambda: p.proc.poll() is None
                if not hasattr(p, "close"):
                    p.close = p.wait
        return p, ac

    def spawn(
        self,
        cmd,
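
The new helper centralizes what both spawn paths need synchronously: attaching pexpect's log hooks and shimming `isalive()`/`close()` onto pipe-based spawns. As a rough standalone illustration of those pieces (not munet code; the `cat` child and the `StringIO` sink are stand-ins):

```python
import io

from pexpect.popen_spawn import PopenSpawn

# Pipe-based spawn, as used when use_pty is False.
log = io.StringIO()
p = PopenSpawn("cat", encoding="utf-8")

# Log hook: everything read from the child is mirrored into `log`.
p.logfile_read = log

# PopenSpawn exposes the underlying Popen as p.proc; give it the
# isalive()/close() interface callers expect, as the diff does.
p.isalive = lambda: p.proc.poll() is None
if not hasattr(p, "close"):
    p.close = p.wait

p.sendline("hello")
p.expect("hello")
print("captured:", log.getvalue().strip())
p.proc.terminate()
```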
@@ -638,29 +673,123 @@
            CalledProcessError if EOF is seen and `cmd` exited then
            raises a CalledProcessError to indicate the failure.
        """
        if is_file_like(cmd):
            assert not use_pty
            ac = "*socket*"
            p = self._fdspawn(cmd, **kwargs)
        else:
            p, ac = self._spawn(cmd, use_pty=use_pty, **kwargs)
        p, ac = self._spawn_with_logging(
            cmd,
            use_pty,
            logfile,
            logfile_read,
            logfile_send,
            **kwargs,
        )

        if logfile:
            p.logfile = logfile
        if logfile_read:
            p.logfile_read = logfile_read
        if logfile_send:
            p.logfile_send = logfile_send
        # Do a quick check to see if we got the prompt right away, otherwise we may be
        # at a console so we send a \n to re-issue the prompt
        index = p.expect([spawned_re, pexpect.TIMEOUT, pexpect.EOF], timeout=0.1)
        if index == 0:
            assert p.match is not None
            self.logger.debug(
                "%s: got spawned_re quick: '%s' matching '%s'",
                self,
                p.match.group(0),
                spawned_re,
            )
            return p

        # for spawned shells (i.e., a direct command and not a console)
        # this is wrong and will cause 2 prompts
        if not use_pty:
            # This isn't very nice looking
            p.echo = False
            if not is_file_like(cmd):
                p.isalive = lambda: p.proc.poll() is None
                if not hasattr(p, "close"):
                    p.close = p.wait
        # Now send a CRLF to cause the prompt (or whatever else) to re-issue
        p.send("\n")
        try:
            patterns = [spawned_re, *expects]

            self.logger.debug("%s: expecting: %s", self, patterns)

            while index := p.expect(patterns):
                if trace:
                    assert p.match is not None
                    self.logger.debug(
                        "%s: got expect: '%s' matching %d '%s', sending '%s'",
                        self,
                        p.match.group(0),
                        index,
                        patterns[index],
                        sends[index - 1],
                    )
                if sends[index - 1]:
                    p.send(sends[index - 1])

                self.logger.debug("%s: expecting again: %s", self, patterns)
            self.logger.debug(
                "%s: got spawned_re: '%s' matching '%s'",
                self,
                p.match.group(0),
                spawned_re,
            )
            return p
        except pexpect.TIMEOUT:
            self.logger.error(
                "%s: TIMEOUT looking for spawned_re '%s' expect buffer so far:\n%s",
                self,
                spawned_re,
                indent(p.buffer),
            )
            raise
        except pexpect.EOF as eoferr:
            if p.isalive():
                raise
            rc = p.status
            before = indent(p.before)
            error = CalledProcessError(rc, ac, output=before)
            self.logger.error(
                "%s: EOF looking for spawned_re '%s' before EOF:\n%s",
                self,
                spawned_re,
                before,
            )
            p.close()
            raise error from eoferr

    async def async_spawn(
Review comment: In order to create this new function you did a large copy and paste. That's a good indication we need to refactor the function so that more code is shared. :) That is hard to do sometimes with async/await. I feel like there should be some way to share code better between asyncio and non-asyncio, but I haven't figured it out yet. This is why you see other places in the code where I run the async function inside an … A good place to test this would be in an FRR topotest which uses spawn.

Reply: I don't quite like the … The only other real option then in refactoring is to try and split off as much synchronous code as possible and place it into helper methods. I don't find such a solution satisfying, since that would leave a large portion of the methods untouched, but I don't see any other real alternatives.

Reply: See the latest commit for a minor refactor that collects code that is synchronous in either version (sync/async) into a single method to reduce duplicate code. I am struggling to determine if there are any other changes that I could make to reduce code duplication. The presence of the await statement within the try/except block + while block makes it difficult to reduce the logic any further, imo.
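
The first comment alludes to running async functions from synchronous callers elsewhere in the code. A minimal, generic sketch of that pattern (the names `_async_work`/`sync_work` are invented here, not munet APIs):

```python
import asyncio

async def _async_work(x):
    # Stand-in for a shared async implementation (e.g., an async spawn helper).
    await asyncio.sleep(0)
    return x * 2

def sync_work(x):
    # Synchronous facade: run the coroutine to completion on a fresh event loop.
    return asyncio.run(_async_work(x))

print(sync_work(21))  # prints 42
```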
        self,
        cmd,
        spawned_re,
        expects=(),
        sends=(),
        use_pty=False,
        logfile=None,
        logfile_read=None,
        logfile_send=None,
        trace=None,
        **kwargs,
    ):
        """Create an async spawned send/expect process.

        Args:
            cmd: list of args to exec/popen with, or an already open socket
            spawned_re: what to look for to know when done, `spawn` returns when seen
            expects: a list of regex other than `spawned_re` to look for. Commonly,
                "ogin:" or "[Pp]assword:".
            sends: what to send when an element of `expects` matches. So e.g., the
                username or password if that's what the corresponding expect matched.
                Can be the empty string to send nothing.
            use_pty: true for pty based expect, otherwise uses popen (pipes/files)
            trace: if true then log send/expects
            **kwargs: kwargs passed on to _spawn.

        Returns:
            A pexpect process.

        Raises:
            pexpect.TIMEOUT, pexpect.EOF as documented in `pexpect`
            CalledProcessError if EOF is seen and `cmd` exited then
            raises a CalledProcessError to indicate the failure.
        """
        p, ac = self._spawn_with_logging(
            cmd,
            use_pty,
            logfile,
            logfile_read,
            logfile_send,
            **kwargs,
        )

        # Do a quick check to see if we got the prompt right away, otherwise we may be
        # at a console so we send a \n to re-issue the prompt
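
Based on the signature and docstring above, a call to the new method might look like this hypothetical console-attach helper (the Commander instance, telnet target, credentials, and prompt regex are all invented for illustration):

```python
async def attach_console(commander):
    # `commander` is assumed to be a munet Commander instance that has the
    # async_spawn() method added by this PR; the target and credentials are
    # placeholders.
    p = await commander.async_spawn(
        ["telnet", "localhost", "5000"],   # cmd
        r"(^|\r?\n)\$ ",                   # spawned_re: prompt that means "done"
        expects=["ogin:", "[Pp]assword:"],
        sends=["admin\n", "secret\n"],
        use_pty=True,
        trace=True,
    )
    return p
```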
@@ -682,7 +811,28 @@

            self.logger.debug("%s: expecting: %s", self, patterns)

            while index := p.expect(patterns):
            # The timestamp is only used for the case of use_pty != True
            timeout = kwargs.get("timeout", 120)
            timeout_ts = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
            index = None
            while True:
                if use_pty is True:
                    index = await p.expect(patterns, async_=True)
                else:
                    # Due to an upstream issue, async_=True cannot be mixed with
                    # pipes (pexpect.popen_spawn.PopenSpawn). This hack is used
                    # to bypass that problem.
                    await asyncio.sleep(0)  # Avoid blocking other coroutines
                    try:
                        index = p.expect(patterns, timeout=0.1)
                    except pexpect.TIMEOUT:
                        # We must declare when a timeout occurs instead of pexpect
                        if timeout_ts < datetime.datetime.now():
                            raise
                        continue
                if index == 0:
                    break

                if trace:
                    assert p.match is not None
                    self.logger.debug(
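
The pipe-handling branch above is the interesting part: because `async_=True` cannot be used with `PopenSpawn`, the loop polls with a short per-call timeout, yields to the event loop between attempts, and enforces the overall deadline itself. A standalone sketch of that pattern under the same assumptions (`expect_with_polling` and the `cat` child are illustrative, not munet code):

```python
import asyncio
import datetime

import pexpect
from pexpect.popen_spawn import PopenSpawn

async def expect_with_polling(p, patterns, timeout=10):
    # Pipe-based PopenSpawn cannot use expect(async_=True), so poll with a
    # short synchronous timeout, yield between attempts, and enforce the
    # overall deadline ourselves.
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
    while True:
        await asyncio.sleep(0)  # let other coroutines run
        try:
            return p.expect(patterns, timeout=0.1)
        except pexpect.TIMEOUT:
            if datetime.datetime.now() > deadline:
                raise

async def main():
    p = PopenSpawn("cat", encoding="utf-8")
    p.sendline("hello")
    index = await expect_with_polling(p, ["hello", pexpect.EOF])
    print("matched pattern index:", index)
    p.proc.terminate()

asyncio.run(main())
```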
@@ -761,7 +911,7 @@
        combined_prompt = r"({}|{})".format(re.escape(PEXPECT_PROMPT), prompt)

        assert not is_file_like(cmd) or not use_pty
        p = self.spawn(
        p = await self.async_spawn(
            cmd,
            combined_prompt,
            expects=expects,
Review comment: The majority of synchronous spawn() is tested, but not all of the duplicate code. I was unable to determine a solution that allowed the send/expect logic to be tested while ensuring that we also test functionality across all the different shells.
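
For reference, the pty branch of the new loop relies on pexpect's native asyncio support, which only the pty-based `pexpect.spawn` provides. A minimal standalone example of `expect(async_=True)` (the `echo` command is just a placeholder):

```python
import asyncio
import pexpect

async def main():
    # pty-based spawn supports awaiting expect() directly via async_=True;
    # pipe-based PopenSpawn does not, which is what the polling workaround
    # in this PR compensates for.
    p = pexpect.spawn("echo hello", encoding="utf-8")
    index = await p.expect(["hello", pexpect.EOF], async_=True)
    print("matched pattern index:", index)
    p.close()

asyncio.run(main())
```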