
Source Code for Module pybaz.backends.forkexec

# arch-tag: 5b9da267-93df-418e-bf4a-47fb9ec7f6de
# Copyright (C) 2004 Canonical Ltd.
#               Author: David Allouche <david@canonical.com>
#
#    This program is free software; you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation; either version 2 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program; if not, write to the Free Software
#    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

"""PyArch specific process spawning
"""

import sys
import os
import threading
from pybaz.errors import ExecProblem
import commandline

import logging
logging = logging.getLogger('pybaz.forkexec')


### Internal use constant

class StdoutType(object):
    def __repr__(self):
        return 'pybaz.backends.forkexec.STDOUT'

STDOUT = StdoutType()


### Spawning strategy

class PyArchSpawningStrategy(commandline.SpawningStrategy):

    def __init__(self, command, logger):
        self._command = command
        self._logger = logger

    def sequence_cmd(self, args, chdir=None, expected=(0,), stderr_too=False):
        return _pyarch_sequence_cmd(
            self._command, args, chdir, expected, self._logger,
            stderr_too=stderr_too)

    def status_cmd(self, args, chdir, expected):
        return exec_safe(
            self._command, args, expected=expected, chdir=chdir,
            logger=self._logger)

    def status_text_cmd(self, args, chdir, expected):
        return exec_safe_status_stdout(
            self._command, args, expected=expected, chdir=chdir,
            logger=self._logger)


class _pyarch_sequence_cmd(object):
    def __init__(self, command, args, chdir, expected, logger, stderr_too):
        if stderr_too:
            stderr = STDOUT
        else:
            stderr = None
        self._proc = exec_safe_iter_stdout(
            command, args, stderr=stderr, chdir=chdir, expected=expected,
            logger=logger)
    def __iter__(self):
        return self
    def next(self):
        line = self._proc.next()
        return line.rstrip('\n')
    def _get_finished(self):
        return self._proc.finished
    finished = property(_get_finished)
    def _get_status(self):
        return self._proc.status
    status = property(_get_status)
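

# Illustrative sketch (not part of the original module): iterating over the
# line-oriented output of a command through the spawning strategy.  The
# 'echo' command and the _example_* name are assumptions made only for
# illustration; a logger from pybaz.backends.commandline would normally be
# passed, but None is assumed acceptable here and simply disables logging.
def _example_sequence_cmd():
    strategy = PyArchSpawningStrategy('echo', logger=None)
    lines = []
    for line in strategy.sequence_cmd(('hello', 'world')):
        lines.append(line)   # lines are yielded without trailing newlines
    return lines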


### Process handling commands

def exec_safe(program, args = [], stdout = None, stdin = None,
              stderr = None, expected = 0, chdir = None, logger = None):
    """Fork/exec a process and raise an exception if the program
    died with a signal or returned an error code other than expected.
    This function will always wait."""
    proc = ChildProcess(program, args, expected, chdir, logger)
    proc.spawn(stdout, stdin, stderr)
    proc.wait()
    return proc.status
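

# Illustrative sketch (not part of the original module): run a command and
# rely on exec_safe to raise ExecProblem on an unexpected exit status.  The
# 'true' command and the _example_* name are assumptions made only for
# illustration.
def _example_exec_safe():
    # 'true' exits with status 0, the only expected value here.
    return exec_safe('true', (), expected=(0,))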


def exec_safe_status_stdout(program, args = [], expected = 0,
                            chdir = None, logger = None):
    """Run the specified program and return a tuple of two items:
    1. exit status of the program;
    2. output of the program as a single string.
    """
    read_end, write_end = os.pipe()
    proc = ChildProcess(program, args, expected, chdir, logger)
    proc.spawn(stdout=write_end)
    os.close(write_end)
    fd = os.fdopen(read_end, 'r')
    output = fd.read()
    proc.wait()
    fd.close()
    return (proc.status, output)
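

# Illustrative sketch (not part of the original module): collect a command's
# exit status together with its whole output.  The 'ls' command and the
# _example_* name are assumptions made only for illustration.
def _example_status_stdout():
    status, output = exec_safe_status_stdout('ls', ('-a',), expected=(0,))
    return status, output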


class exec_safe_iter_stdout(object):

    """Iterator over the output of a child process.

    Fork/exec a process with its output piped. Each iteration reads
    one line from the process output pipe. The pipe is properly
    closed whenever the output is exhausted or the iterator is
    deleted.

    If the output is exhausted, the process exit status is checked and
    an ExecProblem exception will be raised for an abnormal or
    unexpected exit status.
    """

    def __init__(self, program, args=[], stdin=None, stderr=None,
                 expected=0, delsignal=None, chdir=None, logger=None):
        self.delsignal = delsignal
        self._read_file, self._pid = None, None
        read_end, write_end = os.pipe()
        if stderr == STDOUT:
            stderr = write_end
        self.proc = ChildProcess(program, args, expected, chdir, logger)
        try:
            self.proc.spawn(stdout=write_end, stdin=stdin, stderr=stderr,
                            closefds=[read_end])
            self._pid = self.proc.pid
        except:
            os.close(write_end)
            self.errlog = '<deleted>'
            os.close(read_end)
            raise
        self._read_file = os.fdopen(read_end, 'r', 0)
        os.close(write_end)

    def __del__(self):
        """Destructor. If needed, close the pipe and wait for the child process.

        If the child process has not been joined yet, the iterator was
        deleted before being exhausted. It is assumed the exit status is
        not cared about.
        """
        if self._pid is not None:
            if self._read_file is not None:
                self._read_file.close()
            # Some buggy programs (e.g. ls -R) do not properly
            # terminate when their output pipe is broken. They
            # must be killed by an appropriate signal before
            # waiting. SIGINT should be okay most of the time.
            if self.delsignal:
                os.kill(self.proc.pid, self.delsignal)
            self.proc.wait_nocheck()

    def _get_finished(self):
        """Whether the underlying process has terminated."""
        return self.proc.status is not None or self.proc.signal is not None
    finished = property(_get_finished)

    def _get_status(self):
        """Exit status of the underlying process.

        Raises ValueError if the process has not yet finished. (Hm...
        should be AttributeError). Can be None if the process was
        killed by a signal.
        """
        if self.proc.status is None:
            raise ValueError, "no status, process has not finished"
        return self.proc.status
    status = property(_get_status)

    def close(self):
        """Close the pipe and wait for the child process.

        If the output is not exhausted yet, you should be prepared to
        handle the error condition caused by breaking the pipe.
        """
        self._read_file.close()
        try:
            self.proc.wait()
            self._pid = None
            return self.proc.status
        except ExecProblem:
            self._pid = None
            raise

    def __iter__(self):
        """Iterator. Return self."""
        return self

    def next(self):
        """Iteration method. Iterate on the pipe file.

        Close the pipe and wait for the child process once the output
        is exhausted.

        Use `file.readline` instead of `file.next` because we want
        maximal responsiveness to incremental output. The pipe
        mechanism itself provides buffering.
        """
        try:
            line = self._read_file.readline()
        except ValueError:
            if self._pid is None: raise StopIteration
            else: raise
        if line:
            return line
        else:
            self.close()
            raise StopIteration
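

# Illustrative sketch (not part of the original module): read the first line
# of a command's output, then close() the iterator explicitly to reap the
# child and obtain its exit status.  The 'echo' command and the _example_*
# name are assumptions made only for illustration.
def _example_iter_stdout():
    it = exec_safe_iter_stdout('echo', ('spam',), expected=(0,))
    first = it.next()     # "spam\n", trailing newline included
    status = it.close()   # waits for the child, raises ExecProblem on error
    return first, status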


### Low-level process handling

class ChildProcess:

    """Description of a child process, for error handling."""

    def __init__(self, command, args, expected=0, chdir=None, logger=None):
        """Create a child process descriptor.

        The child process itself is not created yet; it is forked later
        by `spawn`. The `command`, `args` and `expected` parameters are
        used for error reporting.

        :param command: name of the child process executable
        :type command: str
        :param args: child process arguments (argv)
        :type args: sequence of str
        :param expected: valid exit status values
        :type expected: int or sequence of int
        """
        self.pid = None
        if not isinstance(command, str):
            raise TypeError(
                "command must be a string, but was: %r" % (command,))
        self.command = command
        args = tuple(args)
        for arg in args:
            if not isinstance(arg, str):
                raise TypeError(
                    "args must be a sequence of strings, but was %r" % (args,))
        self.args = args
        if isinstance(expected, int): expected = (expected,)
        self.expected = expected
        self.signal = None
        self.status = None
        self.error = None
        if chdir is not None and not isinstance(chdir, str):
            raise TypeError(
                "chdir must be a string or None, but was: %r" % (chdir,))
        self.chdir = chdir
        self._logger = logger
        self._outlog = None
        self._errlog = None
        logging.debug('ChildProcess, %r, %r, expected=%r, chdir=%r',
                      self.command, self.args, self.expected, self.chdir)

    def spawn(self, stdout=None, stdin=None, stderr=None, closefds=[]):
        """Fork, set up file descriptors, and exec the child process.

        The `stdout`, `stdin` and `stderr` arguments can be raw file
        descriptors (ints) or file-like objects with a ``fileno``
        method returning a file descriptor.

        When building a pipe, one side of the pipe is used as `stdout`,
        `stdin` or `stderr`, and the other is included in the
        `closefds` list, so the child process will be able to detect a
        broken pipe.

        :param stdin: use as standard input of the child process,
            defaults to ``/dev/null``.
        :type stdin: file-like object with a ``fileno`` method or raw
            file descriptor (``int``).
        :param stdout: use as standard output of the child process. If
            a logger was specified, defaults to a pipe for logging; if
            no logger was specified, defaults to ``/dev/null``.
        :type stdout: file-like object with a ``fileno`` method or raw
            file descriptor (``int``).
        :param stderr: use as standard error of the child process.
            Defaults to a pipe for error reporting (see `ExecProblem`)
            and logging.
        :type stderr: file-like object with a ``fileno`` method or raw
            file descriptor (``int``).
        :param closefds: file descriptors to close in the child process.
        :type closefds: iterable of int
        """
        __pychecker__ = 'no-moddefvalue'
        if self.pid is not None:
            raise ValueError, "child process was already spawned"
        if self._logger is not None:
            self._logger.log_command(self.command, self.args)
            if stdout is None:
                self._outlog = stdout = StringOutput()
        if stderr is None:
            self._errlog = stderr = StringOutput()
        self.pid = os.fork()
        if self.pid:
            return # the parent process, nothing more to do
        try:
            source_fds = [stdin, stdout, stderr]
            closefds = list(closefds)
            for i in range(3):
                if source_fds[i] is None:
                    source_fds[i] = getnull()
                if hasattr(source_fds[i], 'fileno'):
                    source_fds[i] = source_fds[i].fileno()
            # multiple dup2 calls can step on each other's toes if a source
            # fd is smaller than its target fd, so start by duplicating low fds
            for i in range(1, 3):
                if source_fds[i] is not None:
                    while source_fds[i] < i:
                        closefds.append(source_fds[i])
                        source_fds[i] = os.dup(source_fds[i])
            # must close before dup2, because closefds may contain
            # values in the range 0..2
            for fd in closefds:
                os.close(fd)
            # finally, move the given fds to their final location
            for dest, source in enumerate(source_fds):
                if source is not None and source != dest:
                    os.dup2(source, dest)
                    if source not in source_fds[dest+1:]:
                        os.close(source)

            if self.chdir:
                os.chdir(self.chdir)
            os.execvp(self.command, (self.command,) + self.args)
        except:
            sys.excepthook(*sys.exc_info())
            os._exit(255)

    def wait_nocheck(self):
        """Wait for the process and return its raw exit result.

        This (internally used) variant of `wait` is useful when you
        want to wait for a child process, but not raise an exception
        if it was terminated abnormally. Typically, that's what you
        want if you killed the child process.

        :return: second element of the tuple returned by os.waitpid()
        """
        if self._outlog is not None:
            self._logger.log_output(self._outlog.read())
        if self._errlog is not None:
            self.error = self._errlog.read()
            if self._logger is not None:
                self._logger.log_error(self.error)
        return os.waitpid(self.pid, 0)[1]

    def wait(self):
        """Wait for the process and check its termination status.

        If the process exited, set ``self.status`` to its exit status.

        If the process was terminated by a signal, set ``self.signal``
        to the value of this signal.

        :raise ExecProblem: process was killed by a signal or its exit
            status is not in ``self.expected``
        """
        result = self.wait_nocheck()
        if os.WIFSIGNALED(result):
            self.signal = os.WTERMSIG(result)
        if os.WIFEXITED(result):
            self.status = os.WEXITSTATUS(result)
        if not os.WIFEXITED(result):
            raise ExecProblem(self)
        if os.WEXITSTATUS(result) not in self.expected:
            raise ExecProblem(self)
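

# Illustrative sketch (not part of the original module): drive ChildProcess
# directly without a logger.  The 'true' command and the _example_* name are
# assumptions made only for illustration.
def _example_child_process():
    proc = ChildProcess('true', (), expected=(0,))
    proc.spawn()   # stdin/stdout default to /dev/null, stderr to a pipe
    proc.wait()    # raises ExecProblem on signal or unexpected exit status
    return proc.status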


nulldev = None

def getnull():
    """Return a file object on /dev/null, opened for reading and writing."""
    global nulldev
    if not nulldev:
        nulldev = open("/dev/null", "w+")
    return nulldev


threadcount = 0

class StringOutput(object):

    """Memory buffer storing a pipe output asynchronously."""

    def __init__(self):
        read_end, self.write_end = os.pipe()
        self.readfile = os.fdopen(read_end, 'r', 0)
        self.thread = threading.Thread(target=self.__run)
        self.thread.setDaemon(True)
        self.thread.start()

    def _close_write_end(self):
        if self.write_end is not None:
            os.close(self.write_end)
            self.write_end = None

    def _join(self):
        #global threadcount
        if self.thread is not None:
            self.thread.join()
            self.thread = None
            #threadcount -= 1
            #print " %s joined (%d)" % (self, threadcount)

    def __del__(self):
        #print "%s\tdeleting" % self
        self._close_write_end()
        self.readfile.close()
        self._join()

    def __run(self):
        #global threadcount
        #threadcount += 1
        self.data = self.readfile.read()

    def fileno(self):
        return self.write_end

    def read(self):
        #print "%s\treading" % self
        self._close_write_end()
        self._join()
        self.readfile.close()
        return self.data