Python multiprocessing - logging.FileHandler object raises PicklingError

Asked
Active 3 hr ago
Viewed126 times

5 Answers

Tags: raises · python · object · picklingerror · filehandler · multiprocessing
90%

Speaking of logging messages: the Python Logging Cookbook contains a number of recipes related to logging which have been found useful in the past. An example of how you can define a namer and rotator is given in one such snippet, which shows zlib-based compression of the log file. Another recipe shows that any events you log through a LoggerAdapter will have the value of some_conn_id prepended to the log messages.

Example_snippet/controller/utility/_raises.js/ import logging import auxiliar. . .
# Demonstration script: configure the 'spam_application' logger with a file
# handler (DEBUG and up -> spam.log) and a console handler (ERROR and up),
# then exercise the auxiliary module while logging progress.
import logging
import auxiliary_module

logger = logging.getLogger('spam_application')
logger.setLevel(logging.DEBUG)

# File handler captures everything down to DEBUG.
file_handler = logging.FileHandler('spam.log')
file_handler.setLevel(logging.DEBUG)

# Console handler only surfaces ERROR and above.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.ERROR)

# One shared formatter, applied to both handlers before registration.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
for handler in (file_handler, console_handler):
    handler.setFormatter(formatter)
for handler in (file_handler, console_handler):
    logger.addHandler(handler)

logger.info('creating an instance of auxiliary_module.Auxiliary')
aux = auxiliary_module.Auxiliary()
logger.info('created an instance of auxiliary_module.Auxiliary')
logger.info('calling auxiliary_module.Auxiliary.do_something')
aux.do_something()
logger.info('finished auxiliary_module.Auxiliary.do_something')
logger.info('calling auxiliary_module.some_function()')
auxiliary_module.some_function()
logger.info('done with auxiliary_module.some_function()')
load more v
88%

See the first answer to "Python multiprocessing - logging.FileHandler object raises PicklingError": the multiprocessing module ships a multiprocessing-aware logger (multiprocessing.get_logger). However, I have always found that logger too limited. (Related package: odoo11-addon-base-location-geonames-import)

Example_snippet/controller/utility/_raises.js/ import multiprocessing, loggin. . .
import multiprocessing, logging

def setup_logger(name_logfile, path_logfile):
    """Build a logger named *name_logfile* that writes to *path_logfile*
    (truncating it) and echoes every record to the console.

    Note: each call attaches fresh handlers, so calling it twice with the
    same name would duplicate output; callers here use one unique name per
    worker process.
    """
    # NOTE(review): indentation restored — in the scraped original the body
    # had leaked to module level, which is a SyntaxError (`return` outside
    # a function).
    logger = logging.getLogger(name_logfile)
    formatter = logging.Formatter('%(asctime)s:   %(message)s', datefmt='%Y/%m/%d %H:%M:%S')
    fileHandler = logging.FileHandler(path_logfile, mode='w')
    fileHandler.setFormatter(formatter)
    streamHandler = logging.StreamHandler()
    streamHandler.setFormatter(formatter)

    logger.setLevel(logging.DEBUG)
    logger.addHandler(fileHandler)
    logger.addHandler(streamHandler)
    return logger

def MyFunc(A):
    """Pool worker: print its argument, then build a per-process logger
    and write a single record to /dev/shm/Logfile<A>.log.

    Fixes vs. the scraped original: body indentation restored (the logger
    lines had leaked to module level) and Python 2 `print A` replaced with
    the function form so the snippet runs on Python 3 as well.
    """
    print(A)
    logger = setup_logger('Logfile%s' % A, '/dev/shm/Logfile%s.log' % A)
    logger.info('text to be written to logfile')

# Guard the pool setup so child processes started with the 'spawn' method
# do not re-execute it when they import this module; the other answers on
# this page use the same `__main__` guard.
if __name__ == '__main__':
    pool = multiprocessing.Pool(2)
    pool.map(MyFunc, [1, 2])
    pool.close()
    pool.join()
Step 2 continued with 1 2 2015/02/12 14:05:09: tex. . .
1
2
2015/02/12 14:05:09: text to be written to logfile
2015/02/12 14:05:09: text to be written to logfile
Process PoolWorker-1:
Traceback (most recent call last):
File "/usr/lib64/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib64/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "/usr/lib64/python2.7/multiprocessing/pool.py", line 99, in worker
Process PoolWorker-2:
put((job, i, result))
File "/usr/lib64/python2.7/multiprocessing/queues.py", line 392, in put
Traceback (most recent call last):
File "/usr/lib64/python2.7/multiprocessing/process.py", line 258, in _bootstrap
return send(obj)
PicklingError: Can't pickle <type 'thread.lock'>: attribute lookup thread.lock failed
   self.run()
   File "/usr/lib64/python2.7/multiprocessing/process.py", line 114, in run
   self._target(*self._args, **self._kwargs)
   File "/usr/lib64/python2.7/multiprocessing/pool.py", line 99, in worker
   put((job, i, result))
   File "/usr/lib64/python2.7/multiprocessing/queues.py", line 392, in put
   return send(obj)
   PicklingError: Can't pickle <type 'thread.lock'>: attribute lookup thread.lock failed
load more v
72%

Source: IT House (IT屋) » "Python multiprocessing - logging.FileHandler object raises PicklingError". The logging module's handlers and multiprocessing jobs do not seem to mix; I don't fully understand the PicklingError.

Example_snippet/controller/utility/_raises.js/ import functools import loggin. . .
import functools
import logging
import multiprocessing as mp

# Module-level logger and file handler used by the pool example below.
logger = logging.getLogger('myLogger')
# NOTE(review): the traceback below shows this object cannot be pickled
# ("Can't pickle <type 'thread.lock'>"), so it must not be passed to pool
# workers as a task argument.
handler = logging.FileHandler('logFile')

def worker(x, handler):
    """Print the square of *x*. *handler* is accepted but unused; it exists
    only so the example can bind a FileHandler via functools.partial.

    Fix vs. the scraped original: Python 2 `print x ** 2` replaced with the
    function form so the snippet also runs on Python 3.
    """
    print(x ** 2)

# Binding the FileHandler into the partial forces it through pickling when
# pool.map ships each task to the worker process; that is what raises
# cPickle.PicklingError ("Can't pickle <type 'thread.lock'>").
pWorker = functools.partial(worker, handler=handler)

# NOTE(review): indentation restored — in the scraped original the map/close/
# join lines had escaped the `__main__` guard to module level.
if __name__ == '__main__':
    pool = mp.Pool(processes=1)
    pool.map(pWorker, range(3))
    pool.close()
    pool.join()
Step 2 continued with cPickle.PicklingError: Can't p. . .
cPickle.PicklingError: Can't pickle <type 'thread.lock'>: attribute lookup thread.lock failed
Step 3 continued with # this works def pWorker( x ) . . .
# this works
def pWorker(x):
   worker(x, handler)

# this works too
pWorker = functools.partial(worker, handler = open('logFile'))
Step 4 continued with from multiprocessing import Po. . .
# Working variant: instead of pickling the FileHandler per task, hand it to
# each pool worker once via the Pool `initializer`, which stores it in a
# module global.
#
# Fixes vs. the scraped original: the import statement had been split across
# two lines (a syntax error), Python 2 `print` statements modernized, body
# indentation restored, and `pool.join` was missing its call parentheses.
from multiprocessing import Pool
import logging

logger = logging.getLogger('myLogger')


def worker(x):
    """Print the handler installed by the pool initializer, then x squared."""
    print(handler)
    print(x ** 2)


def initializer(handle):
    """Pool initializer: stash *handle* in a module global for the workers."""
    global handler
    handler = handle


if __name__ == "__main__":
    handler = logging.FileHandler('logFile')
    #pWorker = functools.partial(worker, handler = handler)
    pool = Pool(processes=4, initializer=initializer, initargs=(handler,))
    pool.map(worker, range(3))
    pool.close()
    pool.join()  # was `pool.join` — the bare attribute never joined the pool
load more v
65%

In this organization All GitHub ↵ Jump to ↵ , Go to definition R

Example_snippet/controller/utility/_python.js/ Automerge-Triggered-By: GH:vsa. . .
Automerge - Triggered - By: GH: vsajip
75%

I don't really understand the PicklingError. Is it because objects of class logging.FileHandler cannot be pickled?

Example_snippet/controller/utility/_python.js/ from multiprocessing import Po. . .
from multiprocessing
import Pool
import logging

logger = logging.getLogger('myLogger')

def worker(x):
   print handler
print x ** 2

def initializer(handle):
   global handler
handler = handle

if __name__ == "__main__":
   handler = logging.FileHandler('logFile')
#pWorker = functools.partial(worker, handler = handler)
pool = Pool(processes = 4, initializer = initializer, initargs = (handler, ))
pool.map(worker, range(3))
pool.close()
pool.join