-
Notifications
You must be signed in to change notification settings - Fork 0
/
threadpool.py
217 lines (192 loc) · 7.93 KB
/
threadpool.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
#coding=utf-8
import sys
import threading
import Queue
import traceback
# 定义一些Exception,用于自定义异常处理
class NoResultsPending(Exception):
    """Signals that every submitted work request has already been processed."""
class NoWorkersAvailable(Exception):
    """Signals that no worker threads remain to process pending requests."""
def _handle_thread_exception(request, exc_info):
    """Default exception callback: print the worker's traceback to stderr.

    The request argument is accepted (to match the exc_callback signature)
    but not used.
    """
    exc_type, exc_value, exc_tb = exc_info
    traceback.print_exception(exc_type, exc_value, exc_tb)
#classes
class WorkerThread(threading.Thread):
    """Background worker thread: pulls work requests from requestQueue,
    executes them, and puts (request, result) tuples onto resultQueue."""
    def __init__(self,requestQueue,resultQueue,poll_timeout=5,**kwds):
        # requestQueue: shared Queue.Queue of pending WorkRequest objects
        # resultQueue: shared Queue.Queue receiving (request, result) tuples
        # poll_timeout: seconds to block on requestQueue.get() before
        #   re-checking the dismissed flag
        threading.Thread.__init__(self,**kwds)
        # Daemon thread: will not keep the interpreter alive on exit.
        self.setDaemon(True)
        self._requestQueue = requestQueue
        self._resultQueue = resultQueue
        self._poll_timeout = poll_timeout
        # Event flag: set() by dismiss() to ask this thread to exit.
        self._dismissed = threading.Event()
        # Starts running immediately on construction.
        self.start()
    def run(self):
        """Loop, executing as many work requests as possible until dismissed."""
        while True:
            if self._dismissed.is_set():
                break
            try:
                # Queue.Queue is thread-safe; block until a request arrives
                # or poll_timeout elapses.
                request = self._requestQueue.get(True,self._poll_timeout)
            except Queue.Empty:
                # Timed out with nothing to do — loop back and re-check
                # the dismissed flag.
                continue
            else:
                # Re-check dismissed here: the thread may have been dismissed
                # during the blocking get(). If so, hand the request back to
                # the queue for another worker and exit.
                if self._dismissed.is_set():
                    self._requestQueue.put(request)
                    break
                try:
                    # Execute the callable and publish (request, result).
                    result = request.callable(*request.args,**request.kwds)
                    #print self.getName()
                    self._resultQueue.put((request,result))
                except:
                    # Any exception: flag the request and publish the
                    # sys.exc_info() tuple in place of a result, so
                    # ThreadPool.poll() can route it to exc_callback.
                    request.exception = True
                    self._resultQueue.put((request,sys.exc_info()))
    def dismiss(self):
        """Ask this worker to exit once its current work item finishes."""
        self._dismissed.set()
class WorkRequest:
    """A single unit of work for the thread pool.

    Wraps a callable plus its arguments, a hashable identifier, and the
    optional result/exception callbacks consumed by ThreadPool.poll().
    """
    def __init__(self,callable_,args=None,kwds=None,requestID=None,
                 callback=None,exc_callback=_handle_thread_exception):
        """
        @param callable_: the function that performs the work
        @param args: positional-argument list for callable_
        @param kwds: keyword-argument dict for callable_
        @param requestID: optional hashable id; defaults to id(self)
        @param callback: optional callable(request, result) run on success
        @param exc_callback: callable(request, exc_info) run on failure
        @raise TypeError: if requestID is not hashable
        """
        # Fix: compare to None with 'is', not '==' (identity, not equality).
        if requestID is None:
            # id(self) is unique among live objects — a safe default key.
            self.requestID = id(self)
        else:
            try:
                self.requestID = hash(requestID)
            except TypeError:
                raise TypeError("requestId must be hashable")
        self.exception = False  # set True by the worker if callable_ raises
        self.callback = callback
        self.exc_callback = exc_callback
        self.callable = callable_
        self.args = args or []
        self.kwds = kwds or {}
    def __str__(self):
        return "WorkRequest id=%s args=%r kwargs=%r exception=%s" % \
            (self.requestID,self.args,self.kwds,self.exception)
class ThreadPool:
    """Manages a pool of WorkerThreads feeding off shared request/result queues."""
    def __init__(self,num_workers,q_size=0,resq_size=0,poll_timeout=5):
        """
        @param num_workers: number of worker threads created at start
        @param q_size, resq_size: max sizes of the request and result queues
               (0 = unbounded, per Queue.Queue semantics)
        @param poll_timeout: timeout each WorkerThread uses when waiting on
               the request queue
        """
        self._requestQueue = Queue.Queue(q_size)
        self._resultQueue = Queue.Queue(resq_size)
        self.workers = []
        self.dismissedWorkers = []
        # requestID -> WorkRequest, for easy lookup in poll()
        self.workRequests = {}
        self.createWorkers(num_workers,poll_timeout)
    def createWorkers(self,num_workers,poll_timeout=5):
        """Create num_workers WorkerThreads (default poll timeout 5s)."""
        for i in range(num_workers):
            self.workers.append(WorkerThread(self._requestQueue,self._resultQueue,poll_timeout=poll_timeout))
    def dismissWorkers(self,num_workers,do_join=False):
        """Dismiss up to num_workers threads.

        Joins them immediately when do_join is True; otherwise stashes them
        in dismissedWorkers for a later joinAllDismissedWorkers() call.
        """
        dismiss_list = []
        for i in range(min(num_workers,len(self.workers))):
            worker = self.workers.pop()
            worker.dismiss()
            dismiss_list.append(worker)
        if do_join :
            for worker in dismiss_list:
                worker.join()
        else:
            self.dismissedWorkers.extend(dismiss_list)
    def joinAllDismissedWorkers(self):
        """Join every previously dismissed (but not yet joined) thread."""
        for worker in self.dismissedWorkers:
            worker.join()
        self.dismissedWorkers = []
    def putRequest(self,request ,block=True,timeout=None):
        """Queue a WorkRequest for execution.

        When the queue is full (q_size reached) this blocks until space is
        free, or until timeout expires.
        """
        assert isinstance(request,WorkRequest)
        assert not getattr(request,'exception',None)
        self._requestQueue.put(request, block, timeout)
        self.workRequests[request.requestID] = request
    def poll(self,block = False):
        """Drain the result queue, invoking callbacks for each result.

        Raises NoResultsPending when no requests are outstanding, and
        NoWorkersAvailable when blocking with no live workers left.
        """
        while True:
            if not self.workRequests:
                raise NoResultsPending
            elif block and not self.workers:
                raise NoWorkersAvailable
            try:
                # By default only take results that are already available;
                # with block=True, wait until one arrives.
                request , result = self._resultQueue.get(block=block)
                if request.exception and request.exc_callback:
                    request.exc_callback(request,result)
                # Run the success callback only when the failure was not
                # already handed to exc_callback.
                if request.callback and not (request.exception and request.exc_callback):
                    request.callback(request,result)
                del self.workRequests[request.requestID]
            except Queue.Empty:
                break
    def wait(self):
        """Block until every submitted request has been processed."""
        while True:
            try:
                self.poll(True)
            except NoResultsPending:
                break
    def workersize(self):
        """Return the number of active (non-dismissed) workers."""
        return len(self.workers)
    def stop(self):
        """Dismiss and join all worker threads, ensuring they finish."""
        self.dismissWorkers(self.workersize(),True)
        self.joinAllDismissedWorkers()
if __name__=='__main__':
import random
import time
import datetime
import urllib2
count=0
map = {}
def do_work(url):
global count
count+=1
url_headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.48'}
request = urllib2.Request(url,headers=url_headers)
try:
response = urllib2.urlopen(request)
except:
return "ERROR"
content = response.read()
return "Success"
def print_result(request,result):
print "---Result from request %s : %r" % (request.requestID,result)
map[request.requestID] = result
main = ThreadPool(4)
req_list = []
for i in range(40):
req = WorkRequest(do_work,args=['http://www.baidu.com'],kwds={},callback=print_result)
main.putRequest(req)
req_list.append(req.requestID)
print "work request #%s added." % req.requestID
print '-'*20, main.workersize(),'-'*20
main.wait()
main.stop()
for reqID in req_list:
result = map[reqID]
handle(result)
print count
print "Stop"