crawler.py
import requests
import urllib3
# The crawler deliberately skips certificate verification, so silence the
# InsecureRequestWarning that every request would otherwise emit.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from bs4 import BeautifulSoup
import queue
import threading
import argparse


class discoveryWebCrawlerClass:
    def __init__(self, domain, level):
        self.domain = domain
        self.q = queue.Queue()
        self.urls = []
        self.lock = threading.Lock()  # guards self.urls across worker threads
        self.levelsToCrawl = level

    def crawlURL(self, crawlUrl, currentLevel):
        s = requests.Session()
        r = s.get(crawlUrl, verify=False, timeout=10)
        soup = BeautifulSoup(r.content, 'html.parser')
        links = soup.find_all('a')
        for link in links:
            try:
                url = link.get('href')
                # Some href values are not full URLs; they look something like /login.php
                if url[0] == '/':
                    url = self.domain + url
                # Only follow links whose host matches the crawl domain,
                # and record each URL once (the lock prevents two workers
                # from appending the same URL).
                if url.split('/')[2] == self.domain.split('/')[2]:
                    with self.lock:
                        if url in self.urls:
                            continue
                        self.urls.append(url)
                    # Queue the new URL with an updated crawl depth.
                    if currentLevel + 1 < self.levelsToCrawl:
                        self.q.put({'url': url, 'level': currentLevel + 1})
            except Exception:
                # Skip hrefs that are missing, empty, or otherwise unusable.
                pass
    def worker(self):
        while True:
            crawlUrlDict = self.q.get()
            try:
                self.crawlURL(crawlUrlDict['url'], crawlUrlDict['level'])
            except Exception:
                # A timed-out or failed request must not kill the worker.
                pass
            finally:
                # Always mark the task done, or q.join() would block forever
                # after the first failed request.
                self.q.task_done()

    def start(self):
        # Seed the queue with the root domain at depth 0, then let a pool
        # of daemon threads drain it; q.join() returns once every queued
        # URL has been crawled.
        self.q.put({'url': self.domain, 'level': 0})
        for i in range(100):
            t = threading.Thread(target=self.worker)
            t.daemon = True
            t.start()
        self.q.join()


parser = argparse.ArgumentParser()
parser.add_argument("-d", "--domain", help="Domain name; EX: https://test.com")
parser.add_argument("-l", "--level", help="Levels deep to crawl. EX: 2")
args = parser.parse_args()
if args.domain and args.level:
    webcrawler = discoveryWebCrawlerClass(args.domain, int(args.level))
    webcrawler.start()
    for i, url in enumerate(webcrawler.urls):
        print("{0}\t{1}".format(i, url))
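
# Example invocation (a sketch, assuming the file is saved as crawler.py
# and requests plus beautifulsoup4 are installed):
#   python crawler.py -d https://test.com -l 2
# This seeds the queue with https://test.com, crawls two levels deep, and
# prints every discovered same-domain URL with its index, tab-separated.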