Skip to content

Commit

Permalink
add verrou on view, code to show mbz tags as they are downloaded
Browse files Browse the repository at this point in the history
  • Loading branch information
lachhebo committed Jun 12, 2019
1 parent 8d3d7a2 commit 227316c
Show file tree
Hide file tree
Showing 5 changed files with 227 additions and 146 deletions.
81 changes: 39 additions & 42 deletions src/Crawler_dir.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,8 @@
from threading import Thread, RLock
from threading import Thread
from .data_crawler import Data_Crawler
from .model import Model
import time

verrou = RLock()

class Crawler_Dir(Thread):

Expand All @@ -17,43 +16,41 @@ def __init__(self, directory, store):

def run(self):
    """Crawl the directory in a background thread.

    Splits the crawled file list round-robin across four worker threads,
    each of which fetches online (MusicBrainz) data through the shared
    Data_Crawler, then blocks until every worker has finished.
    """
    filelist = self.data_crawler.get_filelist(self.directory)

    # Round-robin split (file 0 -> worker 0, file 1 -> worker 1, ...):
    # identical distribution to the original i = 1..4 dispatch loop.
    chunks = [filelist[i::4] for i in range(4)]

    workers = [
        Thread(target=self.data_crawler.get_data_from_online,
               args=(chunk, self.directory))
        for chunk in chunks
    ]

    for worker in workers:
        worker.start()
    # Wait for all workers so the crawl is complete when run() returns.
    for worker in workers:
        worker.join()
50 changes: 24 additions & 26 deletions src/Crawler_modif.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,8 @@
from threading import Thread, RLock
from threading import Thread
from .data_crawler import Data_Crawler
from .model import Model
from .treeview import TreeView

verrou = RLock()

class Crawler_Modif(Thread):

Expand Down Expand Up @@ -33,38 +32,37 @@ def __init__(self, modifs, store, selection, some_file):


def run(self):
    """Apply pending tag modifications, then refresh the MusicBrainz/lyrics view.

    If ``some_file == 1`` only the currently selected rows whose file name
    appears in ``self.modifs`` are re-crawled; otherwise every modified file
    is re-crawled.  Afterwards, if the selection has not changed, the
    freshly scraped tags and lyrics are pushed to the view.
    """
    if self.some_file == 1:
        print("modified some tags :")
        model, listiter = self.selection.get_selected_rows()
        # Re-crawl only the selected rows that were actually modified.
        for rowiter in listiter:  ## TODO
            namefile = model[rowiter][0]
            if namefile in self.modifs:
                self.data_crawler.update_data_crawled([namefile], self.directory)
    else:
        self.data_crawler.update_data_crawled(self.modifs, self.directory)

    # Only refresh the view if the user's selection is still the same one
    # this thread was started for (avoids showing stale data).
    if self.selectionequal(self.model.selection):
        model, listiter = self.model.selection.get_selected_rows()

        multiple_line_selected = 1 if len(listiter) > 1 else 0

        data_scrapped = self.data_crawler.get_tags(model, listiter, multiple_line_selected)
        lyrics_scrapped = self.data_crawler.get_lyrics(model, listiter, multiple_line_selected)

        # Re-check: the selection may have changed while scraping.
        if self.selectionequal(self.model.selection):
            self.model.view.show_mbz(data_scrapped)
            self.model.view.show_lyrics(lyrics_scrapped)



Expand Down
34 changes: 20 additions & 14 deletions src/data_crawler.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,5 @@
from os import walk

from threading import RLock

verrou = RLock()

import musicbrainzngs as mb
from PyLyrics import *

Expand Down Expand Up @@ -157,29 +153,38 @@ def get_lyrics(self,model,listiter, multiline_selected):
else :
return "Lyrics not avalaible"
else :
return "File not crawled yet on lyrics.wikia"
return None

def get_tags(self,model,listiter, multiline_selected):

namefile = model[listiter][0]
if namefile in self.tag_finder :
candidat = self.tag_finder[namefile].copy()
else :
return { "title":"", "artist":"", "album":"", "track":"", "year":"", "genre":"", "cover":""}
if multiline_selected == 0 :

namefile = model[listiter][0]
if namefile in self.tag_finder :
return self.tag_finder[namefile].copy()
else :
return None

elif multiline_selected == 1 :

if multiline_selected == 1 :
for i in range(1,len(listiter)):
namefile = model[listiter][0]
if namefile in self.tag_finder :
candidat = self.tag_finder[namefile].copy()
else :
return None

for i in range(1,len(listiter)):
beta = model[listiter[i]][0]
if beta in self.tag_finder :
for tagi in ["artist","album","year","genre","cover"] :
if candidat[tagi] != self.tag_finder[beta][tagi] :
candidat[tagi] = ""
candidat["title"] = ""
candidat["track"] = ""
else :
return None


return candidat
return candidat

def reorder_data(self,mzdata):
'''
Expand Down Expand Up @@ -257,3 +262,4 @@ def getInstance():
if Data_Crawler.__instance == None:
Data_Crawler()
return Data_Crawler.__instance

79 changes: 76 additions & 3 deletions src/model.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
from os import walk
#from time import sleep
from threading import Thread
from .moteur import Moteur
from .view import View
from .data_crawler import Data_Crawler
from .treeview import TreeView

from gi.repository import Pango
#from gi.repository import Pango

import os

Expand Down Expand Up @@ -147,9 +149,79 @@ def update_view(self,selection):
lyrics_scrapped = self.data_crawler.get_lyrics(model, listiter, multiple_line_selected)

self.view.show_tags(self.tagdico, multiple_line_selected)
self.view.show_mbz(data_scrapped)
self.view.show_lyrics(lyrics_scrapped)

if data_scrapped == None :
self.view.show_mbz({ "title":"", "artist":"", "album":"", "track":"", "year":"", "genre":"", "cover":""})

lenselec = len(listiter)
fileselec = []

for i in range(len(listiter)):
namefile = model[listiter[i]][0]
fileselec.append(namefile)

thread_waiting_mbz = Thread(target = self.wait_for_mbz, args=(model,listiter,lenselec,fileselec,multiple_line_selected))
thread_waiting_mbz.start()
else :
self.view.show_mbz(data_scrapped)


if lyrics_scrapped == None :
self.view.show_lyrics("File not crawled yet on lyrics.wikia")
lenselec = len(listiter)
fileselec = []

for i in range(len(listiter)):
namefile = model[listiter[i]][0]
fileselec.append(namefile)

#thread_waiting_lyr = Thread(target = self.wait_for_lyrics, args=(model,listiter,lenselec,fileselec,multiple_line_selected)) #Writing data
#self.wait_for_lyrics(model,listiter,multiple_line_selected)
#thread_waiting_lyr.start()
else :
self.view.show_lyrics(lyrics_scrapped)

def wait_for_mbz(self, model, listiter, lenselec, fileselec, multiple_line_selected):
    """Poll the crawler until MusicBrainz tags for the selection are available.

    Runs in a background thread.  Keeps polling while the user's selection
    is still the one recorded in (lenselec, fileselec); once tags arrive
    (and the selection is re-verified) they are pushed to the view.
    """
    import time  # local import: module-level `time` import is commented out in this file

    print("Entering thread waiting for mbz")

    while self.is_selectionequal(self.selection, lenselec, fileselec):
        data_scrapped = self.data_crawler.get_tags(model, listiter, multiple_line_selected)
        if data_scrapped is not None:
            # Re-check the selection: it may have changed while get_tags ran.
            if self.is_selectionequal(self.selection, lenselec, fileselec):
                print("mbz found :", data_scrapped["title"])
                self.view.show_mbz(data_scrapped)
            return
        # Original loop busy-waited at 100% CPU; sleep briefly between polls.
        time.sleep(0.1)



def wait_for_lyrics(self, model, listiter, lenselec, fileselec, multiple_line_selected):
    """Poll the crawler until lyrics for the current selection are available.

    Runs in a background thread.  Keeps polling while the user's selection
    is still the one recorded in (lenselec, fileselec); once lyrics arrive
    (and the selection is re-verified) they are pushed to the view.
    """
    import time  # local import: module-level `time` import is commented out in this file

    print("Entering thread waiting for lyrics")

    while self.is_selectionequal(self.selection, lenselec, fileselec):
        lyrics_scrapped = self.data_crawler.get_lyrics(model, listiter, multiple_line_selected)
        if lyrics_scrapped is not None:
            # Re-check the selection: it may have changed while get_lyrics ran.
            if self.is_selectionequal(self.selection, lenselec, fileselec):
                print("lyrics found :", lyrics_scrapped[0:10])
                self.view.show_lyrics(lyrics_scrapped)
            return
        # Original loop busy-waited at 100% CPU; sleep briefly between polls.
        time.sleep(0.1)


def is_selectionequal(self, selec, lenselec2, filelistselec2):
    """Return True iff *selec* still matches the selection recorded earlier.

    The recorded selection is described by its size ``lenselec2`` and the
    list of selected file names ``filelistselec2``.
    """
    model, rows = selec.get_selected_rows()

    # Guard: a different number of selected rows means the selection changed.
    if len(rows) != lenselec2:
        return False

    # Same size: every currently selected file must be among the recorded ones.
    return all(model[row][0] in filelistselec2 for row in rows)

def rename_files(self):

Expand Down Expand Up @@ -415,3 +487,4 @@ def getInstance():
if Model.__instance == None:
Model()
return Model.__instance

Loading

0 comments on commit 227316c

Please sign in to comment.