Commit f1847b97 authored by Félix Hartmann's avatar Félix Hartmann

Two parameters of the skeleton detection algorithms are now tunable from the GUI:
* 'percent_diam': transverse range for searching the next point
                  (in diameter unit)
* 'detection_step': step size for the skeleton detection (in pixels)
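A minimal, self-contained sketch of how these two settings are resolved before processing runs (the defaults and the None fallback mirror the diff below; the helper itself is hypothetical):

PERCENT_DIAM_DEFAULT = 1.4    # transverse search range, in diameter units
DETECTION_STEP_DEFAULT = 0.3  # skeleton detection step, in pixels

def resolve_detection_settings(tk_detection_step=None, tk_tige_percent_diam=None):
    # Fall back to the defaults when the Tk variables have not been created yet.
    step = tk_detection_step.get() if tk_detection_step is not None else DETECTION_STEP_DEFAULT
    diam = tk_tige_percent_diam.get() if tk_tige_percent_diam is not None else PERCENT_DIAM_DEFAULT
    return step, diam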
parent cff4f762
......@@ -24,7 +24,7 @@ Changes:
04/05/2019: [Hugo] Correct bugs on export when tige_id_mapper was defined with string names for bases. Allow floats in the slider used to select the sensitivity.
24/09/2018: [Hugo] Remove test detection (does not work in a Thread with matplotlib!) + Bug fix for processing a single image.
08/06/2018: [Hugo] Correct queue.qsize() bug on OS X (does not work on this platform).
18/04/2018: [Hugo] correct datetime bug. Set percentdiam to 1.4 in MethodOlivier (previous 0.9). Windows system can now use thread
18/04/2018: [Hugo] correct datetime bug. Set percent_diam to 1.4 in MethodOlivier (previous 0.9). Windows system can now use thread
22/10/2017: [Hugo] Optimised the GUI layout, using Tk.grid instead of Tk.pack
16/10/2015: [Hugo] Added various options for the tige (deletion, etc.) via a right-click menu
+Refactoring of plot_image and of the global_tige_id_mapper handling (to manage deletions)
......@@ -44,12 +44,13 @@ else:
if python2:
import Tkinter as Tk, tkFileDialog as filedialog, tkMessageBox as messagebox
from ttk import Style, Button, Frame, Progressbar, Entry, Scale, Checkbutton
from ttk import Style, Button, Frame, Progressbar, Label, Entry, Scale, Checkbutton
import Queue as queue
else:
import tkinter as Tk
from tkinter import filedialog, messagebox
from tkinter.ttk import Style, Button, Frame, Progressbar, Entry, Scale, Checkbutton
from tkinter.ttk import (Style, Button, Frame, Progressbar, Label, Entry, Scale,
Checkbutton)
import queue
if python2:
......@@ -136,12 +137,15 @@ add_dist_draw = False
dist_measure_pts = []
pixel_distance = None
cm_distance = None
interekt = None # Contains the main window
tk_list_images = None # Contient l'objet TK listbox qui accueil les noms des images
tk_toplevel_listimages = None # Contient la fenetre TK qui contient la list des images
tk_tige_offset = None # Contains the offset for tige skeleton detection
hdf5file = None #Contient l'emplacement du fichier hdf5 qui contient les données
PAS_TRAITEMENT = 0.3
interekt = None # contains the main window
hdf5file = None # path to hdf5 data file
tk_list_images = None # Tk listbox object which contains the list of images
tk_toplevel_listimages = None # Tk window which displays the list of images
tk_tige_offset = None # contains the offset for tige skeleton detection
tk_tige_percent_diam = None # contains the diameter multiplier for the transverse range of tige skeleton detection
tk_detection_step = None # contains the space step for the skeleton detection process
PERCENT_DIAM_DEFAULT = 1.4 # default value chosen by Hugo
DETECTION_STEP_DEFAULT = 0.3 # default value chosen by Hugo
CURVATURE_AVERAGING_LENGTH = 2 # Length (in cm) of the zone over which the curvature is averaged
tiges_names = [] # Contains a name for each tige
......@@ -858,21 +862,19 @@ def check_process():
def process_data(display_message=True):
global data_out, tiges_data
#Get queue result
# Get queue result
data_out = data_out.get()
# print(data_out.keys())
#When it's done get the tiges_data from the data from data_out
# When it's done, get tiges_data from data_out
tiges_data = data_out['data']['tiges_data']
#On affiche que l'on fait la sauvegarde
# On affiche que l'on fait la sauvegarde
if display_message:
interekt.display_message(_("Saving data into the .h5 file"))
for idt in range(len(base_tiges)):
# Get the tige id in the hdf file
idhdf = get_hdf5_tigeid(hdf5file, idt)
tige_name = h5store.get_tiges_names(hdf5file, idhdf)
h5store.tige_to_hdf5(hdf5file, idhdf, tige_name, base_tiges[idt],
tiges_data.xc[idt], tiges_data.yc[idt],
tiges_data.theta[idt], tiges_data.diam[idt],
......@@ -882,18 +884,19 @@ def process_data(display_message=True):
# Run the processing and save it into the hdf5 file
if display_message:
interekt.display_message(_("Processing extracted data"))
Traite_data(save_to_hdf5=True)
# Reload the tige data from the hdf5 file
load_hdf5_tiges(hdf5file)
#Plot the first image in the list
# Plot the first image in the list
interekt.plot_image(cur_image, force_clear=True)
interekt.change_button_state()
interekt.prog_bar.stop()
#Restore focus on the current canvas
# Restore focus on the current canvas
interekt.canvas.get_tk_widget().focus_force()
def launch_process():
......@@ -918,27 +921,64 @@ def launch_process():
# Check how many images we have
Nimgs = h5store.get_number_of_images(hdf5file)
# Get the tige options (we only have the cut-off threshold)
# Retrieve the detection step from the Tkinter DoubleVar
if tk_detection_step is not None:
detection_step = tk_detection_step.get()
else:
detection_step = DETECTION_STEP_DEFAULT
# Save the value of detection step into the hdf5 file
try:
h5store.save_detection_step(hdf5file, detection_step)
except Exception as e:
print("Failure to update the detection step")
print(e)
# Get the tige options (cut-off threshold and percent_diam)
# Beware: this returns a dictionary with aa['hdf5_id'] = value
hdf_tiges_seuil_offset = h5store.get_postprocessing(hdf5file, 'Tiges_seuil_offset')
hdf_tiges_percent_diam = h5store.get_postprocessing(hdf5file, 'Tiges_percent_diam')
# The hdf ids must be converted to the tige positions matching base_tiges
Tiges_seuil_offset = {}
Tiges_percent_diam = {}
for i in range(len(base_tiges)):
hdf5_id = get_hdf5_tigeid(hdf5file, i)
if hdf5_id in hdf_tiges_seuil_offset:
hdf5_tigeid = get_hdf5_tigeid(hdf5file, i)
if hdf5_tigeid in hdf_tiges_seuil_offset:
# Check whether the stored value is None and
# if so, set the offset value to 0
tmp_offset = hdf_tiges_seuil_offset[hdf5_id]
tmp_offset = hdf_tiges_seuil_offset[hdf5_tigeid]
if tmp_offset is None:
tmp_offset = 0
Tiges_seuil_offset[i] = tmp_offset
else:
Tiges_seuil_offset[i] = 0
try:
# Update the tige data in the hdf5 file
new_data_tige = {"Tiges_seuil_offset": Tiges_seuil_offset[i]}
h5store.save_tige_dict_to_hdf(hdf5file, hdf5_tigeid, new_data_tige)
except Exception as e:
print("La mise à jour des options a raté")
print(e)
if hdf5_tigeid in hdf_tiges_percent_diam:
# Check whether the stored value is None and
# if so, use the default value of percent_diam
tmp_percent_diam = hdf_tiges_percent_diam[hdf5_tigeid]
if tmp_percent_diam is None:
tmp_percent_diam = PERCENT_DIAM_DEFAULT
Tiges_percent_diam[i] = tmp_percent_diam
else:
Tiges_percent_diam[i] = PERCENT_DIAM_DEFAULT
try:
# Update the tige data in the hdf5 file
new_data_tige = {'Tiges_percent_diam': Tiges_percent_diam[i]}
h5store.save_tige_dict_to_hdf(hdf5file, hdf5_tigeid, new_data_tige)
except Exception as e:
print("La mise à jour des options a raté")
print(e)
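Both per-tige options follow the same read-or-default-then-persist pattern as the block above; a compact, self-contained sketch of that fallback logic (hypothetical helper, with the hdf5 writing left out):

def resolve_tige_option(stored_values, tige_index, default):
    # Return the stored per-tige value, or the default if it is missing or None.
    value = stored_values.get(tige_index)
    return default if value is None else value

# Example with percent_diam values keyed by tige index:
stored = {0: None, 2: 0.9}
print([resolve_tige_option(stored, i, 1.4) for i in range(3)])  # [1.4, 1.4, 0.9]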
#print('Les offset des tiges')
#print(Tiges_seuil_offset)
if Nimgs > 0:
# Should we do a pre-processing step to estimate the max size (by running the detection on the last image)?
......@@ -951,11 +991,12 @@ def launch_process():
num_images=[Nimgs-1, Nimgs],
num_tiges=len(base_tiges),
base_points=base_tiges, thread=is_thread,
pas=PAS_TRAITEMENT,
pas=detection_step,
tiges_seuil_offset=Tiges_seuil_offset,
tiges_percent_diam=Tiges_percent_diam,
output_function=none_print)
tiges_x = traite_tiges2(pre_data[0]['tiges_data'], pas=PAS_TRAITEMENT)[0]
tiges_x = traite_tiges2(pre_data[0]['tiges_data'], pas=detection_step)[0]
#print(tiges_tailles/0.3)
#print(tiges_x.shape)
max_array_size = tiges_x.shape[2] + 100
......@@ -974,9 +1015,10 @@ def launch_process():
'output_function':plot_progress,
#'output_function_args': {'root':root},
'thread':is_thread,
'pas':PAS_TRAITEMENT,
'pas': detection_step,
'outputdata':data_out,
'tiges_seuil_offset': Tiges_seuil_offset,
'tiges_percent_diam': Tiges_percent_diam,
'memory_size': max_array_size,
'crops':Crops_data})
......@@ -1005,9 +1047,15 @@ def Traite_data(save_to_hdf5=False):
if not tiges_data: # if no tige has been processed
return
# Retrieve the detection step from the h5 file
detection_step = h5store.get_detection_step(hdf5file)
if detection_step is None:
detection_step = DETECTION_STEP_DEFAULT
tiges_x, tiges_y, tiges_s, tiges_tailles, tiges_angles, tiges_tip_angles, \
tiges_lines, tiges_measure_zone = traite_tiges2(
tiges_data, pas=PAS_TRAITEMENT, return_estimation_zone=True)
tiges_data, pas=detection_step, return_estimation_zone=True)
if save_to_hdf5:
for id_tige in range(len(tiges_x)):
......@@ -1114,6 +1162,11 @@ def export_one_tige():
l'écran en csv
"""
detection_step = h5store.get_detection_step(hdf5file)
if detection_step is None:
detection_step = DETECTION_STEP_DEFAULT
# Tige id in the h5 file
hdf_tige_id = get_hdf5_tigeid(hdf5file, cur_tige)
......@@ -1133,7 +1186,7 @@ def export_one_tige():
tx = tiges_data.xc[cur_tige,cur_image]
ty = tiges_data.yc[cur_tige,cur_image]
tsmoothx, tsmoothy, tangle, ts, tW = traite_tige2(
tx, ty, tiges_data.diam[cur_tige, cur_image]/2.0, pas=PAS_TRAITEMENT)
tx, ty, tiges_data.diam[cur_tige, cur_image]/2.0, pas=detection_step)
tsmoothx = tsmoothx[:-1]
tsmoothy = tsmoothy[:-1]
tcourbure = diff(tangle)/diff(ts)
......@@ -1159,12 +1212,14 @@ def save_tige_options():
new_tige_name = tktigeid.get()
new_offset = tk_tige_offset.get()
new_percent_diam = tk_tige_percent_diam.get()
hdf5_tigeid = get_hdf5_tigeid(hdf5file, cur_tige)
try:
h5store.save_tige_name(hdf5file, hdf5_tigeid, new_tige_name)
# Update the tige data in the hdf5 file
new_data_tige = {"Tiges_seuil_offset": new_offset}
new_data_tige = {"Tiges_seuil_offset": new_offset,
'Tiges_percent_diam': new_percent_diam}
# need to get the tige id in the hdf5 file
h5store.save_tige_dict_to_hdf(hdf5file, hdf5_tigeid, new_data_tige)
except Exception as e:
......@@ -1179,7 +1234,7 @@ def save_tige_options():
def show_tige_options():
global toptige, tktigeid, tk_tige_offset
global toptige, tktigeid, tk_tige_offset, tk_tige_percent_diam
# Force the popup menu in tk to close
interekt.floatmenuisopen = False
......@@ -1188,6 +1243,7 @@ def show_tige_options():
tige_name = tiges_names[cur_tige]
hdf5_tigeid = get_hdf5_tigeid(hdf5file, cur_tige)
seuil = h5store.get_postprocessing(hdf5file, 'Tiges_seuil_offset', hdf5_tigeid)
percent_diam = h5store.get_postprocessing(hdf5file, 'Tiges_percent_diam', hdf5_tigeid)
# Add a tk box for the export
toptige = Tk.Toplevel(master=root)
......@@ -1214,6 +1270,18 @@ def show_tige_options():
orient=Tk.HORIZONTAL)
w2.pack(fill='x', expand=True)
Tk.Label(idframe, text=_("Diameter detection range:")).pack(fill='x', expand=True)
tk_tige_percent_diam = Tk.DoubleVar()
if percent_diam is not None:
tk_tige_percent_diam.set(percent_diam)
else:
tk_tige_percent_diam.set(PERCENT_DIAM_DEFAULT)
w3 = Tk.Scale(idframe, from_=0, to=10, resolution=0.1, variable=tk_tige_percent_diam,
orient=Tk.HORIZONTAL)
w3.pack(fill='x', expand=True)
Tk.Button(idframe, text=_("Save"),
command=save_tige_options).pack(fill='x', expand=True)
......@@ -1392,6 +1460,12 @@ def show_angle_and_curvature(tige_id=None):
# Force the popup menu in tk to close
interekt.floatmenuisopen = False
# Retrieve the detection step from the h5 file
detection_step = h5store.get_detection_step(hdf5file)
if detection_step is None:
detection_step = DETECTION_STEP_DEFAULT
# Retrieving the tige id from the hdf5 file
tige_hdf_id = get_hdf5_tigeid(hdf5file, cur_tige)
......@@ -1413,10 +1487,10 @@ def show_angle_and_curvature(tige_id=None):
s *= scale_cmpix
lines *= scale_cmpix
nb_averaging_points = int(round(CURVATURE_AVERAGING_LENGTH
/ (scale_cmpix * PAS_TRAITEMENT)))
/ (scale_cmpix * detection_step)))
# Size of the averaging window
W = int(4 * round(tiges_data.diam[cur_tige].mean() / PAS_TRAITEMENT))
W = int(4 * round(tiges_data.diam[cur_tige].mean() / detection_step))
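# Worked example with assumed values: scale_cmpix = 0.01 cm/pixel and
# detection_step = 0.3 pixel give 0.003 cm per detection step, so averaging
# over CURVATURE_AVERAGING_LENGTH = 2 cm uses round(2 / 0.003) = 667 points;
# likewise a mean diameter of 30 pixels gives a smoothing window
# W = 4 * round(30 / 0.3) = 400 points.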
# grid = mpl.GridSpec(3, 2, wspace=.7, hspace=.7)
grid = mpl.GridSpec(3, 2)
......@@ -1790,6 +1864,12 @@ def show_growth_length():
# Force the popup menu in tk to close
interekt.floatmenuisopen = False
# Retrieve the detection step from the h5 file
detection_step = h5store.get_detection_step(hdf5file)
if detection_step is None:
detection_step = DETECTION_STEP_DEFAULT
# Get the tige id in the h5 file
hdf_tige_id = get_hdf5_tigeid(hdf5file, cur_tige)
......@@ -1881,7 +1961,7 @@ def show_growth_length():
axProfiles.axis('off')
# Size of the averaging window
W = int(4 * round(tiges_data.diam[cur_tige].mean() / PAS_TRAITEMENT))
W = int(4 * round(tiges_data.diam[cur_tige].mean() / detection_step))
# Smoothing and computation of the curvatures
smooth_angles = zeros_like(angles) - 3000
......@@ -2402,7 +2482,7 @@ def show_beta(tige_id=None):
fig_beta.show()
def show_gamma(tige_id=None, pas=0.3):
def show_gamma(tige_id=None):
"""Estimation interactive du paramètre de proprioception gamma.
Ce code s'applique aux résultats issus du protocole expérimental suivant :
......@@ -2753,6 +2833,12 @@ def show_B(tige_id=None):
# Force the popup menu in tk to close
interekt.floatmenuisopen = False
# Retrieve the detection step from the h5 file
detection_step = h5store.get_detection_step(hdf5file)
if detection_step is None:
detection_step = DETECTION_STEP_DEFAULT
# Get the tige id in the h5 file
hdf_tige_id = get_hdf5_tigeid(hdf5file, cur_tige)
......@@ -2793,7 +2879,7 @@ def show_B(tige_id=None):
s = s[:imax]
# Size of the averaging window
W = int(4 * round(tiges_data.diam[cur_tige].mean() / PAS_TRAITEMENT))
W = int(4 * round(tiges_data.diam[cur_tige].mean() / detection_step))
dummy, smooth_angles = get_tige_curvature(angles, s, smoothing=True, window_width=W)
......@@ -3096,7 +3182,8 @@ def save_tige_idmapper():
'B_data': B_data,
'beta_data': beta_data,
'gamma_data': gamma_data,
'Tiges_seuil_offset': Tiges_seuil_offset}, f)
'Tiges_seuil_offset': Tiges_seuil_offset,
'Tiges_percent_diam': Tiges_percent_diam}, f)
def measure_pixels():
......@@ -3168,9 +3255,9 @@ def pixel_calibration():
if scale_cmpix is not None:
cm_distance.insert(0, str('%0.4f'%scale_cmpix))
Tk.Label(calframe, text='pixel:').pack()
Label(calframe, text='pixel:').pack()
pixel_distance.pack()
Tk.Label(calframe, text='cm:').pack()
Label(calframe, text='cm:').pack()
cm_distance.pack()
calframe.pack()
......@@ -3183,6 +3270,35 @@ def pixel_calibration():
calbutton_updatecalibration.pack(fill=Tk.X)
def detection_step_setting():
global tk_detection_step
# Add a tk box for this setting
top_step = Tk.Toplevel(master=root)
top_step.title(_("Skeleton detection step"))
step_frame = Frame(top_step)
w1 = Label(step_frame, text=_("Size of the detection step (in pixels):"))
w1.pack(fill='x', expand=True)
tk_detection_step = Tk.DoubleVar()
# Retrieve the detection step from the h5 file
detection_step = h5store.get_detection_step(hdf5file)
if detection_step is not None:
tk_detection_step.set(detection_step)
else:
tk_detection_step.set(DETECTION_STEP_DEFAULT)
w2 = Tk.Scale(step_frame, from_=0.1, to=5, resolution=0.01, variable=tk_detection_step,
orient=Tk.HORIZONTAL)
w2.pack(fill='x', expand=True)
step_frame.pack()
def onSelectSteadyStateImage(event):
"""Called when an image has been selected from the list as the steady-state image."""
sender = event.widget
......@@ -3507,6 +3623,9 @@ class Interekt:
# Set the scale
options_menu.add_command(label=_("Scale"), command=pixel_calibration)
# Set the space step for the skeleton detection process
options_menu.add_command(label=_("Step for skeleton detection"), command=detection_step_setting)
#TODO: Pour trier ou non les photos
#sort_photo_num = Tk.BooleanVar()
#sort_photo_num.set(True)
......@@ -4265,6 +4384,12 @@ class Interekt:
if tiges_data is None: # no tige present
return False
# Retrieve the detection step from the h5 file
detection_step = h5store.get_detection_step(hdf5file)
if detection_step is None:
detection_step = DETECTION_STEP_DEFAULT
image = mark['image']
A, B = mark["start"], mark["end"]
mark_center = (A + B) / 2
......@@ -4318,11 +4443,11 @@ class Interekt:
mark["s"] = s[i]
mark["intersection_point"] = intersection_point
mark["intersection_point_from_base"] = intersection_point_from_base
W = int(4 * round(tiges_data.diam[cur_tige].mean() / PAS_TRAITEMENT))
W = int(4 * round(tiges_data.diam[cur_tige].mean() / detection_step))
curvatures = get_tige_curvature(angles, s,
smoothing=True, window_width=W)[0]
mark["curvature"] = curvatures[i]
averaging_points = int(round(averaging_zone / PAS_TRAITEMENT))
averaging_points = int(round(averaging_zone / detection_step))
if i + averaging_points <= len(curvatures):
mean_curvature = mean(curvatures[i:i+averaging_points])
mark["mean_curvature"] = mean_curvature
......
......@@ -601,6 +601,33 @@ def get_pixelscale(hdf5file):
return scaleout
def save_detection_step(hdf5file, step):
"""
Function to save the space step used for skeleton detection.
"""
if step is None:
goods = None
else:
goods = float(step)
dict_data = {'detection_step': goods}
dicttoh5(treedict=dict_data, hdf5file=hdf5file, h5path='/', mode='a')
def get_detection_step(hdf5file):
"""
Function to retrieve the space step used for skeleton detection.
"""
step = None
with h5py.File(hdf5file, 'r') as f:
if 'detection_step' in f:
step = h5todict(hdf5file, path='detection_step')
if step == {}:
step = None
return step
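A minimal round-trip usage sketch for the two helpers above (the file name is a placeholder; the None fallback matches what the GUI code does, assuming this module's h5py/silx imports):

DETECTION_STEP_DEFAULT = 0.3                   # same default as in the GUI module

save_detection_step('example_data.h5', 0.25)   # persist the chosen step
step = get_detection_step('example_data.h5')   # read it back
if step is None:                               # file saved before this option existed
    step = DETECTION_STEP_DEFAULT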
def get_postprocessing(hdf5file, postprocessing_name, tige_num=None):
"""
......
......@@ -414,7 +414,7 @@ def get_min_max(z, coupure=0.1):
def MethodeOlivier(image, tige_table, id_tige, nbimage, xi, yi, pas, Np,
seuil_coupure=0.2, show_tige=False, rayonfilter=True,
seuil_coupure=0.2, percent_diam=1.4, show_tige=False, rayonfilter=True,
target=None, output_fig=None):
"""
Methode d'Olivier
......@@ -431,7 +431,6 @@ def MethodeOlivier(image, tige_table, id_tige, nbimage, xi, yi, pas, Np,
passflag = True
imp = image
# My setting before: 0.9; Olivier's: 1.4
percent_diam = 1.4
# Trick to save a bit of time in the loop
add_tiges_pts = tige_table.add_point
......@@ -682,10 +681,10 @@ class Queue(multiprocessing.queues.Queue):
class TraiteImageThread:
def __init__(self, image_file, image_num, xypoints, max_iter,
pas=0.3, seuil="auto", Np=100, show_tige=False,
rayonfilter=False, method="Olivier", end_points={},
tiges_seuil_offset={}, output_fig=None):
def __init__(self, image_file, image_num, xypoints, max_iter, pas=0.3, seuil="auto",
Np=100, show_tige=False, rayonfilter=False, method="Olivier",
end_points={}, tiges_seuil_offset={}, tiges_percent_diam={},
output_fig=None):
self.img = image_file
self.image_num = image_num
......@@ -699,6 +698,7 @@ class TraiteImageThread:
self.method = method
self.end_points = end_points
self.tiges_seuil_offset = tiges_seuil_offset
self.tiges_percent_diam = tiges_percent_diam
self.output_fig = output_fig
# Create memory space and special variables
......@@ -717,7 +717,7 @@ class TraiteImageThread:
imageprocessor.load(self.img)
image_bw = imageprocessor.render()
# TODO: Clean the tige_data to default value before porcessing
# TODO: Clean the tige_data to default value before processing
for i in range(self.Ntige):
xystart = self.xypoints[i]
......@@ -732,18 +732,24 @@ class TraiteImageThread:
if self.method == "Olivier":
if self.seuil == "auto":
seuiln = auto_seuil(image_bw, xi, yi)
seuil_i = auto_seuil(image_bw, xi, yi)
else:
seuiln = self.seuil
seuil_i = self.seuil
if i in self.tiges_seuil_offset:
dseuil = seuiln * (float(self.tiges_seuil_offset[i]))
dseuil = seuil_i * (float(self.tiges_seuil_offset[i]))
# When the sensitivity is negative, the threshold must be increased to make detection less sensitive to the intensity gradient
seuiln -= dseuil
seuil_i -= dseuil
MethodeOlivier(image_bw, self.tige_data, i, self.image_num, xi, yi, self.pas, self.Np, seuiln,
self.show_tige, self.rayonfilter, target, self.output_fig)
if i in self.tiges_percent_diam:
percent_diam_i = float(self.tiges_percent_diam[i])
else:
percent_diam_i = 1.4 # default value chosen by Hugo
MethodeOlivier(image_bw, self.tige_data, i, self.image_num, xi, yi,
self.pas, self.Np, seuil_i, percent_diam_i, self.show_tige,
self.rayonfilter, target, self.output_fig)
return self.image_num, self.tige_data
......@@ -826,12 +832,13 @@ def default_output_print(**kwargs):
print("Traitement de %i / %i" % (imnum, tot))
def ProcessImages(file_names, num_images, num_tiges, pas=0.3, seuil="auto",
Np=100, thread=False, show_tige=False, base_points=None,
rois=None, gains=20, cut_off=0.2, disk_size=4, rayonfilter=False,
method="Olivier", use_bw=True, color_transform=None, color_band=None,
output_function=default_output_print, output_function_args={}, outputdata=None,
end_points={}, tiges_seuil_offset={}, memory_size=10000, crops=[], output_fig=None):
def ProcessImages(file_names, num_images, num_tiges, pas=0.3, seuil="auto", Np=100,
thread=False, show_tige=False, base_points=None, rois=None, gains=20,
cut_off=0.2, disk_size=4, rayonfilter=False, method="Olivier",
use_bw=True, color_transform=None, color_band=None,
output_function=default_output_print, output_function_args={},
outputdata=None, end_points={}, tiges_seuil_offset={},
tiges_percent_diam={}, memory_size=10000, crops=[], output_fig=None):
"""
Function to launch the processing of the tiges by maximum search
......@@ -871,6 +878,9 @@ def ProcessImages(file_names, num_images, num_tiges, pas=0.3, seuil="auto",
-tiges_seuil_offset: dict[tige_id] = threshold offset, in %, added to the auto threshold to make the detection more (+xx%) or less (-xx%) sensitive
-tiges_percent_diam: dict[tige_id] = percent_diam, width of the transverse range for
skeleton detection, in units of the actual diameter.
-output_fig: [optional, default: None] store the figure to display the result of the treatment in live
"""
......@@ -947,12 +957,13 @@ def ProcessImages(file_names, num_images, num_tiges, pas=0.3, seuil="auto",
# Add callable class to our task list
for i, img_name in enumerate(imgs):
tasks.put( TraiteImageThread( img_name, i, xypoints, memory_size, pas, seuil, Np, show_tige, rayonfilter,
method, end_points, tiges_seuil_offset, output_fig) )
tasks.put(TraiteImageThread( img_name, i, xypoints, memory_size, pas, seuil, Np,
show_tige, rayonfilter, method, end_points, tiges_seuil_offset,
tiges_percent_diam, output_fig))
# Add the stop to kill workers at the end
for i in range(num_worker):
tasks.put( None )
tasks.put(None)
# Loop to display information
while tasks.qsize() > 0:
......