Commit da901539 authored by Penom Nom

update jflow workflows

parent 0fc91c0d
......@@ -16,12 +16,18 @@
#
import cherrypy
import cgi
import tempfile
import json
import sys
import datetime
from functools import wraps
import time
import datetime
import os
import argparse
import logging
from argparse import ArgumentTypeError
try:
......@@ -33,18 +39,61 @@ import jflow
from jflow.workflows_manager import WorkflowsManager
from jflow.config_reader import JFlowConfigReader
from jflow.workflow import Workflow
from jflow.parameter import browsefile, localfile, urlfile, inputfile, create_test_function
from workflows.types import *
import jflow.utils as utils
from cctools.util import time_format
# class in charge of uploading large files
class UploadFieldStorage(cgi.FieldStorage):
"""Our version uses a named temporary file instead of the default
non-named file; keeping it visibile (named), allows us to create a
2nd link after the upload is done, thus avoiding the overhead of
making a copy to the destination filename."""
def get_tmp_directory(self):
jflowconf = JFlowConfigReader()
return jflowconf.get_tmp_directory()
def make_file(self, binary=None):
tmp_folder = self.get_tmp_directory()
if not os.path.exists( tmp_folder ):
try: os.mkdir(tmp_folder)
except OSError: pass # the directory may already exist (e.g. created by a concurrent upload)
return tempfile.NamedTemporaryFile(dir=tmp_folder)
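# A minimal sketch (illustrative, not part of the commit) of the pattern
# make_file() enables: because the temporary file is named and visible, the
# server can hard-link it to its final destination instead of copying bytes:
#
#   import os, tempfile
#   tmp = tempfile.NamedTemporaryFile(dir="/path/to/tmp")  # hypothetical dir
#   tmp.write("uploaded bytes"); tmp.flush()
#   os.link(tmp.name, "/path/to/tmp/final_name")  # second name, no data copy
#   tmp.close()  # removes tmp.name only; the linked name keeps the data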
def noBodyProcess():
"""Sets cherrypy.request.process_request_body = False, giving
us direct control of the file upload destination. By default
cherrypy loads it to memory, we are directing it to disk."""
cherrypy.request.process_request_body = False
cherrypy.tools.noBodyProcess = cherrypy.Tool('before_request_body', noBodyProcess)
# define functions in charge of handling cross-domain calls
def CORS():
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
cherrypy.response.headers['Access-Control-Allow-Methods'] = 'OPTIONS, GET, POST'
cherrypy.response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Content-Range, Content-Disposition'
cherrypy.tools.CORS = cherrypy.Tool('before_finalize', CORS)
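# browsers precede cross-domain POSTs with an OPTIONS preflight request;
# the headers above authorize any origin ('*') for the listed methods and
# headers, which is what lets a remote web client drive this server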
class JFlowJSONEncoder (json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime.date, datetime.datetime)):
return obj.strftime( JFlowConfigReader().get_date_format() )
else:
return json.JSONEncoder.default(self, obj)
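# example, assuming the configured date format is '%d/%m/%Y':
#   json.dumps({"start": datetime.date(2013, 5, 21)}, cls=JFlowJSONEncoder)
#   -> '{"start": "21/05/2013"}'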
class JFlowServer (object):
MULTIPLE_TYPE_SPLITER = "._."
MULTIPLE_TYPE_SPLITER = "___"
JFLOW_WDATA = "data"
def __init__(self):
# Create a workflow manager to get access to our workflows
self.wfmanager = WorkflowsManager()
self.jflow_config_reader = JFlowConfigReader()
def jsonify(func):
'''JSON and JSONP decorator for CherryPy'''
......@@ -53,13 +102,12 @@ class JFlowServer (object):
value = func(*args, **kw)
cherrypy.response.headers["Content-Type"] = "application/json"
# if JSONP request
if kw.has_key("callback"): return kw["callback"] + "(" + json.dumps(value) + ")"
if kw.has_key("callback"): return kw["callback"] + "(" + json.dumps(value, cls=JFlowJSONEncoder) + ")"
# else return the JSON
else: return json.dumps(value)
return wrapper
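# when the request carries a "callback" argument, the decorated method
# answers as JSONP, e.g. (illustrative values):
#   GET /get_workflows_status?workflow_id=1&callback=handle
#   -> handle({"id": "001", ...})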
def jsonify_workflow_status(self, workflow, init_to_zero=False):
if workflow.start_time: start_time = time.asctime(time.localtime(workflow.start_time))
else: start_time = "-"
if workflow.start_time and workflow.end_time: elapsed_time = str(workflow.end_time-workflow.start_time)
......@@ -97,25 +145,27 @@ class JFlowServer (object):
"running": status_info["running"],
"aborted": status_info["aborted"],
"completed": status_info["completed"]})
status = {"id":utils.get_nb_string(workflow.id),
"errors": workflow.get_errors(),
"name": workflow.name,
"metadata": workflow.metadata,
"status": workflow.get_status(),
"elapsed_time": str(elapsed_time),
"elapsed_time": str(datetime.timedelta(seconds=int(str(elapsed_time).split(".")[0]))),
"start_time": start_time,
"end_time": end_time,
"components": components}
return status
@cherrypy.expose
@jsonify
def get_available_workflows(self, **kwargs):
workflows = []
wf_instances = self.wfmanager.get_available_workflows()
wf_instances, wf_methodes = self.wfmanager.get_available_workflows()
for instance in wf_instances:
parameters, parameters_per_groups, groups = [], {}, ["default"]
for param in instance.parameters:
for param in instance.get_parameters():
# if it is a MiltipleAction, replace the action object with its name
if param.action == jflow.parameter.MiltipleAction:
action = "MiltipleAction"
......@@ -132,9 +182,19 @@ class JFlowServer (object):
"name": param.name,
"display_name": param.display_name,
"group": param.group}
if hash_param["type"] == "date":
hash_param["format"] = self.jflow_config_reader.get_date_format()
if hash_param["format"] == '%d/%m/%Y':
hash_param["format"] = 'dd/mm/yyyy'
elif hash_param["format"] == '%d/%m/%y':
hash_param["format"] = 'dd/mm/yy'
elif hash_param["format"] == '%Y/%m/%d':
hash_param["format"] = 'yyyy/mm/dd'
elif hash_param["format"] == '%y/%m/%d':
hash_param["format"] = 'yy/mm/dd'
# if it's a multiple type add sub parameters
if type(param.type) == jflow.parameter.MultipleParameters:
hash_param["sub_parameters"] = []
hash_param["sub_parameters"] = []
for sub_param in param.sub_parameters:
hash_param["sub_parameters"].append({"help": sub_param.help,
"required": sub_param.required,
......@@ -145,6 +205,16 @@ class JFlowServer (object):
"name": param.name + JFlowServer.MULTIPLE_TYPE_SPLITER + sub_param.flag,
"display_name": sub_param.display_name,
"group": param.group})
if hash_param["type"] == "date":
hash_param["format"] = self.jflow_config_reader.get_date_format()
if hash_param["format"] == '%d/%m/%Y':
hash_param["format"] = 'dd/mm/yyyy'
elif hash_param["format"] == '%d/%m/%y':
hash_param["format"] = 'dd/mm/yy'
elif hash_param["format"] == '%Y/%m/%d':
hash_param["format"] = 'yyyy/mm/dd'
elif hash_param["format"] == '%y/%m/%d':
hash_param["format"] = 'yy/mm/dd'
parameters.append(hash_param)
if parameters_per_groups.has_key(param.group):
parameters_per_groups[param.group].append(hash_param)
......@@ -161,32 +231,112 @@ class JFlowServer (object):
@cherrypy.expose
@jsonify
def run_workflow(self, **kwargs):
kwargs_modified = {}
for key in kwargs.keys():
parts = key.split(JFlowServer.MULTIPLE_TYPE_SPLITER)
if len(parts) == 1:
kwargs_modified[key] = kwargs[key]
elif len(parts) == 2:
if kwargs_modified.has_key(parts[0]):
kwargs_modified[parts[0]].append((parts[1], kwargs[key]))
else:
kwargs_modified[parts[0]] = [(parts[1], kwargs[key])]
workflow = self.wfmanager.run_workflow(kwargs_modified["workflow_class"], kwargs_modified)
return self.jsonify_workflow_status(workflow, True)
try:
kwargs_modified = {}
for key in kwargs.keys():
parts = key.split(JFlowServer.MULTIPLE_TYPE_SPLITER)
# if this is a classic Parameter
if len(parts) == 1:
kwargs_modified[key] = kwargs[key]
# if this is a MultiParameter
elif len(parts) == 2:
if kwargs_modified.has_key(parts[0]):
kwargs_modified[parts[0]].append((parts[1], kwargs[key]))
else:
kwargs_modified[parts[0]] = [(parts[1], kwargs[key])]
# if this is a MultiParameterList
# TODO: on the interface side, do something like: key.sub_key.1 ... so if len == 3
# the goal being to get a structure like: [[(sub_key1: val), (sub_key2: val)], [(sub_key1: val2), (sub_key2: val2)]]
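# a sketch of that handling (hypothetical key layout "key___sub_key___1",
# not part of the commit):
#   elif len(parts) == 3:
#       idx = int(parts[2]) - 1
#       kwargs_modified.setdefault(parts[0], [])
#       while len(kwargs_modified[parts[0]]) <= idx:
#           kwargs_modified[parts[0]].append([])
#       kwargs_modified[parts[0]][idx].append((parts[1], kwargs[key]))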
workflow = self.wfmanager.run_workflow(kwargs_modified["workflow_class"], kwargs_modified)
return { "status" : 0, "content" : self.jsonify_workflow_status(workflow, True) }
except Exception as err:
return { "status" : 1, "content" : str(err) }
@cherrypy.expose
@jsonify
def rerun_workflow(self, **kwargs):
self.wfmanager.rerun_workflow(kwargs["workflow_id"])
workflow = self.wfmanager.get_workflow(kwargs["workflow_id"])
workflow = self.wfmanager.rerun_workflow(kwargs["workflow_id"])
return self.jsonify_workflow_status(workflow)
@cherrypy.expose
@jsonify
def reset_workflow_component(self, **kwargs):
workflow = self.wfmanager.reset_workflow_component(kwargs["workflow_id"], kwargs["component_name"])
return self.jsonify_workflow_status(workflow)
@cherrypy.expose
def upload_light(self, **kwargs):
uniq_directory = ""
for key in kwargs.keys():
if key == "uniq_directory":
uniq_directory = kwargs['uniq_directory']
else:
file_param = key
# the file transfer can take a long time; by default cherrypy
# limits responses to 300s; we increase it to 1h
cherrypy.response.timeout = 3600
# upload file by chunks
file_dir = os.path.join( self.jflow_config_reader.get_tmp_directory(), uniq_directory )
os.mkdir( file_dir )
FH_server_file = open(os.path.join(file_dir, kwargs[file_param].filename.encode('ascii','ignore')), "wb")
while True:
data = kwargs[file_param].file.read(8192)
if not data:
break
FH_server_file.write(data)
FH_server_file.close()
@cherrypy.expose
@cherrypy.tools.noBodyProcess()
@cherrypy.tools.CORS()
def upload(self):
# the file transfer can take a long time; by default cherrypy
# limits responses to 300s; we increase it to 1h
cherrypy.response.timeout = 3600
# convert the header keys to lower case
lcHDRS = {}
for key, val in cherrypy.request.headers.iteritems():
lcHDRS[key.lower()] = val
# at this point we could limit the upload on content-length...
# incomingBytes = int(lcHDRS['content-length'])
# create our version of cgi.FieldStorage to parse the MIME encoded
# form data where the file is contained
formFields = UploadFieldStorage(fp=cherrypy.request.rfile,
headers=lcHDRS,
environ={'REQUEST_METHOD':'POST'},
keep_blank_values=True)
# we now create a hard link to the file, using the submitted
# filename; renaming the file instead would fail because the
# NamedTemporaryFile used by our version of cgi.FieldStorage
# explicitly deletes its original filename on close
for current in formFields.keys():
if current != 'uniq_directory':
currentFile = formFields[current]
fileDir = os.path.join(self.jflow_config_reader.get_tmp_directory(), formFields.getvalue("uniq_directory"))
os.mkdir(fileDir)
os.link(
currentFile.file.name,
os.path.join(fileDir, currentFile.filename.encode('ascii','ignore'))
)
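# NOTE: os.link creates a hard link, so the temporary directory returned by
# get_tmp_directory() must live on the same filesystem as the destination;
# otherwise the call fails with EXDEV (cross-device link)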
@cherrypy.expose
@jsonify
def get_workflows_status(self, **kwargs):
if kwargs.has_key("workflow_id"):
workflow = self.wfmanager.get_workflow(kwargs["workflow_id"])
return self.jsonify_workflow_status(workflow)
if kwargs["display"] == "list":
return self.jsonify_workflow_status(workflow)
elif kwargs["display"] == "graph":
g = workflow.get_execution_graph()
status = self.jsonify_workflow_status(workflow)
status["edges"] = g.edges()
return status
else:
status = []
workflows = self.wfmanager.get_workflows()
......@@ -204,6 +354,22 @@ class JFlowServer (object):
status.append(self.jsonify_workflow_status(workflow))
return status
def _webify_outputs(self, web_path, path):
work_dir = self.jflow_config_reader.get_work_directory()
if work_dir.endswith("/"): work_dir = work_dir[:-1]
socket_opt = self.jflow_config_reader.get_socket_options()
return "http://" + socket_opt[0] + ":" + str(socket_opt[1]) + "/" + path.replace(work_dir, web_path)
@cherrypy.expose
@jsonify
def get_workflow_outputs(self, **kwargs):
on_disk_outputs, on_web_outputs = self.wfmanager.get_workflow_ouputs(kwargs["workflow_id"]), {}
for cpt_name in on_disk_outputs.keys():
on_web_outputs[cpt_name] = {}
for outf in on_disk_outputs[cpt_name]:
on_web_outputs[cpt_name][outf] = self._webify_outputs(JFlowServer.JFLOW_WDATA, on_disk_outputs[cpt_name][outf])
return on_web_outputs
@cherrypy.expose
@jsonify
def validate_field(self, **kwargs):
......@@ -213,18 +379,17 @@ class JFlowServer (object):
if key != "type" and key != "callback" and key != "_":
value_key = key
break
eval(kwargs["type"])(kwargs[value_key])
create_test_function(kwargs["type"])(kwargs[value_key])
return True
except Exception, e:
return str(e)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--daemon", action="store_true", dest="daemon", default=False, help="Run the server as daemon")
args = vars(parser.parse_args())
# daemonize the server if asked to
if args["daemon"]:
from cherrypy.process.plugins import Daemonizer
......@@ -232,8 +397,20 @@ if __name__ == '__main__':
# define the socket host and port
jflowconf = JFlowConfigReader()
socket_opts = jflowconf.get_socket_options()
# remove any limit on the request body size; cherrypy's default is 100MB
# (maybe we should just increase it?)
cherrypy.server.max_request_body_size = 0
# increase server socket timeout to 60s; we are more tolerant of bad
# quality client-server connections (cherrypy's default is 10s)
cherrypy.server.socket_timeout = 60
app_conf = {'/': {'tools.staticdir.root': jflowconf.get_work_directory()},
os.path.join('/', JFlowServer.JFLOW_WDATA): {'tools.staticdir.on' : True,
'tools.staticdir.dir' : jflowconf.get_work_directory()}}
cherrypy.config.update({'server.socket_host': socket_opts[0],
'server.socket_port': socket_opts[1]})
# start the server
cherrypy.quickstart(JFlowServer())
\ No newline at end of file
cherrypy.quickstart(JFlowServer(), config=app_conf)
\ No newline at end of file
......@@ -27,7 +27,6 @@ except ImportError:
from jflow.workflows_manager import WorkflowsManager
from jflow.workflow import Workflow
import jflow.utils as utils
from cctools.util import time_format
class JflowArgumentParser (argparse.ArgumentParser):
def _read_args_from_files(self, arg_strings):
......@@ -55,50 +54,6 @@ class JflowArgumentParser (argparse.ArgumentParser):
# return the modified argument list
return new_arg_strings
def display_workflow_status(workflow, detailed=False):
if workflow.start_time: start_time = time.asctime(time.localtime(workflow.start_time))
else: start_time = "-"
if workflow.start_time and workflow.end_time: elapsed_time = str(workflow.end_time-workflow.start_time)
elif workflow.start_time: elapsed_time = str(time.time()-workflow.start_time)
else: elapsed_time = "-"
if workflow.end_time: end_time = time.asctime(time.localtime(workflow.end_time))
else: end_time = "-"
if detailed:
status = "Workflow #" + utils.get_nb_string(workflow.id) + " (" + workflow.name + ") is " + \
workflow.get_status() + ", time elapsed: " + str(elapsed_time) + " (from " + start_time + \
" to " + end_time + "):\n"
for i, component in enumerate(workflow.get_components_nameid()):
status_info = workflow.get_component_status(component)
try: perc_waiting = (status_info["waiting"]*100.0)/status_info["tasks"]
except: perc_waiting = 0
try: perc_running = (status_info["running"]*100.0)/status_info["tasks"]
except: perc_running = 0
try: perc_failed = (status_info["failed"]*100.0)/status_info["tasks"]
except: perc_failed = 0
try: perc_aborted = (status_info["aborted"]*100.0)/status_info["tasks"]
except: perc_aborted = 0
try: perc_completed = (status_info["completed"]*100.0)/status_info["tasks"]
except: perc_completed = 0
if status_info["running"] > 0: running = "\033[94mrunning:" + str(status_info["running"]) + "\033[0m"
else: running = "running:" + str(status_info["running"])
if status_info["waiting"] > 0: waiting = "\033[93mwaiting:" + str(status_info["waiting"]) + "\033[0m"
else: waiting = "waiting:" + str(status_info["waiting"])
if status_info["failed"] > 0: failed = "\033[91mfailed:" + str(status_info["failed"]) + "\033[0m"
else: failed = "failed:" + str(status_info["failed"])
if status_info["aborted"] > 0: aborted = "\033[95maborted:" + str(status_info["aborted"]) + "\033[0m"
else: aborted = "aborted:" + str(status_info["aborted"])
if status_info["completed"] == status_info["tasks"] and status_info["completed"] > 0: completed = "\033[92mcompleted:" + str(status_info["completed"]) + "\033[0m"
else: completed = "completed:" + str(status_info["completed"])
status += " - " + component + ", time elapsed " + time_format(status_info["time"]) + \
" (total:" + str(status_info["tasks"]) + ", " + waiting + ", " + running + ", " + failed + \
", " + aborted + ", " + completed + ")"
if i<len(workflow.get_components_nameid())-1: status += "\n"
return status
else:
return utils.get_nb_string(workflow.id) + "\t" + workflow.name + "\t" + workflow.get_status() + \
"\t" + elapsed_time + "\t" + start_time + "\t" + end_time
if __name__ == '__main__':
......@@ -110,21 +65,37 @@ if __name__ == '__main__':
subparsers = parser.add_subparsers(title='Available sub commands')
# Add rerun workflow availability
sub_parser = subparsers.add_parser("rerun", help="rerun a specific workflow")
sub_parser = subparsers.add_parser("rerun", help="Rerun a specific workflow")
sub_parser.add_argument("--workflow-id", type=str, help="Which workflow should be rerun",
required=True, dest="workflow_id")
sub_parser.set_defaults(cmd_object="rerun")
# Add reset component availability
sub_parser = subparsers.add_parser("reset", help="Reset a workflow component")
sub_parser.add_argument("--workflow-id", type=str, help="Which workflow should be used",
required=True, dest="workflow_id")
sub_parser.add_argument("--component-name", type=str, help="Which component should be reseted",
required=True, dest="component_name")
sub_parser.set_defaults(cmd_object="reset")
# Add execution graph availability
sub_parser = subparsers.add_parser("execution-graph", help="Display the workflow execution graph")
sub_parser.add_argument("--workflow-id", type=str, help="Which workflow should be considered",
required=True, dest="workflow_id")
sub_parser.set_defaults(cmd_object="execution_graph")
# Add status workflow availability
sub_parser = subparsers.add_parser("status", help="monitor a specific workflow")
sub_parser = subparsers.add_parser("status", help="Monitor a specific workflow")
sub_parser.add_argument("--workflow-id", type=str, help="Which workflow status should be displayed",
default=None, dest="workflow_id")
sub_parser.add_argument("--all", action="store_true", help="Display all workflows status",
default=False, dest="all")
sub_parser.add_argument("--errors", action="store_true", help="Display failed commands",
default=False, dest="display_errors")
sub_parser.set_defaults(cmd_object="status")
# Add available pipelines
wf_instances = wfmanager.get_available_workflows()
wf_instances, wf_methodes = wfmanager.get_available_workflows()
wf_classes = []
for instance in wf_instances:
wf_classes.append(instance.__class__.__name__)
......@@ -132,7 +103,7 @@ if __name__ == '__main__':
sub_parser = subparsers.add_parser(instance.name, help=instance.description, fromfile_prefix_chars='@')
sub_parser.convert_arg_line_to_args = instance.__class__.config_parser
[parameters_groups, parameters_order] = instance.get_parameters_per_groups()
for group in parameters_order:
for group in parameters_order:
if group == "default":
for param in parameters_groups[group]:
sub_parser.add_argument(param.flag, **param.export_to_argparse())
......@@ -152,21 +123,26 @@ if __name__ == '__main__':
pgroup.add_argument(param.flag, **param.export_to_argparse())
sub_parser.set_defaults(cmd_object=instance.__class__.__name__)
args = vars(parser.parse_args())
if args["cmd_object"] in wf_classes:
wfmanager.run_workflow(args["cmd_object"], args)
elif args["cmd_object"] == "rerun":
wfmanager.rerun_workflow(args["workflow_id"])
elif args["cmd_object"] == "reset":
wfmanager.reset_workflow_component(args["workflow_id"], args["component_name"])
elif args["cmd_object"] == "execution_graph":
workflow = wfmanager.get_workflow(args["workflow_id"])
print workflow.get_execution_graph()
elif args["cmd_object"] == "status":
if args["workflow_id"]:
workflow = wfmanager.get_workflow(args["workflow_id"])
print display_workflow_status(workflow, True)
print workflow.get_status_under_text_format(True, args["display_errors"])
else:
workflows = wfmanager.get_workflows()
if len(workflows) > 0:
status = "ID\tNAME\tSTATUS\tELAPSED_TIME\tSTART_TIME\tEND_TIME\n"
for i, workflow in enumerate(workflows):
status += display_workflow_status(workflow)
status += workflow.get_status_under_text_format()
if i<len(workflows)-1: status += "\n"
else: status = "no workflow available"
print status
......@@ -22,6 +22,9 @@ import os
from jflow.config_reader import JFlowConfigReader
# Define some Error classes
class InvalidFormatError(Exception): pass
jflowconf = JFlowConfigReader()
# if log file directory does not exist, create it
......
......@@ -19,28 +19,29 @@ import os
import sys
import inspect
import tempfile
import types
from jflow.workflows_manager import WorkflowsManager
from jflow.config_reader import JFlowConfigReader
from jflow.dataset import ArrayList
from jflow.utils import which
from jflow.iotypes import DynamicOutput
from jflow.parameter import *
from weaver.util import parse_string_list
class Component(object):
"""
"""
def __init__(self):
self.prefix = "default"
self.params_order = []
self.output_directory = None
self.config_reader = JFlowConfigReader()
self.version = self.get_version()
self.batch_options = self.config_reader.get_component_batch_options(self.__class__.__name__)
def is_dynamic(self):
def is_dynamic(self):
return len(self.get_dynamic_outputs()) != 0
def get_dynamic_outputs(self):
......@@ -52,7 +53,160 @@ class Component(object):
if issubclass( attribute_value.__class__, DynamicOutput ):
dynamic_outputs.append( attribute_value )
return dynamic_outputs
def get_output_files(self):
outputs = {}
for attribute_value in self.__dict__.values():
if ( issubclass( attribute_value.__class__, DynamicOutput ) or
issubclass( attribute_value.__class__, OutputFileList) ):
for f in attribute_value:
outputs[os.path.basename(f)] = f
elif issubclass( attribute_value.__class__, OutputFile):
outputs[os.path.basename(attribute_value)] = attribute_value
return outputs
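# example with a hypothetical component attribute:
#   self.databank = OutputFile("<output_directory>/databank.fasta")
# get_output_files() would then yield:
#   {"databank.fasta": "<output_directory>/databank.fasta"}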
def add_input_file(self, name, help, file_format="any", default=None, type="inputfile",
required=False, flag=None, group="default", display_name=None, add_to=None):
new_param = InputFile(name, help, flag=flag, file_format=file_format, default=default,
type=type, required=required, group=group, display_name=display_name)
new_param.component_nameid = self.get_nameid()
# store where the parameter is coming from
if issubclass( default.__class__, AbstractOutputFile ):
new_param.parent_component_nameid.append(default.component_nameid)
elif issubclass( default.__class__, list ):
for val in default:
if issubclass( val.__class__, AbstractOutputFile ):
new_param.parent_component_nameid.append(val.component_nameid)
# if this input should be added to a particular parameter
if add_to:
try:
self.__getattribute__(add_to).add_sub_parameter(new_param)
except: pass
# otherwise, add it to the class itself
else:
self.params_order.append(name)
self.__setattr__(name, new_param)
def reset(self):
for tmp_file in os.listdir(self.output_directory):
os.remove(os.path.join(self.output_directory, tmp_file))
def add_input_file_list(self, name, help, file_format="any", default=None, type="inputfile",
required=False, flag=None, group="default", display_name=None, add_to=None):
if default is None: default = []
new_param = InputFileList(name, help, flag=flag, file_format=file_format, default=default,
type=type, required=required, group=group, display_name=display_name)
new_param.component_nameid = self.get_nameid()
# store where the parameter is coming from
if issubclass( default.__class__, AbstractOutputFile ):
new_param.parent_component_nameid.append(default.component_nameid)
elif issubclass( default.__class__, list ):
for val in default:
if issubclass( val.__class__, AbstractOutputFile ):
new_param.parent_component_nameid.append(val.component_nameid)
# if this input should be added to a particular parameter
if add_to:
try:
self.__getattribute__(add_to).add_sub_parameter(new_param)
except: pass
# otherwise, add it to the class itself
else:
self.params_order.append(name)
self.__setattr__(name, new_param)
def add_parameter(self, name, help, default=None, type=types.StringType, choices=None,
required=False, flag=None, group="default", display_name=None, add_to=None):
new_param = ParameterFactory.factory(name, help, flag=flag, default=default, type=type, choices=choices,
required=required, group=group, display_name=display_name)
# if this input should be added to a particular parameter
if add_to:
try:
self.__getattribute__(add_to).add_sub_parameter(new_param)
except: pass
# otherwise, add it to the class itself
else:
self.params_order.append(name)
self.__setattr__(name, new_param)
def add_parameter_list(self, name, help, default=None, type=types.StringType, choices=None,
required=False, flag=None, group="default", display_name=None, add_to=None):
if default is None: default = []
new_param = ParameterList(name, help, flag=flag, default=default, type=type, choices=choices,
required=required, group=group, display_name=display_name)
# if this input should be added to a particular parameter
if add_to: