Maintenance - Mise à jour mensuelle Lundi 1 Juin 2020 entre 7h00 et 9h00

Commit ad087c8c authored by Penom Nom

ng6 python3

parent 690cedc0
......@@ -129,7 +129,11 @@ if __name__ == '__main__':
pgroup.add_argument(param.flag, **param.export_to_argparse())
sub_parser.set_defaults(cmd_object=instance.__class__.__name__)
args = vars(parser.parse_args())
if not "cmd_object" in args:
print(parser.format_help())
parser.exit(0, "")
if args["cmd_object"] in wf_classes:
wfmanager.run_workflow(args["cmd_object"], args)
elif args["cmd_object"] == "rerun":
......@@ -160,9 +164,9 @@ if __name__ == '__main__':
inputs.append(gr.node_attributes(node)[1])
elif Workflow.COMPONENT_GRAPH_LABEL in gr.node_attributes(node):
components.append(gr.node_attributes(node)[1])
print "inputs: ", inputs
print "components: ", components
print "edges: ", gr.edges()
print("inputs: ", inputs)
print("components: ", components)
print("edges: ", gr.edges())
elif args["cmd_object"] == "status":
if args["workflow_id"]:
......@@ -170,7 +174,7 @@ if __name__ == '__main__':
workflow = wfmanager.get_workflow(args["workflow_id"])
except Exception as e:
utils.display_error_message(str(e))
print Workflow.get_status_under_text_format(workflow, True, args["display_errors"])
print(Workflow.get_status_under_text_format(workflow, True, args["display_errors"]))
else:
try:
workflows = wfmanager.get_workflows(use_cache=True)
......@@ -187,5 +191,5 @@ if __name__ == '__main__':
status += Workflow.get_status_under_text_format(workflows_by_id[wfid])
if i<len(workflows)-1: status += "\n"
else: status = "no workflow available"
print status
print(status)
\ No newline at end of file
......@@ -19,15 +19,11 @@ import os
import sys
import argparse
import time
from ConfigParser import ConfigParser
import tempfile, os, sys, re, hashlib, urllib, zipfile
from configparser import ConfigParser
import tempfile, os, sys, re, hashlib, urllib.request, urllib.parse, urllib.error, zipfile
import distutils.dir_util as dirutil
import shutil
import uuid
try:
from MySQLdb import *
except:
print "Import Error: MySQLdb is required to use t3MySQLdb object !"
try:
import _preamble
......@@ -35,6 +31,9 @@ except ImportError:
sys.exc_clear()
from ng6.t3MySQLdb import t3MySQLdb
if __name__ == '__main__':
# Create the top-level parser
......@@ -46,7 +45,7 @@ if __name__ == '__main__':
args = vars(parser.parse_args())
args["web_path"] = re.sub( r"\/$", "", args["web_path"] )
print "Downloading web site source code ..."
print("Downloading web site source code ...")
dummy_tmp_zipfile = tempfile.NamedTemporaryFile(prefix = 'dummy_', suffix = '.zip').name
dummy_tmp_dir = tempfile.mkdtemp(suffix='_typo3', prefix='dummy_')
ng6reader = ConfigParser()
......@@ -59,9 +58,9 @@ if __name__ == '__main__':
fileadmin = os.path.join(args["web_path"], "fileadmin")
# Downloading typo3 package
urllib.urlretrieve(ng6reader.get('resources', 'typo3_src'), dummy_tmp_zipfile)
urllib.request.urlretrieve(ng6reader.get('resources', 'typo3_src'), dummy_tmp_zipfile)
print "Installing web site ..."
print("Installing web site ...")
# Testing and Unzipping typo3 package
with zipfile.ZipFile(dummy_tmp_zipfile, 'r') as zipf:
if zipf.testzip() is None:
......@@ -108,7 +107,7 @@ if __name__ == '__main__':
f.write("$TYPO3_CONF_VARS['GFX']['im_combine_filename'] = 'composite';\n")
# Typo3 password hash
random=uuid.uuid4().hex[:10]
install_tool_password_hash = hashlib.md5(random).hexdigest()
install_tool_password_hash = hashlib.md5(random.encode('utf-8')).hexdigest()
f.write("# password : "+random+";\n")
f.write("$TYPO3_CONF_VARS['BE']['installToolPassword'] = '" + install_tool_password_hash + "';\n")
# Testing unzip binary path :
......@@ -138,21 +137,26 @@ if __name__ == '__main__':
database[iline] = re.sub("plugin.tx_nG6_pi5.server_name=([\w\.]+)", "plugin.tx_nG6_pi5.server_name=" + server_parameters[0], database[iline])
# Inserting tables in typo3 database
conn = connect(db_host, db_user, db_password, db_name)
with conn:
curs = conn.cursor()
curs.execute("show tables;")
tables = []
for i in curs.fetchall() : tables.append(i[0])
erase = "n"
if len(tables) > 0 and "tx_nG6_project" in tables :
erase = raw_input('The database already contains a ng6 web site, do you want to DROP it (y/n) ? ')
elif len(tables) > 0 and ("tt_content" in tables and "pages" in tables and "be_users" in tables ):
erase = raw_input('The database already contains a ng6 web site, do you want to DROP it (y/n) ? ')
if erase == "y" :
curs.execute("Drop table "+ ",".join(tables) +";")
curs.execute("".join(database))
curs.close()
t3m = t3MySQLdb()
connection = t3m.get_connection()
try :
with connection.cursor() as curs:
curs.execute("show tables;")
tables = []
for i in curs.fetchall() : tables.append(i[0])
erase = "n"
if len(tables) > 0 and "tx_nG6_project" in tables :
erase = input('The database already contains a ng6 web site, do you want to DROP it (y/n) ? ')
elif len(tables) > 0 and ("tt_content" in tables and "pages" in tables and "be_users" in tables ):
erase = input('The database already contains a ng6 web site, do you want to DROP it (y/n) ? ')
if erase == "y" :
curs.execute("Drop table "+ ",".join(tables) +";")
curs.execute("".join(database))
connection.commit()
except :
raise
finally:
connection.close()
# Fix permissions typo3 folder
for dir in [os.path.join(args["web_path"], "typo3conf"),
......@@ -165,6 +169,6 @@ if __name__ == '__main__':
os.chmod( r , 0o777)
web_name = args["web_path"].split("/var/www/")
print "The Web site is available at http://"+server_parameters[0]+"/"+web_name[1]
print "Go to http://"+server_parameters[0]+"/"+web_name[1]+"/index.php?id=3 to configure the installation"
print("The Web site is available at http://"+server_parameters[0]+"/"+web_name[1])
print("Go to http://"+server_parameters[0]+"/"+web_name[1]+"/index.php?id=3 to configure the installation")
\ No newline at end of file
#
# Copyright (C) 2012 INRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import argparse, os, sys
from ConfigParser import ConfigParser
from MySQLdb import *
def get_storage_size(start_path):
    """Return the total size, in bytes, of every regular file found under start_path (recursively)."""
    total = 0
    for current_dir, _, file_names in os.walk(start_path):
        # Sum the sizes of the files directly contained in this directory.
        total += sum(os.path.getsize(os.path.join(current_dir, name)) for name in file_names)
    return total
def update_storage_size(savedir, host, user, passwd, db):
    """
    Recompute and store the on-disk storage size of each ng6 run and analyse.

    For every row of the tx_nG6_run and tx_nG6_analyze tables, the size of the
    directory <savedir> + <row directory> is computed with get_storage_size()
    and written back into the row's storage_size column.

    @param savedir: [str] base directory prepended to each stored directory path.
    @param host: [str] MySQL server host.
    @param user: [str] MySQL user name.
    @param passwd: [str] MySQL password.
    @param db: [str] database name.
    """
    conn = connect(host, user, passwd, db)
    with conn :
        curs = conn.cursor()
        for tablename in ["tx_nG6_run", "tx_nG6_analyze"] :
            curs.execute("SELECT uid, directory FROM " + tablename)
            res = curs.fetchall()
            update_list = []
            for data in res :
                try :
                    size = get_storage_size( savedir + data[1] )
                    update_list.append((size, data[0]) )
                except:
                    # Best effort: report the failing directory and keep
                    # processing the remaining rows.
                    e = sys.exc_info()[0]
                    # py3 fix: the original used a python2 print statement,
                    # which is a SyntaxError under python3.
                    print("Error while updating {0} : {1}".format(savedir + data[1], e))
            curs.executemany( "UPDATE " + tablename + " SET storage_size='%s' WHERE uid= %s ", update_list )
            conn.commit()
        curs.close()
    print("Updated database " + db)
if __name__ == "__main__":
    # The script takes no options; argparse is used only for --help output.
    parser = argparse.ArgumentParser(description = "update analysis and run storage size in ng6 database")
    parser.parse_args()
    # Database credentials and the save directory come from the ng6
    # application.properties file, one level above this script.
    ng6reader = ConfigParser()
    ng6reader.read(os.path.join(os.path.dirname(__file__), "..", "application.properties"))
    db_host = ng6reader.get('database', 'host')
    db_user = ng6reader.get('database', 'user')
    db_password = ng6reader.get('database', 'passwd')
    db_name = ng6reader.get('database', 'dbname')
    savedir = ng6reader.get('storage', 'save_directory')
    if not os.path.isdir(savedir):
        # Fixed wording/typos in the error message ("does not exists",
        # "Pleache") from the original.
        sys.exit("Directory '" + savedir + "' does not exist. Please change your application.properties file")
    update_storage_size(savedir, db_host, db_user, db_password, db_name)
\ No newline at end of file
......@@ -6,10 +6,6 @@
__all__ = ['map']
try:
from itertools import imap
map = imap
except ImportError:
map = map
map = map
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
......@@ -4,8 +4,6 @@
""" CCTools Debugging module """
from __future__ import print_function
__all__ = ['set_name', 'set_file', 'set_flag', 'print_flags', 'clear_flags']
from _cctools import debug_config, debug_config_file, debug_config_file_size
......
......@@ -4,7 +4,7 @@
""" cctools Makeflow dag module """
from __future__ import print_function
from cctools.compat import map
from cctools.error import raise_parser_error, ParserError
......@@ -82,7 +82,7 @@ class DAG(object):
self.exports.update(line.split()[1:])
def parse_node(self, line):
output_files, input_files = map(lambda s: set(shlex.split(s)), line.split(':'))
output_files, input_files = [set(shlex.split(s)) for s in line.split(':')]
command = None
symbol = None
variables = []
......
......@@ -4,7 +4,7 @@
""" cctools Makeflow log module """
from __future__ import print_function
from cctools.compat import map
from cctools.error import raise_parser_error, ParserError
......@@ -572,7 +572,7 @@ class Reporter(object):
if sort_field:
log.nodes.sort(key=operator.attrgetter(sort_field))
if filters:
nodes = filter(lambda node: all(map(lambda f: eval(f, {'node': node}), filters)), log.nodes)
nodes = [node for node in log.nodes if all([eval(f, {'node': node}) for f in filters])]
else:
nodes = log.nodes
......@@ -662,7 +662,7 @@ class JSONReporter(Reporter):
if sort_field:
log.nodes.sort(key=operator.attrgetter(sort_field))
if filters:
nodes = filter(lambda node: all(map(lambda f: eval(f, {'node': node}), filters)), log.nodes)
nodes = [node for node in log.nodes if all([eval(f, {'node': node}) for f in filters])]
else:
nodes = log.nodes
......
......@@ -4,8 +4,8 @@
""" CCTools Plotting module """
from __future__ import print_function
from __future__ import with_statement
import os
......@@ -62,7 +62,7 @@ def gnuplot(plot_path, data_path, plot_format=None, plot_fields=None, **kwargs):
with open(script_path, 'w+') as fs:
print('set output "{0}"'.format(plot_path), file=fs)
for key, value in kwargs.items():
for key, value in list(kwargs.items()):
if key in GNUPLOT_STRING_FIELDS:
print('set {0} "{1}"'.format(key, value), file=fs)
else:
......
......@@ -4,7 +4,7 @@
""" CCTools Utility module """
from __future__ import print_function
from cctools.compat import map
......
......@@ -30,7 +30,7 @@ jflowconf = JFlowConfigReader()
# if log file directory does not exist, create it
log_directory = os.path.dirname(jflowconf.get_log_file_path())
if not os.path.isdir(log_directory):
os.makedirs(log_directory, 0751)
os.makedirs(log_directory, 0o751)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
......
......@@ -90,7 +90,7 @@ class MultiMap(Abstraction):
else:
iteration_inputs.append(inputs[iter])
input_pattern = self._longestCommonSubstr(map(os.path.basename, map(str, iteration_inputs)))
input_pattern = self._longestCommonSubstr(list(map(os.path.basename, list(map(str, iteration_inputs)))))
iteration_outputs = []
if isinstance(self.outputs, list):
......
......@@ -44,6 +44,8 @@ class Component(object):
self.output_directory = None
self.config_reader = JFlowConfigReader()
self.version = self.get_version()
if isinstance(self.version, bytes):
self.version = self.version.decode()
self.batch_options = self.config_reader.get_component_batch_options(self.__class__.__name__)
def is_dynamic(self):
......@@ -54,14 +56,14 @@ class Component(object):
@return : the list of outputs updated at the end of component execution.
"""
dynamic_outputs = list()
for attribute_value in self.__dict__.values():
for attribute_value in list(self.__dict__.values()):
if issubclass( attribute_value.__class__, DynamicOutput ):
dynamic_outputs.append( attribute_value )
return dynamic_outputs
def get_output_files(self):
outputs = {}
for attribute_value in self.__dict__.values():
for attribute_value in list(self.__dict__.values()):
if ( issubclass( attribute_value.__class__, DynamicOutput ) or
issubclass( attribute_value.__class__, OutputFileList) ):
for f in attribute_value:
......@@ -70,6 +72,18 @@ class Component(object):
outputs[os.path.basename(attribute_value)] = attribute_value
return outputs
def add_input_directory(self, name, help, default=None, required=False, flag=None,
group="default",
display_name=None, cmd_format="", argpos=-1):
new_param = InputDirectory(name, help, flag=flag, default=default, required=required, group=group,
display_name=display_name, cmd_format=cmd_format, argpos=argpos)
# store where the parameter is coming from
new_param.linkTrace_nameid = self.get_nameid()
if issubclass( default.__class__, LinkTraceback ):
new_param.parent_linkTrace_nameid = [default.linkTrace_nameid]
# add it to the class itself
self.params_order.append(name)
self.__setattr__(name, new_param)
def add_input_file(self, name, help, file_format="any", default=None, type="inputfile",
required=False, flag=None, group="default", display_name=None,
......@@ -118,7 +132,7 @@ class Component(object):
self.params_order.append(name)
self.__setattr__(name, new_param)
def add_parameter(self, name, help, default=None, type=types.StringType, choices=None,
def add_parameter(self, name, help, default=None, type=str, choices=None,
required=False, flag=None, group="default", display_name=None,
cmd_format="", argpos=-1):
new_param = ParameterFactory.factory(name, help, flag=flag, default=default, type=type, choices=choices,
......@@ -128,7 +142,7 @@ class Component(object):
self.params_order.append(name)
self.__setattr__(name, new_param)
def add_parameter_list(self, name, help, default=None, type=types.StringType, choices=None,
def add_parameter_list(self, name, help, default=None, type=str, choices=None,
required=False, flag=None, group="default", display_name=None,
cmd_format="", argpos=-1):
if default == None: default = []
......@@ -301,7 +315,7 @@ class Component(object):
def execute(self):
# first create the output directory
if not os.path.isdir(self.output_directory):
os.makedirs(self.output_directory, 0751)
os.makedirs(self.output_directory, 0o751)
# then run the component
self.process()
......@@ -410,7 +424,7 @@ class Component(object):
def get_temporary_file(self, suffix=".txt"):
# first check if tmp directory exists
if not os.path.isdir(self.config_reader.get_tmp_directory()):
os.makedirs(self.config_reader.get_tmp_directory(), 0751)
os.makedirs(self.config_reader.get_tmp_directory(), 0o751)
tempfile_name = os.path.basename(tempfile.NamedTemporaryFile(suffix=suffix).name)
return os.path.join(self.config_reader.get_tmp_directory(), tempfile_name)
......
......@@ -70,7 +70,7 @@ def get_priority( requests, evaluated_request ):
priority is 1.
"""
priority = None
requests.sort( cmp=priority_compare )
requests.sort( key=priority_compare_key(priority_compare) )
for idx, current_request in enumerate(requests):
if current_request['timestamp'] == evaluated_request['timestamp'] and current_request['PID'] == evaluated_request['PID'] and current_request['random'] == evaluated_request['random']:
if priority is None:
......@@ -89,7 +89,7 @@ def get_requests( request_dir, shared_file ):
@return: [list] The list of access requests.
"""
all_requests = list()
pattern = os.path.basename(shared_file) + "_accessRequest_[^\|]+\|\d+\|\d+"
pattern = os.path.basename(shared_file) + "_accessRequest_[^\-]+\-\d+\-\d+"
for request_file in os.listdir(request_dir):
if re.match(pattern, request_file):
try:
......@@ -122,7 +122,7 @@ def stopRetry( priorities, max_stable_priorities ):
stop_retry = False
return stop_retry
def exec_on_shared( process_fct, shared_file, tmp_dir="/tmp", time_between_retry=3.0, max_stable_priorities=100 ):
def exec_on_shared( process_fct, shared_file, tmp_dir="/tmp", time_between_retry=0.7, max_stable_priorities=100, metadata=None ):
"""
@summmary: Manages concurrent access in writing mode between several
processes on a shared file.
......@@ -132,11 +132,12 @@ def exec_on_shared( process_fct, shared_file, tmp_dir="/tmp", time_between_retry
between the concurrent processes.
@param tmp_dir: [str] The path to the directory where the temporary files
(request and lock) are stored.
@param time_between_retry: [float] the number of seconds between each
@param time_between_retry: [float] the number of seconds between each
retry.
@param max_stable_priorities: [int] The number of retry with the same
priority for consider situation as a
deadlock.
@param metadata: [dict] The metadata added in access request file.
@return: The process_fct return or None if the process_fct does not have
return.
@note: Bug if 1 node with a late timestamp executes the reservation, the
......@@ -151,11 +152,12 @@ def exec_on_shared( process_fct, shared_file, tmp_dir="/tmp", time_between_retry
current_request = {
'timestamp': time.time(),
'PID': os.getpid(),
'random': random.randint(1, 10000000000000000)
'random': random.randint(1, 10000000000000000),
'metadata': metadata
}
# Set request file
request_filename = "{}_accessRequest_{}|{}|{}".format(os.path.basename(shared_file), current_request['timestamp'], current_request['PID'], current_request['random'])
request_filename = "{}_accessRequest_{}-{}-{}".format(os.path.basename(shared_file), current_request['timestamp'], current_request['PID'], current_request['random'])
current_request_file = os.path.join( tmp_dir, request_filename )
try:
......
......@@ -20,7 +20,7 @@ import sys
import inspect
import logging
from ConfigParser import ConfigParser, NoOptionError
from configparser import RawConfigParser, NoOptionError
from jflow.utils import which, display_error_message
......@@ -34,12 +34,12 @@ class JFlowConfigReader(object):
def __init__(self):
"""
"""
self.reader = ConfigParser()
self.reader = RawConfigParser()
self.reader.read(os.path.join(os.path.dirname(inspect.getfile(self.__class__)), self.CONFIG_FILE_PATH))
def get_tmp_directory(self):
if not os.path.isdir(self.reader.get("storage", "tmp_directory")):
os.makedirs(self.reader.get("storage", "tmp_directory"), 0751)
os.makedirs(self.reader.get("storage", "tmp_directory"), 0o751)
return self.reader.get("storage", "tmp_directory")
def get_work_directory(self):
......@@ -48,7 +48,7 @@ class JFlowConfigReader(object):
def get_exec(self, software):
try:
return self.reader.get("softwares", software)
except NoOptionError, e:
except NoOptionError:
return None
def get_resource(self, resource):
......@@ -62,12 +62,12 @@ class JFlowConfigReader(object):
try:
return self.reader.get('storage', 'log_file')
except :
raise Error("Failed when parsing the config file, no section logging found!")
raise NoOptionError("Failed when parsing the config file, no section logging found!")
def get_makeflow_path(self):
try:
exec_path = self.reader.get("global", "makeflow")
except NoOptionError, e:
except NoOptionError:
exec_path = None
if exec_path is None: exec_path = "makeflow"
if which(exec_path) == None:
......@@ -79,7 +79,7 @@ class JFlowConfigReader(object):
try:
date_format = self.reader.get("global", "date_format")
except:
raise Error("Failed when parsing the config file, no parameter date_format!")
raise NoOptionError("Failed when parsing the config file, no parameter date_format!")
return date_format
def get_batch(self):
......@@ -88,7 +88,7 @@ class JFlowConfigReader(object):
options = self.reader.get("global", "batch_options")
limit_submission = self.reader.get("global", "limit_submission")
return [type, options, limit_submission]
except NoOptionError, e:
except NoOptionError:
return None
def get_socket_options(self):
......
......@@ -44,7 +44,7 @@ class ExternalParser(object):
options = {'define_parameters' : fn_define_parameters }
if kwargs :
for key, val in kwargs.items() :
for key, val in list(kwargs.items()) :
options[key] = val
ComponentType = type(component_name, (_SerializableNestedComponent,),options)
......
......@@ -15,11 +15,11 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from featureiolib.bed import BEDReader
from featureiolib.biom import Biom, BiomIO
from featureiolib.gff3 import GFF3Record, GFF3IO
from featureiolib.mpileup import MpileupReader
from featureiolib.obo import OboReader
from featureiolib.vcf import VCFReader
from featureiolib.wego import WEGOReader
from featureiolib.separatedvalue import SeparatedValueIO
\ No newline at end of file
from .featureiolib.bed import BEDReader
from .featureiolib.biom import Biom, BiomIO
from .featureiolib.gff3 import GFF3Record, GFF3IO
from .featureiolib.mpileup import MpileupReader
from .featureiolib.obo import OboReader
from .featureiolib.vcf import VCFReader
from .featureiolib.wego import WEGOReader
from .featureiolib.separatedvalue import SeparatedValueIO
\ No newline at end of file
......@@ -51,7 +51,7 @@ class Entry(object):
return self.attrib[key]
def has(self,attr):
return self.attrib.has_key(attr)
return attr in self.attrib
class _AbstractFeatureReader(object):
'''
......@@ -63,7 +63,7 @@ class _AbstractFeatureReader(object):
@param wholefile: If True, then it is ok to read the entire file into memory. This is faster when there are
many newlines in the file, but may obviously need a lot of memory.
"""
if isinstance(file, basestring):
if isinstance(file, str):
file = xopen(file, "r")
self.fp = file
self.wholefile = wholefile
......
......@@ -21,7 +21,7 @@ import re
from jflow.seqio import FormatError
from jflow.seqio import UnknownFileType
from abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
from .abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
class BEDReader(_AbstractFeatureReader):
......@@ -30,7 +30,7 @@ class BEDReader(_AbstractFeatureReader):
'''
def _process_line(self,line):
row = line.rstrip().split('\t')
if len(row) not in range(3,13) : raise FormatError('Invalid number of columns in your BED file {0}'.format( len(row)))
if len(row) not in list(range(3,13)) : raise FormatError('Invalid number of columns in your BED file {0}'.format( len(row)))
return Entry(**{ 'chrom' : row[0], 'chromStart' : row[1], 'chromEnd' : row[2] })
def _streaming_iter(self):
......@@ -41,8 +41,7 @@ class BEDReader(_AbstractFeatureReader):
def _wholefile_iter(self):
wholefile = self.fp.read()
assert '\r' not in wholefile, "Sorry, currently don't know how to deal with files that contain \\r linebreaks"
assert len(wholefile) == 0 , "Empty BED file"
assert len(wholefile) != 0 , "Empty BED file"
for line in wholefile.split('\n') :
if line.startswith('#') :
......
......@@ -74,7 +74,7 @@ def _bootstrap_selection( OTU_count, nb_total_elts, nb_selected_elts ):
if elt_index <= (OTU_count[OTU_idx]["nb"] + previous_elt):
find = True
OTU_id = OTU_count[OTU_idx]["id"]
if selected_OTU.has_key( OTU_id ):
if OTU_id in selected_OTU:
selected_OTU[OTU_id] += 1
else:
selected_OTU[OTU_id] = 1
......@@ -141,7 +141,7 @@ class SparseData( dict ):
def __init__( self, list=None ):
ini_list = list if list is not None else list()
for data in ini_list:
if not self.has_key( data[0] ):
if not data[0] in self:
self[data[0]] = dict()
self[data[0]][data[1]] = data[2]
......@@ -154,7 +154,7 @@ class SparseData( dict ):
[ 9, 1, 521 ]] # The column 1 of the row 9 has a count of 521.
"""
sparse = list()
for rows_idx in sorted(self.keys(), key=int):
for rows_idx in sorted(list(self.keys()), key=int):
for columns_idx in sorted(self[rows_idx].keys()):
sparse.append([ rows_idx, columns_idx, self[rows_idx][columns_idx] ])
return sparse
......@@ -171,12 +171,12 @@ class SparseData( dict ):
@summary : Remove all the count for the column provided.
@param remove_idx : [int] The real index of the column to remove.
"""
for rows_idx in self.keys():
for rows_idx