Commit 67d45e09 authored by Jerome Mariette

from python2 to python3 > ok from command line

parent 51ed8e44
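The recurring edits below are the standard Python 2 → 3 substitutions. A minimal sketch of the main patterns, using throwaway names rather than the patched modules' own:

d = {"chrom": "1"}

# dict.has_key() was removed in Python 3; membership uses the "in" operator.
"chrom" in d                    # replaces d.has_key("chrom")

# basestring no longer exists; str is the only text string type.
isinstance("reads.bed", str)    # replaces isinstance(x, basestring)

# dict.keys() now returns a live view; wrap it in list() before mutating
# the dict or whenever an actual list is required.
for key in list(d.keys()):
    del d[key]

# The iterator protocol renamed next() to __next__(); the next() builtin calls it.
it = iter([1, 2, 3])
next(it)                        # 1

# Implicit relative imports are gone; sibling modules need the dot prefix,
# e.g. "from .abstractfeaturereader import ..." as in the hunks below.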
......@@ -51,7 +51,7 @@ class Entry(object):
return self.attrib[key]
def has(self,attr):
return self.attrib.has_key(attr)
return attr in self.attrib
class _AbstractFeatureReader(object):
'''
......@@ -63,7 +63,7 @@ class _AbstractFeatureReader(object):
@param wholefile: If True, then it is ok to read the entire file into memory. This is faster when there are
many newlines in the file, but may obviously need a lot of memory.
"""
if isinstance(file, basestring):
if isinstance(file, str):
file = xopen(file, "r")
self.fp = file
self.wholefile = wholefile
......
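One subtlety of the basestring → str swap, noted as a caveat rather than a problem with the commit: Python 2's basestring matched both str and unicode, whereas Python 3's str is text only, so a caller passing a bytes filename would no longer take the xopen branch.

# Python 3: only text strings are treated as file names to open.
isinstance("reads.vcf", str)     # True  -> opened via xopen
isinstance(b"reads.vcf", str)    # False -> treated as an open file object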
......@@ -21,7 +21,7 @@ import re
from jflow.seqio import FormatError
from jflow.seqio import UnknownFileType
from abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
from .abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
class BEDReader(_AbstractFeatureReader):
......@@ -30,7 +30,7 @@ class BEDReader(_AbstractFeatureReader):
'''
def _process_line(self,line):
row = line.rstrip().split('\t')
if len(row) not in range(3,13) : raise FormatError('Invalid number of columns in your BED file {0}'.format( len(row)))
if len(row) not in list(range(3,13)) : raise FormatError('Invalid number of columns in your BED file {0}'.format( len(row)))
return Entry(**{ 'chrom' : row[0], 'chromStart' : row[1], 'chromEnd' : row[2] })
def _streaming_iter(self):
......
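The list(range(3, 13)) wrapper is harmless but not required: Python 3's range object supports membership tests directly, in constant time, so the original expression would have worked unchanged. For instance:

12 in range(3, 13)    # True, no list is built
13 in range(3, 13)    # False, the upper bound is exclusive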
......@@ -74,7 +74,7 @@ def _bootstrap_selection( OTU_count, nb_total_elts, nb_selected_elts ):
if elt_index <= (OTU_count[OTU_idx]["nb"] + previous_elt):
find = True
OTU_id = OTU_count[OTU_idx]["id"]
if selected_OTU.has_key( OTU_id ):
if OTU_id in selected_OTU:
selected_OTU[OTU_id] += 1
else:
selected_OTU[OTU_id] = 1
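The increment-or-create branch above could equally be collapsed with dict.get; an equivalent one-liner, not part of the commit:

selected_OTU[OTU_id] = selected_OTU.get(OTU_id, 0) + 1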
......@@ -141,7 +141,7 @@ class SparseData( dict ):
def __init__( self, list=None ):
ini_list = list if list is not None else []  # the "list" parameter shadows the builtin, so calling list() here would invoke None
for data in ini_list:
if not self.has_key( data[0] ):
if data[0] not in self:
self[data[0]] = dict()
self[data[0]][data[1]] = data[2]
......@@ -154,7 +154,7 @@ class SparseData( dict ):
[ 9, 1, 521 ]] # The column 1 of the row 9 has a count of 521.
"""
sparse = list()
for rows_idx in sorted(self.keys(), key=int):
for rows_idx in sorted(list(self.keys()), key=int):
for columns_idx in sorted(self[rows_idx].keys()):
sparse.append([ rows_idx, columns_idx, self[rows_idx][columns_idx] ])
return sparse
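To make the triplet format concrete, a small sketch with hypothetical counts mirroring the docstring example:

# Row 0 has 3 in column 1; row 9 has 521 in column 1.
data = {0: {1: 3}, 9: {1: 521}}
sparse = [[r, c, data[r][c]]
          for r in sorted(data, key=int)
          for c in sorted(data[r])]
# sparse == [[0, 1, 3], [9, 1, 521]]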
......@@ -171,12 +171,12 @@ class SparseData( dict ):
@summary : Remove all the count for the column provided.
@param remove_idx : [int] The real index of the column to remove.
"""
for rows_idx in self.keys():
for rows_idx in list(self.keys()):
# Remove data
if self[rows_idx].has_key( remove_idx ):
if remove_idx in self[rows_idx]:
del self[rows_idx][remove_idx]
# Change index
row_columns_idx = sorted( self[rows_idx].keys(), key=int )
row_columns_idx = sorted( list(self[rows_idx].keys()), key=int )
for column_idx in row_columns_idx:
if column_idx > remove_idx:
self[rows_idx][column_idx -1] = self[rows_idx][column_idx]
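Wrapping keys() in list() here is defensive, and in remove_row below it is mandatory: Python 3's keys() returns a live view, and resizing a dict while iterating over its view raises RuntimeError. A minimal reproduction:

d = {0: "a", 1: "b"}
try:
    for k in d.keys():      # live view
        del d[k]            # resizes the dict mid-iteration
except RuntimeError:
    pass                    # "dictionary changed size during iteration"

for k in list({0: "a", 1: "b"}):    # snapshot of the keys; always safe
    pass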
......@@ -188,10 +188,10 @@ class SparseData( dict ):
@param remove_idx : [int] The real index of the row to remove.
"""
# Remove data
if self.has_key( remove_idx ):
if remove_idx in self:
del self[remove_idx]
# Change indexes
all_rows_idx = sorted( self.keys(), key=int )
all_rows_idx = sorted( list(self.keys()), key=int )
for row_idx in all_rows_idx:
if row_idx > remove_idx:
self[row_idx - 1] = self[row_idx]
......@@ -205,8 +205,8 @@ class SparseData( dict ):
"""
# Merge counts
added_values = dict()
for row_idx in self.keys():
if self[row_idx].has_key( added_idx ):
for row_idx in list(self.keys()):
if added_idx in self[row_idx]:
self.add( row_idx, sum_idx, self[row_idx][added_idx] )
# Remove column
self.remove_col( added_idx )
......@@ -218,7 +218,7 @@ class SparseData( dict ):
@param col_idx : [int] The index of the column.
"""
nb = 0
if self.has_key(row_idx) and self[row_idx].has_key(col_idx):
if row_idx in self and col_idx in self[row_idx]:
nb = self[row_idx][col_idx]
return nb
......@@ -228,8 +228,8 @@ class SparseData( dict ):
@param col_idx : [int] The index of the column.
"""
total = 0
for row_idx in self.keys():
if self[row_idx].has_key( col_idx ):
for row_idx in list(self.keys()):
if col_idx in self[row_idx]:
total += self[row_idx][col_idx]
return total
......@@ -239,8 +239,8 @@ class SparseData( dict ):
@param row_idx : [int] The index of the row.
"""
total = 0
if self.has_key( row_idx ):
for column_idx in self[row_idx].keys():
if row_idx in self:
for column_idx in list(self[row_idx].keys()):
total += self[row_idx][column_idx]
return total
......@@ -252,7 +252,7 @@ class SparseData( dict ):
@param nb_col : [int] The expected number of columns.
"""
array = [0 for current in range(nb_col)]
if self.has_key( row_idx ):
if row_idx in self:
for column_idx in sorted( self[row_idx].keys() ):
array[column_idx] = self[row_idx][column_idx]
return array
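A quick illustration of get_row_array with hypothetical contents: absent columns become zeros, up to nb_col entries.

row = {0: 5, 3: 7}               # hypothetical sparse row
array = [0 for _ in range(5)]    # nb_col = 5
for col in sorted(row):
    array[col] = row[col]
# array == [5, 0, 0, 7, 0]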
......@@ -264,9 +264,9 @@ class SparseData( dict ):
@param col_idx : [int] The index of the column.
@param value : [int] The value to add.
"""
if not self.has_key( row_idx ):
if row_idx not in self:
self[row_idx] = { col_idx : 0 }
elif not self[row_idx].has_key( col_idx ):
elif col_idx not in self[row_idx]:
self[row_idx][col_idx] = 0
self[row_idx][col_idx] += value
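The two explicit branches in add could also be folded into dict.setdefault; an equivalent standalone sketch, not part of the commit:

def add(data, row_idx, col_idx, value):
    # Create the row and the cell on first use, then accumulate.
    data.setdefault(row_idx, {}).setdefault(col_idx, 0)
    data[row_idx][col_idx] += value

counts = {}
add(counts, 2, 0, 5)
add(counts, 2, 0, 3)
# counts == {2: {0: 8}}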
......@@ -277,7 +277,7 @@ class SparseData( dict ):
@param col_idx : [int] The index of the column.
@param value : [int] The value to subtract.
"""
if self.has_key( row_idx ) and self[row_idx].has_key( col_idx ) and self[row_idx][col_idx] >= value:
if row_idx in self and col_idx in self[row_idx] and self[row_idx][col_idx] >= value:
self[row_idx][col_idx] -= value
else:
raise Exception( "'" + str(value) + "' cannot be subtracted from row " + str(row_idx) + " column " + str(col_idx) + "." )
......@@ -290,12 +290,12 @@ class SparseData( dict ):
@param value : [int] The new value.
"""
if value != 0:
if not self.has_key( row_idx ):
if row_idx not in self:
self[row_idx] = { col_idx : value }
else:
self[row_idx][col_idx] = value
else:
if self.has_key( row_idx ) and self[row_idx].has_key( col_idx ) :
if row_idx in self and col_idx in self[row_idx]:
del self[row_idx][col_idx]
def random_by_col( self, col_idx ):
......@@ -442,7 +442,7 @@ class Biom:
final_idx = self.find_idx(self.columns, samples[0])
final_sample = self.columns[final_idx]
if final_sample['metadata'] is not None:
metadata_names = final_sample['metadata'].keys()
metadata_names = list(final_sample['metadata'].keys())
for metadata_name in metadata_names:
final_sample['metadata'][final_sample['id'] + ":" + metadata_name] = final_sample['metadata'][metadata_name]
del final_sample['metadata'][metadata_name]
......@@ -455,7 +455,7 @@ class Biom:
current_idx = self.find_idx(self.columns, current_name)
current_sample = self.columns[current_idx]
# Update metadata history
if final_sample['metadata'].has_key( "merge_history" ):
if "merge_history" in final_sample['metadata']:
final_sample['metadata']['merge_history'] += " AND " + current_sample['id']
else:
final_sample['metadata']['merge_history'] = final_sample['id'] + " AND " + current_sample['id']
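With the key quoted correctly, the whole branch can also be written with dict.get, seeding the history with the final sample's id on the first merge; an equivalent sketch against the surrounding names:

meta = final_sample['metadata']
meta['merge_history'] = meta.get('merge_history', final_sample['id']) + " AND " + current_sample['id']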
......@@ -514,7 +514,7 @@ class Biom:
else:
if subject_list[subject_idx]['metadata'] is None:
subject_list[subject_idx]['metadata'] = dict()
elif subject_list[subject_idx]['metadata'].has_key( metadata_name ):
elif metadata_name in subject_list[subject_idx]['metadata']:
sys.stderr.write("[WARNING] You are overwriting the previous value of the metadata named '" + metadata_name + "' in " + subject_name + " (OLD:'" + str(subject_list[subject_idx]['metadata'][metadata_name]) + "' => NEW:'" + str(metadata_value) + "').\n")
subject_list[subject_idx]['metadata'][metadata_name] = metadata_value
......@@ -583,7 +583,7 @@ class Biom:
except ValueError:
self.rows.append( {'id':observation_name, 'metadata':None } )
self.data.add_row()
for metadata_name in ini_metadata.keys():
for metadata_name in list(ini_metadata.keys()):
self.add_metadata( observation_name, metadata_name, ini_metadata[metadata_name], "observation" )
# Observation already exists
else:
......@@ -602,7 +602,7 @@ class Biom:
except ValueError:
self.columns.append( {'id':sample_name, 'metadata':None } )
self.data.add_column()
for metadata_name in ini_metadata.keys():
for metadata_name in list(ini_metadata.keys()):
self.add_metadata( sample_name, metadata_name, ini_metadata[metadata_name], "sample" )
# Sample already exists
else:
......@@ -681,7 +681,7 @@ class Biom:
# Random selection
selected = _bootstrap_selection( remaining_OTU_count, nb_elts, nb_selected_elts )
# Add to new_data
for OTU_id in selected.keys():
for OTU_id in list(selected.keys()):
OTU_idx = self.find_idx( self.rows, OTU_id )
sample_idx = self.find_idx( self.columns, current_sample['id'] )
self.data.add( OTU_idx, sample_idx, selected[OTU_id] )
......@@ -897,10 +897,10 @@ class BiomIO:
title_fields = title_line.split()
for metadata_name in title_fields[1:]:
metadata_type = "str"
if ini_types.has_key( metadata_name ):
if metadata_name in ini_types:
metadata_type = ini_types[metadata_name]
metadata_list_sep = None
if ini_list_sep.has_key( metadata_name ):
if metadata_name in ini_list_sep:
metadata_list_sep = ini_list_sep[metadata_name]
metadata.append( {
'name' : metadata_name,
......
......@@ -43,7 +43,7 @@ class GFF3Record:
if self.attributes is not None :
self.attributes[cleaned_tag] = cleaned_value
else:
raise "The attibute 'Attributes' is not initialized."
raise ValueError("The attribute 'Attributes' is not initialized.")
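Switching to ValueError is required rather than stylistic: Python 3 only raises objects deriving from BaseException, so the old string raise is itself a TypeError.

try:
    raise "not an exception object"
except TypeError:
    pass   # "exceptions must derive from BaseException" in Python 3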
def addToAttribute( self, tag, value ):
"""
......@@ -54,12 +54,12 @@ class GFF3Record:
cleaned_tag = GFF3Record._getCleanedAttribute(tag)
cleaned_value = GFF3Record._getCleanedAttribute(value)
if self.attributes is not None :
if self.attributes.has_key( cleaned_tag ):
if cleaned_tag in self.attributes:
self.attributes[cleaned_tag] = self.attributes[cleaned_tag] + "%2C" + cleaned_value
else:
self.attributes[cleaned_tag] = cleaned_value
else:
raise "The attibute 'Attributes' is not initialized."
raise ValueError("The attribute 'Attributes' is not initialized.")
def _attributesToGff( self ):
"""
......@@ -89,13 +89,13 @@ class GFF3Record:
@see : RFC 3986 Percent-Encoding
"""
cleaned_tag = GFF3Record._getCleanedAttribute(tag)
if self.attributes.has_key( cleaned_tag ):
if cleaned_tag in self.attributes:
readable_value = self.attributes[cleaned_tag].replace('%3B', ';')
readable_value = readable_value.replace('%2C', ',')
readable_value = readable_value.replace('%3D', '=')
return readable_value
else:
raise "The attibute 'Attributes' is not initialized."
raise ValueError("The attribute '" + cleaned_tag + "' does not exist.")
@staticmethod
def _getCleanedAttribute( dirty_value ):
......
......@@ -21,7 +21,7 @@ import re
from jflow.seqio import FormatError
from jflow.seqio import UnknownFileType
from abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
from .abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
class MpileupReader(_AbstractFeatureReader):
......
......@@ -21,7 +21,7 @@ import re
from jflow.seqio import FormatError
from jflow.seqio import UnknownFileType
from abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
from .abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
class OboReader(_AbstractFeatureReader):
......
......@@ -52,7 +52,7 @@ class SeparatedValueIO(object):
else:
yield record
def next(self):
def __next__(self):
"""
@summary : Returns the next line record.
@return : [list] The line record.
......
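Renaming next to __next__ is what lets for loops and the next() builtin drive the reader again under Python 3. A minimal sketch of the protocol with a hypothetical reader, not the jflow class:

class LineReader:
    def __init__(self, lines):
        self._it = iter(lines)
    def __iter__(self):
        return self
    def __next__(self):           # Python 3 protocol method
        return next(self._it)     # propagates StopIteration when exhausted

reader = LineReader(["a\t1", "b\t2"])
next(reader)                      # "a\t1"
for record in reader:             # consumes the rest
    pass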
......@@ -22,7 +22,7 @@ from jflow.seqio import xopen
from jflow.seqio import FormatError
from jflow.seqio import UnknownFileType
from abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
from .abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
class VCFReader(_AbstractFeatureReader):
......@@ -49,7 +49,7 @@ class VCFReader(_AbstractFeatureReader):
@param wholefile: If True, then it is ok to read the entire file into memory. This is faster when there are
many newlines in the file, but may obviously need a lot of memory.
"""
if isinstance(file, basestring):
if isinstance(file, str):
file = xopen(file, "r")
self.fp = file
self.wholefile = wholefile
......
......@@ -21,7 +21,7 @@ import re
from jflow.seqio import FormatError
from jflow.seqio import UnknownFileType
from abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
from .abstractfeaturereader import _AbstractFeatureReader, Entry, boolify, autocast
class WEGOReader(_AbstractFeatureReader):
......