Commit dd988c3b authored by Roque

erp5_wendelin: fix script name conflict

parent 43f6f999
......@@ -2,4 +2,4 @@
Get the list of Data Streams for the context Data Set.
"""
data_set_uid = context.getUid()
-return context.DataSet_getDataStreamList(data_set_uid, limit)
+return context.ERP5Site_getDataStreamList(data_set_uid, limit)
......@@ -54,7 +54,7 @@
</item>
<item>
<key> <string>id</string> </key>
-<value> <string>DataSet_getDataStreamList</string> </value>
+<value> <string>ERP5Site_getDataStreamList</string> </value>
</item>
<item>
<key> <string>title</string> </key>
......
"""
Return the list of Data Streams belonging to a Data Set.
Data Ingestion line aggregates both Data Set and Data Stream.
Note: This code is quite computationally costly (for Data Sets having thousands of files) as it needs to:
1. Query MariaDB to find ingestion lines
2. Read from ZODB both Data Ingestion Lines and Data Streams (which themselves can be big too)
"""
data_ingestion_line_list = context.portal_catalog(
  portal_type = "Data Ingestion Line",
  aggregate_uid = context.getUid())
return [x.getAggregateValue(portal_type = "Data Stream") \
  for x in data_ingestion_line_list]
......@@ -50,11 +50,11 @@
</item>
<item>
<key> <string>_params</string> </key>
-<value> <string>data_set_reference=None</string> </value>
+<value> <string></string> </value>
</item>
<item>
<key> <string>id</string> </key>
-<value> <string>ERP5Site_checkIngestedData</string> </value>
+<value> <string>DataSet_getDataStreamList</string> </value>
</item>
</dictionary>
</pickle>
......
import json
portal = context.getPortalObject()
portal_catalog = portal.portal_catalog
def getDatasetInfo(data_set):
  size = 0
  datastream_result_dict = json.loads(portal.ERP5Site_getDataStreamList(data_set.getReference()))
  for stream_dict in datastream_result_dict['result']:
    size += stream_dict['full-size']
  return len(datastream_result_dict['result']), size
def format_size(num, suffix='b'):
  for unit in ['','K','M','G','T','P','E','Z']:
    if abs(num) < 1024.0:
      return "%3.1f %s%s" % (num, unit, suffix)
    num /= 1024.0
  return "%.1f %s%s" % (num, 'Yi', suffix)
data_set_list = []
if data_set_reference:
  try:
    data_set = portal.data_set_module.get(data_set_reference)
    if data_set is None or portal.ERP5Site_checkReferenceInvalidated(data_set):
      return "Not found: there is no valid dataset for that reference"
    data_set_list.append(data_set)
  except Exception as e: # fails because unauthorized access
    return "ERROR: " + str(e)
else:
  data_set_list = portal_catalog(portal_type="Data Set", validation_state='validated OR published')
total_size = 0
for data_set in data_set_list:
  print "Data set " + data_set.getReference()
  nfiles, size = getDatasetInfo(data_set)
  total_size += size
  print " #files: " + str(nfiles)
  print " Size: " + format_size(size)
  print
if len(data_set_list) > 1:
  print
  print "TOTAL SIZE: " + format_size(total_size)
return printed
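
For reference, the size summary above just adds up the 'full-size' entries returned for each Data Set and renders the total with format_size. Below is a minimal standalone sketch of that logic, outside of any Zope/ERP5 context, assuming the result has the same shape as the json.loads output used above; the helper name summarize and the sample byte counts are made up for illustration.

def format_size(num, suffix='b'):
  # Same scaling as the script above: divide by 1024 until the value fits the unit.
  for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
    if abs(num) < 1024.0:
      return "%3.1f %s%s" % (num, unit, suffix)
    num /= 1024.0
  return "%.1f %s%s" % (num, 'Yi', suffix)

def summarize(datastream_result_dict):
  # Return (number of streams, total size in bytes) for one data set,
  # mirroring getDatasetInfo above but working on a plain dict.
  stream_list = datastream_result_dict['result']
  size = sum(stream['full-size'] for stream in stream_list)
  return len(stream_list), size

sample = {'result': [{'full-size': 1536}, {'full-size': 4096}]}  # hypothetical values
nfiles, size = summarize(sample)
print("#files: %d, size: %s" % (nfiles, format_size(size)))  # -> #files: 2, size: 5.5 Kb
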
......@@ -14,6 +14,6 @@ except Exception as e:
return { "status_code": 1, "error_message": "401 - Unauthorized access. Please check your user credentials and try again." }
data_set_uid = data_set.getUid()
-data_stream_list = context.DataSet_getDataStreamList(data_set_uid)
+data_stream_list = context.ERP5Site_getDataStreamList(data_set_uid)
return { "status_code": 0, "result": len(data_stream_list) }
......@@ -19,7 +19,7 @@ except Exception as e: # fails because unauthorized access
  return json.dumps({ "status_code": 1, "error_message": "401 - Unauthorized access. Please check your user credentials and try again." })
data_set_uid = data_set.getUid()
-data_stream_list = context.DataSet_getDataStreamList(data_set_uid, limit)
+data_stream_list = context.ERP5Site_getDataStreamList(data_set_uid, limit)
data_stream_dict = {}
for stream_brain in data_stream_list:
......