Commit 96472d18 authored by Ivan Tyagov

Allow the API to accept a reference to the object (i.e. a Data Array) where the transformation result is expected to be stored.
parent c850a14d
@@ -87,7 +87,7 @@ for line in line_list:\n
     pass \n
   else:\n
     d = json.loads(line)\n
-    # xxx: save this value as a numpy array\n
+    # xxx: save this value as a Data Array identified by data_array_reference\n
 \n
 # start and end offsets may not match existing record structure in stream\n
 # thus corrections in start and end offsets are needed, so we\n
@@ -102,7 +102,7 @@ return start, end\n
 </item>
 <item>
   <key> <string>_params</string> </key>
-  <value> <string>chunk_list, start, end</string> </value>
+  <value> <string>chunk_list, start, end, data_array_reference=None</string> </value>
 </item>
 <item>
   <key> <string>id</string> </key>
......
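The `xxx` comment above marks where the parsed values are meant to end up in the Data Array named by the new parameter; the commit itself only updates the comment and the signature. Below is a minimal sketch of how that TODO could be completed inside the transform script, assuming a catalog lookup by reference and a simple 1-D float array; the `value` field name, the lookup, and the array-filling logic are illustrative, not part of this commit.

```python
# Sketch of the transform script body (its _params being
# "chunk_list, start, end, data_array_reference=None"; `context` is supplied
# by the Python Script environment). Everything after json.loads() is an
# assumption about how the TODO could be implemented.
import json
import numpy as np

line_list = ''.join(chunk_list).split('\n')
value_list = []
for line in line_list:
  if not line:
    pass
  else:
    d = json.loads(line)
    value_list.append(d.get('value'))  # hypothetical field in each JSON record

if data_array_reference is not None and value_list:
  # resolve the Data Array by its reference (assumed to be catalogued)
  data_array = context.portal_catalog.getResultValue(
                 portal_type='Data Array',
                 reference=data_array_reference)
  if data_array is not None:
    if data_array.getArray() is None:
      data_array.initArray((len(value_list),), np.float64)
    # write the parsed values into the persistent array slice
    data_array.getArray()[:len(value_list)] = np.array(value_list, np.float64)

return start, end
```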
@@ -62,7 +62,10 @@ data_stream_chunk_list = data_stream.readChunkList(start, end)\n
 if transform_script_id is not None:\n
   transform_script = getattr(data_stream, transform_script_id, None)\n
   if transform_script is not None:\n
-    start, end = transform_script(data_stream_chunk_list, start, end)\n
+    start, end = transform_script(data_stream_chunk_list, \\n
+                                  start, \\n
+                                  end, \\n
+                                  data_array_reference)\n
 \n
 # [warning] store current position offset in Data Stream, this can easily cause \n
 # ConflictErrors and it spawns re-index activities on DataStream\n
@@ -91,7 +94,7 @@ if start < total_stream_length:\n
 </item>
 <item>
   <key> <string>_params</string> </key>
-  <value> <string>data_stream_relative_url, start, end, chunk_length, transform_script_id=None</string> </value>
+  <value> <string>data_stream_relative_url, start, end, chunk_length, transform_script_id=None, data_array_reference=None</string> </value>
 </item>
 <item>
   <key> <string>id</string> </key>
......
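Note that the updated caller above always passes `data_array_reference` as a fourth positional argument, so any custom transform script referenced by `transform_script_id` has to accept it; giving it a `None` default (as the updated `_params` does) keeps older call sites working. A small illustration with a hypothetical script, shown as a plain function:

```python
# Hypothetical transform script, written as a plain function for clarity.
# Old signature (chunk_list, start, end) would now raise TypeError, because
# the caller passes four positional arguments. A None default keeps both the
# updated caller and any older three-argument call working.
def example_transform(chunk_list, start, end, data_array_reference=None):
  if data_array_reference is None:
    # behave exactly as before the commit: no Data Array involved
    return start, end
  # ... otherwise store the transformed chunks under data_array_reference ...
  return start, end
```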
@@ -73,7 +73,7 @@ return data_length\n
 </item>
 <item>
   <key> <string>_params</string> </key>
-  <value> <string>chunk_length=1048576, transform_script_id=None</string> </value>
+  <value> <string>chunk_length=1048576, transform_script_id=None, data_array_reference=None</string> </value>
 </item>
 <item>
   <key> <string>id</string> </key>
......
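Taken together, the three scripts expose `data_array_reference` all the way from the top-level entry point (with the 1 MB default `chunk_length` above) down to the transform script. The script ids are not visible in this diff, so the names in the sketch below are placeholders; only the keyword arguments correspond to the updated `_params`:

```python
# Placeholder script ids; only the parameter names come from the diff above.
data_stream = portal.data_stream_module['my_stream']        # assumed lookup
data_stream.DataStream_processChunks(                       # hypothetical top-level script id
    chunk_length=1048576,                                   # default from the diff above
    transform_script_id='DataStream_transformChunks',       # hypothetical transform script id
    data_array_reference='my-data-array')                   # reference of the target Data Array
```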
@@ -115,14 +115,16 @@ class Test(ERP5TypeTestCase):
     # test copy numpy -> wendelin but first resize persistent one (add new one)
     data_array.initArray((4, 4), np.uint8)
     persistent_zbig_array = data_array.getArray()
-    rows = [0,1]
-    cols = [2,2]
     new_array = np.arange(1,17).reshape((4,4))
     persistent_zbig_array[:,:] = new_array
     self.assertEquals(new_array.shape, persistent_zbig_array.shape)
     # (enable when new wendelin.core released as it can kill system)
-    #self.assertTrue(np.array_equal(a, persistent_zbig_array))
+    #self.assertTrue(np.array_equal(new_array, persistent_zbig_array))
+
+    # test set element in zbig array
+    persistent_zbig_array[:2, 2] = 0
+    #self.assertFalse(np.array_equal(new_array, persistent_zbig_array))
 
     # resize Zbig Array (enable when new wendelin.core released as it can kill system)
     #persistent_zbig_array = np.resize(persistent_zbig_array, (100,100))
......
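The added test lines rely only on basic NumPy slicing: `persistent_zbig_array[:2, 2] = 0` zeroes column 2 of the first two rows (which is what the removed, unused `rows`/`cols` variables hinted at), so the array no longer equals `new_array`. The same check, runnable with plain NumPy outside of ZBigArray:

```python
import numpy as np

new_array = np.arange(1, 17).reshape((4, 4))
array = new_array.copy()

# zero column 2 of the first two rows, as the added test line does
array[:2, 2] = 0

assert new_array.shape == array.shape
assert not np.array_equal(new_array, array)  # entries (0, 2) and (1, 2) differ
```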
@@ -48,8 +48,6 @@
 <tuple>
   <string>W: 53, 4: Unused variable 'scipy' (unused-variable)</string>
   <string>W: 54, 4: Unused variable 'sklearn' (unused-variable)</string>
-  <string>W:118, 4: Unused variable 'rows' (unused-variable)</string>
-  <string>W:119, 4: Unused variable 'cols' (unused-variable)</string>
 </tuple>
 </value>
 </item>
......