# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
##############################################################################

import zope.interface
from AccessControl import ClassSecurityInfo
from Acquisition import aq_base
from Products.CMFCore.utils import getToolByName
from Products.ERP5Type import Permissions, interfaces
# NOTE: MovementCollectionDiff is instantiated in getMovementCollectionDiff
# below; the import path is assumed here and may differ in the actual layout.
from Products.ERP5.MovementCollectionDiff import MovementCollectionDiff

def _compare(tester_list, prevision_movement, decision_movement):
  for tester in tester_list:
    if not tester.compare(prevision_movement, decision_movement):
      return False
  return True
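
# A minimal usage sketch of _compare (the names below are illustrative, not
# part of this module): given the matching testers of a rule, it tells whether
# a generated (prevision) movement and an existing (decision) movement belong
# to the same matching group.
#
#   tester_list = rule._getMatchingTesterList()
#   if _compare(tester_list, prevision_movement, decision_movement):
#     ...  # same group: the decision movement is a candidate for update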

class RuleMixin:
  """
  Provides generic methods and helper methods to implement
  IRule and IMovementCollectionUpdater.
  """
  # Declarative security
  security = ClassSecurityInfo()
  security.declareObjectProtected(Permissions.AccessContentsInformation)

  # Declarative interfaces
  zope.interface.implements(interfaces.IRule,
                            interfaces.IMovementCollectionUpdater,)

  # Implementation of IRule
  def constructNewAppliedRule(self, context, id=None, 
                              activate_kw=None, **kw):
    """
    Create a new applied rule in the context.

    An applied rule is an instantiation of a Rule. The applied rule is
    linked to the Rule through the `specialise` relation. The newly
    created rule should thus point to self.

    context -- usually, a parent simulation movement of the
               newly created applied rule

    activate_kw -- activity parameters, required to control
                   activity constraints

    kw -- XXX-JPS probably wrong interface specification
    """
    portal_types = getToolByName(self, 'portal_types')
    if id is None:
      id = context.generateNewId()
    if getattr(aq_base(context), id, None) is None:
      context.newContent(id=id,
                         portal_type='Applied Rule',
                         specialise_value=self,
                         activate_kw=activate_kw)
    return context.get(id)
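
  # A hedged usage sketch (illustrative names, not part of the API):
  #
  #   applied_rule = rule.constructNewAppliedRule(simulation_movement)
  #   # applied_rule is an 'Applied Rule' whose 'specialise' category
  #   # points back to rule; it can now be expanded.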

  def expand(self, applied_rule, **kw):
    """
    Expand this applied rule to create new documents inside the
    applied rule.

    At expand time, we must replace or compensate certain
    properties. However, if some properties were overwritten
    by a decision (e.g. a resource is changed), then we
    should not try to compensate such a decision.
    """
    # Update movements
    #  NOTE-JPS: it is OK to make rounding a standard parameter of rules
    #            although rounding in simulation is not recommended at all
    self.updateMovementCollection(applied_rule,
                        movement_generator=self._getMovementGenerator())
    # And forward expand
    for movement in applied_rule.getMovementList():
      movement.expand(**kw)      
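
    # Illustrative call chain (a hedged sketch of how expand recurses through
    # the simulation tree, assuming standard ERP5 simulation documents):
    #
    #   rule.expand(applied_rule)
    #     -> updateMovementCollection(applied_rule, ...)   # create, update or
    #        delete simulation movements inside applied_rule
    #     -> movement.expand() for each movement           # which in turn may
    #        apply other rules and create child applied rules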

  # Implementation of IMovementCollectionUpdater
  def getMovementCollectionDiff(self, context, rounding=False, movement_generator=None):
    """
    Return an IMovementCollectionDiff by comparing the list of
    movements of context with the list of movements generated by
    movement_generator on context.

    context -- an IMovementCollection usually, possibly
               an IMovementList or an IMovement

    movement_generator -- an optional IMovementGenerator
                          (if not specified, a context implicit 
                          IMovementGenerator will be used)
    """
    # We suppose here that we have an IMovementCollection in hand
    if movement_generator is None:
      # Default to the movement generator provided by the rule itself
      movement_generator = self._getMovementGenerator()
    decision_movement_list = context.getMovementList()
    prevision_movement_list = movement_generator(
            self._getMovementGeneratorContext(),
            movement_list=self._getMovementGeneratorMovementList(),
            rounding=rounding)

    # Get divergence testers
    tester_list = self._getMatchingTesterList()
    if len(tester_list) == 0:
      raise ValueError("It is not possible to match movements without divergence testers")

    # Create small groups of movements per hash keys
    decision_movement_dict = {}
    for movement in decision_movement_list:
      tester_key = []
      for tester in tester_list:
        if tester.test(movement):
          tester_key.append(tester.generateHashKey(movement))
        else:
          tester_key.append(None)
      tester_key = tuple(tester_key)
      decision_movement_dict.setdefault(tester_key, []).append(movement)
    prevision_movement_dict = {}
    for movement in prevision_movement_list:
      tester_key = []
      for tester in tester_list:
        if tester.test(movement):
          tester_key.append(tester.generateHashKey(movement))
        else:
          tester_key.append(None)
      tester_key = tuple(tester_key)
      prevision_movement_dict.setdefault(tester_key, []).append(movement)

    # Prepare a mapping between prevision and decision
    #   The prevision_to_decision_map is a list of tuples
    #   of the form (prevision_movement_dict, list of decision_movement)
    prevision_to_decision_map = []

    # First find out all existing (decision) movements which belong to no group
    no_group_list = []
    for tester_key in decision_movement_dict.keys():
      if tester_key in prevision_movement_dict:
        for decision_movement in decision_movement_dict[tester_key]:
          no_match = True
          for prevision_movement in prevision_movement_dict[tester_key]:
            # Check if this movement belongs to an existing group
            if _compare(tester_list, prevision_movement, decision_movement):
              no_match = False
              break
          if no_match:
            # There is no match.
            # So, let us add this decision movement to no_group_list
            no_group_list.append(decision_movement)
      else:
        # The tester key does not even exist.
        # So, let us add all decision movements to no_group_list
        no_group_list.extend(decision_movement_dict[tester_key])
    prevision_to_decision_map.append((None, no_group_list))

    # Second, let us create small groups of movements
    for tester_key in prevision_movement_dict.keys():
      for prevision_movement in prevision_movement_dict[tester_key]:
        map_list = []
        for decision_movement in decision_movement_dict.get(tester_key, ()):
          if _compare(tester_list, prevision_movement, decision_movement):
            map_list.append(decision_movement)
        prevision_to_decision_map.append((prevision_movement, map_list))
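
    # At this point prevision_to_decision_map has, roughly, the following
    # shape (a hedged illustration with made-up entries):
    #
    #   [(None, [decision_movement_x, ...]),    # decisions matching no prevision
    #    (prevision_movement_1, [decision_a, decision_b]),
    #    (prevision_movement_2, []),            # prevision with no decision yet
    #    ...]
    #
    # Each entry is handed to _extendMovementCollectionDiff below, which
    # decides what to delete, update, compensate or create.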

    # Third, time to create the diff    
    movement_collection_diff = MovementCollectionDiff()
    for (prevision_movement, decision_movement_list) in prevision_to_decision_map:
      self._extendMovementCollectionDiff(movement_collection_diff, prevision_movement,
                                         decision_movement_list)

    # Return result
    return movement_collection_diff
                  
  def updateMovementCollection(self, context, rounding=False, movement_generator=None):
    """
    Invoke getMovementCollectionDiff and update context with 
    the resulting IMovementCollectionDiff.

    context -- an IMovementCollection usually, possibly
               an IMovementList or an IMovement

    movement_generator -- an optional IMovementGenerator
                          (if not specified, a context implicit 
                          IMovementGenerator will be used)
    """
    movement_diff = self.getMovementCollectionDiff(context, 
                 rounding=rounding, movement_generator=movement_generator)

    # Apply Diff
    for movement in movement_diff.getDeletableMovementList():
      movement.getParentValue().deleteContent(movement.getId())
    for movement in movement_diff.getUpdatableMovementList():
      kw = movement_diff.getMovementPropertyDict(movement)
      movement.edit(**kw)
    for movement in movement_diff.getNewMovementList():
      # This case is easy, because the context is an applied rule
      kw = movement_diff.getMovementPropertyDict(movement)
      movement = context.newContent(portal_type='Simulation Movement')
      movement.edit(**kw)
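
  # A hedged sketch of how a rule typically drives this update; in practice
  # expand() calls it with the rule's own movement generator:
  #
  #   self.updateMovementCollection(applied_rule,
  #                       movement_generator=self._getMovementGenerator())
  #   # deletable movements are removed, updatable ones edited in place,
  #   # and new 'Simulation Movement' documents are created for the rest
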
      
  # Placeholder for methods to override
  def _getMovementGenerator(self):
    """
    Return the movement generator to use in the expand process
    """
    raise NotImplementedError

  def _getMovementGeneratorContext(self):
    """
    Return the movement generator context to use for expand
    """
    raise NotImplementedError

  def _getMovementGeneratorMovementList(self):
    """
    Return the movement list to provide to the movement generator
    """
    raise NotImplementedError

  def _getDivergenceTesterList(self, exclude_quantity=True):
    """
    Return the applicable divergence testers which must 
    be used to test movement divergence.
 
    exclude_quantity -- if set to true, do not consider 
                        quantity divergence testers
    """
    raise NotImplementedError

  def _getMatchingTesterList(self):
    """
    Return the applicable divergence testers which must 
    be used to match movements and build the diff (i.e.
    not all divergence testers of the Rule)
    """
    raise NotImplementedError

  def _getQuantityTesterList(self):
    """
    Return the applicable quantity divergence testers.
    """
    raise NotImplementedError
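
  # How the three tester families are used (a summary of the logic in
  # getMovementCollectionDiff and _extendMovementCollectionDiff below):
  #
  #   matching testers   -- group prevision and decision movements together
  #   divergence testers -- decide which properties to update or compensate
  #   quantity testers   -- decide whether the total quantity still matches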

  def _newProfitAndLossMovement(self, prevision_movement):
    """
    Return a new temp simulation movement which can
    be used to represent a profit or loss in relation
    with prevision_movement

    prevision_movement -- a simulation movement
    """
    raise NotImplementedError

  def _extendMovementCollectionDiff(self, movement_collection_diff,
                                    prevision_movement, decision_movement_list):
    """
    Compares a prevision_movement to decision_movement_list which
    are part of the matching group and updates movement_collection_diff
    accordingly
    """
    raise NotImplementedError
    # Sample implementation - but it actually looks very generic
    # Case 1: movements which are not needed
    if prevision_movement is None:
      # decision_movement_list contains simulation movements which must
      # be deleted
      for decision_movement in decision_movement_list:
        if decision_movement.isDeletable(): # If not frozen and all children are deletable
          # Delete deletable
          movement_collection_diff.addDeletableMovement(decision_movement)
        else:
          # Compensate non deletable
          new_movement = decision_movement.asContext(quantity=-decision_movement.getQuantity())
          movement_collection_diff.addNewMovement(new_movement)
      return
    # Case 2: movements which are needed but may need update or compensation
    #  let us imagine the case of a forward rule
    #  i.e. what comes in must either go out or be lost
    divergence_tester_list = self._getDivergenceTesterList()
    profit_tester_list = self._getDivergenceTesterList()
    quantity_tester_list = self._getQuantityTesterList()
    compensated_quantity = 0.0
    updatable_movement = None
    not_completed_movement = None
    updatable_compensation_movement = None
    prevision_quantity = prevision_movement.getQuantity()
    decision_quantity = 0.0
    # First, we update all properties (exc. quantity) which could be divergent
    # and if we can not, we compensate them
    for decision_movement in decision_movement_list:
      decision_quantity += decision_movement.getQuantity()
      if self._isProfitAndLossMovement(decision_movement):
        if decision_movement.isFrozen():
          # Record not completed movements
          if not_completed_movement is None and not decision_movement.isCompleted():
            not_completed_movement = decision_movement
          # Frozen must be compensated          
          if not _compare(profit_tester_list, prevision_movement, decision_movement):
            new_movement = decision_movement.asContext(quantity=-decision_movement.getQuantity())
            movement_collection_diff.addNewMovement(new_movement)
            compensated_quantity += decision_movement.getQuantity()
        else:
          updatable_compensation_movement = decision_movement
          # Not Frozen can be updated
          kw = {}
          for tester in profit_tester_list:
            if tester.compare(prevision_movement, decision_movement):
              kw.update(tester.getUpdatablePropertyDict(prevision_movement, decision_movement))
          if kw:
            movement_collection_diff.addUpdatableMovement(decision_movement, kw)
      else:
        if decision_movement.isFrozen():
          # Frozen must be compensated          
          if not _compare(divergence_tester_list, prevision_movement, decision_movement):
            new_movement = decision_movement.asContext(quantity=-decision_movement.getQuantity())
            movement_collection_diff.addNewMovement(new_movement)
            compensated_quantity += decision_movement.getQuantity()
        else:
          updatable_movement = decision_movement
          # Not Frozen can be updated
          kw = {}
          for tester in divergence_tester_list:
            if tester.compare(prevision_movement, decision_movement):
              kw.update(tester.getUpdatablePropertyDict(prevision_movement, decision_movement))
          if kw:
            movement_collection_diff.addUpdatableMovement(decision_movement, kw)
    # Second, we calculate if the total quantity is the same on both sides
    # after compensation
    quantity_movement = prevision_movement.asContext(quantity=decision_quantity-compensated_quantity)
    if not _compare(quantity_tester_list, prevision_movement, quantity_movement):
      missing_quantity = prevision_quantity - decision_quantity + compensated_quantity
      if updatable_movement is not None:
        # If an updatable movement still exists, we update it
        updatable_movement.setQuantity(updatable_movement.getQuantity() + missing_quantity)
      elif not_completed_movement is not None:
        # It is still possible to add a new movement since some movements are not completed
        new_movement = prevision_movement.asContext(quantity=missing_quantity)
        movement_collection_diff.addNewMovement(new_movement)
      elif updatable_compensation_movement is not None:
        # If not, it means that all movements are completed
        # but we can still update a profit and loss movement
        updatable_compensation_movement.setQuantity(updatable_compensation_movement.getQuantity() 
                                                  + missing_quantity)
      else:
        # We must create a profit and loss movement
        new_movement = self._newProfitAndLossMovement(prevision_movement)
        movement_collection_diff.addNewMovement(new_movement)
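
    # A worked example of the quantity reconciliation above (illustrative
    # numbers only): if prevision_quantity is 10, the decision movements sum
    # to decision_quantity == 7, and a frozen divergent movement of quantity 2
    # was compensated (compensated_quantity == 2), then
    #
    #   missing_quantity = 10 - 7 + 2 = 5
    #
    # and those 5 units are either added to an updatable movement, carried by
    # a new movement (if some movements are not completed), added to an
    # updatable compensation movement, or put in a new profit and loss
    # movement as a last resort.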