Commit 88a19a6e authored by Kazuhiko Shiozaki

* add repeat_interval and batch_mode arguments, the same as in newContentFromURL().

* remove needless duplicate code.


git-svn-id: https://svn.erp5.org/repos/public/erp5/trunk@42006 20353a03-c40f-0410-a6d1-a30d3c3de9de
parent 4a033798
@@ -758,7 +758,8 @@ class Document(DocumentExtensibleTraversableMixin, XMLObject, UrlMixin,
     if method is not None: method()
 
   security.declareProtected(Permissions.ModifyPortalContent, 'updateContentFromURL')
-  def updateContentFromURL(self, repeat=MAX_REPEAT, crawling_depth=0):
+  def updateContentFromURL(self, repeat=MAX_REPEAT, crawling_depth=0,
+                           repeat_interval=1, batch_mode=True):
     """
       Download and update content of this document from its source URL.
       Implementation is handled by ContributionTool.
@@ -541,7 +541,8 @@ class ContributionTool(BaseTool):
       url_registry_tool.registerURL(url, None, context=container)
 
   security.declareProtected(Permissions.AddPortalContent, 'updateContentFromURL')
-  def updateContentFromURL(self, content, repeat=MAX_REPEAT, crawling_depth=0):
+  def updateContentFromURL(self, content, repeat=MAX_REPEAT, crawling_depth=0,
+                           repeat_interval=1, batch_mode=True):
     """
       Updates an existing content.
     """
@@ -555,20 +556,11 @@ class ContributionTool(BaseTool):
     try:
       url = content.asURL()
       file_object, filename, content_type = self._openURL(url)
-    except urllib2.HTTPError, error:
-      if repeat == 0:
-        # XXX - Call the extendBadURLList method,--NOT Implemented--
-        # IDEA: add the offending URL to a "bad_url_list"; then, during crawling,
-        # instead of just looping over the URLs extracted from the web page,
-        # run an extra check that each URL is not in the bad_url_list
-        raise
-      content.activate(at_date=DateTime() + 1).updateContentFromURL(repeat=repeat - 1)
-      return
     except urllib2.URLError, error:
-      if repeat == 0:
+      if repeat == 0 or not batch_mode:
         # XXX - Call the extendBadURLList method,--NOT Implemented--
         raise
-      content.activate(at_date=DateTime() + 1).updateContentFromURL(repeat=repeat - 1)
+      content.activate(at_date=DateTime() + repeat_interval).updateContentFromURL(repeat=repeat - 1)
       return
     content._edit(file=file_object, content_type=content_type)
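The duplicate handler could be dropped because urllib2.HTTPError is a subclass of urllib2.URLError, so the single remaining except clause still catches both failure modes. The surviving branch now re-raises immediately when the retry budget is exhausted or when batch_mode is off (so interactive callers see the error at once), and otherwise reschedules itself repeat_interval days later, since adding a number to Zope's DateTime advances it by that many days. Below is a minimal standalone sketch of that pattern, assuming MAX_REPEAT is 3 and using fetch_url and schedule_retry as hypothetical stand-ins for _openURL and the activate()-based activity queue:

import datetime

MAX_REPEAT = 3  # assumption: stands in for the MAX_REPEAT constant used in the diff

class DownloadError(Exception):
    """Stand-in for urllib2.URLError (urllib2.HTTPError subclasses it,
    which is why the merged except clause in the diff covers both)."""

def fetch_url(url):
    # Hypothetical stand-in for ContributionTool._openURL(); always fails
    # here so the retry path is exercised.
    raise DownloadError('network unreachable')

def schedule_retry(at_date, **kw):
    # Hypothetical stand-in for content.activate(at_date=...); a real ERP5
    # activity would re-run updateContentFromURL(**kw) at at_date.
    print('retry scheduled at %s with %r' % (at_date, kw))

def update_content_from_url(url, repeat=MAX_REPEAT,
                            repeat_interval=1, batch_mode=True):
    try:
        data = fetch_url(url)
    except DownloadError:
        # Interactive callers (batch_mode=False) get the error immediately;
        # batch callers retry until the repeat budget is exhausted.
        if repeat == 0 or not batch_mode:
            raise
        # Zope's DateTime() + n means "n days from now", so repeat_interval
        # is a delay in days; datetime.timedelta mimics that here.
        at_date = datetime.datetime.now() + datetime.timedelta(days=repeat_interval)
        schedule_retry(at_date, url=url, repeat=repeat - 1,
                       repeat_interval=repeat_interval, batch_mode=batch_mode)
        return
    print('downloaded %d bytes' % len(data))

update_content_from_url('http://www.example.com/doc.html')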
@@ -94,7 +94,8 @@ class IUploadable(Interface):
     passed to IConvertable.convert or to IDownloadable.index_html
     """
 
-  def updateContentFromURL(url=None, repeat=MAX_REPEAT, crawling_depth=0):
+  def updateContentFromURL(url=None, repeat=MAX_REPEAT, crawling_depth=0,
+                           repeat_interval=1, batch_mode=True):
     """
     Download and update content of this document from the specified URL.
     If no url is specified, Document which support the IUrlGetter
@@ -103,11 +104,15 @@ class IUploadable(Interface):
     url -- optional URL to download the updated content from.
            required whenever document does not implement IUrlGetter
 
-    repeat -- optional max number of retries for download
-
     crawling_depth -- optional crawling depth for documents which
                       implement ICrawlable
 
+    repeat -- optional max number of retries for download
+
+    repeat_interval -- optional interval between repeats
+
+    batch_mode -- optional specify False if used in a user interface
+
     NOTE: implementation is normally delegated to ContributionTool.
 
     XXX - it is unclear whether MAX_REPEAT should be part of signature
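Assuming document is a Document instance implementing this interface, the two new keywords would be used roughly as follows (the argument values are illustrative, not taken from the commit):

# Crawler/batch usage: a failed download is retried via the activity
# queue, each attempt scheduled repeat_interval days after the last.
document.updateContentFromURL(crawling_depth=1, repeat_interval=2)

# Interactive usage: batch_mode=False makes a download failure raise
# immediately so the UI can report it instead of queueing a silent retry.
document.updateContentFromURL(batch_mode=False)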