=+)\s.*\s(?P=hmarker))$"
+
++
+ def extract_titles(body, title_re):
+ titles = []
+ for title, _ in title_re.findall(body):
+ h = title.strip()
+ level = 1
+- while h[level:level+1] == '=':
++ while h[level:level + 1] == '=':
+ level += 1
+ title_text = h[level:-level].strip()
+ titles.append((title_text, level))
+ return titles
+
++
+ def execute(macro, text, args_re=re.compile(_args_re_pattern), title_re=re.compile(_title_re, re.M)):
+ request = macro.request
+ _ = request.getText
+@@ -63,7 +73,7 @@
+ # parse and check arguments
+ args = text and args_re.match(text)
+ if not args:
+- return (_sysmsg % ('error', _('Invalid include arguments "%s"!')) % (text, ))
++ return render_error(_('Invalid include arguments "%s"!') % (text,))
+
+ # prepare including page
+ result = []
+@@ -79,11 +89,22 @@
+ try:
+ inc_match = re.compile(inc_name)
+ except re.error:
+- pass # treat as plain page name
++ pass # treat as plain page name
+ else:
+ # Get user filtered readable page list
+ pagelist = request.rootpage.getPageList(filter=inc_match.match)
+
++ specific_page = not inc_name.startswith("^")
++
++ rev = args.group("rev")
++ if specific_page and rev is not None:
++ try:
++ rev = int(rev)
++ except (ValueError, UnicodeDecodeError):
++ rev = None
++ else:
++ rev = None
++
+ # sort and limit page list
+ pagelist.sort()
+ sort_dir = args.group('sort')
+@@ -103,36 +124,48 @@
+ for inc_name in pagelist:
+ if not request.user.may.read(inc_name):
+ continue
++
+ if inc_name in this_page._macroInclude_pagelist:
+-            result.append(u'<p><strong class="error">Recursive include of "%s" forbidden</strong></p>' % (inc_name, ))
++ result.append(render_error(_('Recursive include of "%s" forbidden!') % (inc_name,)))
+ continue
+- if skipitems:
++
++ if skipitems > 0:
+ skipitems -= 1
+ continue
++
+ fmt = macro.formatter.__class__(request, is_included=True)
+ fmt._base_depth = macro.formatter._base_depth
+- inc_page = Page(request, inc_name, formatter=fmt)
+- if not inc_page.exists():
+- continue
++
++ if specific_page and rev is not None:
++ inc_page = Page(request, inc_name, formatter=fmt, rev=rev)
++ else:
++ inc_page = Page(request, inc_name, formatter=fmt)
++
+ inc_page._macroInclude_pagelist = this_page._macroInclude_pagelist
+
++ page_exists = inc_page.exists()
++
+ # check for "from" and "to" arguments (allowing partial includes)
+- body = inc_page.get_raw_body() + '\n'
++ if page_exists:
++ body = inc_page.get_raw_body() + '\n'
++ else:
++ body = ""
++
+ from_pos = 0
+ to_pos = -1
+ from_re = args.group('from')
+- if from_re:
++ if page_exists and from_re:
+ try:
+ from_match = re.compile(from_re, re.M).search(body)
+ except re.error:
+- ##result.append("*** fe=%s ***" % e)
+ from_match = re.compile(re.escape(from_re), re.M).search(body)
+ if from_match:
+ from_pos = from_match.end()
+ else:
+- result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % from_re)
++ result.append(render_warning(_('Include: Nothing found for "%s"!') % from_re))
++
+ to_re = args.group('to')
+- if to_re:
++ if page_exists and to_re:
+ try:
+ to_match = re.compile(to_re, re.M).search(body, from_pos)
+ except re.error:
+@@ -140,7 +173,7 @@
+ if to_match:
+ to_pos = to_match.start()
+ else:
+- result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % to_re)
++ result.append(render_warning(_('Include: Nothing found for "%s"!') % to_re))
+
+ if titlesonly:
+ levelstack = []
+@@ -169,8 +202,6 @@
+
+ if from_pos or to_pos != -1:
+ inc_page.set_raw_body(body[from_pos:to_pos], modified=True)
+- ##result.append("*** f=%s t=%s ***" % (from_re, to_re))
+- ##result.append("*** f=%d t=%d ***" % (from_pos, to_pos))
+
+ if not hasattr(request, "_Include_backto"):
+ request._Include_backto = this_page.page_name
+@@ -204,9 +235,14 @@
+ strfile = StringIO.StringIO()
+ request.redirect(strfile)
+ try:
++ request.write(
++ request.formatter.div(True,
++ css_class='gwikiinclude',
++ id=id_escape(inc_name) + SEPARATOR))
+ inc_page.send_page(content_only=True,
+ omit_footnotes=True,
+ count_hit=False)
++ request.write(request.formatter.div(False))
+ result.append(strfile.getvalue())
+ finally:
+ request.redirect()
+@@ -218,17 +254,49 @@
+ else:
+ del this_page._macroInclude_pagelist[inc_name]
+
++ template = args.group("template")
++
+ # if no heading and not in print mode, then output a helper link
+ if editlink and not (level or print_mode):
+- result.extend([
+- macro.formatter.div(1, css_class="include-link"),
+- inc_page.link_to(request, '[%s]' % (inc_name, ), css_class="include-page-link"),
+- inc_page.link_to(request, '[%s]' % (_('edit'), ), css_class="include-edit-link", querystr={'action': 'edit', 'backto': request._Include_backto}),
+- macro.formatter.div(0),
+- ])
++ result.append(macro.formatter.div(1, css_class="include-link"))
++
++ if specific_page and not page_exists:
++ result.append("[%s]" % (inc_name,))
++ if template:
++ result.append(inc_page.link_to(request, '[%s]' % (_('create'), ), css_class="include-edit-link", querystr={'action': 'edit', 'backto': request._Include_backto, 'template': template}))
++ else:
++ out = wr('\n')
++ result.append(out)
++ elif specific_page and rev is not None:
++ result.extend([
++ inc_page.link_to(request, '[%s revision %d]' % (inc_name, rev), querystr={"action": "recall", "rev": str(rev)}, css_class="include-page-link"),
++ inc_page.link_to(request, '[%s]' % (_('edit current version'), ), css_class="include-edit-link", querystr={'action': 'edit', 'backto': request._Include_backto}),
++ ])
++ else:
++ result.extend([
++ inc_page.link_to(request, '[%s]' % (inc_name, ), css_class="include-page-link"),
++ inc_page.link_to(request, '[%s]' % (_('edit'), ), css_class="include-edit-link", querystr={'action': 'edit', 'backto': request._Include_backto}),
++ ])
++
++ result.append(macro.formatter.div(0))
++
+ # XXX page.link_to is wrong now, it escapes the edit_icon html as it escapes normal text
+
+ # return include text
+ return ''.join(result)
+-
+-# vim:ts=4:sw=4:et
+--- ./MoinMoin/packages.py.orig 2014-10-17 22:45:33.000000000 +0300
++++ ./MoinMoin/packages.py 2014-10-20 11:53:32.873284965 +0300
+@@ -529,6 +529,12 @@
+
+ def main():
+ args = sys.argv
++
++ myusername=''
++ if (len(args) > 1) and (args[1] == '-u'):
++ args.pop(1)
++ myusername = args.pop(1)
++
+ if len(args)-1 not in (2, 3) or args[1] not in ('l', 'i'):
+ print >> sys.stderr, """MoinMoin Package Installer v%(version)i
+
+@@ -555,6 +561,8 @@
+ # Setup MoinMoin environment
+ from MoinMoin.web.contexts import ScriptContext
+ request = ScriptContext(url=request_url)
++ if myusername:
++ request.user = user.User(request, auth_username=myusername)
+
+ package = ZipPackage(request, packagefile)
+ if not package.isPackage():
+--- ./MoinMoin/PageEditor.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/PageEditor.py 2014-10-20 11:53:32.880284974 +0300
+@@ -17,7 +17,7 @@
+ """
+
+ import os, time, codecs, errno
+-
++import unicodedata
+
+ from MoinMoin import caching, config, wikiutil, error
+ from MoinMoin.Page import Page
+@@ -54,6 +54,17 @@
+
+ """
+
++#############################################################################
++### Filtering unprintable characters from page content
++#############################################################################
++
++ALLOWED_CONTROL_CHARS = '\t\n\r'
++
++def filter_unprintable(text):
++ return ''.join(x for x in text
++ if (not unicodedata.category(x) in ['Cc', 'Cn', 'Cs']
++ or x in ALLOWED_CONTROL_CHARS))
++
+
+ #############################################################################
+ ### PageEditor - Edit pages
+@@ -1066,6 +1077,26 @@
+ """
+ request = self.request
+ _ = self._
++
++ # Depending on the configuration, filter unprintable
++ # characters from text content or warn of them. Unprintable
++ # characters are often undesired, and result from
++ # eg. copy-pasting text from productivity tools.
++ _handle_unprintable = getattr(self.request.cfg,
++ 'gwiki_handle_unprintable', '')
++ if _handle_unprintable in ['warn', 'filter']:
++ _newtext = filter_unprintable(newtext)
++ if _handle_unprintable == 'filter':
++ newtext = _newtext
++ elif _newtext != newtext:
++ _pos = 0
++            for i in range(len(_newtext)):
++ _pos = i
++ if _newtext[i] != newtext[i]:
++ break
++ raise self.SaveError(_("Bad character in text at position %s.")%
++ (_pos))
++
+ self._save_draft(newtext, rev, **kw)
+ action = kw.get('action', 'SAVE')
+ deleted = kw.get('deleted', False)
+--- ./MoinMoin/Page.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/Page.py 2016-02-05 20:20:23.598923780 +0200
+@@ -108,8 +108,7 @@ class ItemCache:
+ (for 'meta') or the complete cache ('pagelists').
+ @param request: the request object
+ """
+- from MoinMoin.logfile import editlog
+- elog = editlog.EditLog(request)
++ elog = request.editlog
+ old_pos = self.log_pos
+ new_pos, items = elog.news(old_pos)
+ if items:
+@@ -626,7 +625,12 @@ class Page(object):
+ """
+ return self.exists(domain='standard', includeDeleted=includeDeleted)
+
+- def exists(self, rev=0, domain=None, includeDeleted=False):
++ def _in_backend(self):
++ if self.page_name in self.request.graphdata:
++ return self.request.graphdata.is_saved(self.page_name)
++ return 0
++
++ def exists(self, rev=0, domain=None, includeDeleted=False, includeBackend=True):
+ """ Does this page exist?
+
+ This is the lower level method for checking page existence. Use
+@@ -656,6 +660,12 @@ class Page(object):
+ return True
+ return False
+ else:
++ # If it's in the backend, it exists
++ if self._in_backend():
++ return True
++ elif includeBackend:
++ return False
++
+ # Look for non-deleted pages only, using get_rev
+ if not rev and self.rev:
+ rev = self.rev
+@@ -789,13 +799,20 @@ class Page(object):
+ @rtype: string
+ @return: formatted link
+ """
++ # Optimising closing of links
++ if kw.get('on', None) == 0:
++ formatter=getattr(self, 'formatter', None)
++ if formatter:
++ return formatter.url(0, '', None)
++
+ if not text:
+ text = self.split_title()
+ text = wikiutil.escape(text)
+
+- # Add css class for non existing page
+- if not self.exists():
+- kw['css_class'] = 'nonexistent'
++ # Add css class for non existing page (if not done by formatter.pagelink)
++ if not kw.has_key('css_class'):
++ if not self.exists():
++ kw['css_class'] = 'nonexistent'
+
+ attachment_indicator = kw.get('attachment_indicator')
+ if attachment_indicator is None:
+@@ -1826,7 +1843,7 @@ class RootPage(Page):
+
+ return underlay, path
+
+- def getPageList(self, user=None, exists=1, filter=None, include_underlay=True, return_objects=False):
++ def getPageList(self, user=None, exists=1, filter=None, include_underlay=True, return_objects=False, includeBackend=True):
+ """ List user readable pages under current page
+
+ Currently only request.rootpage is used to list pages, but if we
+@@ -1895,7 +1912,7 @@ class RootPage(Page):
+ continue
+
+ # Filter deleted pages
+- if exists and not page.exists():
++ if exists and not page.exists(includeBackend=includeBackend):
+ continue
+
+ # Filter out page user may not read.
+--- ./MoinMoin/parser/text_moin_wiki.py.orig 2014-10-17 22:45:33.000000000 +0300
++++ ./MoinMoin/parser/text_moin_wiki.py 2015-04-29 14:40:41.284018265 +0300
+@@ -728,8 +728,12 @@ class Parser:
+ if scheme == 'attachment':
+ mt = wikiutil.MimeType(filename=url)
+ if mt.major == 'text':
+- desc = self._transclude_description(desc, url)
+- return self.formatter.attachment_inlined(url, desc)
++ if mt.minor == 'csv':
++ desc = self._transclude_description(desc, url)
++ return self.formatter.attachment_inlined(url, desc, format_args=params)
++ else:
++ desc = self._transclude_description(desc, url)
++ return self.formatter.attachment_inlined(url, desc)
+ # destinguishs if browser need a plugin in place
+ elif mt.major == 'image' and mt.minor in config.browser_supported_images:
+ desc = self._transclude_description(desc, url)
+@@ -873,9 +877,10 @@ class Parser:
+ tag_attrs, query_args = self._get_params(params,
+ tag_attrs={},
+ acceptable_attrs=acceptable_attrs)
+- return (self.formatter.pagelink(1, abs_page_name, anchor=anchor, querystr=query_args, **tag_attrs) +
++ page = Page(self.request, abs_page_name, formatter=self.formatter)
++ return (self.formatter.pagelink(1, abs_page_name, page=page, anchor=anchor, querystr=query_args, **tag_attrs) +
+ self._link_description(desc, target, page_name_and_anchor) +
+- self.formatter.pagelink(0, abs_page_name))
++ self.formatter.pagelink(0, abs_page_name, page=page))
+ else: # interwiki link
+ page_name, anchor = wikiutil.split_anchor(page_name)
+ tag_attrs, query_args = self._get_params(params,
+--- ./MoinMoin/support/werkzeug/formparser.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/support/werkzeug/formparser.py 2014-10-20 11:53:32.882284972 +0300
+@@ -33,13 +33,50 @@
+ #: for multipart messages.
+ _supported_multipart_encodings = frozenset(['base64', 'quoted-printable'])
+
++class SmartStream(object):
++ """A file-like stream that dynamically switches from memory-based
++ to file-based storage when the total amount of data is larger
++ than 500 kilobytes."""
++
++ def __init__(self, threshold=1024*500):
++ self._is_file = False
++ self._threshold = threshold
++ self._stream = StringIO()
++
++ def __getattr__(self, key):
++ return getattr(self._stream, key)
++
++ def _check(self):
++ if self._is_file:
++ return
++
++ pos = self._stream.tell()
++ if pos <= self._threshold:
++ return
++
++ stream = TemporaryFile('wb+')
++ stream.write(self._stream.getvalue())
++ stream.flush()
++ stream.seek(pos)
++
++ self._stream.close()
++ self._stream = stream
++ self._is_file = True
++
++ def write(self, *args, **kw):
++ result = self._stream.write(*args, **kw)
++ self._check()
++ return result
++
++ def writelines(self, *args, **kw):
++ result = self._stream.writelines(*args, **kw)
++ self._check()
++ return result
+
+ def default_stream_factory(total_content_length, filename, content_type,
+ content_length=None):
+ """The stream factory that is used per default."""
+- if total_content_length > 1024 * 500:
+- return TemporaryFile('wb+')
+- return StringIO()
++ return SmartStream()
+
+
+ def parse_form_data(environ, stream_factory=None, charset='utf-8',
+--- ./MoinMoin/theme/__init__.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/theme/__init__.py 2014-10-20 11:53:32.884284973 +0300
+@@ -48,6 +48,7 @@
+ 'diff': (_("Diffs"), "moin-diff.png", 15, 11),
+ 'info': (_("Info"), "moin-info.png", 12, 11),
+ 'edit': (_("Edit"), "moin-edit.png", 12, 12),
++ 'formedit': (_("FormEdit"), "moin-news.png", 12, 12),
+ 'unsubscribe': (_("Unsubscribe"), "moin-unsubscribe.png", 14, 10),
+ 'subscribe': (_("Subscribe"), "moin-subscribe.png", 14, 10),
+ 'raw': (_("Raw"), "moin-raw.png", 12, 13),
+--- ./MoinMoin/theme/modernized.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/theme/modernized.py 2014-10-20 11:53:32.885284974 +0300
+@@ -20,6 +20,8 @@
+ # FileAttach
+ 'attach': ("%(attach_count)s", "moin-attach.png", 16, 16),
+ 'info': ("[INFO]", "moin-info.png", 16, 16),
++ 'edit': (_("Edit"), "moin-edit.png", 12, 12),
++ 'formedit': (_("FormEdit"), "moin-news.png", 12, 12),
+ 'attachimg': (_("[ATTACH]"), "attach.png", 32, 32),
+ # RecentChanges
+ 'rss': (_("[RSS]"), "moin-rss.png", 16, 16),
+--- ./MoinMoin/user.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/user.py 2014-10-20 11:53:32.887284976 +0300
+@@ -23,6 +23,9 @@
+ import os, time, codecs, base64
+ from copy import deepcopy
+ import md5crypt
++import errno
++import error
++import uuid
+
+ try:
+ import crypt
+@@ -36,13 +39,15 @@
+
+ from MoinMoin import config, caching, wikiutil, i18n, events
+ from werkzeug.security import safe_str_cmp as safe_str_equal
+-from MoinMoin.util import timefuncs, random_string
++from MoinMoin.util import timefuncs, random_string, filesys
+ from MoinMoin.wikiutil import url_quote_plus
+
+ # for efficient lookup -> userid, we keep an index of this in the cache.
+ # the attribute names in here should be uniquely identifying a user.
+ CACHED_USER_ATTRS = ['name', 'email', 'jid', 'openids', ]
+
++class SaveError(error.Error):
++ pass
+
+ def getUserList(request):
+ """ Get a list of all (numerical) user IDs.
+@@ -288,6 +293,10 @@
+ hash = hash_new('sha1', pwd)
+ hash.update(salt)
+ return '{SSHA}' + base64.encodestring(hash.digest() + salt).rstrip()
++ elif scheme == '{SHA}':
++ pwd = pwd.encode('utf-8')
++ hash = hash_new('sha1', pwd)
++ return '{SHA}' + base64.encodestring(hash.digest()).rstrip()
+ else:
+ # should never happen as we check the value of cfg.password_scheme
+ raise NotImplementedError
+@@ -496,7 +505,7 @@
+ self.subscribed_pages = self._cfg.subscribed_pages_default
+ self.email_subscribed_events = self._cfg.email_subscribed_events_default
+ self.jabber_subscribed_events = self._cfg.jabber_subscribed_events_default
+- self.theme_name = self._cfg.theme_default
++ self.theme_name = ''
+ self.editor_default = self._cfg.editor_default
+ self.editor_ui = self._cfg.editor_ui
+ self.last_saved = str(time.time())
+@@ -562,6 +571,10 @@
+ """
+ return os.path.join(self._cfg.user_dir, self.id or "...NONE...")
+
++ # Support for administrative scripts and tasks
++ def getFilename(self):
++ return self.__filename()
++
+ def exists(self):
+ """ Do we have a user account for this user?
+
+@@ -778,25 +791,48 @@
+ # !!! should write to a temp file here to avoid race conditions,
+ # or even better, use locking
+
+- data = codecs.open(self.__filename(), "w", config.charset)
+- data.write("# Data saved '%s' for id '%s'\n" % (
+- time.strftime(self._cfg.datetime_fmt, time.localtime(time.time())),
+- self.id))
+- attrs = self.persistent_items()
+- attrs.sort()
+- for key, value in attrs:
+- # Encode list values
+- if isinstance(value, list):
+- key += '[]'
+- value = encodeList(value)
+- # Encode dict values
+- elif isinstance(value, dict):
+- key += '{}'
+- value = encodeDict(value)
+- line = u"%s=%s" % (key, unicode(value))
+- line = line.replace('\n', ' ').replace('\r', ' ') # no lineseps
+- data.write(line + '\n')
+- data.close()
++ temp = file(os.path.join(user_dir, 'temp-' + uuid.uuid4().get_hex()), 'w')
++ try:
++ data = codecs.getwriter(config.charset)(temp)
++ data.write("# Data saved '%s' for id '%s'\n" % (
++ time.strftime(self._cfg.datetime_fmt,
++ time.localtime(time.time())),
++ self.id))
++ attrs = self.persistent_items()
++ attrs.sort()
++ for key, value in attrs:
++ # Encode list values
++ if isinstance(value, list):
++ key += '[]'
++ value = encodeList(value)
++ # Encode dict values
++ elif isinstance(value, dict):
++ key += '{}'
++ value = encodeDict(value)
++ line = u"%s=%s" % (key, unicode(value))
++ line = line.replace('\n', ' ').replace('\r', ' ') # no lineseps
++ data.write(line + '\n')
++
++ # atomically put it in place (except on windows)
++ filesys.rename(temp.name, self.__filename())
++ except IOError as err:
++ _ = self._request.getText
++ # throw a nicer exception
++ if err.errno == errno.ENOSPC:
++ raise SaveError(
++ _("Cannot save user %s, no storage space left.") %
++ self.name)
++ else:
++ raise SaveError(
++ _("An I/O error occurred while saving user %s (errno=%d)")\
++ % (self.name, err.errno))
++ finally:
++ try:
++ os.remove(temp.name)
++ except:
++ pass # we don't care for errors in the os.remove
++ finally:
++ temp.close()
+
+ if not self.disabled:
+ self.valid = 1
+--- ./MoinMoin/util/filesys.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/util/filesys.py 2014-10-20 11:53:32.888284976 +0300
+@@ -217,7 +217,6 @@
+ """
+ names = os.listdir(src)
+ os.mkdir(dst)
+- copystat(src, dst)
+ errors = []
+ for name in names:
+ srcname = os.path.join(src, name)
+--- ./MoinMoin/web/contexts.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/web/contexts.py 2014-10-20 11:53:32.889284977 +0300
+@@ -218,6 +218,12 @@
+
+ # proxy further attribute lookups to the underlying request first
+ def __getattr__(self, name):
++ if name == 'editlog':
++ if "editlog" not in self.__dict__:
++ from MoinMoin.logfile import editlog
++ self.request.rootpage = self.rootpage
++ self.editlog = editlog.EditLog(self.request)
++ return self.editlog
+ try:
+ return getattr(self.request, name)
+ except AttributeError, e:
+--- ./MoinMoin/xmlrpc/__init__.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/xmlrpc/__init__.py 2014-10-20 11:53:32.891284977 +0300
+@@ -38,6 +38,32 @@
+ from MoinMoin.action import AttachFile
+ from MoinMoin import caching
+
++def is_login_required(request):
++ login_required = True
++ env = request.environ
++
++ from MoinMoin.auth import GivenAuth
++ from MoinMoin.auth.sslclientcert import SSLClientCertAuth
++
++ # Get all the authentication methods used in the config
++ auth = getattr(request.cfg, 'auth', [])
++
++ for method in auth:
++ # If we're using HTTP auth, and the server has authenticated
++ # the user successfully, do not require another login
++ if isinstance(method, GivenAuth):
++ if env.get('REMOTE_USER', ''):
++ login_required = False
++ break
++ # If we're using SSL client certificate auth, and the server
++ # has authenticated the user successfully, do not require
++ # another login
++ elif isinstance(method, SSLClientCertAuth):
++ if env.get('SSL_CLIENT_VERIFY', 'FAILURE') == 'SUCCESS':
++ login_required = False
++ break
++
++ return login_required
+
+ logging_tearline = '- XMLRPC %s ' + '-' * 40
+
+@@ -132,7 +158,12 @@
+ else:
+ # overwrite any user there might be, if you need a valid user for
+ # xmlrpc, you have to use multicall and getAuthToken / applyAuthToken
+- request.user = user.User(request, auth_method='xmlrpc:invalid')
++ login_required = is_login_required(self.request)
++ if (not self.request.user or
++ not self.request.user.valid or
++ login_required):
++ self.request.user = user.User(self.request,
++ auth_method='xmlrpc:invalid')
+
+ data = request.read()
+
+@@ -767,7 +798,14 @@
+ request.session = request.cfg.session_service.get_session(request)
+
+ u = auth.setup_from_session(request, request.session)
+- u = auth.handle_login(request, u, username=username, password=password)
++
++ login_required = is_login_required(request)
++
++ if login_required:
++ u = auth.handle_login(request, u, username=username,
++ password=password)
++ else:
++ u = request.user
+
+ if u and u.valid:
+ request.user = u
+--- ./MoinMoin/action/newaccount.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/action/newaccount.py 2014-10-20 12:06:36.348542933 +0300
+@@ -31,7 +31,8 @@
+
+ # Require non-empty name
+ try:
+- theuser.name = form['name']
++ name = wikiutil.clean_input(form.get('email', ['']))
++ theuser.name = name.strip()
+ except KeyError:
+ return _("Empty user name. Please enter a user name.")
+
+@@ -104,12 +105,9 @@
+
+ row = html.TR()
+ tbl.append(row)
+- row.append(html.TD().append(html.STRONG().append(
+- html.Text(_("Name")))))
+- cell = html.TD()
+- row.append(cell)
+- cell.append(html.INPUT(type="text", size="36", name="name"))
+- cell.append(html.Text(' ' + _("(Use FirstnameLastname)")))
++ row.append(html.TD().append(html.STRONG().append(html.Text(_("Email")))))
++ row.append(html.TD().append(html.INPUT(type="text", size="36",
++ name="email")))
+
+ row = html.TR()
+ tbl.append(row)
+@@ -125,12 +123,6 @@
+ row.append(html.TD().append(html.INPUT(type="password", size="36",
+ name="password2")))
+
+- row = html.TR()
+- tbl.append(row)
+- row.append(html.TD().append(html.STRONG().append(html.Text(_("Email")))))
+- row.append(html.TD().append(html.INPUT(type="text", size="36",
+- name="email")))
+-
+ textcha = TextCha(request)
+ if textcha.is_enabled():
+ row = html.TR()
+@@ -159,7 +151,7 @@
+ found = True
+ break
+
+- if not found:
++ if not found and False:
+ # we will not have linked, so forbid access
+ request.makeForbidden(403, 'No MoinAuth in auth list')
+ return
+--- ./MoinMoin/action/recoverpass.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/action/recoverpass.py 2014-10-20 12:06:36.379542936 +0300
+@@ -70,15 +70,9 @@
+
+ row = html.TR()
+ tbl.append(row)
+- row.append(html.TD().append(html.STRONG().append(html.Text(_("Username")))))
+- row.append(html.TD().append(html.INPUT(type="text", size="36",
+- name="name")))
+-
+- row = html.TR()
+- tbl.append(row)
+ row.append(html.TD().append(html.STRONG().append(html.Text(_("Email")))))
+ row.append(html.TD().append(html.INPUT(type="text", size="36",
+- name="email")))
++ name="name")))
+
+ row = html.TR()
+ tbl.append(row)
+@@ -111,7 +105,7 @@
+
+ row = html.TR()
+ tbl.append(row)
+- row.append(html.TD().append(html.STRONG().append(html.Text(_("Username")))))
++ row.append(html.TD().append(html.STRONG().append(html.Text(_("Email")))))
+ value = name or ''
+ row.append(html.TD().append(html.INPUT(type='text', size="36",
+ name="name", value=value)))
+--- ./MoinMoin/config/multiconfig.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/config/multiconfig.py 2016-08-15 22:34:12.813289705 +0300
+@@ -12,6 +12,7 @@ import re
+ import os
+ import sys
+ import time
++import imp
+
+ from MoinMoin import log
+ logging = log.getLogger(__name__)
+@@ -34,6 +35,25 @@ _farmconfig_mtime = None
+ _config_cache = {}
+
+
++def _findConfigModule(name):
++ """ Try to find config module or raise ImportError
++
++ Return first module that is a single file, skipping packages with
++ colliding names.
++ """
++ for path in sys.path:
++ if not path:
++ continue
++ try:
++ fp, pathname, description = imp.find_module(name, [path])
++ if not fp:
++ continue
++ return fp, pathname, description
++ except ImportError:
++ continue
++ raise ImportError('No module named %s' % name)
++
++
+ def _importConfigModule(name):
+ """ Import and return configuration module and its modification time
+
+@@ -45,7 +65,8 @@ def _importConfigModule(name):
+ @return: module, modification time
+ """
+ try:
+- module = __import__(name, globals(), {})
++ fp, pathname, description = _findConfigModule(name)
++ module = imp.load_module(name, fp, pathname, description)
+ mtime = os.path.getmtime(module.__file__)
+ except ImportError:
+ raise
+--- ./MoinMoin/macro/RecentChanges.py.orig 2014-10-17 22:45:33.000000000 +0300
++++ ./MoinMoin/macro/RecentChanges.py 2014-10-20 12:06:36.381542941 +0300
+@@ -110,8 +110,12 @@
+ if request.cfg.show_names:
+ if len(lines) > 1:
+ counters = {}
++ editorcache = {}
+ for idx in range(len(lines)):
+- name = lines[idx].getEditor(request)
++ editorkey = lines[idx].addr, lines[idx].hostname, lines[idx].userid
++ if editorkey not in editorcache:
++                    editorcache[editorkey] = lines[idx].getEditor(request)
++ name = editorcache[editorkey]
+ if not name in counters:
+ counters[name] = []
+ counters[name].append(idx+1)
+@@ -228,6 +232,9 @@
+ output.append(request.theme.recentchanges_footer(d))
+ return ''.join(output)
+
++def filter_pages(request, pages):
++ readable = request.user.may.read
++ return filter(lambda lines: readable(lines[0].pagename), pages)
+
+ def macro_RecentChanges(macro, abandoned=False):
+ # handle abandoned keyword
+@@ -291,37 +298,36 @@
+ day_count = 0
+
+ for line in log.reverse():
+-
+- if not request.user.may.read(line.pagename):
+- continue
+-
+ line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
+ day = line.time_tuple[0:3]
+ hilite = line.ed_time_usecs > (bookmark_usecs or line.ed_time_usecs)
+
+- if ((this_day != day or (not hilite and not max_days))) and len(pages) > 0:
++ if this_day != day or (not hilite and not max_days):
+ # new day or bookmark reached: print out stuff
+ this_day = day
+ for p in pages:
+ ignore_pages[p] = None
+- pages = pages.values()
++ pages = filter_pages(request, pages.values())
+ pages.sort(cmp_lines)
+ pages.reverse()
+
+- if request.user.valid:
+- bmtime = pages[0][0].ed_time_usecs
+- d['bookmark_link_html'] = page.link_to(request, _("Set bookmark"), querystr={'action': 'bookmark', 'time': '%d' % bmtime}, rel='nofollow')
+- else:
+- d['bookmark_link_html'] = None
+- d['date'] = request.user.getFormattedDate(wikiutil.version2timestamp(pages[0][0].ed_time_usecs))
+- output.append(request.theme.recentchanges_daybreak(d))
++ if len(pages) > 0:
++ if request.user.valid:
++ bmtime = pages[0][0].ed_time_usecs
++ d['bookmark_link_html'] = page.link_to(request, _("Set bookmark"), querystr={'action': 'bookmark', 'time': '%d' % bmtime}, rel='nofollow')
++ else:
++ d['bookmark_link_html'] = None
++ d['date'] = request.user.getFormattedDate(wikiutil.version2timestamp(pages[0][0].ed_time_usecs))
++ output.append(request.theme.recentchanges_daybreak(d))
++
++ for p in pages:
++ output.append(format_page_edits(macro, p, bookmark_usecs))
++
++ day_count += 1
++ if max_days and (day_count >= max_days):
++ break
+
+- for p in pages:
+- output.append(format_page_edits(macro, p, bookmark_usecs))
+ pages = {}
+- day_count += 1
+- if max_days and (day_count >= max_days):
+- break
+
+ elif this_day != day:
+ # new day but no changes
+@@ -340,16 +346,16 @@
+ else:
+ pages[line.pagename] = [line]
+ else:
+- if len(pages) > 0:
+- # end of loop reached: print out stuff
+- # XXX duplicated code from above
+- # but above does not trigger if we have the first day in wiki history
+- for p in pages:
+- ignore_pages[p] = None
+- pages = pages.values()
+- pages.sort(cmp_lines)
+- pages.reverse()
++ # end of loop reached: print out stuff
++ # XXX duplicated code from above
++ # but above does not trigger if we have the first day in wiki history
++ for p in pages:
++ ignore_pages[p] = None
++ pages = filter_pages(request, pages.values())
++ pages.sort(cmp_lines)
++ pages.reverse()
+
++ if len(pages) > 0:
+ if request.user.valid:
+ bmtime = pages[0][0].ed_time_usecs
+ d['bookmark_link_html'] = page.link_to(request, _("Set bookmark"), querystr={'action': 'bookmark', 'time': '%d' % bmtime}, rel='nofollow')
+--- ./MoinMoin/wikiutil.py.orig 2014-10-17 22:45:32.000000000 +0300
++++ ./MoinMoin/wikiutil.py 2014-10-20 12:06:36.382542942 +0300
+@@ -471,15 +471,15 @@
+ generate_file_list(request)
+
+ try:
+- _interwiki_list = request.cfg.cache.interwiki_list
+- old_mtime = request.cfg.cache.interwiki_mtime
+- if request.cfg.cache.interwiki_ts + (1*60) < now: # 1 minutes caching time
++ _interwiki_list = request.cfg.cache.interwiki_list[request.user.id]
++ old_mtime = request.cfg.cache.interwiki_mtime[request.user.id]
++ if request.cfg.cache.interwiki_ts[request.user.id] + (1*60) < now: # 1 minutes caching time
+ max_mtime = get_max_mtime(request.cfg.shared_intermap_files, Page(request, INTERWIKI_PAGE))
+ if max_mtime > old_mtime:
+ raise AttributeError # refresh cache
+ else:
+- request.cfg.cache.interwiki_ts = now
+- except AttributeError:
++ request.cfg.cache.interwiki_ts[request.user.id] = now
++ except (AttributeError, KeyError):
+ _interwiki_list = {}
+ lines = []
+
+@@ -509,10 +509,28 @@
+ if request.cfg.interwikiname:
+ _interwiki_list[request.cfg.interwikiname] = request.script_root + '/'
+
++ # collab list
++ if hasattr(request.cfg, 'collab_basedir'):
++ from collabbackend import listCollabs
++ user = request.user.name
++ active = request.cfg.interwikiname
++ path = request.cfg.collab_basedir
++ baseurl = request.cfg.collab_baseurl
++ collablist = listCollabs(baseurl, user, path, active)
++
++ for collab in collablist:
++ _interwiki_list[collab[0]] = collab[3]
++
+ # save for later
+- request.cfg.cache.interwiki_list = _interwiki_list
+- request.cfg.cache.interwiki_ts = now
+- request.cfg.cache.interwiki_mtime = get_max_mtime(request.cfg.shared_intermap_files, Page(request, INTERWIKI_PAGE))
++ if not getattr(request.cfg.cache, 'interwiki_list', None):
++ request.cfg.cache.interwiki_list = dict()
++ if not getattr(request.cfg.cache, 'interwiki_ts', None):
++ request.cfg.cache.interwiki_ts = dict()
++ if not getattr(request.cfg.cache, 'interwiki_mtime', None):
++ request.cfg.cache.interwiki_mtime = dict()
++ request.cfg.cache.interwiki_list[request.user.id] = _interwiki_list
++ request.cfg.cache.interwiki_ts[request.user.id] = now
++ request.cfg.cache.interwiki_mtime[request.user.id] = get_max_mtime(request.cfg.shared_intermap_files, Page(request, INTERWIKI_PAGE))
+
+ return _interwiki_list
+
+@@ -2269,7 +2287,7 @@
+ """
+ # note: filenames containing ../ (or ..\) are made safe by replacing
+ # the / (or the \). the .. will be kept, but is harmless then.
+- basename = re.sub('[\x00-\x1f:/\\\\<>"*?%|]', '_', basename)
++ basename = re.sub('[\x00-\x1f:/\\\\<>"*?|]', '_', basename)
+ return basename
+
+
diff --git a/roles/collab/meta/main.yml b/roles/collab/meta/main.yml
new file mode 100644
index 0000000..56c9527
--- /dev/null
+++ b/roles/collab/meta/main.yml
@@ -0,0 +1,4 @@
+---
+dependencies:
+ - {role: epel-repo}
+ - {role: apache}
diff --git a/roles/collab/tasks/.main.yml.swp b/roles/collab/tasks/.main.yml.swp
new file mode 100644
index 0000000..37e4a52
Binary files /dev/null and b/roles/collab/tasks/.main.yml.swp differ
diff --git a/roles/collab/tasks/main.yml b/roles/collab/tasks/main.yml
new file mode 100644
index 0000000..2d021d1
--- /dev/null
+++ b/roles/collab/tasks/main.yml
@@ -0,0 +1,221 @@
+---
+- name: install dependencies
+ package:
+ name: "{{ item }}"
+ state: installed
+ with_items:
+ - git
+ - graphviz-python
+ - m2crypto
+ - mod_wsgi
+ - patch
+ - python2-pip
+ - python2-setuptools
+
+- name: download moin package
+ get_url:
+ url: "https://static.moinmo.in/files/moin-{{ moin_version }}.tar.gz"
+ dest: /usr/local/src
+ checksum: sha1:bead31f53152395aa93c31dc3e0a8a417be39ccd
+
+- name: extract moin package
+ unarchive:
+ src: "/usr/local/src/moin-{{ moin_version }}.tar.gz"
+ dest: /usr/local/src
+ owner: root
+ group: "{{ ansible_wheel }}"
+ creates: "/usr/local/src/moin-{{ moin_version }}"
+ remote_src: true
+
+- name: copy graphingwiki packages
+ git:
+ dest: "/usr/local/src/{{ item }}"
+ repo: "https://github.com/graphingwiki/{{ item }}.git"
+ with_items:
+ - graphingwiki
+ - collabbackend
+
+- name: patch moin source
+ patch:
+ src: "moin-{{ moin_version }}.patch"
+ basedir: "/usr/local/src/moin-{{ moin_version }}"
+
+- name: copy moin htdocs
+ command: "cp -a /usr/local/src/moin-{{ moin_version }}/MoinMoin/web/static/htdocs /usr/local/src/moin-{{ moin_version }}/wiki"
+ args:
+ creates: "/usr/local/src/moin-{{ moin_version }}/wiki/htdocs"
+
+- name: install graphingwiki packages
+ pip:
+ name: [/usr/local/src/graphingwiki, /usr/local/src/collabbackend]
+ umask: "0022"
+ extra_args: --egg --no-index
+
+- name: install moin
+ pip:
+ name: "/usr/local/src/moin-{{ moin_version }}"
+ umask: "0022"
+ extra_args: --no-index
+
+- name: create group collab
+ group:
+ name: collab
+ gid: 1003
+
+- name: create user collab
+ user:
+ name: collab
+ comment: Service Collab
+ uid: 1003
+ group: collab
+ home: /var/lib/collab
+ shell: /sbin/nologin
+
+- name: create .profile for user collab
+ copy:
+ content: "umask 077\n"
+ dest: /var/lib/collab/.profile
+ mode: 0440
+ owner: collab
+ group: collab
+
+- name: create config directories
+ file:
+ path: "{{ item }}"
+ mode: 0755
+ owner: root
+ group: "{{ ansible_wheel }}"
+ state: directory
+ with_items:
+ - /etc/local
+ - /etc/local/collab
+
+- name: create collab.ini
+ copy:
+ src: collab.ini
+ dest: /etc/local/collab/collab.ini
+ mode: 0644
+ owner: root
+ group: "{{ ansible_wheel }}"
+
+- name: set selinux contexts from data directory
+ sefcontext:
+ path: /export/wikis(/.*)?
+ setype: httpd_sys_rw_content_t
+- name: create data directory
+ file:
+ path: /export/wikis
+ mode: 0755
+ owner: root
+ group: root
+ seuser: _default
+ setype: _default
+ state: directory
+
+- name: link data directory
+ file:
+ src: /export/wikis
+ dest: /srv/wikis
+ state: link
+
+- name: create data directories
+ file:
+ state: directory
+ path: "{{ item }}"
+ mode: 02770
+ owner: collab
+ group: collab
+ with_items:
+ - "/srv/wikis/collab"
+ - "/srv/wikis/collab/archive"
+ - "/srv/wikis/collab/cache"
+ - "/srv/wikis/collab/config"
+ - "/srv/wikis/collab/htdocs"
+ - "/srv/wikis/collab/log"
+ - "/srv/wikis/collab/run"
+ - "/srv/wikis/collab/underlay"
+ - "/srv/wikis/collab/user"
+ - "/srv/wikis/collab/wikis"
+
+- name: create tmpfs mount for cache
+ mount:
+ state: mounted
+ path: "/export/wikis/collab/cache"
+ src: none
+ fstype: tmpfs
+ opts: "uid=collab,gid=collab,mode=2770,context=\"{{ tmpfs_context }}\""
+
+- name: install htdocs/.htaccess
+ copy:
+ src: collab-htaccess
+    dest: /srv/wikis/collab/htdocs/.htaccess
+ mode: 0660
+ owner: collab
+ group: collab
+
+- name: copy configs from collabbackend archive
+ copy:
+ src: "/usr/local/src/collabbackend/config/{{ item }}"
+ dest: /srv/wikis/collab/config/{{ item }}
+ mode: 0660
+ owner: collab
+ group: collab
+ remote_src: true
+ with_items:
+ - collabfarm.py
+ - intermap.txt
+ - logging.conf
+
+- name: extract CollabBase.zip from collabbackend archive
+ copy:
+ src: /usr/local/src/collabbackend/packages/CollabBase.zip
+ dest: /var/lib/collab/CollabBase.zip
+ mode: 0660
+ owner: collab
+ group: collab
+ remote_src: true
+
+- name: initialize collab
+ script: collab-init.sh
+ args:
+ creates: /srv/wikis/collab/wikis/collab
+
+- name: add collab-htaccess cron job
+ cron:
+ name: collab-htaccess
+ user: collab
+ job: /usr/bin/collab-htaccess
+
+- name: link collab to apache htdocs
+ file:
+ src: /srv/wikis/collab/htdocs
+ dest: "/srv/web/{{ inventory_hostname }}/collab"
+ owner: root
+ group: "{{ ansible_wheel }}"
+ state: link
+ follow: false
+
+- name: link moin static to apache htdocs
+ file:
+ src: /usr/share/moin/htdocs
+ dest: "/srv/web/{{ inventory_hostname }}/moin_static"
+ owner: root
+ group: "{{ ansible_wheel }}"
+ state: link
+ follow: false
+
+- name: add apache to collab group
+ user:
+ name: apache
+ groups: collab
+ append: yes
+ notify: restart apache
+
+- name: create apache config
+ template:
+ src: collab.conf.j2
+ dest: /etc/httpd/conf.local.d/collab.conf
+ mode: 0644
+ owner: root
+ group: "{{ ansible_wheel }}"
+ notify: restart apache
diff --git a/roles/collab/templates/collab.conf.j2 b/roles/collab/templates/collab.conf.j2
new file mode 100644
index 0000000..40c7b6c
--- /dev/null
+++ b/roles/collab/templates/collab.conf.j2
@@ -0,0 +1,8 @@
+<Directory /srv/wikis/collab/htdocs>
+    Options +ExecCGI
+    AllowOverride All
+    WSGIProcessGroup collab
+    WSGIRestrictProcess collab
+</Directory>
+
+WSGIDaemonProcess collab user=collab group=collab umask=0007 processes={{ ansible_processor_vcpus }} threads=20 maximum-requests=4000 display-name=%{GROUP}