Diffstat (limited to 'webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules')
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/api.py  779
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/api.pyc  bin 0 -> 19449 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/comment.py  906
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/comment.pyc  bin 0 -> 17120 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/compiler.py  1473
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/compiler.pyc  bin 0 -> 18368 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/config.py  168
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/config.pyc  bin 0 -> 4127 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/filetool.py  96
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/filetool.pyc  bin 0 -> 3119 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/loader.py  816
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/loader.pyc  bin 0 -> 16059 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/mapper.py  19
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/mapper.pyc  bin 0 -> 530 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/migrator.py  303
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/migrator.pyc  bin 0 -> 6140 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/obfuscator.py  438
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/obfuscator.pyc  bin 0 -> 41094 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/optparseext.py  16
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/optparseext.pyc  bin 0 -> 896 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/resources.py  88
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/resources.pyc  bin 0 -> 2360 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/settings.py  97
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/settings.pyc  bin 0 -> 2892 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/stringoptimizer.py  173
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/stringoptimizer.pyc  bin 0 -> 4122 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tagtool.py  45
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textile.py  2873
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textile.pyc  bin 0 -> 92691 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textutil.py  143
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textutil.pyc  bin 0 -> 4283 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tokenizer.py  349
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tokenizer.pyc  bin 0 -> 9211 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tree.py  563
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tree.pyc  bin 0 -> 14591 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/treegenerator.py  1021
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/treegenerator.pyc  bin 0 -> 22626 bytes
-rwxr-xr-x  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/variableoptimizer.py  119
-rw-r--r--  webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/variableoptimizer.pyc  bin 0 -> 2790 bytes
39 files changed, 10485 insertions, 0 deletions
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/api.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/api.py
new file mode 100755
index 0000000000..3d7bee8da5
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/api.py
@@ -0,0 +1,779 @@
+#!/usr/bin/env python
+
+import sys, os, re, optparse
+import tree, treegenerator, tokenizer, comment
+
+
+
+class DocException (Exception):
+ def __init__ (self, msg, syntaxItem):
+ Exception.__init__(self, msg)
+ self.node = syntaxItem
+
+
+
+def createDoc(syntaxTree, docTree = None):
+ if not docTree:
+ docTree = tree.Node("doctree")
+
+ try:
+ currClassNode = None
+ if not syntaxTree.hasChildren():
+ return docTree
+
+ for item in syntaxTree.children:
+ if item.type == "assignment":
+ leftItem = item.getFirstListChild("left")
+ rightItem = item.getFirstListChild("right")
+ if leftItem.type == "variable":
+ if currClassNode and len(leftItem.children) == 3 and leftItem.children[0].get("name") == "qx":
+ if leftItem.children[1].get("name") == "Proto" and rightItem.type == "function":
+ # It's a method definition
+ handleMethodDefinition(item, False, currClassNode)
+ elif leftItem.children[1].get("name") == "Class":
+ if rightItem.type == "function":
+ handleMethodDefinition(item, True, currClassNode)
+ elif leftItem.children[2].get("name").isupper():
+ handleConstantDefinition(item, currClassNode)
+ elif currClassNode and assembleVariable(leftItem).startswith(currClassNode.get("fullName")):
+ # This is a definition of the type "mypackage.MyClass.bla = ..."
+ if rightItem.type == "function":
+ handleMethodDefinition(item, True, currClassNode)
+ elif leftItem.children[len(leftItem.children) - 1].get("name").isupper():
+ handleConstantDefinition(item, currClassNode)
+
+ elif item.type == "call":
+ operand = item.getChild("operand", False)
+ if operand:
+ var = operand.getChild("variable", False)
+ if var and len(var.children) == 3 and var.children[0].get("name") == "qx" and var.children[1].get("name") == "OO":
+ methodName = var.children[2].get("name")
+ if methodName == "defineClass":
+ currClassNode = handleClassDefinition(docTree, item)
+ elif methodName == "addProperty" or methodName == "addFastProperty":
+ # these are private and should be marked if listed, otherwise just hide them (wpbasti)
+ #or methodName == "addCachedProperty" or methodName == "changeProperty":
+ handlePropertyDefinition(item, currClassNode)
+ #elif item.type == "function":
+ # name = item.get("name", False)
+ # if name and name[0].isupper():
+ # # This is an old class definition "function MyClass (...)"
+ # currClassNode = handleClassDefinition(docTree, item)
+
+ except Exception:
+ exc = sys.exc_info()[1]
+ msg = ""
+
+ if hasattr(exc, "node"):
+ (line, column) = getLineAndColumnFromSyntaxItem(exc.node)
+ file = getFileFromSyntaxItem(exc.node)
+ if line != None or file != None:
+ msg = str(exc) + "\n " + str(file) + ", Line: " + str(line) + ", Column: " + str(column)
+
+ if msg == "":
+ raise Exception, "Unknown reason", sys.exc_info()[2]
+
+ else:
+ print
+ print " - Failed: %s" % msg
+ sys.exit(1)
+
+ return docTree
+
+
+
+def variableIsClassName(varItem):
+ length = len(varItem.children)
+ for i in range(length):
+ varChild = varItem.children[i]
+ if not varChild.type == "identifier":
+ return False
+ if i < length - 1:
+ # This is not the last identifier -> It must be a package (= lowercase)
+ if not varChild.get("name").islower():
+ return False
+ else:
+ # This is the last identifier -> It must be the class name (= first letter uppercase)
+ if not varChild.get("name")[0].isupper():
+ return False
+ return True
+
+
+
+
+
+def assembleVariable(variableItem):
+ if variableItem.type != "variable":
+ raise DocException("'variableItem' is no variable", variableItem)
+
+ assembled = ""
+ for child in variableItem.children:
+ if len(assembled) != 0:
+ assembled += "."
+ assembled += child.get("name")
+
+ return assembled
+
+
+
+def handleClassDefinition(docTree, item):
+ params = item.getChild("params")
+
+ paramsLen = len(params.children);
+ if paramsLen == 1:
+ superClassName = "Object"
+ ctorItem = None
+ elif paramsLen == 2:
+ superClassName = "Object"
+ ctorItem = params.children[1]
+ elif paramsLen == 3:
+ superClassName = assembleVariable(params.children[1])
+ ctorItem = params.children[2]
+ else:
+ raise DocException("defineClass call has more than three parameters: " + str(len(params.children)), item)
+
+ className = params.children[0].get("value")
+ classNode = getClassNode(docTree, className)
+
+ if superClassName != "Object":
+ superClassNode = getClassNode(docTree, superClassName)
+ childClasses = superClassNode.get("childClasses", False)
+ if childClasses:
+ childClasses += "," + className
+ else:
+ childClasses = className
+ superClassNode.set("childClasses", childClasses)
+
+ classNode.set("superClass", superClassName)
+
+ commentAttributes = comment.parseNode(item)
+
+ for attrib in commentAttributes:
+ if attrib["category"] == "event":
+ # Add the event
+ if comment.attribHas(attrib, "name") and comment.attribHas(attrib, "type"):
+ addEventNode(classNode, item, attrib);
+ else:
+ addError(classNode, "Documentation contains malformed event attribute.", item)
+
+ # Add the constructor
+ if ctorItem and ctorItem.type == "function":
+ ctor = handleFunction(ctorItem, commentAttributes, classNode)
+ ctor.set("isCtor", True)
+ classNode.addListChild("constructor", ctor)
+
+ # Check for methods defined in the constructor
+ # (for method definition style that supports real private methods)
+ ctorBlock = ctorItem.getChild("body").getChild("block")
+
+ if ctorBlock.hasChildren():
+ for item in ctorBlock.children:
+ if item.type == "assignment":
+ leftItem = item.getFirstListChild("left")
+ rightItem = item.getFirstListChild("right")
+
+ # It's a method definition
+ if leftItem.type == "variable" and len(leftItem.children) == 2 and (leftItem.children[0].get("name") == "this" or leftItem.children[0].get("name") == "self") and rightItem.type == "function":
+ handleMethodDefinition(item, False, classNode)
+
+ elif ctorItem and ctorItem.type == "map":
+ for keyvalueItem in ctorItem.children:
+ valueItem = keyvalueItem.getChild("value").getFirstChild()
+ if (valueItem.type == "function"):
+ handleMethodDefinition(keyvalueItem, True, classNode)
+ else:
+ handleConstantDefinition(keyvalueItem, classNode)
+
+ return classNode;
+
+
+
+
+
+
+
+def handlePropertyDefinition(item, classNode):
+ paramsMap = item.getChild("params").getChild("map")
+
+ node = tree.Node("property")
+ node.set("name", paramsMap.getChildByAttribute("key", "name").getChild("value").getChild("constant").get("value"))
+
+ propType = paramsMap.getChildByAttribute("key", "type", False)
+ if propType:
+ node.set("type", getType(propType.getChild("value").getFirstChild()))
+
+ allowNull = paramsMap.getChildByAttribute("key", "allowNull", False)
+ if allowNull:
+ node.set("allowNull", allowNull.getChild("value").getChild("constant").get("value"))
+
+ defaultValue = paramsMap.getChildByAttribute("key", "defaultValue", False)
+ if defaultValue:
+ node.set("defaultValue", getValue(defaultValue.getFirstListChild("value")))
+
+ getAlias = paramsMap.getChildByAttribute("key", "getAlias", False)
+ if getAlias:
+ node.set("getAlias", getAlias.getChild("value").getChild("constant").get("value"))
+
+ setAlias = paramsMap.getChildByAttribute("key", "setAlias", False)
+ if setAlias:
+ node.set("setAlias", setAlias.getChild("value").getChild("constant").get("value"))
+
+ unitDetection = paramsMap.getChildByAttribute("key", "unitDetection", False)
+ if unitDetection:
+ node.set("unitDetection", unitDetection.getChild("value").getChild("constant").get("value"))
+
+ instance = paramsMap.getChildByAttribute("key", "instance", False)
+ if instance:
+ node.set("instance", instance.getChild("value").getChild("constant").get("value"))
+
+ classname = paramsMap.getChildByAttribute("key", "classname", False)
+ if classname:
+ node.set("classname", classname.getChild("value").getChild("constant").get("value"))
+
+ possibleValues = paramsMap.getChildByAttribute("key", "possibleValues", False)
+ if possibleValues:
+ array = possibleValues.getChild("value").getChild("array")
+ values = ""
+ for arrayItem in array.children:
+ if len(values) != 0:
+ values += ", "
+ values += getValue(arrayItem)
+ node.set("possibleValues", values)
+
+ # If the description has a type specified then take this type
+ # (and not the one extracted from the paramsMap)
+ commentAttributes = comment.parseNode(item)
+ addTypeInfo(node, comment.getAttrib(commentAttributes, "description"), item)
+
+ classNode.addListChild("properties", node)
+
+
+
+def getValue(item):
+ value = None
+ if item.type == "constant":
+ if item.get("constantType") == "string":
+ value = '"' + item.get("value") + '"'
+ else:
+ value = item.get("value")
+ elif item.type == "variable":
+ value = assembleVariable(item)
+ elif item.type == "operation" and item.get("operator") == "SUB":
+ # E.g. "-1" or "-Infinity"
+ value = "-" + getValue(item.getChild("first").getFirstChild())
+ if value == None:
+ value = "[Unsupported item type: " + item.type + "]"
+
+ return value
+
+
+
+def handleMethodDefinition(item, isStatic, classNode):
+ if item.type == "assignment":
+ # This is a "normal" method definition
+ leftItem = item.getFirstListChild("left")
+ name = leftItem.children[len(leftItem.children) - 1].get("name")
+ functionItem = item.getFirstListChild("right")
+ elif item.type == "keyvalue":
+ # This is a method definition of a map-style class (like qx.Const)
+ name = item.get("key")
+ functionItem = item.getFirstListChild("value")
+
+ commentAttributes = comment.parseNode(item)
+
+ node = handleFunction(functionItem, commentAttributes, classNode)
+ node.set("name", name)
+
+ isPublic = name[0] != "_"
+ listName = "methods"
+ if isStatic:
+ node.set("isStatic", True)
+ listName += "-static"
+ if isPublic:
+ listName += "-pub"
+ else:
+ listName += "-prot"
+
+ classNode.addListChild(listName, node)
+
+
+
+def handleConstantDefinition(item, classNode):
+ if (item.type == "assignment"):
+ # This is a "normal" constant definition
+ leftItem = item.getFirstListChild("left")
+ name = leftItem.children[len(leftItem.children) - 1].get("name")
+ elif (item.type == "keyvalue"):
+ # This is a constant definition of a map-style class (like qx.Const)
+ name = item.get("key")
+
+ node = tree.Node("constant")
+ node.set("name", name)
+
+ commentAttributes = comment.parseNode(item)
+ addTypeInfo(node, comment.getAttrib(commentAttributes, "description"), item)
+
+ classNode.addListChild("constants", node)
+
+
+
+def handleFunction(funcItem, commentAttributes, classNode):
+ if funcItem.type != "function":
+ raise DocException("'funcItem' is no function", funcItem)
+
+ node = tree.Node("method")
+
+ # Read the parameters
+ params = funcItem.getChild("params", False)
+ if params and params.hasChildren():
+ for param in params.children:
+ paramNode = tree.Node("param")
+ paramNode.set("name", param.getFirstChild().get("name"))
+ node.addListChild("params", paramNode)
+
+ # Check whether the function is abstract
+ bodyBlockItem = funcItem.getChild("body").getFirstChild();
+ if bodyBlockItem.type == "block" and bodyBlockItem.hasChildren():
+ firstStatement = bodyBlockItem.children[0];
+ if firstStatement.type == "throw":
+ # The first statement of the function is a throw statement
+ # -> The function is abstract
+ node.set("isAbstract", True)
+
+ if len(commentAttributes) == 0:
+ addError(node, "Documentation is missing.", funcItem)
+ return node
+
+ # Read all description, param and return attributes
+ for attrib in commentAttributes:
+ # Add description
+ if attrib["category"] == "description":
+ descNode = tree.Node("desc").set("text", attrib["text"])
+ node.addChild(descNode)
+
+ elif attrib["category"] == "param":
+ if not attrib.has_key("name"):
+ raise DocException("Missing name of parameter.", funcItem)
+
+ # Find the matching param node
+ paramName = attrib["name"]
+ paramNode = node.getListChildByAttribute("params", "name", paramName, False)
+
+ if not paramNode:
+ addError(node, "Contains information for a non-existing parameter <code>%s</code>." % paramName, funcItem)
+ continue
+
+ addTypeInfo(paramNode, attrib, funcItem)
+
+ elif attrib["category"] == "return":
+ returnNode = tree.Node("return")
+ node.addChild(returnNode)
+
+ addTypeInfo(returnNode, attrib, funcItem)
+
+ # Check for documentation errors
+ # Check whether all parameters have been documented
+ if node.hasChild("params"):
+ paramsListNode = node.getChild("params");
+ for paramNode in paramsListNode.children:
+ if not paramNode.getChild("desc", False):
+ addError(node, "Parameter %s is not documented." % paramNode.get("name"), funcItem)
+
+ return node
+
+
+
+def addTypeInfo(node, commentAttrib=None, item=None):
+ if commentAttrib == None:
+ if node.type == "param":
+ addError(node, "Parameter <code>%s</code> in not documented." % commentAttrib.get("name"), item)
+
+ elif node.type == "return":
+ addError(node, "Return value is not documented.", item)
+
+ else:
+ addError(node, "Documentation is missing.", item)
+
+ return
+
+ # add description
+ node.addChild(tree.Node("desc").set("text", commentAttrib["text"]))
+
+ # add types
+ if commentAttrib.has_key("type"):
+ typesNode = tree.Node("types")
+ node.addChild(typesNode)
+
+ for item in commentAttrib["type"]:
+ itemNode = tree.Node("entry")
+ typesNode.addChild(itemNode)
+
+ itemNode.set("type", item["type"])
+
+ if item["dimensions"] != 0:
+ itemNode.set("dimensions", item["dimensions"])
+
+ # add default value
+ if commentAttrib.has_key("default"):
+ defaultValue = commentAttrib["default"]
+ if defaultValue != None:
+ # print "defaultValue: %s" % defaultValue
+ node.set("defaultValue", defaultValue)
+
+
+
+
+
+def addEventNode(classNode, classItem, commentAttrib):
+ node = tree.Node("event")
+
+ node.set("name", commentAttrib["name"])
+ node.addChild(tree.Node("desc").set("text", commentAttrib["text"]))
+
+ # add types
+ if commentAttrib.has_key("type"):
+ typesNode = tree.Node("types")
+ node.addChild(typesNode)
+
+ for item in commentAttrib["type"]:
+ itemNode = tree.Node("entry")
+ typesNode.addChild(itemNode)
+
+ itemNode.set("type", item["type"])
+
+ if item["dimensions"] != 0:
+ itemNode.set("dimensions", item["dimensions"])
+
+ classNode.addListChild("events", node)
+
+
+
+
+def addError(node, msg, syntaxItem):
+ # print ">>> %s" % msg
+
+ errorNode = tree.Node("error")
+ errorNode.set("msg", msg)
+
+ (line, column) = getLineAndColumnFromSyntaxItem(syntaxItem)
+ if line:
+ errorNode.set("line", line)
+
+ if column:
+ errorNode.set("column", column)
+
+ node.addListChild("errors", errorNode)
+ node.set("hasError", True)
+
+
+
+def getLineAndColumnFromSyntaxItem(syntaxItem):
+ line = None
+ column = None
+
+ while line == None and column == None and syntaxItem:
+ line = syntaxItem.get("line", False)
+ column = syntaxItem.get("column", False)
+
+ if syntaxItem.hasParent():
+ syntaxItem = syntaxItem.parent
+ else:
+ syntaxItem = None
+
+ return line, column
+
+
+def getFileFromSyntaxItem(syntaxItem):
+ file = None
+ while file == None and syntaxItem:
+ file = syntaxItem.get("file", False)
+ if hasattr(syntaxItem, "parent"):
+ syntaxItem = syntaxItem.parent
+ else:
+ syntaxItem = None
+ return file
+
+
+def getType(item):
+ if item.type == "constant" and item.get("constantType") == "string":
+ val = item.get("value")
+
+ if val == "object":
+ val = "Object"
+ elif val == "function":
+ val = "Function"
+
+ return val
+ else:
+ raise DocException("Can't gess type. type is neither string nor variable: " + item.type, item)
+
+
+def getClassNode(docTree, className):
+ splits = className.split(".")
+
+ currPackage = docTree
+ length = len(splits)
+ for i in range(length):
+ split = splits[i]
+
+ if (i < length - 1):
+ # This is a package name -> Get the right package
+ childPackage = currPackage.getListChildByAttribute("packages", "name", split, False)
+ if not childPackage:
+ childPackageName = ".".join(splits[:-(length-i-1)])
+
+ # The package does not exist -> Create it
+ childPackage = tree.Node("package")
+ childPackage.set("name", split)
+ childPackage.set("fullName", childPackageName)
+ childPackage.set("packageName", childPackageName.replace("." + split, ""))
+
+ currPackage.addListChild("packages", childPackage)
+
+ # Update current package
+ currPackage = childPackage
+
+ else:
+ # This is a class name -> Get the right class
+ classNode = currPackage.getListChildByAttribute("classes", "name", split, False)
+ if not classNode:
+ # The class does not exist -> Create it
+ classNode = tree.Node("class")
+ classNode.set("name", split)
+ classNode.set("fullName", className)
+ classNode.set("packageName", className.replace("." + split, ""))
+ currPackage.addListChild("classes", classNode)
+
+ return classNode
+
+
+
+def postWorkPackage(docTree, packageNode):
+ childHasError = False
+
+ packages = packageNode.getChild("packages", False)
+ if packages:
+ packages.children.sort(nameComparator)
+ for node in packages.children:
+ hasError = postWorkPackage(docTree, node)
+ if hasError:
+ childHasError = True
+
+ classes = packageNode.getChild("classes", False)
+ if classes:
+ classes.children.sort(nameComparator)
+ for node in classes.children:
+ hasError = postWorkClass(docTree, node)
+ if hasError:
+ childHasError = True
+
+ if childHasError:
+ packageNode.set("hasWarning", True)
+
+ return childHasError
+
+
+
+def postWorkClass(docTree, classNode):
+ # Sort child classes
+ childClasses = classNode.get("childClasses", False)
+ if childClasses:
+ classArr = childClasses.split(",")
+ classArr.sort()
+ childClasses = ",".join(classArr)
+ classNode.set("childClasses", childClasses)
+
+ # Remove the property-modifier-methods
+ removePropertyModifiers(classNode)
+
+ # Mark overridden items
+ postWorkItemList(docTree, classNode, "properties", True)
+ postWorkItemList(docTree, classNode, "events", False)
+ postWorkItemList(docTree, classNode, "methods-pub", True)
+ postWorkItemList(docTree, classNode, "methods-prot", True)
+ postWorkItemList(docTree, classNode, "methods-static-pub", False)
+ postWorkItemList(docTree, classNode, "methods-static-prot", False)
+
+ # Check whether the class is static
+ superClassName = classNode.get("superClass", False)
+ if (superClassName == None or superClassName == "QxObject") \
+ and classNode.getChild("properties", False) == None \
+ and classNode.getChild("methods-pub", False) == None \
+ and classNode.getChild("methods-prot", False) == None:
+ # This class is static
+ classNode.set("isStatic", True)
+
+ # Check whether the class is abstract
+ if isClassAbstract(docTree, classNode, {}):
+ classNode.set("isAbstract", True)
+
+ # Check for errors
+ childHasError = listHasError(classNode, "constructor") or listHasError(classNode, "properties") \
+ or listHasError(classNode, "methods-pub") or listHasError(classNode, "methods-prot") \
+ or listHasError(classNode, "methods-static-pub") or listHasError(classNode, "methods-static-prot") \
+ or listHasError(classNode, "constants")
+
+ if childHasError:
+ classNode.set("hasWarning", True)
+
+ return childHasError
+
+
+
+def isClassAbstract(docTree, classNode, visitedMethodNames):
+ if containsAbstractMethods(classNode.getChild("methods-pub", False), visitedMethodNames) \
+ or containsAbstractMethods(classNode.getChild("methods-prot", False), visitedMethodNames):
+ # One of the methods is abstract
+ return True
+
+ # No abstract methods found -> Check whether the super class has abstract
+ # methods that haven't been overridden
+ superClassName = classNode.get("superClass", False)
+ if superClassName:
+ superClassNode = getClassNode(docTree, superClassName)
+ return isClassAbstract(docTree, superClassNode, visitedMethodNames)
+
+
+
+def containsAbstractMethods(methodListNode, visitedMethodNames):
+ if methodListNode:
+ for methodNode in methodListNode.children:
+ name = methodNode.get("name")
+ if not name in visitedMethodNames:
+ visitedMethodNames[name] = True
+ if methodNode.get("isAbstract", False):
+ return True
+
+ return False
+
+
+
+def removePropertyModifiers(classNode):
+ propertiesList = classNode.getChild("properties", False)
+ methodsProtList = classNode.getChild("methods-prot", False)
+ if propertiesList and methodsProtList:
+ for propNode in propertiesList.children:
+ name = propNode.get("name")
+ upperName = name[0].upper() + name[1:]
+
+ modifyNode = methodsProtList.getChildByAttribute("name", "_modify" + upperName, False)
+ if modifyNode:
+ methodsProtList.removeChild(modifyNode);
+
+ changeNode = methodsProtList.getChildByAttribute("name", "_change" + upperName, False)
+ if changeNode:
+ methodsProtList.removeChild(changeNode);
+
+ checkNode = methodsProtList.getChildByAttribute("name", "_check" + upperName, False)
+ if checkNode:
+ methodsProtList.removeChild(checkNode);
+
+ if not methodsProtList.hasChildren():
+ classNode.removeChild(methodsProtList)
+
+
+
+def postWorkItemList(docTree, classNode, listName, overridable):
+ """Does the post work for a list of properties or methods."""
+
+ # Sort the list
+ sortByName(classNode, listName)
+
+ # Post work all items
+ listNode = classNode.getChild(listName, False)
+ if listNode:
+ for itemNode in listNode.children:
+ name = itemNode.get("name")
+
+ # Check whether this item is overridden and try to inherit the
+ # documentation from the next matching super class
+ if overridable:
+ superClassName = classNode.get("superClass", False)
+ overriddenFound = False
+ docFound = (itemNode.getChild("desc", False) != None)
+ while superClassName and (not overriddenFound or not docFound):
+ superClassNode = getClassNode(docTree, superClassName)
+ superItemNode = superClassNode.getListChildByAttribute(listName, "name", name, False)
+
+ if superItemNode:
+ if not docFound:
+ # This super item has a description
+ # -> Check whether the parameters match
+ # NOTE: paramsMatch works for properties, too
+ # (Because both compared properties always have no params)
+ if paramsMatch(itemNode, superItemNode):
+ # The parameters match -> We can use the documentation of the super class
+ itemNode.set("docFrom", superClassName)
+ docFound = (superItemNode.getChild("desc", False) != None)
+
+ # Remove previously recorded documentation errors from the item
+ # (Any documentation errors will be recorded in the super class)
+ removeErrors(itemNode)
+ if not overriddenFound:
+ # This super class has the item defined -> Add an overridden attribute
+ itemNode.set("overriddenFrom", superClassName)
+ overriddenFound = True
+
+ # Check the next superclass
+ superClassName = superClassNode.get("superClass", False)
+
+ if not docFound and itemNode.get("overriddenFrom", False):
+ # This item is overridden, but we didn't find any documentation in the
+ # super classes -> Add a warning
+ itemNode.set("hasWarning", True)
+
+
+
+def paramsMatch(methodNode1, methodNode2):
+ params1 = methodNode1.getChild("params1", False)
+ params2 = methodNode1.getChild("params2", False)
+
+ if params1 == None or params2 == None:
+ # One method has no parameters -> The params match if both are None
+ return params1 == params2
+ elif len(params1.children) != len(params2.children):
+ # The param count is different -> The params don't match
+ return False
+ else:
+ for i in range(len(params1.children)):
+ par1 = params1.children[i]
+ par2 = params2.children[i]
+ if (par1.get("name") != par2.get("name")):
+ # These parameters don't match
+ return False
+
+ # All tests passed
+ return True
+
+
+
+def removeErrors(node):
+ errors = node.getChild("errors", False)
+ if errors:
+ node.removeChild(errors)
+ node.remove("hasError")
+
+
+
+def sortByName(node, listName):
+ listNode = node.getChild(listName, False)
+ if listNode:
+ listNode.children.sort(nameComparator)
+
+
+
+def nameComparator(node1, node2):
+ name1 = node1.get("name").lower()
+ name2 = node2.get("name").lower()
+ return cmp(name1, name2)
+
+
+
+def listHasError(node, listName):
+ listNode = node.getChild(listName, False)
+ if listNode:
+ for childNode in listNode.children:
+ if childNode.get("hasError", False):
+ return True
+
+ return False
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/api.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/api.pyc
new file mode 100644
index 0000000000..2bb39fc2cd
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/api.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/comment.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/comment.py
new file mode 100755
index 0000000000..b27a10de30
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/comment.py
@@ -0,0 +1,906 @@
+#!/usr/bin/env python
+
+import sys, string, re
+import config, tree, textile
+
+
+
+S_INLINE_COMMENT = "//.*"
+R_INLINE_COMMENT = re.compile("^" + S_INLINE_COMMENT + "$")
+
+R_INLINE_COMMENT_TIGHT = re.compile("^//\S+")
+R_INLINE_COMMENT_PURE = re.compile("^//")
+
+
+
+S_BLOCK_COMMENT = "/\*([^*]|[\n]|(\*+([^*/]|[\n])))*\*+/"
+R_BLOCK_COMMENT = re.compile("^" + S_BLOCK_COMMENT + "$")
+
+R_BLOCK_COMMENT_JAVADOC = re.compile("^/\*\*")
+R_BLOCK_COMMENT_QTDOC = re.compile("^/\*!")
+R_BLOCK_COMMENT_AREA = re.compile("^/\*\n\s*\*\*\*\*\*")
+R_BLOCK_COMMENT_DIVIDER = re.compile("^/\*\n\s*----")
+R_BLOCK_COMMENT_HEADER = re.compile("^/\* \*\*\*\*")
+
+R_BLOCK_COMMENT_TIGHT_START = re.compile("^/\*\S+")
+R_BLOCK_COMMENT_TIGHT_END = re.compile("\S+\*/$")
+R_BLOCK_COMMENT_PURE_START = re.compile("^/\*")
+R_BLOCK_COMMENT_PURE_END = re.compile("\*/$")
+
+R_ATTRIBUTE = re.compile(r'[^{]@(\w+)\s*')
+R_JAVADOC_STARS = re.compile(r'^\s*\*')
+
+
+
+R_NAMED_TYPE = re.compile(r'^\s*(\w+)\s*({([^}]+)})?')
+R_SIMPLE_TYPE = re.compile(r'^\s*({([^}]+)})?')
+
+
+
+
+VARPREFIXES = {
+ "a" : "Array",
+ "b" : "boolean",
+ "d" : "Date",
+ "f" : "Function",
+ "i" : "int",
+ "h" : "Map",
+ "m" : "Map",
+ "n" : "number",
+ "o" : "Object",
+ "r" : "RegExp",
+ "s" : "string",
+ "v" : "var",
+ "w" : "Widget"
+}
+
+VARNAMES = {
+ "a" : "Array",
+ "arr" : "Array",
+
+ "e" : "Event",
+ "ev" : "Event",
+ "evt" : "Event",
+
+ "el" : "Element",
+ "elem" : "Element",
+ "elm" : "Element",
+
+ "ex" : "Exception",
+ "exc" : "Exception",
+
+ "flag" : "boolean",
+ "force" : "boolean",
+
+ "f" : "Function",
+ "func" : "Function",
+
+ "h" : "Map",
+ "hash" : "Map",
+ "map" : "Map",
+
+ "node" : "Node",
+
+ "n" : "number",
+ "num" : "number",
+
+ "o" : "Object",
+ "obj" : "Object",
+
+ "reg" : "RegExp",
+
+ "s" : "string",
+ "str" : "string"
+}
+
+VARDESC = {
+ "propValue" : "Current value",
+ "propOldValue" : "Previous value",
+ "propData" : "Property configuration map"
+}
+
+
+
+
+def outdent(source, indent):
+ return re.compile("\n\s{%s}" % indent).sub("\n", source)
+
+
+
+def indent(source, indent):
+ return re.compile("\n").sub("\n" + (" " * indent), source)
+
+
+
+def correctInline(source):
+ if R_INLINE_COMMENT_TIGHT.match(source):
+ return R_INLINE_COMMENT_PURE.sub("// ", source)
+
+ return source
+
+
+
+def correctBlock(source):
+ if not getFormat(source) in [ "javadoc", "qtdoc" ]:
+ if R_BLOCK_COMMENT_TIGHT_START.search(source):
+ source = R_BLOCK_COMMENT_PURE_START.sub("/* ", source)
+
+ if R_BLOCK_COMMENT_TIGHT_END.search(source):
+ source = R_BLOCK_COMMENT_PURE_END.sub(" */", source)
+
+ return source
+
+
+
+def correct(source):
+ if source.startswith("//"):
+ return correctInline(source)
+ else:
+ return correctBlock(source)
+
+
+
+def isMultiLine(source):
+ return source.find("\n") != -1
+
+
+
+def getFormat(source):
+ if R_BLOCK_COMMENT_JAVADOC.search(source):
+ return "javadoc"
+ elif R_BLOCK_COMMENT_QTDOC.search(source):
+ return "qtdoc"
+ elif R_BLOCK_COMMENT_AREA.search(source):
+ return "area"
+ elif R_BLOCK_COMMENT_DIVIDER.search(source):
+ return "divider"
+ elif R_BLOCK_COMMENT_HEADER.search(source):
+ return "header"
+
+ return "block"
+
+
+
+
+
+
+
+
+def hasThrows(node):
+ if node.type == "throw":
+ return True
+
+ if node.hasChildren():
+ for child in node.children:
+ if hasThrows(child):
+ return True
+
+ return False
+
+
+
+
+def getReturns(node, found):
+ if node.type == "function":
+ pass
+
+ elif node.type == "return":
+ if node.getChildrenLength(True) > 0:
+ val = "var"
+ else:
+ val = "void"
+
+ if node.hasChild("expression"):
+ expr = node.getChild("expression")
+ if expr.hasChild("variable"):
+ var = expr.getChild("variable")
+ if var.getChildrenLength(True) == 1 and var.hasChild("identifier"):
+ val = nameToType(var.getChild("identifier").get("name"))
+ else:
+ val = "var"
+
+ elif expr.hasChild("constant"):
+ val = expr.getChild("constant").get("constantType")
+
+ if val == "number":
+ val = expr.getChild("constant").get("detail")
+
+ elif expr.hasChild("array"):
+ val = "Array"
+
+ elif expr.hasChild("map"):
+ val = "Map"
+
+ elif expr.hasChild("function"):
+ val = "Function"
+
+ elif expr.hasChild("call"):
+ val = "call"
+
+ if not val in found:
+ found.append(val)
+
+ elif node.hasChildren():
+ for child in node.children:
+ getReturns(child, found)
+
+ return found
+
+
+
+def nameToType(name):
+ typ = "var"
+
+ # Evaluate type from name
+ if name in VARNAMES:
+ typ = VARNAMES[name]
+
+ elif len(name) > 1:
+ if name[1].isupper():
+ if name[0] in VARPREFIXES:
+ typ = VARPREFIXES[name[0]]
+
+ return typ
+
+
+
+def nameToDescription(name):
+ desc = "TODOC"
+
+ if name in VARDESC:
+ desc = VARDESC[name]
+
+ return desc
+
+
+
+
+def qt2javadoc(text):
+ attribList = parseText(text, False)
+ res = "/**"
+
+ desc = getAttrib(attribList, "description")["text"]
+
+ if "\n" in desc:
+ res += "\n"
+
+ for line in desc.split("\n"):
+ res += " * %s\n" % line
+
+ res += " "
+
+ else:
+ res += " %s " % desc
+
+ res += "*/"
+
+ return res
+
+
+def parseNode(node):
+ """Takes the last doc comment from the commentsBefore child, parses it and
+ returns a Node representing the doc comment"""
+
+ # Find the last doc comment
+ commentsBefore = node.getChild("commentsBefore", False)
+ if commentsBefore and commentsBefore.hasChildren():
+ for child in commentsBefore.children:
+ if child.type == "comment" and child.get("detail") in [ "javadoc", "qtdoc" ]:
+ return parseText(child.get("text"))
+
+ return []
+
+
+
+def parseText(intext, format=True):
+ # Strip "/**", "/*!" and "*/"
+ intext = intext[3:-2]
+
+ # Strip leading stars in every line
+ text = ""
+ for line in intext.split("\n"):
+ text += R_JAVADOC_STARS.sub("", line).strip() + "\n"
+
+ # Search for attributes
+ desc = { "category" : "description", "text" : "" }
+ attribs = [ desc ]
+ pos = 0
+
+ while True:
+ mtch = R_ATTRIBUTE.search(text, pos)
+
+ if mtch == None:
+ prevText = text[pos:].strip()
+
+ if len(attribs) == 0:
+ desc["text"] = prevText
+ else:
+ attribs[-1]["text"] = prevText
+
+ break
+
+ prevText = text[pos:mtch.start(0)].strip()
+ pos = mtch.end(0)
+
+ if len(attribs) == 0:
+ desc["text"] = prevText
+ else:
+ attribs[-1]["text"] = prevText
+
+ attribs.append({ "category" : mtch.group(1), "text" : "" })
+
+ # parse details
+ for attrib in attribs:
+ parseDetail(attrib, format)
+
+ return attribs
+
+
+
+def parseDetail(attrib, format=True):
+ text = attrib["text"]
+
+ if attrib["category"] in [ "param", "event" ]:
+ mtch = R_NAMED_TYPE.search(text)
+ else:
+ mtch = R_SIMPLE_TYPE.search(text)
+
+ if mtch:
+ text = text[mtch.end(0):]
+
+ if attrib["category"] in [ "param", "event" ]:
+ attrib["name"] = mtch.group(1)
+ # print ">>> NAME: %s" % mtch.group(1)
+ remain = mtch.group(3)
+ else:
+ remain = mtch.group(2)
+
+ if remain != None:
+ defIndex = remain.rfind("?")
+ if defIndex != -1:
+ attrib["default"] = remain[defIndex+1:].strip()
+ remain = remain[0:defIndex].strip()
+ # print ">>> DEFAULT: %s" % attrib["default"]
+
+ typValues = []
+ for typ in remain.split("|"):
+ typValue = typ.strip()
+ arrayIndex = typValue.find("[")
+
+ if arrayIndex != -1:
+ arrayValue = (len(typValue) - arrayIndex) / 2
+ typValue = typValue[0:arrayIndex]
+ else:
+ arrayValue = 0
+
+ typValues.append({ "type" : typValue, "dimensions" : arrayValue })
+
+ if len(typValues) > 0:
+ attrib["type"] = typValues
+ # print ">>> TYPE: %s" % attrib["type"]
+
+ if format:
+ attrib["text"] = formatText(text)
+ else:
+ attrib["text"] = cleanupText(text)
+
+
+
+
+
+
+
+
+def cleanupText(text):
+ #print "============= INTEXT ========================="
+ #print text
+
+ text = text.replace("<p>", "\n")
+ text = text.replace("<br/>", "\n")
+ text = text.replace("<br>", "\n")
+ text = text.replace("</p>", " ")
+
+ newline = False
+ lines = text.split("\n")
+ text = ""
+
+ for line in lines:
+ line = line.strip()
+
+ if line == "":
+ if not newline:
+ newline = True
+
+ else:
+ if text != "":
+ text += "\n"
+
+ if newline:
+ text += "\n"
+ newline = False
+
+ text += line
+
+ #print "============= OUTTEXT ========================="
+ #print text
+
+ return text
+
+
+
+def formatText(text):
+ #print "============= FORMAT:1 ========================="
+ #print text
+
+ # cleanup HTML
+ text = text.replace("<p>", "\n")
+ text = text.replace("<br/>", "\n")
+ text = text.replace("<br>", "\n")
+ text = text.replace("</p>", " ")
+
+ # cleanup wraps
+ text = text.replace("\n\n", "----BREAK----")
+ text = text.replace("\n*", "----UL----")
+ text = text.replace("\n#", "----OL----")
+ text = text.replace("\n", " ")
+ text = text.replace("----BREAK----", "\n\n")
+ text = text.replace("----UL----", "\n*")
+ text = text.replace("----OL----", "\n#")
+
+ #print "============= FORMAT:2 ========================="
+ #print text
+
+ text = textile.textile(unicode(text).encode('utf-8'))
+
+ #print "============= FORMAT:3 ========================="
+ #print text
+
+ return text
+
+
+
+
+
+
+
+
+def getAttrib(attribList, category):
+ for attrib in attribList:
+ if attrib["category"] == category:
+ return attrib
+
+
+
+def getParam(attribList, name):
+ for attrib in attribList:
+ if attrib["category"] == "param":
+ if attrib.has_key("name") and attrib["name"] == name:
+ return attrib
+
+
+
+def attribHas(attrib, key):
+ if attrib != None and attrib.has_key(key) and not attrib[key] in [ "", None ]:
+ return True
+
+ return False
+
+
+
+def splitText(orig, attrib=True):
+ res = ""
+ first = True
+
+ for line in orig.split("\n"):
+ if attrib:
+ if first:
+ res += " %s\n" % line
+ else:
+ res += " * %s\n" % line
+
+ else:
+ res += " * %s\n" % line
+
+ first = False
+
+ if not res.endswith("\n"):
+ res += "\n"
+
+ return res
+
+
+
+def parseType(vtype):
+ typeText = ""
+
+ firstType = True
+ for entry in vtype:
+ if not firstType:
+ typeText += " | "
+
+ typeText += entry["type"]
+
+ if entry.has_key("dimensions") and entry["dimensions"] > 0:
+ typeText += "[]" * entry["dimensions"]
+
+ firstType = False
+
+ return typeText
+
+
+
+
+def fromNode(node, assignType, name, alternative, old=[]):
+ #
+ # description
+ ##############################################################
+ oldDesc = getAttrib(old, "description")
+
+ if attribHas(oldDesc, "text"):
+ newText = oldDesc["text"]
+ else:
+ newText = "{var} TODOC"
+
+ if "\n" in newText:
+ s = "/**\n%s\n-*/" % splitText(newText, False)
+ else:
+ s = "/** %s */" % newText
+
+
+ #
+ # other @attributes
+ ##############################################################
+
+ for attrib in old:
+ cat = attrib["category"]
+
+ if cat != "description":
+ print " * Found unallowed attribute %s in comment for %s" % (cat, name)
+
+ return s
+
+
+
+
+def fromFunction(func, assignType, name, alternative, old=[]):
+ #
+ # open comment
+ ##############################################################
+ s = "/**\n"
+
+
+ #
+ # description
+ ##############################################################
+ oldDesc = getAttrib(old, "description")
+
+ if attribHas(oldDesc, "text"):
+ newText = oldDesc["text"]
+ else:
+ newText = "TODOC"
+
+ s += splitText(newText, False)
+ s += " *\n"
+
+
+
+
+ #
+ # add @type
+ ##############################################################
+ if assignType != None:
+ s += " * @type %s\n" % assignType
+ else:
+ s += " * @type unknown TODOC\n"
+
+
+
+
+ #
+ # add @name
+ ##############################################################
+ if name != None and name != "construct":
+ s += " * @name %s\n" % name
+
+ if name.startswith("__"):
+ s += " * @access private\n"
+ elif name.startswith("_"):
+ s += " * @access protected\n"
+ else:
+ s += " * @access public\n"
+
+
+
+ #
+ # add @alternative
+ ##############################################################
+ oldAlternative = getAttrib(old, "alternative")
+
+ if alternative:
+ if attribHas(oldAlternative, "text"):
+ newText = oldAlternative["text"]
+ else:
+ newText = "TODOC"
+
+ s += " * @alternative%s" % splitText(newText)
+
+ if not s.endswith("\n"):
+ s += "\n"
+
+ elif oldAlternative:
+ print " * Removing old @alternative for %s" % name
+
+
+
+
+ #
+ # add @abstract
+ ##############################################################
+ oldAbstract = getAttrib(old, "abstract")
+
+ first = func.getChild("body").getChild("block").getFirstChild(False, True)
+ abstract = first and first.type == "throw"
+
+ if abstract:
+ if attribHas(oldAbstract, "text"):
+ newText = oldAbstract["text"]
+ else:
+ newText = ""
+
+ s += " * @abstract%s" % splitText(newText)
+
+ if not s.endswith("\n"):
+ s += "\n"
+
+ elif oldAbstract:
+ print " * Removing old @abstract for %s" % name
+
+
+
+
+
+
+ #
+ # add @param
+ ##############################################################
+ params = func.getChild("params")
+ if params.hasChildren():
+ for child in params.children:
+ if child.type == "variable":
+ newName = child.getChild("identifier").get("name")
+ newType = newTypeText = nameToType(newName)
+ newDefault = ""
+ newText = nameToDescription(newName)
+
+ oldParam = getParam(old, newName)
+
+ # Get type and text from old content
+ if oldParam:
+ if attribHas(oldParam, "type"):
+ newTypeText = parseType(oldParam["type"])
+
+ if attribHas(oldParam, "defaultValue"):
+ newDefault = oldParam["defaultValue"]
+
+ if attribHas(oldParam, "text"):
+ newText = oldParam["text"].strip()
+
+ s += " * @param %s {%s%s}%s" % (newName, newTypeText, newDefault, splitText(newText))
+
+ if not s.endswith("\n"):
+ s += "\n"
+
+
+
+
+
+ #
+ # add @return
+ ##############################################################
+ if name != "construct":
+ oldReturn = getAttrib(old, "return")
+
+ newType = "void"
+ newText = ""
+
+ # Get type and text from old content
+ if oldReturn:
+ if attribHas(oldReturn, "type"):
+ newType = parseType(oldReturn["type"])
+
+ if attribHas(oldReturn, "text"):
+ newText = oldReturn["text"].strip()
+
+ # Try to autodetect the type
+ if newType == "void":
+ returns = getReturns(func.getChild("body"), [])
+
+ if len(returns) > 0:
+ newType = " | ".join(returns)
+ elif name != None and name.startswith("is") and name[2].isupper():
+ newType = "boolean"
+
+ # Add documentation hint in non void cases
+ if newType != "void" and newText == "":
+ newText = "TODOC"
+
+ s += " * @return {%s}%s" % (newType, splitText(newText))
+
+ if not s.endswith("\n"):
+ s += "\n"
+
+
+
+
+
+
+ #
+ # add @throws
+ ##############################################################
+ oldThrows = getAttrib(old, "throws")
+
+ if hasThrows(func):
+ if oldThrows and attribHas(oldThrows, "text"):
+ newText = oldThrows["text"]
+ elif abstract:
+ newText = "the abstract function warning."
+ else:
+ newText = "TODOC"
+
+ s += " * @throws%s" % splitText(newText)
+
+ if not s.endswith("\n"):
+ s += "\n"
+
+ elif oldThrows:
+ print " * Removing old @throw attribute in comment for %s" % name
+
+
+
+
+ #
+ # other @attributes
+ ##############################################################
+
+ for attrib in old:
+ cat = attrib["category"]
+
+ if cat in [ "see", "author", "deprecated", "exception", "since", "version", "abstract", "overridden" ]:
+ s += " * @%s" % cat
+
+ if attribHas(attrib, "text"):
+ s += splitText(attrib["text"])
+
+ if not s.endswith("\n"):
+ s += "\n"
+
+ elif not cat in [ "name", "access", "membership", "alternative", "param", "return", "throws", "description" ]:
+ print " * Found unallowed attribute %s in comment for %s" % (cat, name)
+
+
+
+
+
+ #
+ # close comment
+ ##############################################################
+ s += " */"
+
+ return s
+
+
+
+def fill(node):
+ if node.type in [ "comment", "commentsBefore", "commentsAfter" ]:
+ return
+
+ if node.hasParent():
+ target = node
+
+ if node.type == "function":
+ name = node.get("name", False)
+ else:
+ name = ""
+
+ alternative = False
+ assignType = None
+
+ if name != None:
+ assignType = "function"
+
+ # move to hook operation
+ while target.parent.type in [ "first", "second", "third" ] and target.parent.parent.type == "operation" and target.parent.parent.get("operator") == "HOOK":
+ alternative = True
+ target = target.parent.parent
+
+ # move comment to assignment
+ while target.parent.type == "right" and target.parent.parent.type == "assignment":
+ target = target.parent.parent
+ if target.hasChild("left"):
+ left = target.getChild("left")
+ if left and left.hasChild("variable"):
+ var = left.getChild("variable")
+ last = var.getLastChild(False, True)
+ if last and last.type == "identifier":
+ name = last.get("name")
+ assignType = "object"
+
+ for child in var.children:
+ if child.type == "identifier":
+ if child.get("name") in [ "prototype", "Proto" ]:
+ assignType = "member"
+ elif child.get("name") in [ "class", "base", "Class" ]:
+ assignType = "static"
+
+ elif target.parent.type == "definition":
+ name = target.parent.get("identifier")
+ assignType = "definition"
+
+ # move to definition
+ if target.parent.type == "assignment" and target.parent.parent.type == "definition" and target.parent.parent.parent.getChildrenLength(True) == 1:
+ target = target.parent.parent.parent
+ assignType = "function"
+
+
+ # move comment to keyvalue
+ if target.parent.type == "value" and target.parent.parent.type == "keyvalue":
+ target = target.parent.parent
+ name = target.get("key")
+ assignType = "map"
+
+ if name == "construct":
+ assignType = "constructor"
+
+ if target.parent.type == "map" and target.parent.parent.type == "value" and target.parent.parent.parent.type == "keyvalue":
+ paname = target.parent.parent.parent.get("key")
+
+ if paname == "members":
+ assignType = "member"
+
+ elif paname == "statics":
+ assignType = "static"
+
+ # filter stuff, only add comments to member and static values and to all functions
+ if assignType in [ "member", "static" ] or node.type == "function":
+
+ if not hasattr(target, "documentationAdded") and target.parent.type != "params":
+ old = []
+
+ # create commentsBefore
+ if target.hasChild("commentsBefore"):
+ commentsBefore = target.getChild("commentsBefore")
+
+ if commentsBefore.hasChild("comment"):
+ for child in commentsBefore.children:
+ if child.get("detail") in [ "javadoc", "qtdoc" ]:
+ old = parseText(child.get("text"), False)
+ commentsBefore.removeChild(child)
+ break
+
+ else:
+ commentsBefore = tree.Node("commentsBefore")
+ target.addChild(commentsBefore)
+
+ # create comment node
+ commentNode = tree.Node("comment")
+
+ if node.type == "function":
+ commentNode.set("text", fromFunction(node, assignType, name, alternative, old))
+ else:
+ commentNode.set("text", fromNode(node, assignType, name, alternative, old))
+
+ commentNode.set("detail", "javadoc")
+ commentNode.set("multiline", True)
+
+ commentsBefore.addChild(commentNode)
+
+ # in case of alternative methods, use the first one, ignore the others
+ target.documentationAdded = True
+
+
+
+
+
+ if node.hasChildren():
+ for child in node.children:
+ fill(child)
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/comment.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/comment.pyc
new file mode 100644
index 0000000000..6d2ef53cb6
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/comment.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/compiler.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/compiler.py
new file mode 100755
index 0000000000..91ce48b345
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/compiler.py
@@ -0,0 +1,1473 @@
+#!/usr/bin/env python
+
+import sys, string, re, optparse
+import config, tokenizer, filetool, treegenerator, variableoptimizer, comment, tree
+
+KEY = re.compile("^[A-Za-z0-9_]+$")
+INDENTSPACES = 2
+
+
+
+def compileToken(name, compact=False):
+ global pretty
+
+
+ if name in [ "INC", "DEC", "TYPEOF" ]:
+ pass
+
+ elif name in [ "INSTANCEOF", "IN" ]:
+ space()
+
+ elif not compact and pretty:
+ space()
+
+
+
+ if name == None:
+ write("=")
+
+ elif name in [ "TYPEOF", "INSTANCEOF", "IN" ]:
+ write(name.lower())
+
+ else:
+ for key in config.JSTOKENS:
+ if config.JSTOKENS[key] == name:
+ write(key)
+
+
+
+ if name in [ "INC", "DEC" ]:
+ pass
+
+ elif name in [ "TYPEOF", "INSTANCEOF", "IN" ]:
+ space()
+
+ elif not compact and pretty:
+ space()
+
+
+def space(force=True):
+ global indent
+ global result
+ global pretty
+ global afterLine
+ global afterBreak
+
+ if not force and not pretty:
+ return
+
+ if afterBreak or afterLine or result.endswith(" ") or result.endswith("\n"):
+ return
+
+ result += " "
+
+
+def write(txt=""):
+ global indent
+ global result
+ global pretty
+ global breaks
+ global afterLine
+ global afterBreak
+ global afterDivider
+ global afterArea
+
+ # strip remaining whitespaces
+ if (afterLine or afterBreak or afterDivider or afterArea) and result.endswith(" "):
+ result = result.rstrip()
+
+ if pretty:
+ # handle new line wishes
+ if afterArea:
+ nr = 9
+ elif afterDivider:
+ nr = 5
+ elif afterBreak:
+ nr = 2
+ elif afterLine:
+ nr = 1
+ else:
+ nr = 0
+
+ while not result.endswith("\n" * nr):
+ result += "\n"
+
+ elif breaks and not result.endswith("\n"):
+ if afterArea or afterDivider or afterBreak or afterLine:
+ result += "\n"
+
+ # reset
+ afterLine = False
+ afterBreak = False
+ afterDivider = False
+ afterArea = False
+
+ # add indent (if needed)
+ if pretty and result.endswith("\n"):
+ result += (" " * (INDENTSPACES * indent))
+
+ # append given text
+ result += txt
+
+
+def area():
+ global afterArea
+ afterArea = True
+
+
+def divide():
+ global afterDivider
+ afterDivider = True
+
+
+def sep():
+ global afterBreak
+ afterBreak = True
+
+
+def nosep():
+ global afterBreak
+ afterBreak = False
+
+
+def line():
+ global afterLine
+ afterLine = True
+
+
+def noline():
+ global afterLine
+ global afterBreak
+ global afterDivider
+ global afterArea
+
+ afterLine = False
+ afterBreak = False
+ afterDivider = False
+ afterArea = False
+
+
+def plus():
+ global indent
+ indent += 1
+
+
+def minus():
+ global indent
+ indent -= 1
+
+
+def semicolon():
+ global result
+ global breaks
+
+ noline()
+
+ if not (result.endswith("\n") or result.endswith(";")):
+ write(";")
+
+ if breaks:
+ result += "\n"
+
+
+def commentNode(node):
+ global pretty
+
+ if not pretty:
+ return
+
+ commentText = ""
+ commentIsInline = False
+
+ comment = node.getChild("commentsAfter", False)
+
+ if comment and not comment.get("inserted", False):
+ for child in comment.children:
+ if not child.isFirstChild():
+ commentText += " "
+
+ commentText += child.get("text")
+
+ if child.get("detail") == "inline":
+ commentIsInline = True
+
+ if commentText != "":
+ space()
+ write(commentText)
+
+ if commentIsInline:
+ line()
+ else:
+ space()
+
+ comment.set("inserted", True)
+
+
+
+def postProcessMap(m):
+ if m.get("maxKeyLength", False) != None:
+ return
+
+ maxKeyLength = 0
+ alignValues = True
+
+ if m.hasChildren():
+ for keyvalue in m.children:
+ if keyvalue.type != "keyvalue":
+ continue
+
+ currKeyLength = len(keyvalue.get("key"))
+
+ if keyvalue.get("quote", False) != None:
+ currKeyLength += 2
+
+ if currKeyLength > maxKeyLength:
+ maxKeyLength = currKeyLength
+
+ if alignValues and keyvalue.getChild("value").isComplex():
+ alignValues = False
+
+ m.set("maxKeyLength", maxKeyLength)
+ m.set("alignValues", alignValues)
+
+
+
+
+
+def compile(node, enablePretty=True, enableBreaks=False, enableDebug=False):
+ global indent
+ global result
+ global pretty
+ global debug
+ global breaks
+ global afterLine
+ global afterBreak
+ global afterDivider
+ global afterArea
+
+ indent = 0
+ result = u""
+ pretty = enablePretty
+ debug = enableDebug
+ breaks = enableBreaks
+ afterLine = False
+ afterBreak = False
+ afterDivider = False
+ afterArea = False
+
+ if enablePretty:
+ comment.fill(node)
+
+ compileNode(node)
+
+ return result
+
+
+
+
+
+
+
+
+
+
+def compileNode(node):
+
+ global pretty
+ global indent
+
+
+
+
+ #####################################################################################################################
+ # Recover styling
+ #####################################################################################################################
+
+ if pretty:
+ # Recover explicit breaks
+ if node.get("breakBefore", False) and not node.isFirstChild(True):
+ sep()
+
+ # Additional explicit break before complex blocks
+ if node.hasParent() and not node.isFirstChild(True) and node.parent.type in [ "block", "file"] and node.isComplex():
+ sep()
+
+
+
+ #####################################################################################################################
+ # Insert comments before
+ #####################################################################################################################
+
+ if pretty:
+ if node.getChild("commentsBefore", False) != None:
+ commentCounter = 0
+ commentsBefore = node.getChild("commentsBefore")
+ isFirst = node.isFirstChild()
+ previous = node.getPreviousSibling(False, True)
+
+ if previous and previous.type in [ "case", "default" ]:
+ inCase = True
+ else:
+ inCase = False
+
+ inOperation = node.parent.type in [ "first", "second", "third" ] and node.parent.parent.type == "operation"
+
+ for child in commentsBefore.children:
+ docComment = child.get("detail") in [ "javadoc", "qtdoc" ]
+ headComment = child.get("detail") == "header"
+ areaComment = child.get("detail") == "area"
+ divComment = child.get("detail") == "divider"
+ blockComment = child.get("detail") == "block"
+ singleLineBlock = child.get("detail") != "inline" and child.get("multiline") == False
+
+ if not child.isFirstChild():
+ pass
+
+ elif inCase:
+ pass
+
+ elif singleLineBlock:
+ if child.get("begin"):
+ sep()
+ else:
+ space()
+
+ elif areaComment and not isFirst:
+ area()
+
+ elif divComment and not isFirst:
+ divide()
+
+ elif not isFirst:
+ sep()
+
+ elif inOperation:
+ sep()
+
+ elif not headComment:
+ line()
+
+ # reindenting first
+ text = child.get("text")
+
+ if child.get("detail") == "qtdoc":
+ text = comment.qt2javadoc(text)
+
+ write(comment.indent(text, INDENTSPACES * indent))
+
+ if singleLineBlock:
+ if child.get("detail") in [ "javadoc", "qtdoc" ]:
+ line()
+ elif child.get("end"):
+ sep()
+ else:
+ space()
+
+ # separator after divider/head comments and after block comments which are not for documentation
+ elif headComment or areaComment or divComment or blockComment:
+ sep()
+
+ else:
+ line()
+
+
+
+
+
+
+ #####################################################################################################################
+ # Opening...
+ #####################################################################################################################
+
+ #
+ # OPEN: FINALLY
+ ##################################
+
+ if node.type == "finally":
+ write("finally")
+
+
+ #
+ # OPEN: DELETE
+ ##################################
+
+ elif node.type == "delete":
+ write("delete")
+ space()
+
+
+ #
+ # OPEN: THROW
+ ##################################
+
+ elif node.type == "throw":
+ write("throw")
+ space()
+
+
+ #
+ # OPEN: NEW
+ ##################################
+
+ elif node.type == "instantiation":
+ write("new")
+ space()
+
+
+ #
+ # OPEN: RETURN
+ ##################################
+
+ elif node.type == "return":
+ write("return")
+
+ if node.hasChildren():
+ space()
+
+
+ #
+ # OPEN: DEFINITION LIST
+ ##################################
+
+ elif node.type == "definitionList":
+ write("var")
+ space()
+
+
+ #
+ # OPEN: BREAK
+ ##################################
+
+ elif node.type == "break":
+ write("break")
+
+ if node.get("label", False):
+ space()
+ write(node.get("label", False))
+
+
+ #
+ # OPEN: CONTINUE
+ ##################################
+
+ elif node.type == "continue":
+ write("continue")
+
+ if node.get("label", False):
+ space()
+ write(node.get("label", False))
+
+
+ #
+ # OPEN: FUNCTION
+ ##################################
+
+ elif node.type == "function":
+ write("function")
+
+ functionName = node.get("name", False)
+ if functionName != None:
+ space()
+ write(functionName)
+
+
+ #
+ # OPEN: IDENTIFIER
+ ##################################
+
+ elif node.type == "identifier":
+ name = node.get("name", False)
+ if name != None:
+ write(name)
+
+
+ #
+ # OPEN: DEFINITION
+ ##################################
+
+ elif node.type == "definition":
+ if node.parent.type != "definitionList":
+ write("var")
+ space()
+
+ write(node.get("identifier"))
+
+
+ #
+ # OPEN: CONSTANT
+ ##################################
+
+ elif node.type == "constant":
+ if node.get("constantType") == "string":
+ if node.get("detail") == "singlequotes":
+ write("'")
+ else:
+ write('"')
+
+ write(node.get("value"))
+
+ if node.get("detail") == "singlequotes":
+ write("'")
+ else:
+ write('"')
+
+ else:
+ write(node.get("value"))
+
+
+ #
+ # OPEN: COMMENT
+ ##################################
+
+ elif node.type == "comment":
+ if pretty:
+            # comments connected "after" a node get a leading space and no newline before them
+ if node.get("connection") == "after":
+ noline()
+ space()
+
+ write(node.get("text"))
+
+ # new line after inline comment (for example for syntactical reasons)
+ if node.get("detail") == "inline":
+ line()
+
+ else:
+ space()
+
+
+ #
+ # OPEN: RIGHT
+ ##################################
+
+ elif node.type == "right":
+ if node.parent.type == "accessor":
+ write(".")
+
+
+
+
+
+
+ #
+ # OPEN: ASSIGNMENT
+ ##################################
+
+ elif node.type == "assignment":
+ if node.parent.type == "definition":
+ oper = node.get("operator", False)
+
+ realNode = node.parent.parent
+
+ # be compact in for-loops
+ compact = realNode.hasParent() and realNode.parent.type in [ "first", "second", "third" ] and realNode.parent.parent.type == "loop" and realNode.parent.parent.get("loopType") == "FOR"
+ compileToken(oper, compact)
+
+
+
+
+
+ #
+ # OPEN: KEY
+ ##################################
+
+ elif node.type == "key":
+ if node.parent.type == "accessor":
+ write("[")
+
+
+ #
+ # OPEN: GROUP
+ ##################################
+
+ elif node.type == "group":
+ write("(")
+
+
+ #
+ # OPEN: VOID
+ ##################################
+
+ elif node.type == "void":
+ write("void")
+ write("(")
+
+
+ #
+ # OPEN: ARRAY
+ ##################################
+
+ elif node.type == "array":
+ write("[")
+
+ if node.hasChildren(True):
+ space(False)
+
+
+ #
+ # OPEN: PARAMS
+ ##################################
+
+ elif node.type == "params":
+ noline()
+ write("(")
+
+
+
+
+
+
+
+
+ #
+ # OPEN: CASE
+ ##################################
+
+ elif node.type == "case":
+ if pretty:
+ # force double new lines
+ if not node.isFirstChild() and not node.getPreviousSibling(True).type == "case":
+ sep()
+
+ minus()
+ line()
+
+ write("case")
+ space()
+
+
+ #
+ # OPEN: DEFAULT
+ ##################################
+
+ elif node.type == "default":
+ if pretty:
+ minus()
+
+ # force double new lines
+ if not node.getPreviousSibling(True).type == "case":
+ sep()
+
+ write("default")
+ write(":")
+
+ if pretty:
+ plus()
+ line()
+
+
+
+
+
+
+ #
+ # OPEN: TRY
+ ##################################
+
+ elif node.type == "switch":
+ # Additional new line before each switch/try
+ if not node.isFirstChild(True) and not node.getChild("commentsBefore", False):
+ prev = node.getPreviousSibling(False, True)
+
+ # No separation after case statements
+ if prev != None and prev.type in [ "case", "default" ]:
+ pass
+ else:
+ sep()
+
+ if node.get("switchType") == "catch":
+ write("try")
+ elif node.get("switchType") == "case":
+ write("switch")
+
+
+ #
+ # OPEN: CATCH
+ ##################################
+
+ elif node.type == "catch":
+ if pretty:
+            # If neither this statement block nor the preceding try block is complex, keep the catch compact, too
+ if not node.getChild("statement").getChild("block").isComplex() and not node.parent.getChild("statement").getChild("block").isComplex():
+ noline()
+ space()
+
+ write("catch")
+
+
+
+
+
+
+
+ #
+ # OPEN: MAP
+ ##################################
+
+ elif node.type == "map":
+ par = node.parent
+
+ if pretty:
+ postProcessMap(node)
+
+ if pretty:
+ # No break before return statement
+ if node.hasParent() and node.parent.type == "expression" and node.parent.parent.type == "return":
+ pass
+
+ elif node.isComplex():
+ line()
+
+ write("{")
+
+ if pretty:
+ if node.isComplex():
+ line()
+ plus()
+
+ elif node.hasChildren(True):
+ space()
+
+
+ #
+ # OPEN: KEYVALUE
+ ##################################
+
+ elif node.type == "keyvalue":
+ keyString = node.get("key")
+ keyQuote = node.get("quote", False)
+
+ if keyQuote != None:
+ # print "USE QUOTATION"
+ if keyQuote == "doublequotes":
+ keyString = '"' + keyString + '"'
+ else:
+ keyString = "'" + keyString + "'"
+
+ elif keyString in config.JSPROTECTED or not KEY.match(keyString):
+ print "Warning: Auto protect key: %s" % keyString
+ keyString = "\"" + keyString + "\""
+
+ if pretty and not node.isFirstChild(True) and not node.hasChild("commentsBefore") and node.getChild("value").isComplex():
+ sep()
+
+ write(keyString)
+ space(False)
+
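+        # Example (hypothetical map) of the padding below: when the parent map is
+        # complex and "alignValues" is set, keys are padded to "maxKeyLength" so
+        # the values line up:
+        #
+        #   {
+        #     width     : 100,
+        #     maxHeight : 200
+        #   }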
+ # Fill with spaces
+ # Do this only if the parent is complex (many entries)
+ # But not if the value itself is complex
+ if pretty and node.parent.isComplex() and node.parent.get("alignValues"):
+ write(" " * (node.parent.get("maxKeyLength") - len(keyString)))
+
+ write(":")
+ space(False)
+
+
+
+
+
+
+
+ #
+ # OPEN: BLOCK
+ ##################################
+
+ elif node.type == "block":
+ if pretty:
+ if node.isComplex():
+ line()
+ else:
+ space()
+
+ write("{")
+
+ if pretty:
+ if node.hasChildren():
+ plus()
+ line()
+
+
+ #
+ # OPEN: LOOP
+ ##################################
+
+ elif node.type == "loop":
+ # Additional new line before each loop
+ if not node.isFirstChild(True) and not node.getChild("commentsBefore", False):
+ prev = node.getPreviousSibling(False, True)
+
+ # No separation after case statements
+ if prev != None and prev.type in [ "case", "default" ]:
+ pass
+ elif node.hasChild("elseStatement") or node.getChild("statement").hasBlockChildren():
+ sep()
+ else:
+ line()
+
+ loopType = node.get("loopType")
+
+ if loopType == "IF":
+ write("if")
+ space(False)
+
+ elif loopType == "WHILE":
+ write("while")
+ space(False)
+
+ elif loopType == "FOR":
+ write("for")
+ space(False)
+
+ elif loopType == "DO":
+ write("do")
+ space(False)
+
+ elif loopType == "WITH":
+ write("with")
+ space(False)
+
+ else:
+ print "Warning: Unknown loop type: %s" % loopType
+
+
+
+ #
+ # OPEN: ELSE
+ ##################################
+
+ elif node.type == "elseStatement":
+ if node.hasChild("commentsBefore"):
+ pass
+
+ elif pretty:
+ if not node.hasChild("block") and not node.hasChild("loop"):
+ pass
+
+ elif not node.isComplex():
+ noline()
+ space()
+
+ write("else")
+
+        # This is an elseStatement without a surrounding block ({})
+ if not node.hasChild("block"):
+ space()
+
+
+ #
+ # OPEN: EXPRESSION
+ ##################################
+
+ elif node.type == "expression":
+ if node.parent.type == "loop":
+ loopType = node.parent.get("loopType")
+
+ # only do-while loops
+ if loopType == "DO":
+ if pretty:
+ stmnt = node.parent.getChild("statement")
+ compact = stmnt.hasChild("block") and not stmnt.getChild("block").isComplex()
+
+ if compact:
+ noline()
+ space()
+
+ write("while")
+
+ if pretty:
+ space()
+
+ # open expression block of IF/WHILE/DO-WHILE/FOR statements
+ write("(")
+
+ elif node.parent.type == "catch":
+ # open expression block of CATCH statement
+ write("(")
+
+ elif node.parent.type == "switch" and node.parent.get("switchType") == "case":
+ # open expression block of SWITCH statement
+ write("(")
+
+
+ #
+ # OPEN: FIRST
+ ##################################
+
+ elif node.type == "first":
+ # for loop
+ if node.parent.type == "loop" and node.parent.get("loopType") == "FOR":
+ write("(")
+
+ # operation
+ elif node.parent.type == "operation":
+ # operation (var a = -1)
+ if node.parent.get("left", False) == True:
+ compileToken(node.parent.get("operator"), True)
+
+
+
+ #
+ # OPEN: SECOND
+ ##################################
+
+ elif node.type == "second":
+ # for loop
+ if node.parent.type == "loop" and node.parent.get("loopType") == "FOR":
+ if not node.parent.hasChild("first"):
+ write("(;")
+
+ # operation
+ elif node.parent.type == "operation":
+ if node.isComplex():
+ # (?: hook operation)
+ if node.parent.get("operator") == "HOOK":
+ sep()
+ else:
+ line()
+
+
+
+
+
+ #
+ # OPEN: THIRD
+ ##################################
+
+ elif node.type == "third":
+ # for loop
+ if node.parent.type == "loop" and node.parent.get("loopType") == "FOR":
+ if not node.parent.hasChild("second"):
+ if node.parent.hasChild("first"):
+ write(";")
+ space(False)
+ else:
+ write("(;;")
+
+ # operation
+ elif node.parent.type == "operation":
+ # (?: hook operation)
+ if node.parent.get("operator") == "HOOK":
+ if node.isComplex():
+ sep()
+
+
+ #
+ # OPEN: STATEMENT
+ ##################################
+
+ elif node.type == "statement":
+ # for loop
+ if node.parent.type == "loop" and node.parent.get("loopType") == "FOR":
+ if node.parent.get("forVariant") == "iter":
+ if not node.parent.hasChild("first") and not node.parent.hasChild("second") and not node.parent.hasChild("third"):
+ write("(;;");
+
+ elif not node.parent.hasChild("second") and not node.parent.hasChild("third"):
+ write(";")
+
+ write(")")
+
+ if not node.hasChild("block"):
+ space(False)
+
+
+
+
+
+
+
+
+
+
+
+
+
+ #####################################################################################################################
+ # Children content
+ #####################################################################################################################
+
+ if node.hasChildren():
+ for child in node.children:
+ if not node.type in [ "commentsBefore", "commentsAfter" ]:
+ compileNode(child)
+
+
+
+
+
+
+
+
+
+ #####################################################################################################################
+ # Closing node
+ #####################################################################################################################
+
+ #
+ # CLOSE: IDENTIFIER
+ ##################################
+
+ if node.type == "identifier":
+ if node.hasParent() and node.parent.type == "variable" and not node.isLastChild(True):
+ write(".")
+ elif node.hasParent() and node.parent.type == "label":
+ write(":")
+
+
+ #
+ # CLOSE: ACCESSOR
+ ##################################
+
+ elif node.type == "accessor":
+ if node.hasParent() and node.parent.type == "variable" and not node.isLastChild(True):
+ write(".")
+
+
+ #
+ # CLOSE: KEYVALUE
+ ##################################
+
+ elif node.type == "keyvalue":
+ if node.hasParent() and node.parent.type == "map" and not node.isLastChild(True):
+ noline()
+ write(",")
+
+ if pretty:
+ commentNode(node)
+
+ if node.getChild("value").isComplex():
+ sep()
+ elif node.parent.isComplex():
+ line()
+ else:
+ space()
+
+
+ #
+ # CLOSE: DEFINITION
+ ##################################
+
+ elif node.type == "definition":
+ if node.hasParent() and node.parent.type == "definitionList" and not node.isLastChild(True):
+ write(",")
+
+ if pretty:
+ commentNode(node)
+
+ if node.hasComplexChildren():
+ line()
+ else:
+ space()
+
+
+ #
+ # CLOSE: LEFT
+ ##################################
+
+ elif node.type == "left":
+ if node.hasParent() and node.parent.type == "assignment":
+ oper = node.parent.get("operator", False)
+
+ if node.parent.parent.type == "statementList":
+ realNode = node.parent.parent
+ else:
+ realNode = node.parent
+
+ # be compact in for-loops
+ compact = realNode.hasParent() and realNode.parent.type in [ "first", "second", "third" ] and realNode.parent.parent.type == "loop" and realNode.parent.parent.get("loopType") == "FOR"
+ compileToken(oper, compact)
+
+
+
+
+
+
+ #
+ # CLOSE: KEY
+ ##################################
+
+ elif node.type == "key":
+ if node.hasParent() and node.parent.type == "accessor":
+ write("]")
+
+
+ #
+ # CLOSE: GROUP
+ ##################################
+
+ elif node.type == "group":
+ if node.getChildrenLength(True) == 1:
+ noline()
+
+ write(")")
+
+
+ #
+ # CLOSE: VOID
+ ##################################
+
+ elif node.type == "void":
+ if node.getChildrenLength(True) == 1:
+ noline()
+
+ write(")")
+
+
+ #
+ # CLOSE: ARRAY
+ ##################################
+
+ elif node.type == "array":
+ if node.hasChildren(True):
+ space(False)
+
+ write("]")
+
+
+ #
+ # CLOSE: PARAMS
+ ##################################
+
+ elif node.type == "params":
+ write(")")
+
+
+ #
+ # CLOSE: MAP
+ ##################################
+
+ elif node.type == "map":
+ if pretty:
+ if node.isComplex():
+ line()
+ minus()
+
+ elif node.hasChildren(True):
+ space()
+
+ write("}")
+
+
+
+
+
+
+ #
+ # CLOSE: SWITCH
+ ##################################
+
+ elif node.type == "switch":
+ if node.get("switchType") == "case":
+ if pretty:
+ minus()
+ minus()
+ line()
+
+ write("}")
+
+ if pretty:
+ commentNode(node)
+ line()
+
+        # Force an additional line feed after each switch/try
+ if pretty and not node.isLastChild():
+ sep()
+
+
+ #
+ # CLOSE: CASE
+ ##################################
+
+ elif node.type == "case":
+ write(":")
+
+ if pretty:
+ commentNode(node)
+ plus()
+ line()
+
+
+
+
+
+
+
+
+ #
+ # CLOSE: BLOCK
+ ##################################
+
+ elif node.type == "block":
+ if pretty and node.hasChildren():
+ minus()
+ line()
+
+ write("}")
+
+ if pretty:
+ commentNode(node)
+
+ if node.hasChildren():
+ # Newline afterwards
+ if node.parent.type == "body" and node.parent.parent.type == "function":
+
+                    # But only when this isn't a function block inside an assignment
+ if node.parent.parent.parent.type in [ "right", "params" ]:
+ pass
+
+ elif node.parent.parent.parent.type == "value" and node.parent.parent.parent.parent.type == "keyvalue":
+ pass
+
+ else:
+ line()
+
+ else:
+ line()
+
+
+ #
+ # CLOSE: LOOP
+ ##################################
+
+ elif node.type == "loop":
+ if node.get("loopType") == "DO":
+ semicolon()
+
+ if pretty:
+ commentNode(node)
+
+            # Force an additional line feed after each loop
+ if not node.isLastChild():
+ if node.hasChild("elseStatement"):
+ sep()
+ elif node.getChild("statement").hasBlockChildren():
+ sep()
+ else:
+ line()
+
+
+ #
+ # CLOSE: FUNCTION
+ ##################################
+
+ elif node.type == "function":
+ if pretty:
+ commentNode(node)
+
+ if not node.isLastChild() and node.hasParent() and node.parent.type in [ "block", "file" ]:
+ sep()
+
+
+ #
+ # CLOSE: EXPRESSION
+ ##################################
+
+ elif node.type == "expression":
+ if node.parent.type == "loop":
+ write(")")
+
+            # e.g. an if-construct without a block {}
+ if node.parent.getChild("statement").hasChild("block"):
+ pass
+
+ elif node.parent.type == "loop" and node.parent.get("loopType") == "DO":
+ pass
+
+ else:
+ space(False)
+
+ elif node.parent.type == "catch":
+ write(")")
+
+ elif node.parent.type == "switch" and node.parent.get("switchType") == "case":
+ write(")")
+
+ if pretty:
+ commentNode(node)
+ line()
+
+ write("{")
+
+ if pretty:
+ plus()
+ plus()
+
+
+ #
+ # CLOSE: FIRST
+ ##################################
+
+ elif node.type == "first":
+ # for loop
+ if node.parent.type == "loop" and node.parent.get("loopType") == "FOR":
+ if node.parent.get("forVariant") == "iter":
+ write(";")
+
+ if node.parent.hasChild("second"):
+ space(False)
+
+ # operation
+ elif node.parent.type == "operation" and node.parent.get("left", False) != True:
+ oper = node.parent.get("operator")
+
+ if node.parent.parent.type == "statementList":
+ realNode = node.parent.parent
+ else:
+ realNode = node.parent
+
+ compact = realNode.hasParent() and realNode.parent.type in [ "first", "second", "third" ] and realNode.parent.parent.type == "loop" and realNode.parent.parent.get("loopType") == "FOR"
+ compileToken(oper, compact)
+
+
+ #
+ # CLOSE: SECOND
+ ##################################
+
+ elif node.type == "second":
+ # for loop
+ if node.parent.type == "loop" and node.parent.get("loopType") == "FOR":
+ write(";")
+
+ if node.parent.hasChild("third"):
+ space(False)
+
+ # operation
+ elif node.parent.type == "operation":
+ # (?: hook operation)
+ if node.parent.get("operator") == "HOOK":
+ noline()
+ space(False)
+ write(":")
+ space(False)
+
+
+
+
+
+
+
+
+
+ #
+ # CLOSE: OTHER
+ ##################################
+
+ if node.hasParent() and not node.type in [ "comment", "commentsBefore", "commentsAfter" ]:
+
+ # Add comma dividers between statements in these parents
+ if node.parent.type in [ "array", "params", "statementList" ]:
+ if not node.isLastChild(True):
+ write(",")
+
+ if pretty:
+ commentNode(node)
+
+ if node.isComplex():
+ line()
+ else:
+ space()
+
+ # Semicolon handling
+ elif node.type in [ "block", "assignment", "call", "operation", "definitionList", "return", "break", "continue", "delete", "accessor", "instantiation", "throw", "variable" ]:
+
+ # Default semicolon handling
+ if node.parent.type in [ "block", "file" ]:
+ semicolon()
+
+ if pretty:
+ commentNode(node)
+ line()
+
+ if node.isComplex() and not node.isLastChild():
+ sep()
+
+ # Special handling for switch statements
+ elif node.parent.type == "statement" and node.parent.parent.type == "switch" and node.parent.parent.get("switchType") == "case":
+ semicolon()
+
+ if pretty:
+ commentNode(node)
+ line()
+
+ if node.isComplex() and not node.isLastChild():
+ sep()
+
+ # Special handling for loops (e.g. if) without blocks {}
+ elif node.parent.type in [ "statement", "elseStatement" ] and not node.parent.hasChild("block") and node.parent.parent.type == "loop":
+ semicolon()
+
+ if pretty:
+ commentNode(node)
+ line()
+
+ if node.isComplex() and not node.isLastChild():
+ sep()
+
+
+    #
+    # CLOSE: REMAINING COMMENTS
+    ##################################
+
+ if pretty:
+ # Rest of the after comments (not inserted previously)
+ commentNode(node)
+
+
+
+
+
+
+
+
+
+
+
+def main():
+ parser = optparse.OptionParser()
+
+ parser.add_option("-w", "--write", action="store_true", dest="write", default=False, help="Writes file to incoming fileName + EXTENSION.")
+ parser.add_option("-e", "--extension", dest="extension", metavar="EXTENSION", help="The EXTENSION to use", default=".compiled")
+ parser.add_option("-c", "--compress", action="store_true", dest="compress", help="Enable compression", default=False)
+ parser.add_option("--optimize-variables", action="store_true", dest="optimizeVariables", default=False, help="Optimize variables. Reducing size.")
+ parser.add_option("--encoding", dest="encoding", default="utf-8", metavar="ENCODING", help="Defines the encoding expected for input files.")
+
+ (options, args) = parser.parse_args()
+
+ if len(args) == 0:
+ print "Needs one or more arguments (files) to compile!"
+ sys.exit(1)
+
+ for fileName in args:
+ if options.write:
+ print "Compiling %s => %s%s" % (fileName, fileName, options.extension)
+ else:
+ print "Compiling %s => stdout" % fileName
+
+ restree = treegenerator.createSyntaxTree(tokenizer.parseFile(fileName, "", options.encoding))
+
+ if options.optimizeVariables:
+ variableoptimizer.search(restree, [], 0, "$")
+
+ compiledString = compile(restree, not options.compress)
+ if options.write:
+ filetool.save(fileName + options.extension, compiledString)
+
+ else:
+ try:
+ print compiledString
+
+ except UnicodeEncodeError:
+ print " * Could not encode result to ascii. Use '-w' instead."
+ sys.exit(1)
+
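+# Example invocation (the file name is only an example):
+#
+#   python compiler.py -w -c --optimize-variables qx/core/Init.js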
+
+
+if __name__ == '__main__':
+ try:
+ main()
+
+ except KeyboardInterrupt:
+ print
+ print " * Keyboard Interrupt"
+ sys.exit(1)
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/compiler.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/compiler.pyc
new file mode 100644
index 0000000000..371a29bc06
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/compiler.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/config.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/config.py
new file mode 100755
index 0000000000..5ac9b75500
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/config.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+
+import re
+
+
+
+
+#
+# FILE EXTENSIONS
+#
+
+JSEXT = ".js"
+PYEXT = ".py"
+XMLEXT = ".xml"
+TOKENEXT = ".txt"
+DIRIGNORE = [ ".svn", "CVS" ]
+
+
+
+
+#
+# QOOXDOO HEADER SUPPORT
+#
+
+QXHEAD = {
+ # TODO: Obsolete with 0.7
+ "defineClass" : re.compile('qx.OO.defineClass\(\s*["\']([\.a-zA-Z0-9_-]+)["\'](\s*\,\s*([\.a-zA-Z0-9_-]+))?', re.M),
+
+ # 0.7 API
+ "classDefine" : re.compile('qx.Clazz.define\(\s*["\']([\.a-zA-Z0-9_-]+)["\']?', re.M),
+ "superClass" : re.compile('extend\s*:\s*([\.a-zA-Z0-9_-]+)', re.M),
+
+ "id" : re.compile("#id\(\s*([\.a-zA-Z0-9_-]+?)\s*\)", re.M),
+ "module" : re.compile("#module\(\s*([\.a-zA-Z0-9_-]+?)\s*\)", re.M),
+ "require" : re.compile("#require\(\s*([\.a-zA-Z0-9_-]+?)\s*\)", re.M),
+ "use" : re.compile("#use\(\s*([\.a-zA-Z0-9_-]+?)\s*\)", re.M),
+ "after" : re.compile("#after\(\s*([\.a-zA-Z0-9_-]+?)\s*\)", re.M),
+ "load" : re.compile("#load\(\s*([\.a-zA-Z0-9_-]+?)\s*\)", re.M),
+ "optional" : re.compile("#optional\(\s*([\.a-zA-Z0-9_-]+?)\s*\)", re.M),
+ "resource" : re.compile("#resource\(\s*(.*?)\s*\)", re.M)
+}
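+# The hints matched above are plain-text pragmas placed in the class files,
+# typically inside the header comment, e.g. (hypothetical class):
+#
+#   /* #module(ui_core) */
+#   /* #require(qx.core.Init) */
+#   qx.OO.defineClass("qx.example.Widget", qx.ui.core.Widget);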
+
+
+
+
+
+#
+# JAVASCRIPT SUPPORT
+#
+
+JSBUILTIN = [ "Object", "Array", "RegExp", "Math", "String", "Number", "Error" ]
+
+JSTOKENS = {
+ "." : "DOT",
+ "," : "COMMA",
+ ":" : "COLON",
+ "?" : "HOOK",
+ ";" : "SEMICOLON",
+ "!" : "NOT",
+ "~" : "BITNOT",
+ "\\" : "BACKSLASH",
+
+ "+" : "ADD",
+ "-" : "SUB",
+ "*" : "MUL",
+ "/" : "DIV",
+ "%" : "MOD",
+
+ "{" : "LC",
+ "}" : "RC",
+ "(" : "LP",
+ ")" : "RP",
+ "[" : "LB",
+ "]" : "RB",
+
+ "<" : "LT",
+ "<=" : "LE",
+ ">" : "GT",
+ ">=" : "GE",
+ "==" : "EQ",
+ "!=" : "NE",
+ "===" : "SHEQ",
+ "!==" : "SHNE",
+
+ "=" : "ASSIGN",
+
+ "+=" : "ASSIGN_ADD",
+ "-=" : "ASSIGN_SUB",
+ "*=" : "ASSIGN_MUL",
+ "/=" : "ASSIGN_DIV",
+ "%=" : "ASSIGN_MOD",
+
+ "|=" : "ASSIGN_BITOR",
+ "^=" : "ASSIGN_BITXOR",
+ "&=" : "ASSIGN_BITAND",
+ "<<=" : "ASSIGN_LSH",
+ ">>=" : "ASSIGN_RSH",
+ ">>>=" : "ASSIGN_URSH",
+
+ "&&" : "AND",
+ "||" : "OR",
+
+ "|" : "BITOR",
+ "^|" : "BITXOR",
+ "&" : "BITAND",
+
+ "^" : "POWEROF",
+
+ "<<" : "LSH",
+ ">>" : "RSH",
+ ">>>" : "URSH",
+
+ "++" : "INC",
+ "--" : "DEC",
+
+ "::" : "COLONCOLON",
+ ".." : "DOTDOT",
+
+ "@" : "XMLATTR",
+
+ "//" : "SINGLE_COMMENT",
+ "/*" : "COMMENT_START",
+ "*/" : "COMMENT_STOP",
+ "/*!" : "DOC_START"
+}
+
+JSPROTECTED = {
+ "null" : "NULL",
+ "Infinity" : "INFINITY",
+ "true" : "TRUE",
+ "false" : "FALSE",
+
+ "this" : "THIS",
+ "var" : "VAR",
+ "new" : "NEW",
+ "prototype" : "PROTOTYPE",
+ "return" : "RETURN",
+ "function" : "FUNCTION",
+
+ "while" : "WHILE",
+ "if" : "IF",
+ "else" : "ELSE",
+ "switch" : "SWITCH",
+ "case" : "CASE",
+ "default" : "DEFAULT",
+ "break" : "BREAK",
+ "continue" : "CONTINUE",
+ "goto" : "GOTO",
+ "do" : "DO",
+ "delete" : "DELETE",
+ "for" : "FOR",
+ "in" : "IN",
+ "with" : "WITH",
+ "try" : "TRY",
+ "catch" : "CATCH",
+ "finally" : "FINALLY",
+ "throw" : "THROW",
+ "instanceof" : "INSTANCEOF",
+ "typeof" : "TYPEOF",
+ "void" : "VOID",
+ "call" : "CALL",
+ "apply" : "APPLY"
+}
+
+JSSPACE_BEFORE = [ "INSTANCEOF", "IN" ]
+JSSPACE_AFTER = [ "VAR", "NEW", "GOTO", "INSTANCEOF", "TYPEOF", "DELETE", "IN", "THROW", "CASE" ]
+JSSPACE_AFTER_USAGE = [ "RETURN", "FUNCTION" ]
+JSPARANTHESIS_BEFORE = [ "ELSE", "FINALLY", "CATCH", "WHILE" ]
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/config.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/config.pyc
new file mode 100644
index 0000000000..44a5131104
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/config.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/filetool.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/filetool.py
new file mode 100755
index 0000000000..089b33b966
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/filetool.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+
+import os, codecs, cPickle, sys
+import textutil
+
+def save(filePath, content="", encoding="utf_8"):
+ # Normalize
+ filePath = normalize(filePath)
+
+ # Create directory
+ directory(os.path.dirname(filePath))
+
+ # Writing file
+ try:
+ outputFile = codecs.open(filePath, encoding=encoding, mode="w", errors="replace")
+ outputFile.write(content)
+ except IOError, (errno, strerror):
+ print " * I/O error(%s): %s" % (errno, strerror)
+ sys.exit(1)
+ except UnicodeDecodeError:
+ print " * Could not decode result to %s" % encoding
+ sys.exit(1)
+ except:
+ print " * Unexpected error:", sys.exc_info()[0]
+ sys.exit(1)
+
+ outputFile.flush()
+ outputFile.close()
+
+
+def directory(dirname):
+ # Normalize
+ dirname = normalize(dirname)
+
+ # Check/Create directory
+ if dirname != "" and not os.path.exists(dirname):
+ os.makedirs(dirname)
+
+
+def normalize(filename):
+ return os.path.normcase(os.path.normpath(filename))
+
+
+def read(filePath, encoding="utf_8"):
+ try:
+ ref = codecs.open(filePath, encoding=encoding, mode="r")
+ content = ref.read()
+ ref.close()
+
+ return textutil.any2Unix(unicode(content))
+
+ except IOError, (errno, strerror):
+ print " * I/O error(%s): %s" % (errno, strerror)
+ sys.exit(1)
+
+ except ValueError:
+ print " * Invalid Encoding. Required encoding %s in %s" % (encoding, filePath)
+ sys.exit(1)
+
+ except:
+ print " * Unexpected error:", sys.exc_info()[0]
+ sys.exit(1)
+
+
+def storeCache(cachePath, data):
+    try:
+        cPickle.dump(data, open(cachePath, 'w'), 2)
+
+    except (EOFError, cPickle.PickleError):
+        print " * Could not store cache to %s" % cachePath
+        sys.exit(1)
+
+
+def readCache(cachePath):
+    try:
+        return cPickle.load(open(cachePath))
+
+    except (EOFError, cPickle.UnpicklingError):
+        print " * Could not read cache from %s" % cachePath
+        sys.exit(1)
+
+
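+# Returns True when the cached artefact for filePath has to be rebuilt, i.e.
+# when the source file or the tool chain itself (internalModTime) is newer
+# than the cache file.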
+def checkCache(filePath, cachePath, internalModTime):
+ fileModTime = os.stat(filePath).st_mtime
+
+ try:
+ cacheModTime = os.stat(cachePath).st_mtime
+ except OSError:
+ cacheModTime = 0
+
+ if internalModTime > cacheModTime:
+ # print "Invalid cache: %s" % filePath
+ # print "%s > %s" % (internalModTime, cacheModTime)
+ return True
+
+ return fileModTime > cacheModTime
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/filetool.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/filetool.pyc
new file mode 100644
index 0000000000..3d4a983e3c
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/filetool.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/loader.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/loader.py
new file mode 100755
index 0000000000..4a9209f3e0
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/loader.py
@@ -0,0 +1,816 @@
+#!/usr/bin/env python
+
+import sys, string, re, os, random, cPickle, codecs
+import config, tokenizer, treegenerator, filetool, stringoptimizer
+
+internalModTime = 0
+
+
+def validateFiles():
+
+ global internalModTime
+
+ base = os.path.dirname(os.path.abspath(sys.argv[0]))
+ if base.endswith("modules"):
+ path = base
+ else:
+ path = os.path.join(base, "modules")
+
+ maxFileModTime = os.stat(os.path.join(path, ".." + os.path.sep + "generator.py")).st_mtime
+
+ for root, dirs, files in os.walk(path):
+
+ # Filter ignored directories
+ for ignoredDir in config.DIRIGNORE:
+ if ignoredDir in dirs:
+ dirs.remove(ignoredDir)
+
+ # Searching for files
+ for fileName in files:
+ if os.path.splitext(fileName)[1] != config.PYEXT:
+ continue
+
+ filePath = os.path.join(root, fileName)
+ fileModTime = os.stat(filePath).st_mtime
+
+ if fileModTime > maxFileModTime:
+ maxFileModTime = fileModTime
+
+
+ internalModTime = maxFileModTime
+
+
+
+def getInternalModTime(options):
+
+ global internalModTime
+
+ if internalModTime == 0 and not options.disableInternalCheck:
+ validateFiles()
+
+ return internalModTime
+
+
+
+def extractFileContentId(data):
+ for item in config.QXHEAD["id"].findall(data):
+ return item
+
+ for item in config.QXHEAD["classDefine"].findall(data):
+ return item
+
+ # TODO: Obsolete with 0.7
+ for item in config.QXHEAD["defineClass"].findall(data):
+ return item[0]
+
+ return None
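+# For example (hypothetical file content), a class starting with
+#
+#   qx.Clazz.define("qx.ui.basic.Atom", { extend : qx.ui.core.Widget, ... })
+#
+# yields "qx.ui.basic.Atom" from extractFileContentId() above and
+# "qx.ui.core.Widget" from extractSuperClass() below.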
+
+
+def extractSuperClass(data):
+ for item in config.QXHEAD["superClass"].findall(data):
+ return item
+
+ # TODO: Obsolete with 0.7
+ for item in config.QXHEAD["defineClass"].findall(data):
+ return item[2]
+
+ return None
+
+
+def extractLoadtimeDeps(data, fileId=""):
+ deps = []
+
+ # qooxdoo specific:
+ # store inheritance deps
+ superClass = extractSuperClass(data)
+ if superClass != None and superClass != "" and not superClass in config.JSBUILTIN:
+ deps.append("qx.OO")
+ deps.append(superClass)
+ elif "qx.OO.defineClass(" in data:
+ deps.append("qx.OO")
+
+
+ # Adding explicit requirements
+ for item in config.QXHEAD["require"].findall(data):
+ if item == fileId:
+ print " - Self-referring load dependency: %s" % item
+ elif item in deps:
+ print " - Double definition of load dependency: %s" % item
+ else:
+ deps.append(item)
+
+ return deps
+
+
+def extractAfterDeps(data, fileId=""):
+ deps = []
+
+ # Adding explicit after requirements
+ for item in config.QXHEAD["after"].findall(data):
+ if item == fileId:
+            print " - Self-referring after dependency: %s" % item
+ elif item in deps:
+            print " - Double definition of after dependency: %s" % item
+ else:
+ deps.append(item)
+
+ return deps
+
+
+def extractRuntimeDeps(data, fileId=""):
+ deps = []
+
+ # Adding explicit runtime requirements
+ for item in config.QXHEAD["use"].findall(data):
+ if item == fileId:
+ print " - Self-referring runtime dependency: %s" % item
+ elif item in deps:
+ print " - Double definition of runtime dependency: %s" % item
+ else:
+ deps.append(item)
+
+ return deps
+
+
+def extractLoadDeps(data, fileId=""):
+ deps = []
+
+ # Adding before requirements
+ for item in config.QXHEAD["load"].findall(data):
+ if item == fileId:
+            print " - Self-referring load dependency: %s" % item
+ elif item in deps:
+            print " - Double definition of load dependency: %s" % item
+ else:
+ deps.append(item)
+
+ return deps
+
+
+def extractOptional(data):
+ deps = []
+
+ # Adding explicit requirements
+ for item in config.QXHEAD["optional"].findall(data):
+ if not item in deps:
+ deps.append(item)
+
+ return deps
+
+
+def extractModules(data):
+ mods = []
+
+ for item in config.QXHEAD["module"].findall(data):
+ if not item in mods:
+ mods.append(item)
+
+ return mods
+
+
+def extractResources(data):
+ res = []
+
+ for item in config.QXHEAD["resource"].findall(data):
+ res.append(item)
+
+ return res
+
+
+
+
+
+
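+# getTokens(), getTree() and getStrings() below share the same pattern: look up
+# the requested artefact in fileDb, fall back to the pickle cache when it is
+# still valid, and regenerate (and re-cache) it otherwise.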
+def getTokens(fileDb, fileId, options):
+ if not fileDb[fileId].has_key("tokens"):
+ if options.verbose:
+ print " - Generating tokens for %s..." % fileId
+
+ useCache = False
+ loadCache = False
+
+ fileEntry = fileDb[fileId]
+
+ filePath = fileEntry["path"]
+ fileEncoding = fileEntry["encoding"]
+
+ if options.cacheDirectory != None:
+ cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-tokens.pcl")
+ useCache = True
+
+ if not filetool.checkCache(filePath, cachePath, getInternalModTime(options)):
+ loadCache = True
+
+ if loadCache:
+ tokens = filetool.readCache(cachePath)
+ else:
+ fileContent = filetool.read(filePath, fileEncoding)
+ tokens = tokenizer.parseStream(fileContent, fileId)
+
+ if useCache:
+ if options.verbose:
+ print " - Caching tokens for %s..." % fileId
+
+ filetool.storeCache(cachePath, tokens)
+
+ fileDb[fileId]["tokens"] = tokens
+
+ return fileDb[fileId]["tokens"]
+
+
+
+
+def getTree(fileDb, fileId, options):
+ if not fileDb[fileId].has_key("tree"):
+ if options.verbose:
+ print " - Generating tree for %s..." % fileId
+
+ useCache = False
+ loadCache = False
+
+ fileEntry = fileDb[fileId]
+ filePath = fileEntry["path"]
+
+ if options.cacheDirectory != None:
+ cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-tree.pcl")
+ useCache = True
+
+ if not filetool.checkCache(filePath, cachePath, getInternalModTime(options)):
+ loadCache = True
+
+ if loadCache:
+ tree = filetool.readCache(cachePath)
+ else:
+ tree = treegenerator.createSyntaxTree(getTokens(fileDb, fileId, options))
+
+ if useCache:
+ if options.verbose:
+ print " - Caching tree for %s..." % fileId
+
+ filetool.storeCache(cachePath, tree)
+
+ fileDb[fileId]["tree"] = tree
+
+ return fileDb[fileId]["tree"]
+
+
+
+
+
+def getStrings(fileDb, fileId, options):
+ if not fileDb[fileId].has_key("strings"):
+ if options.verbose:
+ print " - Searching for strings in %s..." % fileId
+
+ useCache = False
+ loadCache = False
+
+ fileEntry = fileDb[fileId]
+ filePath = fileEntry["path"]
+
+ if options.cacheDirectory != None:
+ cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-strings.pcl")
+ useCache = True
+
+ if not filetool.checkCache(filePath, cachePath, getInternalModTime(options)):
+ loadCache = True
+
+ if loadCache:
+ strings = filetool.readCache(cachePath)
+ else:
+ strings = stringoptimizer.search(getTree(fileDb, fileId, options), options.verbose)
+
+ if useCache:
+ if options.verbose:
+ print " - Caching strings for %s..." % fileId
+
+ filetool.storeCache(cachePath, strings)
+
+ fileDb[fileId]["strings"] = strings
+
+ return fileDb[fileId]["strings"]
+
+
+
+
+
+def resolveAutoDeps(fileDb, options):
+ ######################################################################
+ # DETECTION OF AUTO DEPENDENCIES
+ ######################################################################
+
+ if options.verbose:
+ print " * Resolving dependencies..."
+ else:
+ print " * Resolving dependencies: ",
+
+ knownIds = []
+ depCounter = 0
+ hasMessage = False
+
+ for fileId in fileDb:
+ knownIds.append(fileId)
+
+ for fileId in fileDb:
+ fileEntry = fileDb[fileId]
+
+ if fileEntry["autoDeps"] == True:
+ continue
+
+ if not options.verbose:
+ sys.stdout.write(".")
+ sys.stdout.flush()
+
+ hasMessage = False
+
+ fileTokens = getTokens(fileDb, fileId, options)
+ fileDeps = []
+
+ assembledName = ""
+
+ for token in fileTokens:
+ if token["type"] == "name" or token["type"] == "builtin":
+ if assembledName == "":
+ assembledName = token["source"]
+ else:
+ assembledName += ".%s" % token["source"]
+
+ if assembledName in knownIds:
+ if assembledName != fileId and not assembledName in fileDeps:
+ fileDeps.append(assembledName)
+
+ assembledName = ""
+
+ elif not (token["type"] == "token" and token["source"] == "."):
+ if assembledName != "":
+ assembledName = ""
+
+ if token["type"] == "string" and token["source"] in knownIds and token["source"] != fileId and not token["source"] in fileDeps:
+ fileDeps.append(token["source"])
+
+
+ if options.verbose:
+ print " - Analysing %s..." % fileId
+
+ # Updating lists...
+ optionalDeps = fileEntry["optionalDeps"]
+ loadtimeDeps = fileEntry["loadtimeDeps"]
+ runtimeDeps = fileEntry["runtimeDeps"]
+
+ # Removing optional deps from list
+ for dep in optionalDeps:
+ if dep in fileDeps:
+ fileDeps.remove(dep)
+
+ if options.verbose:
+
+ # Checking loadtime dependencies
+ for dep in loadtimeDeps:
+ if not dep in fileDeps:
+ print " - Could not confirm #require(%s) in %s!" % (dep, fileId)
+
+ # Checking runtime dependencies
+ for dep in runtimeDeps:
+ if not dep in fileDeps:
+ print " - Could not confirm #use(%s) in %s!" % (dep, fileId)
+
+ # Adding new content to runtime dependencies
+ for dep in fileDeps:
+ if not dep in runtimeDeps and not dep in loadtimeDeps:
+ if options.verbose:
+ print " - Adding dependency: %s" % dep
+
+ runtimeDeps.append(dep)
+ depCounter += 1
+
+        # Store flag so this file is skipped on the next run
+ fileEntry["autoDeps"] = True
+
+ if not hasMessage and not options.verbose:
+ print
+
+ print " * Added %s dependencies" % depCounter
+
+
+
+
+def storeEntryCache(fileDb, options):
+ print " * Storing file entries..."
+
+ cacheCounter = 0
+ ignoreDbEntries = [ "tokens", "tree", "path", "pathId", "encoding", "resourceInput", "resourceOutput", "sourceScriptPath", "listIndex", "scriptInput" ]
+
+ for fileId in fileDb:
+ fileEntry = fileDb[fileId]
+
+ if fileEntry["cached"] == True:
+ continue
+
+ # Store flag
+ fileEntry["cached"] = True
+
+ # Copy entries
+ fileEntryCopy = {}
+ for key in fileEntry:
+ if not key in ignoreDbEntries:
+ fileEntryCopy[key] = fileEntry[key]
+
+ filetool.storeCache(fileEntry["cachePath"], fileEntryCopy)
+ cacheCounter += 1
+
+ print " * Updated %s files" % cacheCounter
+
+
+
+
+def indexFile(filePath, filePathId, scriptInput, listIndex, scriptEncoding, sourceScriptPath, resourceInput, resourceOutput, options, fileDb={}, moduleDb={}):
+
+ ########################################
+ # Checking cache
+ ########################################
+
+ useCache = False
+ loadCache = False
+ cachePath = None
+
+ if options.cacheDirectory != None:
+ cachePath = os.path.join(filetool.normalize(options.cacheDirectory), filePathId + "-entry.pcl")
+ useCache = True
+
+ if not filetool.checkCache(filePath, cachePath, getInternalModTime(options)):
+ loadCache = True
+
+
+
+ ########################################
+ # Loading file content / cache
+ ########################################
+
+ if loadCache:
+ fileEntry = filetool.readCache(cachePath)
+ fileId = filePathId
+
+ else:
+ fileContent = filetool.read(filePath, scriptEncoding)
+
+ # Extract ID
+ fileContentId = extractFileContentId(fileContent)
+
+ # Search for valid ID
+ if fileContentId == None:
+ print " - Could not extract ID from file: %s. Using fileName!" % filePath
+ fileId = filePathId
+
+ else:
+ fileId = fileContentId
+
+ if fileId != filePathId:
+ print " - ID mismatch: CONTENT=%s != PATH=%s" % (fileContentId, filePathId)
+ sys.exit(1)
+
+ fileEntry = {
+ "autoDeps" : False,
+ "cached" : False,
+ "cachePath" : cachePath,
+ "optionalDeps" : extractOptional(fileContent),
+ "loadtimeDeps" : extractLoadtimeDeps(fileContent, fileId),
+ "runtimeDeps" : extractRuntimeDeps(fileContent, fileId),
+ "afterDeps" : extractAfterDeps(fileContent, fileId),
+ "loadDeps" : extractLoadDeps(fileContent, fileId),
+ "resources" : extractResources(fileContent),
+ "modules" : extractModules(fileContent)
+ }
+
+
+
+ ########################################
+ # Additional data
+ ########################################
+
+ # We don't want to cache these items
+ fileEntry["path"] = filePath
+ fileEntry["pathId"] = filePathId
+ fileEntry["encoding"] = scriptEncoding
+ fileEntry["resourceInput"] = resourceInput
+ fileEntry["resourceOutput"] = resourceOutput
+ fileEntry["sourceScriptPath"] = sourceScriptPath
+ fileEntry["listIndex"] = listIndex
+ fileEntry["scriptInput"] = scriptInput
+
+
+ ########################################
+ # Registering file
+ ########################################
+
+ # Register to file database
+ fileDb[fileId] = fileEntry
+
+ # Register to module database
+ for moduleId in fileEntry["modules"]:
+ if moduleDb.has_key(moduleId):
+ moduleDb[moduleId].append(fileId)
+ else:
+ moduleDb[moduleId] = [ fileId ]
+
+
+
+
+
+def indexSingleScriptInput(scriptInput, listIndex, options, fileDb={}, moduleDb={}):
+ scriptInput = filetool.normalize(scriptInput)
+
+ # Search for other indexed lists
+ if len(options.scriptEncoding) > listIndex:
+ scriptEncoding = options.scriptEncoding[listIndex]
+ else:
+ scriptEncoding = "utf-8"
+
+ if len(options.sourceScriptPath) > listIndex:
+ sourceScriptPath = options.sourceScriptPath[listIndex]
+ else:
+ sourceScriptPath = None
+
+ if len(options.resourceInput) > listIndex:
+ resourceInput = options.resourceInput[listIndex]
+ else:
+ resourceInput = None
+
+ if len(options.resourceOutput) > listIndex:
+ resourceOutput = options.resourceOutput[listIndex]
+ else:
+ resourceOutput = None
+
+ for root, dirs, files in os.walk(scriptInput):
+
+ # Filter ignored directories
+ for ignoredDir in config.DIRIGNORE:
+ if ignoredDir in dirs:
+ dirs.remove(ignoredDir)
+
+ # Searching for files
+ for fileName in files:
+ if os.path.splitext(fileName)[1] == config.JSEXT:
+ filePath = os.path.join(root, fileName)
+ filePathId = filePath.replace(scriptInput + os.sep, "").replace(config.JSEXT, "").replace(os.sep, ".")
+
+ indexFile(filePath, filePathId, scriptInput, listIndex, scriptEncoding, sourceScriptPath, resourceInput, resourceOutput, options, fileDb, moduleDb)
+
+
+def indexScriptInput(options):
+ if options.cacheDirectory != None:
+ filetool.directory(options.cacheDirectory)
+
+ print " * Indexing files... "
+
+ fileDb = {}
+ moduleDb = {}
+ listIndex = 0
+
+ for scriptInput in options.scriptInput:
+ indexSingleScriptInput(scriptInput, listIndex, options, fileDb, moduleDb)
+ listIndex += 1
+
+ print " * %s files were found" % len(fileDb)
+
+ if options.enableAutoDependencies:
+ resolveAutoDeps(fileDb, options)
+
+ if options.cacheDirectory != None:
+ storeEntryCache(fileDb, options)
+
+ return fileDb, moduleDb
+
+
+
+
+
+"""
+Simple resolver, just try to add items and put missing stuff around
+the new one.
+"""
+def addIdWithDepsToSortedList(sortedList, fileDb, fileId):
+ if not fileDb.has_key(fileId):
+ print " * Error: Couldn't find required file: %s" % fileId
+ return False
+
+ # Test if already in
+ if not fileId in sortedList:
+
+ # Including loadtime dependencies
+ for loadtimeDepId in fileDb[fileId]["loadtimeDeps"]:
+ if loadtimeDepId == fileId: break;
+ addIdWithDepsToSortedList(sortedList, fileDb, loadtimeDepId)
+
+ # Including after dependencies
+ for afterDepId in fileDb[fileId]["afterDeps"]:
+ if afterDepId == fileId: break;
+ addIdWithDepsToSortedList(sortedList, fileDb, afterDepId)
+
+ # Add myself
+ if not fileId in sortedList:
+ sortedList.append(fileId)
+
+ # Include runtime dependencies
+ for runtimeDepId in fileDb[fileId]["runtimeDeps"]:
+ addIdWithDepsToSortedList(sortedList, fileDb, runtimeDepId)
+
+ # Include load dependencies
+ for loadDepId in fileDb[fileId]["loadDeps"]:
+ addIdWithDepsToSortedList(sortedList, fileDb, loadDepId)
+
+
+
+
+
+"""
+Search for dependencies, but don't add them. Just use them to put
+the new class after the stuff which is required (if it's included, too)
+"""
+def addIdWithoutDepsToSortedList(sortedList, fileDb, fileId):
+ if not fileDb.has_key(fileId):
+ print " * Error: Couldn't find required file: %s" % fileId
+ return False
+
+ # Test if already in
+ if not fileId in sortedList:
+
+        # Search sortedList for files which need this one and are already included
+ lowestIndex = None
+ currentIndex = 0
+ for lowId in sortedList:
+ for lowDepId in getResursiveLoadDeps([], fileDb, lowId, lowId):
+ if lowDepId == fileId and (lowestIndex == None or currentIndex < lowestIndex):
+ lowestIndex = currentIndex
+
+ currentIndex += 1
+
+ # Insert at defined index or just append new entry
+ if lowestIndex != None:
+ sortedList.insert(lowestIndex, fileId)
+ else:
+ sortedList.append(fileId)
+
+
+
+
+def getResursiveLoadDeps(deps, fileDb, fileId, ignoreId=None):
+ if fileId in deps:
+ return
+
+ if fileId != ignoreId:
+ deps.append(fileId)
+
+ # Including loadtime dependencies
+ for loadtimeDepId in fileDb[fileId]["loadtimeDeps"]:
+ getResursiveLoadDeps(deps, fileDb, loadtimeDepId)
+
+ # Including after dependencies
+ for afterDepId in fileDb[fileId]["afterDeps"]:
+ getResursiveLoadDeps(deps, fileDb, afterDepId)
+
+ return deps
+
+
+
+
+
+def getSortedList(options, fileDb, moduleDb):
+ includeWithDeps = []
+ excludeWithDeps = []
+ includeWithoutDeps = []
+ excludeWithoutDeps = []
+
+ sortedIncludeList = []
+ sortedExcludeList = []
+
+
+
+ # INCLUDE
+
+ # Add Modules and Files (with deps)
+ if options.includeWithDeps:
+ for include in options.includeWithDeps:
+ if include in moduleDb:
+ includeWithDeps.extend(moduleDb[include])
+
+ elif "*" in include or "?" in include:
+ regstr = "^(" + include.replace('.', '\\.').replace('*', '.*').replace('?', '.?') + ")$"
+ regexp = re.compile(regstr)
+
+ for fileId in fileDb:
+ if regexp.search(fileId):
+ if not fileId in includeWithDeps:
+ includeWithDeps.append(fileId)
+
+ else:
+ if not include in includeWithDeps:
+ includeWithDeps.append(include)
+
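+    # e.g. an include pattern of "qx.ui.*" (only an example) is translated above
+    # into the regular expression ^(qx\.ui\..*)$ and matched against every known
+    # file id.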
+
+ # Add Modules and Files (without deps)
+ if options.includeWithoutDeps:
+ for include in options.includeWithoutDeps:
+ if include in moduleDb:
+ includeWithoutDeps.extend(moduleDb[include])
+
+ elif "*" in include or "?" in include:
+ regstr = "^(" + include.replace('.', '\\.').replace('*', '.*').replace('?', '.?') + ")$"
+ regexp = re.compile(regstr)
+
+ for fileId in fileDb:
+ if regexp.search(fileId):
+ if not fileId in includeWithoutDeps:
+ includeWithoutDeps.append(fileId)
+
+ else:
+ if not include in includeWithoutDeps:
+ includeWithoutDeps.append(include)
+
+
+
+
+
+
+ # Add all if both lists are empty
+ if len(includeWithDeps) == 0 and len(includeWithoutDeps) == 0:
+ for fileId in fileDb:
+ includeWithDeps.append(fileId)
+
+ # Sorting include (with deps)
+ for fileId in includeWithDeps:
+ addIdWithDepsToSortedList(sortedIncludeList, fileDb, fileId)
+
+ # Sorting include (without deps)
+ for fileId in includeWithoutDeps:
+ addIdWithoutDepsToSortedList(sortedIncludeList, fileDb, fileId)
+
+
+
+ # EXCLUDE
+
+ # Add Modules and Files (with deps)
+ if options.excludeWithDeps:
+ for exclude in options.excludeWithDeps:
+ if exclude in moduleDb:
+ excludeWithDeps.extend(moduleDb[exclude])
+
+ elif "*" in exclude or "?" in exclude:
+ regstr = "^(" + exclude.replace('.', '\\.').replace('*', '.*').replace('?', '.?') + ")$"
+ regexp = re.compile(regstr)
+
+ for fileId in fileDb:
+ if regexp.search(fileId):
+ if not fileId in excludeWithDeps:
+ excludeWithDeps.append(fileId)
+
+ else:
+ if not exclude in excludeWithDeps:
+ excludeWithDeps.append(exclude)
+
+
+ # Add Modules and Files (without deps)
+ if options.excludeWithoutDeps:
+ for exclude in options.excludeWithoutDeps:
+ if exclude in moduleDb:
+ excludeWithoutDeps.extend(moduleDb[exclude])
+
+ elif "*" in exclude or "?" in exclude:
+ regstr = "^(" + exclude.replace('.', '\\.').replace('*', '.*').replace('?', '.?') + ")$"
+ regexp = re.compile(regstr)
+
+                for fileId in fileDb:
+                    if regexp.search(fileId):
+                        if not fileId in excludeWithoutDeps:
+                            excludeWithoutDeps.append(fileId)
+
+            else:
+                if not exclude in excludeWithoutDeps:
+                    excludeWithoutDeps.append(exclude)
+
+
+
+
+
+ # Sorting exclude (with deps)
+ for fileId in excludeWithDeps:
+ addIdWithDepsToSortedList(sortedExcludeList, fileDb, fileId)
+
+ # Sorting exclude (without deps)
+ for fileId in excludeWithoutDeps:
+ addIdWithoutDepsToSortedList(sortedExcludeList, fileDb, fileId)
+
+
+
+
+ # MERGE
+
+ # Remove excluded files from included files list
+ for fileId in sortedExcludeList:
+ if fileId in sortedIncludeList:
+ sortedIncludeList.remove(fileId)
+
+
+
+ # RETURN
+
+ return sortedIncludeList
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/loader.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/loader.pyc
new file mode 100644
index 0000000000..51d49a8ad2
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/loader.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/mapper.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/mapper.py
new file mode 100755
index 0000000000..8d34450282
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/mapper.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+table = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+def convert(current):
+    # Number of identifiers that fit into N characters; the table above has
+    # 62 entries, of which len(table) - 1 = 61 are used per position:
+    # 1: 61
+    # 2: 61*61 = 3721
+    # 3: 61*61*61 = 226981
+
+ res = ""
+ length = len(table) - 1
+
+ if current / length > 0:
+ res += convert(current / length)
+
+ res += table[current % length]
+
+ return res
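+# Example values (derived from the table above, base = len(table) - 1 = 61):
+#   convert(0)  -> "0"
+#   convert(60) -> "Y"
+#   convert(61) -> "10"
+#   convert(62) -> "11"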
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/mapper.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/mapper.pyc
new file mode 100644
index 0000000000..93f461bc8e
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/mapper.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/migrator.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/migrator.py
new file mode 100755
index 0000000000..520a51fb44
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/migrator.py
@@ -0,0 +1,303 @@
+#!/usr/bin/env python
+
+import sys, re, os
+import config, filetool, treegenerator, tokenizer, compiler, textutil
+
+def entryCompiler(line):
+ # protect escaped equal symbols
+ line = line.replace("\=", "----EQUAL----")
+
+ splitLine = line.split("=")
+
+ if len(splitLine) != 2:
+ print " - Malformed entry: %s" % line
+ return
+
+ orig = splitLine[0].strip()
+ repl = splitLine[1].strip()
+
+    #print "%s :: %s" % (orig, repl)
+
+ # recover protected equal symbols
+ orig = orig.replace("----EQUAL----", "=")
+ repl = repl.replace("----EQUAL----", "=")
+
+ return {"expr":re.compile(orig), "orig":orig, "repl":repl}
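+# Example (hypothetical migration entry): the line
+#
+#   QxDialog = qx.ui.window.Window
+#
+# compiles to {"expr": re.compile("QxDialog"), "orig": "QxDialog",
+# "repl": "qx.ui.window.Window"}; a literal "=" inside either side has to be
+# escaped as "\=".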
+
+
+
+
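+# regtool() runs every compiled entry over the given content: with patch=True
+# the matches are rewritten in place, with patch=False only the line numbers
+# and suggested replacements are reported so they can be applied manually.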
+def regtool(content, regs, patch, options):
+ for patchEntry in regs:
+ matches = patchEntry["expr"].findall(content)
+ itercontent = content
+ line = 1
+
+ for fragment in matches:
+ # Search for first match position
+ pos = itercontent.find(fragment)
+ pos = patchEntry["expr"].search(itercontent).start()
+
+ # Update current line
+ line += len((itercontent[:pos] + fragment).split("\n")) - 1
+
+            # Remove the leading part up to and including the matched fragment
+ itercontent = itercontent[pos+len(fragment):]
+
+ # Debug
+ if options.verbose:
+ print " - Matches %s in %s" % (patchEntry["orig"], line)
+
+ # Replacing
+ if patch:
+ content = patchEntry["expr"].sub(patchEntry["repl"], content, 1)
+
+ else:
+ print " - line %s : (%s)" % (line, patchEntry["orig"])
+ print " %s" % patchEntry["repl"]
+
+ return content
+
+
+
+
+def getHtmlList(options):
+ htmlList = []
+
+ for htmlDir in options.migrationInput:
+ for root, dirs, files in os.walk(htmlDir):
+
+ # Filter ignored directories
+ for ignoredDir in config.DIRIGNORE:
+ if ignoredDir in dirs:
+ dirs.remove(ignoredDir)
+
+ # Searching for files
+ for fileName in files:
+ if os.path.splitext(fileName)[1] in [ ".js", ".html", ".htm", ".php", ".asp", ".jsp" ]:
+ htmlList.append(os.path.join(root, fileName))
+
+ return htmlList
+
+
+
+def handle(fileList, fileDb, options):
+ confPath = os.path.join(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "migration"), options.migrationTarget)
+
+ infoPath = os.path.join(confPath, "info")
+ patchPath = os.path.join(confPath, "patches")
+
+ importedModule = False
+ infoList = []
+ patchList = []
+ htmlList = getHtmlList(options)
+
+
+
+
+ print " * Number of script input files: %s" % len(fileList)
+ print " * Number of HTML input files: %s" % len(htmlList)
+ print " * Update to version: %s" % options.migrationTarget
+
+
+
+ print " * Searching for patch module..."
+
+ for root, dirs, files in os.walk(confPath):
+
+ # Filter ignored directories
+ for ignoredDir in config.DIRIGNORE:
+ if ignoredDir in dirs:
+ dirs.remove(ignoredDir)
+
+ # Searching for files
+ for fileName in files:
+ filePath = os.path.join(root, fileName)
+
+ if os.path.splitext(fileName)[1] != config.PYEXT:
+ continue
+
+ if fileName == "patch.py":
+ print " - Importing..."
+
+ if not root in sys.path:
+ sys.path.insert(0, root)
+
+ import patch
+ importedModule = True
+
+
+
+
+
+
+
+ emptyLine = re.compile("^\s*$")
+
+
+
+ print " * Searching for info expression data..."
+
+ for root, dirs, files in os.walk(infoPath):
+
+ # Filter ignored directories
+ for ignoredDir in config.DIRIGNORE:
+ if ignoredDir in dirs:
+ dirs.remove(ignoredDir)
+
+ # Searching for files
+ for fileName in files:
+ filePath = os.path.join(root, fileName)
+
+ fileContent = textutil.any2Unix(filetool.read(filePath, "utf-8"))
+ infoList.append({"path":filePath, "content":fileContent.split("\n")})
+
+ if options.verbose:
+ print " - %s" % filePath
+
+ print " - Number of info files: %s" % len(infoList)
+
+ print " - Compiling expressions..."
+
+ compiledInfos = []
+
+ for infoFile in infoList:
+ print " - %s" % os.path.basename(infoFile["path"])
+ for line in infoFile["content"]:
+ if emptyLine.match(line) or line.startswith("#") or line.startswith("//"):
+ continue
+
+ compiled = entryCompiler(line)
+ if compiled != None:
+ compiledInfos.append(compiled)
+
+ print " - Number of infos: %s" % len(compiledInfos)
+
+
+
+
+ print " * Searching for patch expression data..."
+
+ for root, dirs, files in os.walk(patchPath):
+
+ # Filter ignored directories
+ for ignoredDir in config.DIRIGNORE:
+ if ignoredDir in dirs:
+ dirs.remove(ignoredDir)
+
+ # Searching for files
+ for fileName in files:
+ filePath = os.path.join(root, fileName)
+
+ fileContent = textutil.any2Unix(filetool.read(filePath, "utf-8"))
+ patchList.append({"path":filePath, "content":fileContent.split("\n")})
+
+ if options.verbose:
+ print " - %s" % filePath
+
+ print " - Number of patch files: %s" % len(patchList)
+
+ print " - Compiling expressions..."
+
+ compiledPatches = []
+
+ for patchFile in patchList:
+ print " - %s" % os.path.basename(patchFile["path"])
+ for line in patchFile["content"]:
+ if emptyLine.match(line) or line.startswith("#") or line.startswith("//"):
+ continue
+
+ compiled = entryCompiler(line)
+ if compiled != None:
+ compiledPatches.append(compiled)
+
+ print " - Number of patches: %s" % len(compiledPatches)
+
+
+
+
+
+
+
+
+ print
+ print " FILE PROCESSING:"
+ print "----------------------------------------------------------------------------"
+
+ if len(fileList) > 0:
+ print " * Processing script files:"
+
+ for fileId in fileList:
+ fileEntry = fileDb[fileId]
+
+ filePath = fileEntry["path"]
+ fileEncoding = fileEntry["encoding"]
+
+ print " - %s" % fileId
+
+ # Read in original content
+ fileContent = filetool.read(filePath, fileEncoding)
+ patchedContent = fileContent
+
+ # Apply patches
+ if importedModule:
+ tree = treegenerator.createSyntaxTree(tokenizer.parseStream(patchedContent))
+
+ # If there were any changes, compile the result
+ if patch.patch(fileId, tree):
+ patchedContent = compiler.compile(tree, True)
+
+ patchedContent = regtool(patchedContent, compiledPatches, True, options)
+ patchedContent = regtool(patchedContent, compiledInfos, False, options)
+
+ # Write file
+ if patchedContent != fileContent:
+ print " - Store modifications..."
+ filetool.save(filePath, patchedContent, fileEncoding)
+
+ print " * Done"
+
+
+
+ if len(htmlList) > 0:
+ print " * Processing HTML files:"
+
+ for filePath in htmlList:
+ print " - %s" % filePath
+
+ # Read in original content
+ fileContent = filetool.read(filePath)
+
+ patchedContent = fileContent
+ patchedContent = regtool(patchedContent, compiledPatches, True, options)
+ patchedContent = regtool(patchedContent, compiledInfos, False, options)
+
+ # Write file
+ if patchedContent != fileContent:
+ print " - Store modifications..."
+ filetool.save(filePath, patchedContent)
+
+ print " * Done"
+
+
+
+
+
+
+
+
+
+
+
+######################################################################
+# MAIN LOOP
+######################################################################
+
+if __name__ == '__main__':
+ try:
+ main()
+
+ except KeyboardInterrupt:
+ print
+ print " * Keyboard Interrupt"
+ sys.exit(1)
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/migrator.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/migrator.pyc
new file mode 100644
index 0000000000..78885cb200
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/migrator.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/obfuscator.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/obfuscator.py
new file mode 100755
index 0000000000..f4348b2d26
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/obfuscator.py
@@ -0,0 +1,438 @@
+#!/usr/bin/env python
+
+import tree, mapper
+
+qooxdooStart = [
+ # Properties
+ "_modify", "_check", "_unitDetection",
+
+ # Layout
+ "_applyRuntime",
+
+ # Cache Properties
+ "_resetRuntime", "_compute", "_change", "_invalidate", "_recompute",
+
+ # Property Methods
+ "set", "get", "force", "reset"
+]
+
+qooxdooNames = [
+ # Demos
+ "ROOT_LOGGER", "removeAllAppenders", "addAppender", "DivAppender",
+
+ # qx.ui.core.Widget.initApplyMethods
+ "_style",
+ "setStyleProperty", "removeStyleProperty",
+ "constant", "Core",
+
+ # Inheritance
+ "qx", "Proto", "Class"
+]
+
+systemNames = [
+ # Statement
+ "break","catch","continue","do","for","in","finally","function","if","else","return","switch","case","default",
+ "this","throw","try","var","while","with",
+
+ # Operator
+ "delete","false","instanceof","new","null","true","typeof","void",
+
+ # Function
+ "GetObject","ScriptEngine","ScriptEngineBuildVersion","ScriptEngineMajorVersion","ScriptEngineMinorVersion",
+
+ # Objects
+ "ActiveXObject","Arguments","Array","Boolean","Date","Dictionary","Enumerator","Error","FileSystemObject",
+ "Function","Global","Image","Math","Number","Object","RegExp","String","VBArray",
+
+ # Property
+ "$1","$2","$3","$4","$5",
+ "$6","$7","$8","$9","arguments","arity","callee","caller","constructor","description","E","global","ignoreCase",
+ "index","Infinity","input","lastIndex","leftContext","length","LN2","LN10","LOG2E","LOG10E","MAX_VALUE",
+ "MIN_VALUE","message","multiline","name","NaN","NEGATIVE_INFINITY","number","PI","POSITIVE_INFINITY",
+ "prototype","rightContext","source","SQRT1_2","SQRT2","undefined",
+
+ # Methods
+ "abs","acos","anchor","apply","asin",
+ "atan","atan2","atEnd","big","blink","bold","call","ceil","charAt","charCodeAt","compile","concat","cos",
+ "decodeURI","decodeURIComponent","dimensions","encodeURI","encodeURIComponent","escape","eval","exec",
+ "exp","fixed","floor","fontcolor","fontsize","fromCharCode","getDate","getDay","getFullYear","getHours",
+ "getItem","getMilliseconds","getMinutes","getMonth","getSeconds","getTime","getTimezoneOffset","getUTCDate",
+ "getUTCDay","getUTCFullYear","getUTCHours","getUTCMilliseconds","getUTCMinutes","getUTCMonth","getUTCSeconds",
+ "getVarDate","getYear","hasOwnProperty","indexOf","isFinite","isNaN","isPrototypeOf","italics","item","join",
+ "lastIndexOf","lastMatch","lastParen","lbound","link","localeCompare","log","match","max","min","moveFirst",
+ "moveNext","parse","parseFloat","parseInt","pop","pow","propertyIsEnumerable","push","random","replace",
+ "reverse","round","search","setDate","setFullYear","setHours","setMilliseconds","setMinutes","setMonth",
+ "setSeconds","setTime","setUTCDate","setUTCFullYear","setUTCHours","setUTCMilliseconds","setUTCMinutes",
+ "setUTCMonth","setUTCSeconds","setYear","shift","sin","slice","small","sort","splice","split","sqrt","strike",
+ "sub","substr","substring","sup","tan","test","toArray","toDateString","toExponential","toFixed","toGMTString",
+ "toLocaleDateString","toLocaleLowerCase","toLocaleString","toLocaleTimeString","toLocaleUpperCase","toLowerCase",
+ "toPrecision","toString","toTimeString","toUpperCase","toUTCString","ubound","unescape","unshift","unwatch","UTC",
+ "valueOf","watch",
+
+ # HTML Entity
+ "nbsp","lt","gt","amp","apos","quot","cent","pound","yen","sect","copy","reg","times","divide",
+
+ # DHTML Constant
+ "ATTRIBUTE_NODE","CDATA_SECTION_NODE","COMMENT_NODE","DOCUMENT_FRAGMENT_NODE","DOCUMENT_NODE","DOCUMENT_TYPE_NODE",
+ "ELEMENT_NODE","ENTITY_NODE","ENTITY_REFERENCE_NODE","NOTATION_NODE","PROCESSING_INSTRUCTION_NODE","TEXT_NODE",
+ "NOTATION_FRAGMENT_NODE","DOMSTRING_SIZE_ERR","HIERARCHY_REQUEST_ERR","INDEX_SIZE_ERR","INUSE_ATTRIBUTE_ERR",
+ "INVALID_ACCESS_ERR","INVALID_CHARACTER_ERR","INVALID_MODIFICATION_ERR","INVALID_STATE_ERR","NAMESPACE_ERR",
+ "NOT_FOUND_ERR","NOT_SUPPORTED_ERR","NO_DATA_ALLOWED_ERR","NO_MODIFICATION_ALLOWED_ERR","SYNTAX_ERR",
+ "WRONG_DOCUMENT_ERR","ABORT","BLUR","CLICK","CHANGE","DBLCLICK","DRAGDROP","ERROR","FOCUS","KEYDOWN","KEYPRESS",
+ "KEYUP","LOAD","MOUSEDOWN","MOUSEMOVE","MOUSEOUT","MOUSEOVER","MOUSEUP","MOVE","RESET","RESIZE","SELECT","SUBMIT",
+ "UNLOAD","NODE_BEFORE","NODE_AFTER","NODE_BEFORE_AND_AFTER","NODE_INSIDE","START_TO_START","START_TO_END",
+ "END_TO_END","END_TO_START","BAD_BOUNDARYPOINTS_ERR","INVALID_NODE_TYPE_ERR","UNKNOWN_RULE","STYLE_RULE",
+ "CHARSET_RULE","IMPORT_RULE","MEDIA_RULE","FONT_FACE_RULE","PAGE_RULE","CSS_UNKNOWN","CSS_NUMBER","CSS_PERCENTAGE",
+ "CSS_EMS","CSS_EXS","CSS_PX","CSS_CM","CSS_MM","CSS_IN","CSS_PT","CSS_PC","CSS_DEG","CSS_RAD","CSS_GRAD","CSS_MS",
+ "CSS_S","CSS_HZ","CSS_KHZ","CSS_DIMENSION","CSS_STRING","CSS_URI","CSS_IDENT","CSS_ATTR","CSS_COUNTER","CSS_RECT",
+ "CSS_RGBCOLOR","CSS_INHERIT","CSS_PRIMITIVE_VALUE","CSS_VALUE_LIST","CSS_CUSTOM","STATE_MAXIMIZED","STATE_MINIMIZED",
+ "STATE_NORMAL","DOCUMENT_POSITION_DISCONNECTED","DOCUMENT_POSITION_PRECEDING","DOCUMENT_POSITION_FOLLOWING",
+ "DOCUMENT_POSITION_CONTAINS","DOCUMENT_POSITION_CONTAINED_BY","DOCUMENT_POSITION_IMPLEMENTATION_SPECIFIC",
+ "CAPTURING_PHASE","AT_TARGET","BUBBLING_PHASE","MOUSEDOWN","MOUSEUP","MOUSEOVER","MOUSEOUT","MOUSEMOVE",
+ "MOUSEDRAG","CLICK","DBLCLICK","KEYDOWN","KEYUP","KEYPRESS","DRAGDROP","FOCUS","BLUR","SELECT","CHANGE",
+ "RESET","SUBMIT","SCROLL","LOAD","UNLOAD","XFER_DONE","ABORT","ERROR","LOCATE","MOVE","RESIZE","FORWARD",
+ "HELP","BACK","TEXT","ALT_MASK","CONTROL_MASK","SHIFT_MASK","META_MASK","SCROLL_PAGE_UP","SCROLL_PAGE_DOWN",
+ "DOM_VK_CANCEL","DOM_VK_HELP","DOM_VK_BACK_SPACE","DOM_VK_TAB","DOM_VK_CLEAR","DOM_VK_RETURN","DOM_VK_ENTER",
+ "DOM_VK_SHIFT","DOM_VK_CONTROL","DOM_VK_ALT","DOM_VK_PAUSE","DOM_VK_CAPS_LOCK","DOM_VK_ESCAPE","DOM_VK_SPACE",
+ "DOM_VK_PAGE_UP","DOM_VK_PAGE_DOWN","DOM_VK_END","DOM_VK_HOME","DOM_VK_LEFT","DOM_VK_UP","DOM_VK_RIGHT","DOM_VK_DOWN",
+ "DOM_VK_PRINTSCREEN","DOM_VK_INSERT","DOM_VK_DELETE","DOM_VK_0","DOM_VK_1","DOM_VK_2","DOM_VK_3","DOM_VK_4","DOM_VK_5",
+ "DOM_VK_6","DOM_VK_7","DOM_VK_8","DOM_VK_9","DOM_VK_SEMICOLON","DOM_VK_EQUALS","DOM_VK_A","DOM_VK_B","DOM_VK_C",
+ "DOM_VK_D","DOM_VK_E","DOM_VK_F","DOM_VK_G","DOM_VK_H","DOM_VK_I","DOM_VK_J","DOM_VK_K","DOM_VK_L","DOM_VK_M",
+ "DOM_VK_N","DOM_VK_O","DOM_VK_P","DOM_VK_Q","DOM_VK_R","DOM_VK_S","DOM_VK_T","DOM_VK_U","DOM_VK_V","DOM_VK_W",
+ "DOM_VK_X","DOM_VK_Y","DOM_VK_Z","DOM_VK_CONTEXT_MENU","DOM_VK_NUMPAD0","DOM_VK_NUMPAD1","DOM_VK_NUMPAD2",
+ "DOM_VK_NUMPAD3","DOM_VK_NUMPAD4","DOM_VK_NUMPAD5","DOM_VK_NUMPAD6","DOM_VK_NUMPAD7","DOM_VK_NUMPAD8","DOM_VK_NUMPAD9",
+ "DOM_VK_MULTIPLY","DOM_VK_ADD","DOM_VK_SEPARATOR","DOM_VK_SUBTRACT","DOM_VK_DECIMAL","DOM_VK_DIVIDE","DOM_VK_F1",
+ "DOM_VK_F2","DOM_VK_F3","DOM_VK_F4","DOM_VK_F5","DOM_VK_F6","DOM_VK_F7","DOM_VK_F8","DOM_VK_F9","DOM_VK_F10",
+ "DOM_VK_F11","DOM_VK_F12","DOM_VK_F13","DOM_VK_F14","DOM_VK_F15","DOM_VK_F16","DOM_VK_F17","DOM_VK_F18","DOM_VK_F19",
+ "DOM_VK_F20","DOM_VK_F21","DOM_VK_F22","DOM_VK_F23","DOM_VK_F24","DOM_VK_NUM_LOCK","DOM_VK_SCROLL_LOCK","DOM_VK_COMMA",
+ "DOM_VK_PERIOD","DOM_VK_SLASH","DOM_VK_BACK_QUOTE","DOM_VK_OPEN_BRACKET","DOM_VK_BACK_SLASH","DOM_VK_CLOSE_BRACKET",
+ "DOM_VK_QUOTE","DOM_VK_META","MODIFICATION","ADDITION","REMOVAL","INVALID_EXPRESSION_ERR","TYPE_ERR","ANY_TYPE",
+ "NUMBER_TYPE","STRING_TYPE","BOOLEAN_TYPE","UNORDERED_NODE_ITERATOR_TYPE","ORDERED_NODE_ITERATOR_TYPE",
+ "UNORDERED_NODE_SNAPSHOT_TYPE","ORDERED_NODE_SNAPSHOT_TYPE","ANY_UNORDERED_NODE_TYPE","FIRST_ORDERED_NODE_TYPE",
+ "UNSPECIFIED_EVENT_TYPE_ERR",
+
+ # DHTML Object
+ "a","AbstractView","acronym","address","applet","area","Attr","attribute","b",
+ "BarProp","base","baseFont","bdo","BeforeUnloadEvent","bgSound","big","blockQuote","body","br","button",
+ "CanvasGradient","CanvasPattern","CanvasRenderingContext2D","caption","CDATASection","center","CharacterData",
+ "ChromeWindow","cite","clientInformation","clipboardData","code","col","colGroup","comment","Comment","Counter",
+ "CSS2Properties","CSSCharsetRule","CSSFontFaceRule","CSSImportRule","CSSMediaRule","CSSPageRule","CSSPrimitiveValue",
+ "CSSRGBColor","CSSRule","CSSRuleList","CSSStyleDeclaration","CSSStyleRule","CSSStyleSheet","CSSUnknownRule",
+ "CSSValue","CSSValueList","currentStyle","custom","dataTransfer","dd","defaults","del","dfn","dir","div","dl",
+ "document","Document","DocumentCSS","DocumentEvent","DocumentFragment","DocumentRange","DocumentStyle",
+ "DocumentType","DocumentView","DOMException","DOMImplementation","DOMImplementationCSS","DOMParser",
+ "DOMStringList","dt","Element","ElementCSSInlineStyle","em","embed","Entity","EntityReference","event",
+ "Event","EventException","EventListener","EventTarget","external","fieldSet","font","form","frame","frameSet",
+ "h","head","history","History","hr","html","HTMLAnchorElement","HTMLAppletElement","HTMLAreaElement",
+ "HTMLBRElement","HTMLBaseElement","HTMLBaseFontElement","HTMLBodyElement","HTMLButtonElement","HTMLCanvasElement",
+ "HTMLCollection","HTMLDListElement","HTMLDirectoryElement","HTMLDivElement","HTMLDocument","HTMLDOMImplementation",
+ "HTMLElement","HTMLEmbedElement","HTMLFieldSetElement","HTMLFontElement","HTMLFormElement","HTMLFrameElement",
+ "HTMLFrameSetElement","HTMLHRElement","HTMLHeadElement","HTMLHeadingElement","HTMLHtmlElement","HTMLIFrameElement",
+ "HTMLImageElement","HTMLInputElement","HTMLIsIndexElement","HTMLLIElement","HTMLLabelElement","HTMLLegendElement",
+ "HTMLLinkElement","HTMLMapElement","HTMLMenuElement","HTMLMetaElement","HTMLModElement","HTMLOListElement",
+ "HTMLObjectElement","HTMLOptGroupElement","HTMLOptionElement","HTMLOptionsCollection","HTMLParagraphElement",
+ "HTMLParamElement","HTMLPreElement","HTMLQuoteElement","HTMLScriptElement","HTMLSelectElement","HTMLStyleElement",
+ "HTMLTableCaptionElement","HTMLTableCellElement","HTMLTableColElement","HTMLTableElement","HTMLTableRowElement",
+ "HTMLTableSectionElement","HTMLTextAreaElement","HTMLTitleElement","HTMLUListElement","i","iframe","ImageDocument",
+ "img","implementation","IMPORT","input","ins","isIndex","kbd","KeyboardEvent","KeyEvent","label","legend","li",
+ "link","LinkStyle","listing","location","Location","map","marquee","MediaList","menu","meta","MimeType",
+ "MimeTypeArray","MouseEvent","MutationEvent","NamedNodeMap","NameList","namespace","navigator","Navigator",
+ "nextID","noBR","Node","NodeList","noFrames","noScript","Notation","NSDocument","NSEvent","NSHTMLAnchorElement",
+ "NSHTMLAreaElement","NSHTMLButtonElement","NSHTMLDocument","NSHTMLElement","NSHTMLFormElement","NSHTMLFrameElement",
+ "NSHTMLHRElement","NSHTMLImageElement","NSHTMLInputElement","NSHTMLOptionElement","NSHTMLSelectElement",
+ "NSHTMLTextAreaElement","NSRange","NSUIEvent","object","ol","optGroup","option","p","page","PageTransitionEvent",
+ "param","plainText","Plugin","PluginArray","popup","PopupBlockedEvent","pre","ProcessingInstruction","q","Range",
+ "RangeException","Rect","RGBColor","rt","ruby","rule","runtimeStyle","s","samp","SchemaLoader","screen","Screen",
+ "script","select","selection","Selection","small","SmartCardEvent","span","strike","strong","style","styleSheet",
+ "StyleSheet","StyleSheetList","sub","sup","Supports","table","TableSectionElement","tBody","td","Text","textArea",
+ "TextNode","TextRange","TextRectangle","tFoot","th","tHead","title","tr","TreeWalker","tt","u","UIEvent","ul",
+ "userProfile","URI","var","ViewCSS","wbr","WebBrowser","WebNavigation","window","Window","Window2","WindowCollection",
+ "WindowInternal","xml","XMLDocument","XMLHttpRequest","XMLSerializer","xmp","XPathEvaluator","XPathException",
+ "XPathExpression","XPathNSResolver","XPathResult","XPointerResult","XSLTProcessor",
+
+ # DHTML Property
+ "_content","abbr","accelerator",
+ "accept","acceptCharset","accessKey","action","activeElement","additive","align","aLink","alinkColor","allowTransparency",
+ "alt","altHTML","altKey","altLeft","anchorNode","anchorOffset","appCodeName","APPLICATION","appMinorVersion","appName",
+ "appVersion","archive","async","ATOMICSELECTION","attrChange","attrName","autocomplete","availHeight","availLeft",
+ "availTop","availWidth","azimuth","axis","background","backgroundAttachment","backgroundColor","backgroundImage",
+ "backgroundPosition","backgroundPositionX","backgroundPositionY","backgroundRepeat","balance","Banner",
+ "BannerAbstract","BaseHref","baseURI","behavior","bgColor","BGCOLOR","bgProperties","blockDirection","blue",
+ "booleanValue","border","borderBottom","borderBottomColor","borderBottomStyle","borderBottomWidth","borderCollapse",
+ "borderColor","borderColorDark","borderColorLight","borderLeft","borderLeftColor","borderLeftStyle","borderLeftWidth",
+ "borderRight","borderRightColor","borderRightStyle","borderRightWidth","borderSpacing","borderStyle","borderTop",
+ "borderTopColor","borderTopStyle","borderTopWidth","borderWidth","borderWidths","bottom","bottomMargin","boundingHeight",
+ "boundingLeft","boundingTop","boundingWidth","browserDOMWindow","browserLanguage","bubbles","bufferDepth","button",
+ "cancelable","cancelBubble","canHaveChildren","canHaveHTML","canvas","caption","captionSide","cellIndex","cellPadding",
+ "cellSpacing","ch","channel","charCode","charset","checked","characterSet","chOff","cite","classid","className",
+ "clear","clientHeight","clientLeft","clientTop","clientWidth","clientX","clientY","clip","clipBottom","clipLeft",
+ "clipRight","clipTop","cloneContents","closed","code","codeBase","codeType","collapsed","color","colorDepth","cols",
+ "colSpan","columnNumber","commonAncestorContainer","compact","compatMode","complete","content","contentDocument",
+ "contentEditable","contentOverflow","contentType","contentWindow","cookie","cookieEnabled","coords","Count",
+ "counterIncrement","counterReset","cpuClass","crypto","cssFloat","cssRules","cssText","cssValueType","ctrlKey",
+ "ctrlLeft","cue","cueAfter","cueBefore","current","currentNode","currentTarget","cursor","data","dataFld","DATAFLD",
+ "dataFormatAs","DATAFORMATAS","dataPageSize","dataSrc","DATASRC","dateTime","declare","defaultCharset",
+ "defaultChecked","defaultSelected","defaultStatus","defaultValue","defaultView","defer","description","designMode",
+ "detail","deviceXDPI","deviceYDPI","dialogArguments","dialogHeight","dialogLeft","dialogTop","dialogWidth","dir",
+ "direction","directories","disabled","display","displays","doctype","document","documentElement","documentURI",
+ "domain","domConfig","dropEffect","dynsrc","effectAllowed","elevation","emptyCells","enabledPlugin","encoding",
+ "enctype","endContainer","endOffset","entities","event","eventPhase","expandEntityReferences","expando",
+ "explicitOriginalTarget","face","fgColor","FieldDelim","fileCreatedDate","fileModifiedDate","filename","fileSize",
+ "fileUpdatedDate","fillStyle","filter","firstChild","focusNode","focusOffset","font","fontFamily","fontSize",
+ "fontSizeAdjust","fontSmoothingEnabled","fontStretch","fontStyle","fontVariant","fontWeight","form","formName",
+ "frame","frameBorder","frameElement","frameSpacing","fromElement","fullScreen","galleryImg","globalAlpha",
+ "globalCompositeOperation","green","hash","hasLayout","headers","height","hidden","hideFocus","history","host",
+ "hostname","href","hreflang","hspace","htmlFor","htmlText","httpEquiv","id","identifier","imageIsOverflowing",
+ "imageIsResized","imageRequest","imageResizingEnabled","imeMode","implementation","indeterminate","index","inner",
+ "innerHeight","innerHTML","innerText","innerWidth","inputEncoding","internalSubset","invalidIteratorState","isChar",
+ "isCollapsed","isContentEditable","isDisabled","isMap","isMultiLine","isOpen","isTextEdit","isTrusted","keyCode",
+ "label","lang","language","lastChild","lastModified","layerX","layerY","layoutFlow","layoutGrid","layoutGridChar",
+ "layoutGridLine","layoutGridMode","layoutGridType","left","leftMargin","length","letterSpacing","lineBreak","lineCap",
+ "lineHeight","lineJoin","lineNumber","lineWidth","link","linkColor","listStyle","listStyleImage","listStylePosition",
+ "listStyleType","localName","location","locationbar","logicalXDPI","logicalYDPI","longDesc","loop","loop","lowsrc",
+ "lowSrc","margin","marginBottom","marginHeight","marginLeft","marginRight","margins","marginTop","marginWidth",
+ "markerOffset","marks","maxHeight","maxLength","maxWidth","media","mediaText","menuArguments","menubar","message",
+ "metaKey","method","Methods","minHeight","minWidth","miterLimit","MozAppearance","MozBackgroundClip",
+ "MozBackgroundInlinePolicy","MozBackgroundOrigin","MozBinding","MozBorderBottomColors","MozBorderLeftColors",
+ "MozBorderRadius","MozBorderRadiusBottomleft","MozBorderRadiusBottomright","MozBorderRadiusTopleft",
+ "MozBorderRadiusTopright","MozBorderRightColors","MozBorderTopColors","MozBoxAlign","MozBoxDirection","MozBoxFlex",
+ "MozBoxOrdinalGroup","MozBoxOrient","MozBoxPack","MozBoxSizing","MozColumnCount","MozColumnGap","MozColumnWidth",
+ "MozFloatEdge","MozForceBrokenImageIcon","MozImageRegion","MozMarginEnd","MozMarginStart","MozOpacity","MozOutline",
+ "MozOutlineColor","MozOutlineOffset","MozOutlineRadius","MozOutlineRadiusBottomleft","MozOutlineRadiusBottomright",
+ "MozOutlineRadiusTopleft","MozOutlineRadiusTopright","MozOutlineStyle","MozOutlineWidth","MozPaddingEnd",
+ "MozPaddingStart","MozUserFocus","MozUserInput","MozUserModify","MozUserSelect","multipart","multiple","name",
+ "nameProp","namespaceURI","naturalHeight","naturalWidth","navigator","newValue","next","nextPage","nextSibling",
+ "nodeName","nodeType","nodeValue","noHref","noResize","noShade","notationName","notations","noWrap","numberValue",
+ "object","offscreenBuffering","offsetHeight","offsetLeft","offsetParent","offsetTop","offsetWidth","offsetX",
+ "offsetY","onBefore","onLine","opacity","opener","originalTarget","orphans","oscpu","outerHeight","outerHTML",
+ "outerText","outerWidth","outline","outlineColor","outlineOffset","outlineStyle","outlineWidth","overflow","overflowX",
+ "overflowY","ownerDocument","ownerElement","ownerNode","ownerRule","owningElement","padding","paddingBottom",
+ "paddingLeft","paddingRight","paddings","paddingTop","page","pageBreakAfter","pageBreakBefore","pageBreakInside",
+ "pageX","pageXOffset","pageY","pageYOffset","palette","parent","parentElement","parentNode","parentRule",
+ "parentStyleSheet","parentTextEdit","parentWindow","pathname","pause","pauseAfter","pauseBefore","persisted",
+ "personalbar","pitch","pitchRange","pixelBottom","pixelDepth","pixelHeight","pixelLeft","pixelRight","pixelTop",
+ "pixelWidth","pkcs11","platform","playDuring","pluginspage","popupWindowFeatures","popupWindowURI","port","posBottom",
+ "posHeight","position","posLeft","posRight","posTop","posWidth","preferredStylesheetSet","prefix","previous",
+ "previousSibling","prevValue","primitiveType","product","productSub","profile","prompt","prompter","propertyName",
+ "protocol","pseudoClass","publicId","qualifier","quotes","rangeCount","rangeOffset","rangeParent","readOnly",
+ "readyState","reason","recordNumber","recordset","red","referrer","rel","relatedNode","relatedTarget","repeat",
+ "requestingWindowURI","responseText","responseXML","result","resultType","returnValue","rev","richness","right",
+ "rightMargin","root","rowIndex","rows","rowSpan","rubyAlign","rubyOverhang","rubyPosition","rules","saveType",
+ "scheme","scope","scopeName","screen","screenLeft","screenTop","screenX","screenY","scroll","scrollAmount",
+ "scrollbar3dLightColor","scrollbarArrowColor","scrollbarBaseColor","scrollbarDarkShadowColor","scrollbarFaceColor",
+ "scrollbarHighlightColor","scrollbars","scrollbarShadowColor","scrollbarTrackColor","scrollDelay","scrollHeight",
+ "scrolling","scrollLeft","scrollMaxX","scrollMaxY","scrollTop","scrollX","scrollY","scrollWidth","search",
+ "sectionRowIndex","SECURITY","securityPolicy","selected","selectedIndex","selectionEnd","selectionStart","selector",
+ "selectorText","self","separator","shape","sheet","shadowBlur","shadowColor","shadowOffsetX","shadowOffsetY",
+ "shiftKey","shiftLeft","sidebar","singleNodeValue","size","snapshotLength","sourceIndex","span","speak","speakHeader",
+ "speakNumeral","speakPunctuation","specified","speechRate","src","srcElement","srcFilter","srcUrn","standby","start",
+ "startContainer","startOffset","status","statusbar","statusText","stress","strictErrorChecking","stringValue",
+ "strokeStyle","style","STYLE","styleFloat","styleSheet","suffixes","summary","systemId","systemLanguage","tabIndex",
+ "tableLayout","tabStop","tagName","tagUrn","target","text","textAlign","textAlignLast","textAutospace","textContent",
+ "textDecoration","textDecorationBlink","textDecorationLineThrough","textDecorationNone","textDecorationOverline",
+ "textDecorationUnderline","textIndent","textJustify","textKashidaSpace","textLength","textOverflow","textShadow",
+ "textTransform","textUnderlinePosition","textZoom","tFoot","tHead","timeStamp","title","tmpRealOriginalTarget",
+ "toElement","tokenName","toolbar","top","topMargin","trueSpeed","type","typeDetail","unicodeBidi","uniqueID","units",
+ "unselectable","UNSELECTABLE","updateInterval","URL","URLUnencoded","urn","useMap","userAgent","userLanguage","vAlign",
+ "value","valueType","vcard_name","vendor","vendorSub","version","verticalAlign","view","viewInheritStyle","viewLink",
+ "viewMasterTab","visibility","visible","vLink","vlinkColor","voiceFamily","volume","vspace","whatToShow","wheelDelta",
+ "which","whiteSpace","widows","width","window","windowRoot","windowState","wordBreak","wordSpacing","wordWrap","wrap",
+ "writingMode","x","XMLDocument","xmlEncoding","XMLNS","xmlStandalone","xmlVersion","XSLDocument","y","zIndex","zoom",
+
+ # DHTML Method
+ "abort","add","addBehavior","addBinding","addColorStop","addEventListener","AddChannel","AddDesktopComponent",
+ "addElement","AddFavorite","addImport","addPageRule","addRange","addReadRequest","addRule","adoptNode","alert",
+ "appendChild","appendData","appendMedium","applyElement","arc","arcTo","assign","atob","attachEvent",
+ "AutoCompleteSaveForm","AutoScan","back","beginPath","bezierCurveTo","blur","btoa","captureEvents","ChooseColorDlg",
+ "clear","clearAttributes","clearData","clearInterval","clearParameters","clearRect","clearRequest","clearTimeout",
+ "click","clip","cloneNode","cloneRange","close","closePath","collapse","collapseToEnd","collapseToStart",
+ "compareBoundaryPoints","compareDocumentPosition","compareEndPoints","compareNode","comparePoint","componentFromPoint",
+ "confirm","contains","containsNode","containsNS","createAttribute","createAttributeNS","createCaption",
+ "createCDATASection","createComment","createContextualFragment","createControlRange","createCSSStyleSheet",
+ "createDocument","createDocumentFragment","createDocumentType","createElement","createElementNS",
+ "createEntityReference","createEvent","createEventObject","createExpression","createLinearGradient",
+ "createHTMLDocument","createNodeIterator","createNSResolver","createPattern","createPopup",
+ "createProcessingInstruction","createRadialGradient","createRange","createRangeCollection","createStyleSheet",
+ "createTextNode","createTextRange","createTFoot","createTHead","createTreeWalker","deleteCaption","deleteCell",
+ "deleteContents","deleteData","deleteFromDocument","deleteMedium","deleteRow","deleteRule","deleteTFoot",
+ "deleteTHead","detach","detachEvent","disableExternalCapture","dispatchEvent","doImport","doReadRequest",
+ "doScroll","dragDrop","drawImage","dump","duplicate","elementFromPoint","enableExternalCapture","empty",
+ "escape","evaluate","evaluateFIXptr","evaluateWithContext","evaluateXPointer","execCommand","execCommandShowHelp",
+ "execScript","expand","extend","extractContents","fill","fillRect","find","findText","fireEvent","firstPage",
+ "focus","forward","getAdjacentText","getAllResponseHeaders","getAnonymousElementByAttribute","getAnonymousNodes",
+ "getAttention","getAttentionWithCycleCount","getAttribute","getAttributeNode","getAttributeNodeNS","getAttributeNS",
+ "getBindingParent","getBookmark","getBoundingClientRect","getBoxObjectFor","getCharset","getClientRects",
+ "getComputedStyle","getContext","getCounterValue","getData","getElementById","getElementsByName","getElementsByTagName",
+ "getElementsByTagNameNS","getExpression","getFeature","getFloatValue","getName","getNamedItem","getNamedItemNS",
+ "getNamespaceURI","getOverrideStyle","getParameter","getPreventDefault","getPropertyCSSValue","getPropertyPriority",
+ "getPropertyValue","getRangeAt","getRectValue","getResponseHeader","getRGBColorValue","getSelection","getStringValue",
+ "getSVGDocument","getUserData","go","hasAttribute","hasAttributeNS","hasAttributes","hasChildNodes","hasFeature",
+ "hasFocus","hide","home","ImportExportFavorites","importNode","importStylesheet","Init","initEvent","initKeyEvent",
+ "initMouseEvent","initMutationEvent","initPageTransitionEvent","initPopupBlockedEvent","initUIEvent","inRange",
+ "insertNode","insertAdjacentElement","insertAdjacentHTML","insertAdjacentText","insertBefore","insertCell","insertData",
+ "insertRow","insertRule","intersectsNode","isDefaultNamespace","isEqual","isEqualNode","isPointInRange","isSameNode",
+ "IsSubscribed","isSupported","item","Item","iterateNext","javaEnabled","lastPage","lineTo","load","loadAsync",
+ "loadBindingDocument","loadOverlay","lookupNamespaceURI","lookupPrefix","maximize","mergeAttributes","minimize",
+ "move","moveBy","moveEnd","moveRow","moveStart","moveTo","moveToBookmark","moveToElementText","moveToPoint",
+ "namedItem","namedRecordset","navigate","NavigateAndFind","nextNode","nextPage","normalize","normalizeDocument",
+ "nSDetach","open","openDialog","openRequest","overrideMimeType","parentElement","parseFromBuffer","parseFromStream",
+ "parseFromString","pasteHTML","preference","preventBubble","preventCapture","preventDefault","previousNode",
+ "previousPage","print","processSchemaElement","prompt","quadraticCurveTo","queryCommandEnabled","queryCommandIndeterm",
+ "queryCommandState","queryCommandSupported","queryCommandText","queryCommandValue","recalc","rect","refresh",
+ "releaseCapture","releaseEvents","reload","remove","removeAllRanges","removeAttribute","removeAttributeNode",
+ "removeAttributeNS","removeBehavior","removeBinding","removeChild","removeEventListener","removeExpression",
+ "removeNamedItem","removeNamedItemNS","removeNode","removeParameter","removeProperty","removeRange","removeRule",
+ "renameNode","replace","replaceAdjacentText","replaceChild","replaceData","replaceNode","reset","resizeBy",
+ "resizeTo","restore","restoreImage","restoreImageTo","rotate","routeEvent","save","scale","scroll","scrollBy",
+ "scrollByLines","scrollByPages","scrollIntoView","scrollTo","select","selectAllChildren","selectionLanguageChange",
+ "selectNode","selectNodeContents","send","serializeToStream","serializeToString","setActive","setAttribute",
+ "setAttributeNode","setAttributeNodeNS","setAttributeNS","setBoxObjectFor","setCapture","setCursor","setData",
+ "setEnd","setEndAfter","setEndBefore","setEndPoint","setExpression","setFloatValue","setInterval","setNamedItem",
+ "setNamedItemNS","setParameter","setProperty","setPropertyPriority","setRequestHeader","setResizable",
+ "setSelectionRange","setStart","setStartAfter","setStartBefore","setStringValue","setTimeout","setUserData",
+ "show","ShowBrowserUI","showHelp","showModalDialog","showModelessDialog","shrinkToFit","sizeToContent",
+ "snapshotItem","splitText","start","stop","stopPropagation","stroke","strokeRect","submit","substringData",
+ "supports","surroundContents","swapNode","tags","taintEnabled","toggleImageSize","transformToDocument",
+ "transformToFragment","translate","unescape","updateCommands","urns","write","writeln",
+
+ # DHTML Event
+ "onabort","onactivate",
+ "onafterprint","onafterupdate","onbeforeactivate","onbeforecopy","onbeforecut","onbeforedeactivate","onbeforeeditfocus",
+ "onbeforepaste","onbeforeprint","onbeforeunload","onbeforeupdate","onblur","onbounce","oncellchange","onchange",
+ "onclick","onclose","oncontextmenu","oncontrolselect","oncopy","oncut","ondataavailable","ondatasetchanged",
+ "ondatasetcomplete","ondblclick","ondeactivate","ondrag","ondragdrop","ondragend","ondragenter","ondragleave",
+ "ondragover","ondragstart","ondrop","onerror","onerrorupdate","onfilterchange","onfinish","onfocus","onfocusin",
+ "onfocusout","onhelp","onkeydown","onkeypress","onkeyup","onlayoutcomplete","onload","onlosecapture","onmousedown",
+ "onmouseenter","onmouseleave","onmousemove","onmouseout","onmouseover","onmouseup","onmousewheel","onmove","onmoveend",
+ "onmovestart","onpaint","onpaste","onprogress","onpropertychange","onreadystatechange","onreset","onresize",
+ "onresizeend","onresizestart","onrowenter","onrowexit","onrowsdelete","onrowsinserted","onscroll","onselect",
+ "onselectionchange","onselectstart","onstart","onstop","onsubmit","onunload",
+
+ # DHTML Collection
+ "all","anchors","applets","areas",
+ "attributes","behaviorUrns","blockFormats","bookmarks","boundElements","cells","childNodes","children","classes",
+ "Components","controllers","controlRange","elements","embeds","filters","fonts","forms","frames","ids","images",
+ "imports","interfaces","layers","links","mimeTypes","namespaces","options","pages","plugins","rows","rules","scripts",
+ "styleSheets","tBodies","TextRange","TextRectangle",
+
+ # IE Default Behavior
+ "anchorClick","anim","clientCaps","download","homePage","httpFolder",
+ "mediaBar","saveFavorite","saveHistory","saveSnapshot","userData",
+
+ # IE Default Behavior Object
+ "MediaItem","PlaylistInfo",
+
+ # IE Default Behavior Property
+ "attributeCount",
+ "availHeight","availWidth","bufferDepth","colorDepth","connectionType","cookieEnabled","cpuClass","currentItem",
+ "disabledUI","duration","enabled","expires","folder","hasNextItem","height","image","javaEnabled","name","nextItem",
+ "openState","platform","playlistInfo","playState","sound","sourceURL","statics","systemLanguage","target",
+ "userLanguage","width","XMLDocument",
+
+ # IE Default Behavior Method
+ "addComponentRequest","addDABehavior","clearComponentRequest","compareVersions",
+ "doComponentRequest","getAttribute","getAttributeName","getComponentVersion","getItemInfo","isComponentInstalled",
+ "isHomePage","load","navigate","navigateFrame","navigateHomePage","playNext","playURL","removeAttribute",
+ "removeDABehavior","save","setAttribute","setHomePage","startDownload","stop",
+
+ # IE Default Behavior Event
+ "onhide","onload","onopenstatechange",
+ "onplaystatechange","onsave","onshow",
+
+ # XMLHTTP Object Property
+ "onreadystatechange","readyState","responseBody","responseStream","responseText",
+ "responseXML","status","statusText",
+
+ # XMLHTTP Object Method
+ "abort","getAllResponseHeaders","getResponseHeader","open","send",
+ "setRequestHeader"
+]
+
+
+def search(node, names):
+
+ if node.type == "assignment":
+ left = node.getChild("left", False)
+
+ if left:
+ variable = left.getChild("variable", False)
+
+ if variable:
+ last = variable.getLastChild()
+ first = variable.getFirstChild()
+
+ if last == first:
+ if last.type == "identifier":
+ pass
+
+ elif last.type == "identifier":
+ name = last.get("name")
+
+ ignore = False
+
+ if name in systemNames or name in qooxdooNames:
+ ignore = True
+
+ if not ignore:
+ for item in qooxdooStart:
+ if name.startswith(item):
+ ignore = True
+
+                        # only apply to names which start with an underscore
+ if not name.startswith("_"):
+ ignore = True
+
+ if not ignore:
+ if not names.has_key(name):
+ # print "Add %s" % name
+
+ names[name] = 1
+ else:
+ names[name] += 1
+
+ if node.hasChildren():
+ for child in node.children:
+ search(child, names)
+
+ return names
+
+
+
+
+def update(node, list, prefix):
+ counter = 0
+
+ if node.type == "identifier":
+ idenName = node.get("name", False)
+
+ if idenName != None and idenName in list:
+ replName = "%s%s" % (prefix, mapper.convert(list.index(idenName)))
+ node.set("name", replName)
+ counter += 1
+
+ # print " - Replaced '%s' with '%s'" % (idenName, replName)
+
+ if node.hasChildren():
+ for child in node.children:
+ counter += update(child, list, prefix)
+
+ return counter
+
+
+
+def sort(names):
+ temp = []
+
+ for name in names:
+ temp.append({ "name" : name, "number" : names[name] })
+
+ temp.sort(lambda x, y: y["number"]-x["number"])
+
+ list = []
+
+ for item in temp:
+ list.append(item["name"])
+
+ print " * Found %s names" % len(list)
+
+ return list
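
Taken together, the helpers above form a small pipeline: search() walks a syntax tree and counts underscore-prefixed member names that get assigned somewhere in it (skipping the qooxdoo, JavaScript and DOM names listed above), sort() orders those names by frequency so the most common ones get the shortest replacements, and update() rewrites every matching identifier to the prefix plus mapper.convert(index). A hypothetical end-to-end run, assuming a tree built the same way migrator.py builds one; the file names and the "$" prefix are made up for illustration.

    import filetool, tokenizer, treegenerator, compiler
    import obfuscator

    fileContent = filetool.read("source.js", "utf-8")        # hypothetical input
    tree = treegenerator.createSyntaxTree(tokenizer.parseStream(fileContent))

    # 1. Collect candidate names ("_foo", "_bar", ...) with their usage counts
    names = obfuscator.search(tree, {})

    # 2. Most frequently used names come first
    sortedNames = obfuscator.sort(names)

    # 3. Rewrite identifiers in place; returns the number of replacements
    replaced = obfuscator.update(tree, sortedNames, "$")
    print " * Replaced %s identifiers" % replaced

    filetool.save("source-obf.js", compiler.compile(tree, True), "utf-8")
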
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/obfuscator.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/obfuscator.pyc
new file mode 100644
index 0000000000..5de4c4c0c9
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/obfuscator.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/optparseext.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/optparseext.py
new file mode 100755
index 0000000000..487277eb29
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/optparseext.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+from optparse import *
+
+class ExtendAction(Option):
+ ACTIONS = Option.ACTIONS + ("extend",)
+ STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
+ TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
+
+ def take_action(self, action, dest, opt, value, values, parser):
+ if action == "extend":
+ lvalue = value.split(",")
+ values.ensure_value(dest, []).extend(lvalue)
+ else:
+ Option.take_action(
+ self, action, dest, opt, value, values, parser)
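
ExtendAction adds an "extend" action to optparse: a typed option declared with action="extend" splits each value on commas and appends the pieces to the destination list, so values can be given comma-separated or by repeating the option. A hedged usage sketch (the option name and values are made up); note that the parser has to be created with option_class=ExtendAction for the new action to be available.

    from optparse import OptionParser
    from optparseext import ExtendAction

    parser = OptionParser(option_class=ExtendAction)
    parser.add_option("--use", action="extend", type="string", dest="use", default=[])

    (options, args) = parser.parse_args(["--use=a,b", "--use", "c"])
    print options.use    # -> ['a', 'b', 'c']
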
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/optparseext.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/optparseext.pyc
new file mode 100644
index 0000000000..7f63b3058c
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/optparseext.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/resources.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/resources.py
new file mode 100755
index 0000000000..ce345af26d
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/resources.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+
+import os, shutil
+import config
+
+
+def copy(options, sortedIncludeList, fileDb):
+ print " * Preparing configuration..."
+
+ overrideList = []
+
+ for overrideEntry in options.overrideResourceOutput:
+ # Parse
+ # fileId.resourceId:destinationDirectory
+ targetSplit = overrideEntry.split(":")
+ targetStart = targetSplit.pop(0)
+ targetStartSplit = targetStart.split(".")
+
+ # Store
+ overrideData = {}
+ overrideData["destinationDirectory"] = ":".join(targetSplit)
+ overrideData["resourceId"] = targetStartSplit.pop()
+ overrideData["fileId"] = ".".join(targetStartSplit)
+
+ # Append
+ overrideList.append(overrideData)
+
+ print " * Syncing..."
+
+ for fileId in sortedIncludeList:
+ filePath = fileDb[fileId]["path"]
+ fileResources = fileDb[fileId]["resources"]
+
+ if len(fileResources) > 0:
+ print " - Found %i resources in %s" % (len(fileResources), fileId)
+
+ for fileResource in fileResources:
+ fileResourceSplit = fileResource.split(":")
+
+ resourceId = fileResourceSplit.pop(0)
+ relativeDirectory = ":".join(fileResourceSplit)
+
+ sourceDirectory = os.path.join(fileDb[fileId]["resourceInput"], relativeDirectory)
+ destinationDirectory = os.path.join(fileDb[fileId]["resourceOutput"], relativeDirectory)
+
+ # Searching for overrides
+ for overrideData in overrideList:
+ if overrideData["fileId"] == fileId and overrideData["resourceId"] == resourceId:
+ destinationDirectory = overrideData["destinationDirectory"]
+
+ print " - Copy %s => %s" % (sourceDirectory, destinationDirectory)
+
+ try:
+ os.listdir(sourceDirectory)
+ except OSError:
+ print " - Source directory isn't readable! Ignore resource!"
+ continue
+
+ for root, dirs, files in os.walk(sourceDirectory):
+
+ # Filter ignored directories
+ for ignoredDir in config.DIRIGNORE:
+ if ignoredDir in dirs:
+ dirs.remove(ignoredDir)
+
+ # Searching for items (resource files)
+ for itemName in files:
+
+ # Generate absolute source file path
+ itemSourcePath = os.path.join(root, itemName)
+
+ # Extract relative path and directory
+ itemRelPath = itemSourcePath.replace(sourceDirectory + os.sep, "")
+ itemRelDir = os.path.dirname(itemRelPath)
+
+ # Generate destination directory and file path
+ itemDestDir = os.path.join(destinationDirectory, itemRelDir)
+ itemDestPath = os.path.join(itemDestDir, itemName)
+
+ # Check/Create destination directory
+ if not os.path.exists(itemDestDir):
+ os.makedirs(itemDestDir)
+
+ # Copy file
+ if options.verbose:
+ print " - Copying: %s => %s" % (itemSourcePath, itemDestPath)
+
+ shutil.copyfile(itemSourcePath, itemDestPath) \ No newline at end of file
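
The override entries accepted by copy() use the form fileId.resourceId:destinationDirectory: everything after the first colon is kept as the destination (so paths containing further colons survive), the last dot-separated segment before it becomes the resourceId, and the remainder is the fileId. A small illustration of that parsing with a made-up entry:

    overrideEntry = "custom.Application.images:/var/www/static/images"   # hypothetical

    targetSplit = overrideEntry.split(":")
    targetStart = targetSplit.pop(0)
    targetStartSplit = targetStart.split(".")

    print ":".join(targetSplit)         # destinationDirectory -> /var/www/static/images
    print targetStartSplit.pop()        # resourceId           -> images
    print ".".join(targetStartSplit)    # fileId               -> custom.Application
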
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/resources.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/resources.pyc
new file mode 100644
index 0000000000..4c08b8e634
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/resources.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/settings.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/settings.py
new file mode 100755
index 0000000000..1e17e53567
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/settings.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+
+import sys, re, os, optparse
+import filetool
+
+
+
+
+def generate(options):
+ if len(options.defineRuntimeSetting) == 0:
+ return ""
+
+ typeFloat = re.compile("^([0-9\-]+\.[0-9]+)$")
+    typeNumber = re.compile("^([0-9\-]+)$")
+
+ settingsStr = ""
+
+ settingsStr += 'if(!window.qx)qx={};'
+
+ if options.addNewLines:
+ settingsStr += "\n"
+
+ settingsStr += 'if(!qx.Settings)qx.Settings={};'
+
+ if options.addNewLines:
+ settingsStr += "\n"
+
+ settingsStr += 'if(!qx.Settings._customSettings)qx.Settings._customSettings={};'
+
+ if options.addNewLines:
+ settingsStr += "\n"
+
+ for setting in options.defineRuntimeSetting:
+ settingSplit = setting.split(":")
+ settingKey = settingSplit.pop(0)
+ settingValue = ":".join(settingSplit)
+
+ settingKeySplit = settingKey.split(".")
+ settingKeyName = settingKeySplit.pop()
+ settingKeySpace = ".".join(settingKeySplit)
+
+ checkStr = 'if(!qx.Settings._customSettings["%s"])qx.Settings._customSettings["%s"]={};' % (settingKeySpace, settingKeySpace)
+ if not checkStr in settingsStr:
+ settingsStr += checkStr
+
+ if options.addNewLines:
+ settingsStr += "\n"
+
+ settingsStr += 'qx.Settings._customSettings["%s"]["%s"]=' % (settingKeySpace, settingKeyName)
+
+ if settingValue == "false" or settingValue == "true" or typeFloat.match(settingValue) or typeNumber.match(settingValue):
+ settingsStr += '%s' % settingValue
+
+ else:
+ settingsStr += '"%s"' % settingValue.replace("\"", "\\\"")
+
+ settingsStr += ";"
+
+ if options.addNewLines:
+ settingsStr += "\n"
+
+ return settingsStr
+
+
+
+
+def main():
+ parser = optparse.OptionParser()
+
+ parser.add_option("-d", "--define-runtime-setting", action="append", dest="defineRuntimeSetting", metavar="NAMESPACE.KEY:VALUE", default=[], help="Define a setting.")
+ parser.add_option("-s", "--settings-script-file", dest="settingsScriptFile", metavar="FILENAME", help="Name of settings script file.")
+ parser.add_option("-n", "--add-new-lines", action="store_true", dest="addNewLines", default=False, help="Keep newlines in compiled files.")
+
+ (options, args) = parser.parse_args()
+
+ if options.settingsScriptFile == None:
+ print " * Please define the output file!"
+ sys.exit(1)
+
+ if len(options.defineRuntimeSetting) == 0:
+ print " * Please define at least one runtime setting!"
+ sys.exit(1)
+
+ print " * Saving settings to %s" % options.settingsScriptFile
+ filetool.save(options.settingsScriptFile, generate(options))
+
+
+
+
+if __name__ == '__main__':
+ try:
+ main()
+
+ except KeyboardInterrupt:
+ print
+ print " * Keyboard Interrupt"
+ sys.exit(1)
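
For illustration, generate() turns each NAMESPACE.KEY:VALUE definition into one line of the bootstrap script: the entry is split at its first colon, the key at its last dot, booleans and numbers are written literally, and everything else is emitted as a quoted string. A hypothetical call (the setting names and values are made up):

    import settings

    class Opts: pass                  # stand-in for the optparse result
    opts = Opts()
    opts.defineRuntimeSetting = ["myapp.enableDebug:true", "myapp.theme:blue"]
    opts.addNewLines = True

    print settings.generate(opts)

With addNewLines set, this should print roughly:

    if(!window.qx)qx={};
    if(!qx.Settings)qx.Settings={};
    if(!qx.Settings._customSettings)qx.Settings._customSettings={};
    if(!qx.Settings._customSettings["myapp"])qx.Settings._customSettings["myapp"]={};
    qx.Settings._customSettings["myapp"]["enableDebug"]=true;
    qx.Settings._customSettings["myapp"]["theme"]="blue";
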
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/settings.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/settings.pyc
new file mode 100644
index 0000000000..e6cbe3d3ae
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/settings.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/stringoptimizer.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/stringoptimizer.py
new file mode 100755
index 0000000000..4c0f3c9782
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/stringoptimizer.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python
+
+import tree
+
+
+def search(node, verbose=False):
+ return search_loop(node, {}, verbose)
+
+
+def search_loop(node, stringMap={}, verbose=False):
+ if node.type == "constant" and node.get("constantType") == "string":
+
+ if verbose:
+ print " - Found: %s" % node.get("value")
+
+ if node.get("detail") == "singlequotes":
+ quote = "'"
+ elif node.get("detail") == "doublequotes":
+ quote = '"'
+
+ value = "%s%s%s" % (quote, node.get("value"), quote)
+
+ if value in stringMap:
+ stringMap[value] += 1
+ else:
+ stringMap[value] = 1
+
+ if check(node, verbose):
+ for child in node.children:
+ search_loop(child, stringMap, verbose)
+
+ return stringMap
+
+
+
+def check(node, verbose=False):
+ # Needs children
+ if not node.hasChildren():
+ return False
+
+ # Try to find all output statements
+ if node.type == "call":
+ cu = node
+ nx = cu.getChild("operand", False)
+
+ if nx != None:
+ cu = nx
+
+ all = cu.getAllChildrenOfType("identifier")
+
+ for ch in all:
+ if ch.get("name", False) in [ "Error", "debug", "info", "warning", "error", "alert" ]:
+ if verbose:
+ print " - Ignore output statement at line: %s" % ch.get("line")
+ return False
+
+ # Try to find all constant assignments (ns.UPPER = string)
+ elif node.type == "assignment":
+ left = node.getChild("left", False)
+ if left != None:
+ var = left.getChild("variable", False)
+
+ if var != None:
+ last = var.getLastChild()
+
+ if last.type == "identifier" and last.get("name").isupper():
+ if verbose:
+ print " - Ignore constant assignment at line: %s" % last.get("line")
+ return False
+
+ # Try to find all constant assignments from Maps ({ UPPER : string })
+ elif node.type == "keyvalue":
+ if node.get("key").isupper():
+ if verbose:
+ print " - Ignore constant key value at line: %s" % node.get("line")
+ return False
+
+ return True
+
+
+
+def sort(stringMap):
+ stringList = []
+
+ for value in stringMap:
+ stringList.append({ "value" : value, "number" : stringMap[value] })
+
+ stringList.sort(lambda x, y: y["number"]-x["number"])
+
+ return stringList
+
+
+
+
+def replace(node, stringList, var="$", verbose=False):
+ if node.type == "constant" and node.get("constantType") == "string":
+ if node.get("detail") == "singlequotes":
+ quote = "'"
+ elif node.get("detail") == "doublequotes":
+ quote = '"'
+
+ oldvalue = "%s%s%s" % (quote, node.get("value"), quote)
+
+ pos = 0
+ for item in stringList:
+ if item["value"] == oldvalue:
+ newvalue = "%s[%s]" % (var, pos)
+
+ if verbose:
+ print " - Replace: %s => %s" % (oldvalue, newvalue)
+
+ line = node.get("line")
+
+
+ # GENERATE IDENTIFIER
+
+ newidentifier = tree.Node("identifier")
+ newidentifier.set("line", line)
+
+ childidentifier = tree.Node("identifier")
+ childidentifier.set("line", line)
+ childidentifier.set("name", var)
+
+ newidentifier.addChild(childidentifier)
+
+
+
+ # GENERATE KEY
+
+ newkey = tree.Node("key")
+ newkey.set("line", line)
+
+ newconstant = tree.Node("constant")
+ newconstant.set("line", line)
+ newconstant.set("constantType", "number")
+ newconstant.set("value", "%s" % pos)
+
+ newkey.addChild(newconstant)
+
+
+
+ # COMBINE CHILDREN
+
+ newnode = tree.Node("accessor")
+ newnode.set("line", line)
+ newnode.set("optimized", True)
+ newnode.set("original", oldvalue)
+ newnode.addChild(newidentifier)
+ newnode.addChild(newkey)
+
+
+ # REPLACE NODE
+
+ node.parent.replaceChild(node, newnode)
+ break
+
+ pos += 1
+
+ if check(node, verbose):
+ for child in node.children:
+ replace(child, stringList, var, verbose)
+
+
+
+def replacement(stringList, var="$"):
+ repl = "%s=[" % var
+
+ for item in stringList:
+ repl += "%s," % (item["value"])
+
+ repl = repl[:-1] + "];"
+
+ return repl
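
The four helpers above are meant to be chained: search() counts string constants in a tree while check() skips logging/Error calls and ALL-CAPS constant assignments, sort() orders the strings by frequency, replace() swaps each remaining occurrence for an accessor such as $[0], and replacement() builds the matching JavaScript prologue that defines the lookup array. A hypothetical end-to-end run (file names made up, tree built as in the other tools):

    import filetool, tokenizer, treegenerator, compiler
    import stringoptimizer

    fileContent = filetool.read("source.js", "utf-8")        # hypothetical input
    tree = treegenerator.createSyntaxTree(tokenizer.parseStream(fileContent))

    stringMap = stringoptimizer.search(tree)
    stringList = stringoptimizer.sort(stringMap)

    # Rewrite string constants to $[n] accessors, most frequent strings first
    stringoptimizer.replace(tree, stringList)

    # Prepend the lookup table, e.g. $=["foo","bar"]; then the rewritten source
    optimized = stringoptimizer.replacement(stringList) + compiler.compile(tree, True)
    filetool.save("source-opt.js", optimized, "utf-8")
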
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/stringoptimizer.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/stringoptimizer.pyc
new file mode 100644
index 0000000000..57ada1ceb7
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/stringoptimizer.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tagtool.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tagtool.py
new file mode 100755
index 0000000000..0313be750c
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tagtool.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+import sys, string, re, optparse
+import config, filetool, comment, random
+
+
+R_TAG = re.compile("random\(.*\)")
+
+
+
+def main():
+ parser = optparse.OptionParser()
+
+ parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=False, help="Quiet output mode.")
+ parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="Verbose output mode.")
+ parser.add_option("--encoding", dest="encoding", default="utf-8", metavar="ENCODING", help="Defines the encoding expected for input files.")
+
+ (options, args) = parser.parse_args()
+
+ if len(args) == 0:
+ print "Needs one or more arguments (files) to tag!"
+ sys.exit(1)
+
+ for fileName in args:
+ if options.verbose:
+ print " * Tagging %s" % fileName
+
+ origFileContent = filetool.read(fileName, options.encoding)
+ patchedFileContent = R_TAG.sub("random(%s)" % random.randint(100, 999), origFileContent)
+
+ if patchedFileContent != origFileContent:
+ filetool.save(fileName, patchedFileContent, options.encoding)
+
+
+
+
+if __name__ == '__main__':
+ try:
+ main()
+
+ except KeyboardInterrupt:
+ print
+ print " * Keyboard Interrupt"
+ sys.exit(1)
+ \ No newline at end of file
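
R_TAG matches an existing random(...) marker in the file content, and the script rewrites it with a fresh three-digit number (presumably a simple cache-busting/version tag). Because .* is greedy, the match runs up to the last closing parenthesis on the same line, so the marker is apparently expected to stand alone. A small illustration with made-up content:

    import re, random

    R_TAG = re.compile("random\(.*\)")     # same pattern as tagtool.R_TAG

    origFileContent = "var cacheTag = random(421);"          # hypothetical
    patchedFileContent = R_TAG.sub("random(%s)" % random.randint(100, 999), origFileContent)

    print patchedFileContent    # e.g. var cacheTag = random(837);
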
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textile.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textile.py
new file mode 100755
index 0000000000..a54e89272e
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textile.py
@@ -0,0 +1,2873 @@
+#!/usr/bin/env python
+# _*_ coding: latin1 _*_
+
+"""This is Textile
+A Humane Web Text Generator
+
+TODO:
+* Make it work with Python 2.1.
+* Make it work with Python 1.5.2? Or is that too optimistic?
+
+---
+To get an overview of all PyTextile's features, simply
+type 'tell me about textile.' in a single line.
+"""
+
+__authors__ = ["Roberto A. F. De Almeida (roberto@dealmeida.net)",
+ "Mark Pilgrim (f8dy@diveintomark.org)"]
+__version__ = "2.0.10"
+__date__ = "2004/10/06"
+__copyright__ = """
+Copyright (c) 2004, Roberto A. F. De Almeida, http://dealmeida.net/
+Copyright (c) 2003, Mark Pilgrim, http://diveintomark.org/
+All rights reserved.
+
+Original PHP version:
+Version 1.0
+21 Feb, 2003
+
+Copyright (c) 2003, Dean Allen, www.textism.com
+All rights reserved.
+
+Parts of the documentation and some of the regular expressions are (c) Brad
+Choate, http://bradchoate.com/. Thanks, Brad!
+"""
+__license__ = """
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name Textile nor the names of its contributors may be used to
+ endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+"""
+__history__ = """
+1.0 - 2003/03/19 - MAP - initial release
+1.01 - 2003/03/19 - MAP - don't strip whitespace within <pre> tags;
+ map high-bit ASCII to HTML numeric entities
+1.02 - 2003/03/19 - MAP - changed hyperlink qtag expression to only
+ match valid URL characters (per RFC 2396); fixed preg_replace to
+ not match across line breaks (solves lots of problems with
+ mistakenly matching overlapping inline markup); fixed whitespace
+ stripping to only strip whitespace from beginning and end of lines,
+ not immediately before and after HTML tags.
+1.03 - 2003/03/20 - MAP - changed hyperlink qtag again to more
+ closely match original Textile (fixes problems with links
+ immediately followed by punctuation -- somewhere Dean is
+ grinning right now); handle curly apostrophe with "ve"
+ contraction; clean up empty titles at end.
+1.04 - 2003/03/23 - MAP - lstrip input to deal with extra spaces at
+ beginning of first line; tweaked list loop to handle consecutive lists
+1.1 - 2003/06/06 - MAP - created initial test suite for links and images,
+ and fixed a bunch of related bugs to pass them
+1.11 - 2003/07/20 - CL - don't demoronise unicode strings; handle
+ "they're" properly
+1.12 - 2003/07/23 - GW - print debug messages to stderr; handle bq(cite).
+1.13 - 2003/07/23 - MAP - wrap bq. text in <p>...</p>
+2 - 2004/03/26 - RAFA - rewritten from (almost) scratch to include
+ all features from Textile 2 and a little bit more.
+2.0.1 - 2004/04/02 - RAFA - Fixed validating function that uses uTidyLib.
+2.0.2 - 2004/04/02 - RAFA - Fixed problem with caps letters in URLs.
+2.0.3 - 2004/04/19 - RAFA - Multiple classes are allowed, thanks to Dave
+ Anderson. The "lang" attribute is now removed from <code>, to be valid
+ XHTML. Fixed <span class="caps">UCAS</span> problem.
+2.0.4 - 2004/05/20 - RAFA, CLB - Added inline formatting to table cells.
+ Curt Bergmann fixed a bug with the colspan formatting. Added Amazon
+ Associated id.
+2.0.5 - 2004/06/01 - CL - Applied patch from Chris Lawrence to (1) fix
+ that Amazon associates ID was being added to all search URIs, (2)
+ customize the Amazon site used with the AMAZON variable, and (3) added
+ an "isbn" URI type that links directly to an Amazon product by ISBN or
+ Amazon ASIN.
+2.0.6 - 2004/06/02 - RAFA - Fixed CAPS problem, again. I hope this is
+ the last time.
+2.0.7 - 2004/06/04 - RAFA, MW - Fixed bullet macro, thanks to Adam
+ Messinger. Added patch from Michal Wallace changing {}.pop() for
+ compatibility with Python 2.2.x.
+2.0.8 - 2004/06/25 - RAFA - Strip tags when adding the content from a
+ footnote to the reference link. Escaped '<' and '>' in the self-
+ generated documentation.
+2.0.9 - 2004/10/04 - RAFA - In images, if ALT is not defined, add an
+ empty attribute. Added "LaTeX" style open/close quotes. Fixed a bug
+ where the acronym definition was being formatted with inline rules.
+ Handle "broken" lines correctly, removing the <br /> from inside
+ split HTML tags.
+2.0.10 - 2004/10/06 - RAFA, LO - Escape all non-escaped ampersands.
+ Applied "trivial patch" from Ludvig Omholt to remove newline right
+ after the <pre> tag.
+"""
+
+# Set your encoding here.
+ENCODING = 'utf-8'
+
+# Output? Non-ASCII characters will be automatically
+# converted to XML entities if you choose ASCII.
+OUTPUT = 'utf-8'
+
+# PyTextile can optionally validate the generated
+# XHTML code. We can use either mxTidy or uTidyLib.
+# You can change the default behaviour here.
+VALIDATE = 0
+
+# If you want h1. to be translated to something other
+# than <h1>, change this offset. You can also pass it
+# as an argument to textile().
+HEAD_OFFSET = 0
+
+# If you want to use itex2mml, specify the full path
+# to the binary here. You can download it from here:
+# http://golem.ph.utexas.edu/~distler/blog/files/itexToMML.tar.gz
+itex2mml = None
+#itex2mml = '/usr/local/bin/itex2MML'
+#itex2mml = '/usr/people/almeida/bin/itex2MML'
+
+# PyTextile can optionally sanitize the generated XHTML,
+# which is good for weblog comments or if you don't trust
+# yourself.
+SANITIZE = 1
+
+# Turn debug on?
+DEBUGLEVEL = 0
+
+# Amazon associate for links: "keywords":amazon
+# If you don't have one, please consider leaving mine here as
+# a small compensation for writing PyTextile. It's commented
+# off as default.
+#amazon_associate_id = 'bomtempo-21'
+amazon_associate_id = None
+
+#AMAZON = 'www.amazon.co.uk'
+AMAZON = 'www.amazon.com'
+
+import re
+import sys
+import os
+import sgmllib
+import unicodedata
+
+
+def _in_tag(text, tag):
+ """Extracts text from inside a tag.
+
+ This function extracts the text from inside a given tag.
+ It's useful to get the text between <body></body> or
+ <pre></pre> when using the validators or the colorizer.
+ """
+ if text.count('<%s' % tag):
+ text = text.split('<%s' % tag, 1)[1]
+ if text.count('>'):
+ text = text.split('>', 1)[1]
+ if text.count('</%s' % tag):
+ text = text.split('</%s' % tag, 1)[0]
+
+ text = text.strip().replace('\r\n', '\n')
+
+ return text
+
+
+# If you want PyTextile to automatically colorize
+# your Python code, you need the htmlizer module
+# from Twisted. (You can just grab this file from
+# the distribution, it has no other dependencies.)
+try:
+ #from twisted.python import htmlizer
+ import htmlizer
+ from StringIO import StringIO
+
+ def _color(code):
+        """Colorize Python code.
+
+ This function wraps a text string in a StringIO,
+ and passes it to the htmlizer function from
+ Twisted.
+ """
+ # Fix line continuations.
+ code = preg_replace(r' \\\n', ' \\\\\n', code)
+
+ code_in = StringIO(code)
+ code_out = StringIO()
+
+ htmlizer.filter(code_in, code_out)
+
+ # Remove <pre></pre> from input.
+ code = _in_tag(code_out.getvalue(), 'pre')
+
+ # Fix newlines.
+ code = code.replace('<span class="py-src-newline">\n</span>', '<span class="py-src-newline"></span>\n')
+
+ return code
+
+except ImportError:
+ htmlizer = None
+
+
+# PyTextile can optionally validate the generated
+# XHTML code using either mxTidy or uTidyLib.
+try:
+ # This is mxTidy.
+ from mx.Tidy import Tidy
+
+ def _tidy1(text):
+ """mxTidy's XHTML validator.
+
+ This function is a wrapper to mxTidy's validator.
+ """
+ nerrors, nwarnings, text, errortext = Tidy.tidy(text, output_xhtml=1, numeric_entities=1, wrap=0)
+ return _in_tag(text, 'body')
+
+ _tidy = _tidy1
+
+except ImportError:
+ try:
+ # This is uTidyLib.
+ import tidy
+
+ def _tidy2(text):
+ """uTidyLib's XHTML validator.
+
+ This function is a wrapper to uTidyLib's validator.
+ """
+ text = tidy.parseString(text, output_xhtml=1, add_xml_decl=0, indent=0, tidy_mark=0)
+ return _in_tag(str(text), 'body')
+
+ _tidy = _tidy2
+
+ except ImportError:
+ _tidy = None
+
+
+# This is good for debugging.
+def _debug(s, level=1):
+ """Outputs debug information to sys.stderr.
+
+ This function outputs debug information if DEBUGLEVEL is
+    higher than a given threshold.
+ """
+ if DEBUGLEVEL >= level: print >> sys.stderr, s
+
+
+#############################
+# Useful regular expressions.
+parameters = {
+ # Horizontal alignment.
+ 'align': r'''(?:(?:<>|[<>=]) # Either '<>', '<', '>' or '='
+ (?![^\s]*(?:<>|[<>=]))) # Look-ahead to ensure it happens once
+ ''',
+
+ # Horizontal padding.
+ 'padding': r'''(?:[\(\)]+) # Any number of '(' and/or ')'
+ ''',
+
+ # Class and/or id.
+ 'classid': r'''( #
+ (?:\(\#[\w]+\)) # (#id)
+ | #
+ (?:\((?:[\w]+(?:\s[\w]+)*) #
+ (?:\#[\w]+)?\)) # (class1 class2 ... classn#id) or (class1 class2 ... classn)
+ ) #
+ (?![^\s]*(?:\([\w#]+\))) # must happen once
+ ''',
+
+ # Language.
+ 'lang': r'''(?:\[[\w-]+\]) # [lang]
+ (?![^\s]*(?:\[.*?\])) # must happen once
+ ''',
+
+ # Style.
+ 'style': r'''(?:{[^\}]+}) # {style}
+ (?![^\s]*(?:{.*?})) # must happen once
+ ''',
+}
+
+res = {
+ # Punctuation.
+ 'punct': r'''[\!"#\$%&'()\*\+,\-\./:;<=>\?@\[\\\]\^_`{\|}\~]''',
+
+ # URL regular expression.
+ 'url': r'''(?=[a-zA-Z0-9./#]) # Must start correctly
+ (?: # Match the leading part (proto://hostname, or just hostname)
+ (?:ftp|https?|telnet|nntp) # protocol
+ :// # ://
+ (?: # Optional 'username:password@'
+ \w+ # username
+ (?::\w+)? # optional :password
+ @ # @
+ )? #
+ [-\w]+(?:\.\w[-\w]*)+ # hostname (sub.example.com)
+ | #
+ (?:mailto:)? # Optional mailto:
+ [-\+\w]+ # username
+ \@ # at
+ [-\w]+(?:\.\w[-\w]*)+ # hostname
+ | #
+ (?:[a-z0-9](?:[-a-z0-9]*[a-z0-9])?\.)+ # domain without protocol
+ (?:com\b # TLD
+ | edu\b #
+ | biz\b #
+ | gov\b #
+ | in(?:t|fo)\b # .int or .info
+ | mil\b #
+ | net\b #
+ | org\b #
+ | museum\b #
+ | aero\b #
+ | coop\b #
+ | name\b #
+ | pro\b #
+ | [a-z][a-z]\b # two-letter country codes
+ ) #
+ )? #
+ (?::\d+)? # Optional port number
+ (?: # Rest of the URL, optional
+ /? # Start with '/'
+ [^.!,?;:"'<>()\[\]{}\s\x7F-\xFF]* # Can't start with these
+ (?: #
+ [.!,?;:]+ # One or more of these
+ [^.!,?;:"'<>()\[\]{}\s\x7F-\xFF]+ # Can't finish with these
+ #'" # # or ' or "
+ )* #
+ )? #
+ ''',
+
+
+ # Block attributes.
+ 'battr': r'''(?P<parameters> #
+ (?: %(align)s # alignment
+ | %(classid)s # class and/or id
+ | %(padding)s # padding tags
+ | %(lang)s # [lang]
+ | %(style)s # {style}
+ )+ #
+ )? #
+ ''' % parameters,
+
+ # (Un)ordered list attributes.
+ 'olattr': r'''(?P<olparameters> #
+ (?: %(align)s # alignment
+ | ((?:\(\#[\w]+\)) # (#id)
+ | #
+ (?:\((?:[\w]+(?:\s[\w]+)*) #
+ (?:\#[\w]+)?\)) # (class1 class2 ... classn#id) or (class1 class2 ... classn)
+ ) #
+ | %(padding)s # padding tags
+ | %(lang)s # [lang]
+ | %(style)s # {style}
+ )+ #
+ )? #
+ ''' % parameters,
+
+ # List item attributes.
+ 'liattr': r'''(?P<liparameters> #
+ (?: %(align)s # alignment
+ | %(classid)s # class and/or id
+ | %(padding)s # padding tags
+ | %(lang)s # [lang]
+ | %(style)s # {style}
+ )+ #
+ )? #
+ ''' % parameters,
+
+ # Qtag attributes.
+ 'qattr': r'''(?P<parameters> #
+ (?: %(classid)s # class and/or id
+ | %(lang)s # [lang]
+ | %(style)s # {style}
+ )+ #
+ )? #
+ ''' % parameters,
+
+ # Link attributes.
+ 'lattr': r'''(?P<parameters> # Links attributes
+ (?: %(align)s # alignment
+ | %(classid)s # class and/or id
+ | %(lang)s # [lang]
+ | %(style)s # {style}
+ )+ #
+ )? #
+ ''' % parameters,
+
+ # Image attributes.
+ 'iattr': r'''(?P<parameters> #
+ (?: #
+ (?: [<>]+ # horizontal alignment tags
+ (?![^\s]*(?:[<>]))) # (must happen once)
+ | #
+ (?: [\-\^~]+ # vertical alignment tags
+ (?![^\s]*(?:[\-\^~]))) # (must happen once)
+ | %(classid)s # class and/or id
+ | %(padding)s # padding tags
+ | %(style)s # {style}
+ )+ #
+ )? #
+ ''' % parameters,
+
+ # Resize attributes.
+ 'resize': r'''(?: #
+ (?:([\d]+%?)x([\d]+%?)) # 20x10
+ | #
+ (?: # or
+ (?:([\d]+)%?w\s([\d]+)%?h) # 20w 10h
+ | # or
+ (?:([\d]+)%?h\s([\d]+)%?w) # 10h 20w
+ ) #
+ )? #
+ ''',
+
+ # Table attributes.
+ 'tattr': r'''(?P<parameters> #
+ (?: #
+ (?: [\^~] # vertical alignment
+ (?![^\s]*(?:[\^~]))) # (must happen once)
+ | %(align)s # alignment
+ | %(lang)s # [lang]
+ | %(style)s # {style}
+ | %(classid)s # class and/or id
+ | %(padding)s # padding
+ | _ # is this a header row/cell?
+ | \\\d+ # colspan
+ | /\d+ # rowspan
+ )+ #
+ )? #
+ ''' % parameters,
+}
+
+
+def preg_replace(pattern, replacement, text):
+ """Alternative re.sub that handles empty groups.
+
+ This acts like re.sub, except it replaces empty groups with ''
+ instead of raising an exception.
+ """
+
+ def replacement_func(matchobj):
+ counter = 1
+ rc = replacement
+ _debug(matchobj.groups())
+ for matchitem in matchobj.groups():
+ if not matchitem:
+ matchitem = ''
+
+ rc = rc.replace(r'\%s' % counter, matchitem)
+ counter += 1
+
+ return rc
+
+ p = re.compile(pattern)
+ _debug(pattern)
+
+ return p.sub(replacement_func, text)
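+
+# A minimal illustration, kept as a comment only (nothing here is executed):
+# with the pattern r'(a)(b)?' and the replacement r'\1-\2', a plain re.sub
+# could raise "error: unmatched group" on the input 'a', while
+# preg_replace(r'(a)(b)?', r'\1-\2', 'a') is expected to return 'a-',
+# because the empty second group is substituted with ''.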
+
+
+def html_replace(pattern, replacement, text):
+ """Replacement outside HTML tags.
+
+ Does a preg_replace only outside HTML tags.
+ """
+ # If there is no html, do a simple search and replace.
+ if not re.search(r'''<.*>''', text):
+ return preg_replace(pattern, replacement, text)
+
+ else:
+ lines = []
+ # Else split the text into an array at <>.
+ for line in re.split('(<.*?>)', text):
+ if not re.match('<.*?>', line):
+ line = preg_replace(pattern, replacement, line)
+
+ lines.append(line)
+
+ return ''.join(lines)
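+
+# Illustrative sketch (comment only): the replacement is applied only to the
+# text between tags, so
+#     html_replace('foo', 'bar', '<a href="foo">foo</a>')
+# should return '<a href="foo">bar</a>', leaving the href attribute alone.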
+
+
+# PyTextile can optionally sanitize the generated XHTML,
+# which is good for weblog comments. This code is from
+# Mark Pilgrim's feedparser.
+class _BaseHTMLProcessor(sgmllib.SGMLParser):
+ elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
+ 'img', 'input', 'isindex', 'link', 'meta', 'param']
+
+ def __init__(self):
+ sgmllib.SGMLParser.__init__(self)
+
+ def reset(self):
+ self.pieces = []
+ sgmllib.SGMLParser.reset(self)
+
+ def normalize_attrs(self, attrs):
+ # utility method to be called by descendants
+ attrs = [(k.lower(), sgmllib.charref.sub(lambda m: unichr(int(m.groups()[0])), v).strip()) for k, v in attrs]
+ attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
+ return attrs
+
+ def unknown_starttag(self, tag, attrs):
+ # called for each start tag
+ # attrs is a list of (attr, value) tuples
+ # e.g. for <pre class="screen">, tag="pre", attrs=[("class", "screen")]
+ strattrs = "".join([' %s="%s"' % (key, value) for key, value in attrs])
+ if tag in self.elements_no_end_tag:
+ self.pieces.append("<%(tag)s%(strattrs)s />" % locals())
+ else:
+ self.pieces.append("<%(tag)s%(strattrs)s>" % locals())
+
+ def unknown_endtag(self, tag):
+ # called for each end tag, e.g. for </pre>, tag will be "pre"
+ # Reconstruct the original end tag.
+ if tag not in self.elements_no_end_tag:
+ self.pieces.append("</%(tag)s>" % locals())
+
+ def handle_charref(self, ref):
+ # called for each character reference, e.g. for "&#160;", ref will be "160"
+ # Reconstruct the original character reference.
+ self.pieces.append("&#%(ref)s;" % locals())
+
+ def handle_entityref(self, ref):
+ # called for each entity reference, e.g. for "&copy;", ref will be "copy"
+ # Reconstruct the original entity reference.
+ self.pieces.append("&%(ref)s;" % locals())
+
+ def handle_data(self, text):
+ # called for each block of plain text, i.e. outside of any tag and
+ # not containing any character or entity references
+ # Store the original text verbatim.
+ self.pieces.append(text)
+
+ def handle_comment(self, text):
+ # called for each HTML comment, e.g. <!-- insert Javascript code here -->
+ # Reconstruct the original comment.
+ self.pieces.append("<!--%(text)s-->" % locals())
+
+ def handle_pi(self, text):
+ # called for each processing instruction, e.g. <?instruction>
+ # Reconstruct original processing instruction.
+ self.pieces.append("<?%(text)s>" % locals())
+
+ def handle_decl(self, text):
+ # called for the DOCTYPE, if present, e.g.
+ # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+ # "http://www.w3.org/TR/html4/loose.dtd">
+ # Reconstruct original DOCTYPE
+ self.pieces.append("<!%(text)s>" % locals())
+
+ def output(self):
+ """Return processed HTML as a single string"""
+ return "".join(self.pieces)
+
+
+class _HTMLSanitizer(_BaseHTMLProcessor):
+ acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
+ 'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
+ 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
+ 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
+ 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
+ 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
+ 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
+ 'thead', 'tr', 'tt', 'u', 'ul', 'var']
+
+ acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
+ 'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
+ 'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
+ 'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
+ 'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
+ 'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
+ 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
+ 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
+ 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
+ 'usemap', 'valign', 'value', 'vspace', 'width']
+
+ unacceptable_elements_with_end_tag = ['script', 'applet']
+
+ # This is for MathML.
+ mathml_elements = ['math', 'mi', 'mn', 'mo', 'mrow', 'msup']
+ mathml_attributes = ['mode', 'xmlns']
+
+ acceptable_elements = acceptable_elements + mathml_elements
+ acceptable_attributes = acceptable_attributes + mathml_attributes
+
+ def reset(self):
+ _BaseHTMLProcessor.reset(self)
+ self.unacceptablestack = 0
+
+ def unknown_starttag(self, tag, attrs):
+ if not tag in self.acceptable_elements:
+ if tag in self.unacceptable_elements_with_end_tag:
+ self.unacceptablestack += 1
+ return
+ attrs = self.normalize_attrs(attrs)
+ attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
+ _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
+
+ def unknown_endtag(self, tag):
+ if not tag in self.acceptable_elements:
+ if tag in self.unacceptable_elements_with_end_tag:
+ self.unacceptablestack -= 1
+ return
+ _BaseHTMLProcessor.unknown_endtag(self, tag)
+
+ def handle_pi(self, text):
+ pass
+
+ def handle_decl(self, text):
+ pass
+
+ def handle_data(self, text):
+ if not self.unacceptablestack:
+ _BaseHTMLProcessor.handle_data(self, text)
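+
+# Usage sketch, assuming the sanitizer is driven directly (comment only):
+#     p = _HTMLSanitizer()
+#     p.feed('<p onclick="x()">hi</p><script>alert(1)</script>')
+#     p.output()
+# should give '<p>hi</p>': the onclick attribute is not in
+# acceptable_attributes and is dropped, and the script element plus its
+# content is suppressed through unacceptablestack.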
+
+
+class Textiler:
+ """Textile formatter.
+
+ This is the base class for the PyTextile text processor.
+ """
+ def __init__(self, text=''):
+ """Instantiate the class, passing the text to be formatted.
+
+ Here we store the text and set up the regular expressions and the
+ smart searches; the actual pre-processing and link-lookup collection
+ happen later, in process().
+ """
+ self.text = text
+
+ # Basic regular expressions.
+ self.res = res
+
+ # Smart searches.
+ self.searches = {}
+ self.searches['imdb'] = 'http://www.imdb.com/Find?for=%s'
+ self.searches['google'] = 'http://www.google.com/search?q=%s'
+ self.searches['python'] = 'http://www.python.org/doc/current/lib/module-%s.html'
+ if amazon_associate_id:
+ self.searches['isbn'] = ''.join(['http://', AMAZON, '/exec/obidos/ASIN/%s/', amazon_associate_id])
+ self.searches['amazon'] = ''.join(['http://', AMAZON, '/exec/obidos/external-search?mode=blended&keyword=%s&tag=', amazon_associate_id])
+ else:
+ self.searches['isbn'] = ''.join(['http://', AMAZON, '/exec/obidos/ASIN/%s'])
+ self.searches['amazon'] = ''.join(['http://', AMAZON, '/exec/obidos/external-search?mode=blended&keyword=%s'])
+
+ # These are the blocks we know.
+ self.signatures = [
+ # Paragraph.
+ (r'''^p # Paragraph signature
+ %(battr)s # Paragraph attributes
+ (?P<dot>\.) # .
+ (?P<extend>\.)? # Extended paragraph denoted by a second dot
+ \s # whitespace
+ (?P<text>.*) # text
+ ''' % self.res, self.paragraph),
+
+ # Pre-formatted text.
+ (r'''^pre # Pre signature
+ %(battr)s # Pre attributes
+ (?P<dot>\.) # .
+ (?P<extend>\.)? # Extended pre denoted by a second dot
+ \s # whitespace
+ (?P<text>.*) # text
+ ''' % self.res, self.pre),
+
+ # Block code.
+ (r'''^bc # Blockcode signature
+ %(battr)s # Blockcode attributes
+ (?P<dot>\.) # .
+ (?P<extend>\.)? # Extended blockcode denoted by a second dot
+ \s # whitespace
+ (?P<text>.*) # text
+ ''' % self.res, self.bc),
+
+ # Blockquote.
+ (r'''^bq # Blockquote signature
+ %(battr)s # Blockquote attributes
+ (?P<dot>\.) # .
+ (?P<extend>\.)? # Extended blockquote denoted by a second dot
+ (:(?P<cite> # Optional cite attribute
+ ( #
+ %(url)s # URL
+ | "[\w]+(?:\s[\w]+)*" # "Name inside quotes"
+ )) #
+ )? #
+ \s # whitespace
+ (?P<text>.*) # text
+ ''' % self.res, self.blockquote),
+
+ # Header.
+ (r'''^h # Header signature
+ (?P<header>\d) # Header number
+ %(battr)s # Header attributes
+ (?P<dot>\.) # .
+ (?P<extend>\.)? # Extended header denoted by a second dot
+ \s # whitespace
+ (?P<text>.*) # text
+ ''' % self.res, self.header),
+
+ # Footnote.
+ (r'''^fn # Footnote signature
+ (?P<footnote>[\d]+) # Footnote number
+ (?P<dot>\.) # .
+ (?P<extend>\.)? # Extended footnote denoted by a second dot
+ \s # whitespace
+ (?P<text>.*) # text
+ ''', self.footnote),
+
+ # Definition list.
+ (r'''^dl # Definition list signature
+ %(battr)s # Definition list attributes
+ (?P<dot>\.) # .
+ (?P<extend>\.)? # Extended definition list denoted by a second dot
+ \s # whitespace
+ (?P<text>.*) # text
+ ''' % self.res, self.dl),
+
+ # Ordered list (attributes to first <li>).
+ (r'''^%(olattr)s # Ordered list attributes
+ \# # Ordered list signature
+ %(liattr)s # List item attributes
+ (?P<dot>\.)? # .
+ \s # whitespace
+ (?P<text>.*) # text
+ ''' % self.res, self.ol),
+
+ # Unordered list (attributes to first <li>).
+ (r'''^%(olattr)s # Unordered list attributes
+ \* # Unordered list signature
+ %(liattr)s # List item attributes
+ (?P<dot>\.)? # .
+ \s # whitespace
+ (?P<text>.*) # text
+ ''' % self.res, self.ul),
+
+ # Escaped text.
+ (r'''^==?(?P<text>.*?)(==)?$ # Escaped text
+ ''', self.escape),
+
+ (r'''^(?P<text><.*)$ # XHTML tag
+ ''', self.escape),
+
+ # itex code.
+ (r'''^(?P<text> # itex code
+ \\\[ # starts with \[
+ .*? # complicated mathematical equations go here
+ \\\]) # ends with \]
+ ''', self.itex),
+
+ # Tables.
+ (r'''^table # Table signature
+ %(tattr)s # Table attributes
+ (?P<dot>\.) # .
+ (?P<extend>\.)? # Extended blockcode denoted by a second dot
+ \s # whitespace
+ (?P<text>.*) # text
+ ''' % self.res, self.table),
+
+ # Simple tables.
+ (r'''^(?P<text>
+ \|
+ .*)
+ ''', self.table),
+
+ # About.
+ (r'''^(?P<text>tell\sme\sabout\stextile\.)$''', self.about),
+ ]
+
+
+ def preprocess(self):
+ """Pre-processing of the text.
+
+ Remove whitespace, fix carriage returns.
+ """
+ # Remove whitespace.
+ self.text = self.text.strip()
+
+ # Zap carriage returns.
+ self.text = self.text.replace("\r\n", "\n")
+ self.text = self.text.replace("\r", "\n")
+
+ # Minor sanitizing.
+ self.text = self.sanitize(self.text)
+
+
+ def grab_links(self):
+ """Grab link lookups.
+
+ Check the text for link lookups, store them in a
+ dictionary, and clean them up.
+ """
+ # Grab links like this: '[id]example.com'
+ links = {}
+ p = re.compile(r'''(?:^|\n)\[([\w]+?)\](%(url)s)(?:$|\n)''' % self.res, re.VERBOSE)
+ for key, link in p.findall(self.text):
+ links[key] = link
+
+ # And clear them from the text.
+ self.text = p.sub('', self.text)
+
+ return links
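+
+ # Sketch (comment only): a lookup line such as
+ #     [python]http://www.python.org
+ # is stored as {'python': 'http://www.python.org'} and stripped from the
+ # text, so a later reference like "Python":python can resolve to it.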
+
+
+ def process(self, head_offset=HEAD_OFFSET, validate=VALIDATE, sanitize=SANITIZE, output=OUTPUT, encoding=ENCODING):
+ """Process the text.
+
+ Here we actually process the text, splitting the text in
+ blocks and applying the corresponding function to each
+ one of them.
+ """
+ # Basic global changes.
+ self.preprocess()
+
+ # Grab lookup links and clean them from the text.
+ self._links = self.grab_links()
+
+ # Offset for the headers.
+ self.head_offset = head_offset
+
+ # Process each block.
+ self.blocks = self.split_text()
+
+ text = []
+ for [function, captures] in self.blocks:
+ text.append(function(**captures))
+
+ text = '\n\n'.join(text)
+
+ # Add titles to footnotes.
+ text = self.footnotes(text)
+
+ # Convert to desired output.
+ text = unicode(text, encoding)
+ text = text.encode(output, 'xmlcharrefreplace')
+
+ # Sanitize?
+ if sanitize:
+ p = _HTMLSanitizer()
+ p.feed(text)
+ text = p.output()
+
+ # Validate output.
+ if _tidy and validate:
+ text = _tidy(text)
+
+ return text
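+
+ # Rough usage sketch (comment only; the module-level textile() helper is
+ # assumed to wrap this method): Textiler('h1. Hello').process(head_offset=1)
+ # should yield the header shifted one level down, i.e. an <h2> element.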
+
+
+ def sanitize(self, text):
+ """Fix single tags.
+
+ Fix tags like <img />, <br /> and <hr />.
+
+ ---
+ h1. Sanitizing
+
+ Textile can help you generate valid XHTML(eXtensible HyperText Markup Language).
+ It will fix any single tags that are not properly closed, like
+ @<img />@, @<br />@ and @<hr />@.
+
+ If you have "mx.Tidy":http://www.egenix.com/files/python/mxTidy.html
+ and/or "&micro;TidyLib":http://utidylib.sourceforge.net/ installed,
+ it can also optionally validate the generated code with these wrappers
+ to ensure 100% valid XHTML(eXtensible HyperText Markup Language).
+ """
+ # Fix single tags like <img /> and <br />.
+ text = preg_replace(r'''<(img|br|hr)(.*?)(?:\s*/?\s*)?>''', r'''<\1\2 />''', text)
+
+ # Remove ampersands.
+ text = preg_replace(r'''&(?!#?[xX]?(?:[0-9a-fA-F]+|\w{1,8});)''', r'''&amp;''', text)
+
+ return text
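+
+ # For example (comment only): sanitize('Tom & Jerry<br>') should return
+ # 'Tom &amp; Jerry<br />'. The bare ampersand is escaped, existing
+ # entities such as &amp; are left alone, and the unclosed <br> becomes
+ # <br />.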
+
+
+ def split_text(self):
+ """Process the blocks from the text.
+
+ Split the blocks according to the signatures, join extended
+ blocks and associate each one of them with a function to
+ process them.
+
+ ---
+ h1. Blocks
+
+ Textile processes your text by dividing it into blocks. Each block
+ is identified by a signature and separated from other blocks by
+ an empty line.
+
+ All signatures should end with a period followed by a space. A
+ header @<h1></h1>@ can be done this way:
+
+ pre. h1. This is a header 1.
+
+ Blocks may continue for multiple paragraphs of text. If you want
+ a block signature to stay "active", use two periods after the
+ signature instead of one. For example:
+
+ pre.. bq.. This is paragraph one of a block quote.
+
+ This is paragraph two of a block quote.
+
+ =p. Now we're back to a regular paragraph.
+
+ p. Becomes:
+
+ pre.. <blockquote>
+ <p>This is paragraph one of a block quote.</p>
+
+ <p>This is paragraph two of a block quote.</p>
+ </blockquote>
+
+ <p>Now we&#8217;re back to a regular paragraph.</p>
+
+ p. The blocks can be customised by adding parameters between the
+ signature and the period. These include:
+
+ dl. {style rule}:A CSS(Cascading Style Sheets) style rule.
+ [ll]:A language identifier (for a "lang" attribute).
+ (class) or (#id) or (class#id):For CSS(Cascading Style Sheets) class and id attributes.
+ &gt;, &lt;, =, &lt;&gt;:Modifier characters for alignment. Right-justification, left-justification, centered, and full-justification. The paragraph will also receive the class names "right", "left", "center" and "justify", respectively.
+ ( (one or more):Adds padding on the left. 1em per "(" character is applied. When combined with the align-left or align-right modifier, it makes the block float.
+ ) (one or more):Adds padding on the right. 1em per ")" character is applied. When combined with the align-left or align-right modifier, it makes the block float.
+
+ Here's an overloaded example:
+
+ pre. p(())>(class#id)[en]{color:red}. A simple paragraph.
+
+ Becomes:
+
+ pre. <p lang="en" style="color:red;padding-left:2em;padding-right:2em;float:right;" class="class right" id="id">A simple paragraph.</p>
+ """
+ # Clear signature.
+ clear_sig = r'''^clear(?P<alignment>[<>])?\.$'''
+ clear = None
+
+ extending = 0
+
+ # We capture the \n's because they are important inside "pre..".
+ blocks = re.split(r'''((\n\s*){2,})''', self.text)
+ output = []
+ for block in blocks:
+ # Check for the clear signature.
+ m = re.match(clear_sig, block)
+ if m:
+ clear = m.group('alignment')
+ if clear:
+ clear = {'<': 'clear:left;', '>': 'clear:right;'}[clear]
+ else:
+ clear = 'clear:both;'
+
+ else:
+ # Check each of the code signatures.
+ for regexp, function in self.signatures:
+ p = re.compile(regexp, (re.VERBOSE | re.DOTALL))
+ m = p.match(block)
+ if m:
+ # Put everything in a dictionary.
+ captures = m.groupdict()
+
+ # If we are extending a block, we require a dot to
+ # break it, so we can start lines with '#' inside
+ # an extended <pre> without matching an ordered list.
+ if extending and not captures.get('dot', None):
+ output[-1][1]['text'] += block
+ break
+ elif captures.has_key('dot'):
+ del captures['dot']
+
+ # If a signature matches, we are not extending a block.
+ extending = 0
+
+ # Check if we should extend this block.
+ if captures.has_key('extend'):
+ extending = captures['extend']
+ del captures['extend']
+
+ # Apply head_offset.
+ if captures.has_key('header'):
+ captures['header'] = int(captures['header']) + self.head_offset
+
+ # Apply clear.
+ if clear:
+ captures['clear'] = clear
+ clear = None
+
+ # Save the block to be processed later.
+ output.append([function, captures])
+
+ break
+
+ else:
+ if extending:
+ # Append the text to the last block.
+ output[-1][1]['text'] += block
+ elif block.strip():
+ output.append([self.paragraph, {'text': block}])
+
+ return output
+
+
+ def parse_params(self, parameters, clear=None, align_type='block'):
+ """Parse the parameters from a block signature.
+
+ This function parses the parameters from a block signature,
+ splitting the information about class, id, language and
+ style. The positioning (indentation and alignment) is parsed
+ and stored in the style.
+
+ A paragraph like:
+
+ p>(class#id){color:red}[en]. Paragraph.
+
+ or:
+
+ p{color:red}[en](class#id)>. Paragraph.
+
+ will have its parameters parsed to:
+
+ output = {'lang' : 'en',
+ 'class': 'class right',
+ 'id' : 'id',
+ 'style': 'color:red;text-align:right;'}
+
+ Note that order is not important.
+ """
+ if not parameters:
+ if clear:
+ return {'style': clear}
+ else:
+ return {}
+
+ output = {}
+
+ # Match class from (class) or (class#id).
+ m = re.search(r'''\((?P<class>[\w]+(\s[\w]+)*)(\#[\w]+)?\)''', parameters)
+ if m: output['class'] = m.group('class')
+
+ # Match id from (#id) or (class#id).
+ m = re.search(r'''\([\w]*(\s[\w]+)*\#(?P<id>[\w]+)\)''', parameters)
+ if m: output['id'] = m.group('id')
+
+ # Match [language].
+ m = re.search(r'''\[(?P<lang>[\w-]+)\]''', parameters)
+ if m: output['lang'] = m.group('lang')
+
+ # Match {style}.
+ m = re.search(r'''{(?P<style>[^\}]+)}''', parameters)
+ if m:
+ output['style'] = m.group('style').replace('\n', '')
+
+ # If necessary, append a semicolon to the style.
+ if not output['style'].endswith(';'):
+ output['style'] += ';'
+
+ # Clear the block?
+ if clear:
+ output['style'] = output.get('style', '') + clear
+
+ # Remove classes, ids, langs and styles. This makes the
+ # regular expression for the positioning much easier.
+ parameters = preg_replace(r'''\([\#\w\s]+\)''', '', parameters)
+ parameters = preg_replace(r'''\[[\w-]+\]''', '', parameters)
+ parameters = preg_replace(r'''{[\w:;#%-]+}''', '', parameters)
+
+ style = []
+
+ # Count the left indentation.
+ l_indent = parameters.count('(')
+ if l_indent: style.append('padding-left:%dem;' % l_indent)
+
+ # Count the right indentation.
+ r_indent = parameters.count(')')
+ if r_indent: style.append('padding-right:%dem;' % r_indent)
+
+ # Add alignment.
+ if align_type == 'image':
+ align = [('<', 'float:left;', ' left'),
+ ('>', 'float:right;', ' right')]
+
+ valign = [('^', 'vertical-align:text-top;', ' top'),
+ ('-', 'vertical-align:middle;', ' middle'),
+ ('~', 'vertical-align:text-bottom;', ' bottom')]
+
+ # Images can have both a vertical and a horizontal alignment.
+ for alignments in [align, valign]:
+ for _align, _style, _class in alignments:
+ if parameters.count(_align):
+ style.append(_style)
+
+ # Append a class name related to the alignment.
+ output['class'] = output.get('class', '') + _class
+ break
+
+ elif align_type == 'table':
+ align = [('<', 'left'),
+ ('>', 'right'),
+ ('=', 'center'),
+ ('<>', 'justify')]
+
+ valign = [('^', 'top'),
+ ('~', 'bottom')]
+
+ # Horizontal alignment.
+ for _align, _style, in align:
+ if parameters.count(_align):
+ output['align'] = _style
+
+ # Vertical alignment.
+ for _align, _style, in valign:
+ if parameters.count(_align):
+ output['valign'] = _style
+
+ # Colspan and rowspan.
+ m = re.search(r'''\\(\d+)''', parameters)
+ if m:
+ #output['colspan'] = m.groups()
+ output['colspan'] = int(m.groups()[0])
+
+ m = re.search(r'''/(\d+)''', parameters)
+ if m:
+ output['rowspan'] = int(m.groups()[0])
+
+ else:
+ if l_indent or r_indent:
+ alignments = [('<>', 'text-align:justify;', ' justify'),
+ ('=', 'text-align:center;', ' center'),
+ ('<', 'float:left;', ' left'),
+ ('>', 'float:right;', ' right')]
+ else:
+ alignments = [('<>', 'text-align:justify;', ' justify'),
+ ('=', 'text-align:center;', ' center'),
+ ('<', 'text-align:left;', ' left'),
+ ('>', 'text-align:right;', ' right')]
+
+ for _align, _style, _class in alignments:
+ if parameters.count(_align):
+ style.append(_style)
+
+ # Append a class name related to the alignment.
+ output['class'] = output.get('class', '') + _class
+ break
+
+ # Join all the styles.
+ output['style'] = output.get('style', '') + ''.join(style)
+
+ # Remove excess whitespace.
+ if output.has_key('class'):
+ output['class'] = output['class'].strip()
+
+ return output
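+
+ # A further sketch (comment only): for an image signature such as
+ # '<(photo)', parse_params('<(photo)', align_type='image') is expected to
+ # return roughly {'class': 'photo left', 'style': 'float:left;'}.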
+
+
+ def build_open_tag(self, tag, attributes={}, single=0):
+ """Build the open tag with specified attributes.
+
+ This function is used by all block builders to
+ generate the opening tags with the attributes of
+ the block.
+ """
+ # Open tag.
+ open_tag = ['<%s' % tag]
+ for k,v in attributes.items():
+ # The ALT attribute can be empty.
+ if k == 'alt' or v: open_tag.append(' %s="%s"' % (k, v))
+
+ if single:
+ open_tag.append(' /')
+
+ # Close tag.
+ open_tag.append('>')
+
+ return ''.join(open_tag)
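+
+ # Illustration (comment only):
+ #     build_open_tag('img', {'src': 'a.png', 'alt': ''}, single=1)
+ # should produce something like '<img src="a.png" alt="" />' (attribute
+ # order follows the dictionary); empty attributes other than alt are
+ # skipped.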
+
+
+ def paragraph(self, text, parameters=None, attributes=None, clear=None):
+ """Process a paragraph.
+
+ This function processes the paragraphs, enclosing the text in a
+ <p> tag and breaking lines with <br />. Paragraphs are formatted
+ with all the inline rules.
+
+ ---
+ h1. Paragraph
+
+ This is how you write a paragraph:
+
+ pre. p. This is a paragraph, although a short one.
+
+ Since the paragraph is the default block, you can safely omit its
+ signature ([@p@]). Simply write:
+
+ pre. This is a paragraph, although a short one.
+
+ Text in a paragraph block is wrapped in @<p></p>@ tags, and
+ newlines receive a <br /> tag. In both cases Textile will process
+ the text to:
+
+ pre. <p>This is a paragraph, although a short one.</p>
+
+ Text in a paragraph block is processed with all the inline rules.
+ """
+ # Split the lines.
+ lines = re.split('\n{2,}', text)
+
+ # Get the attributes.
+ attributes = attributes or self.parse_params(parameters, clear)
+
+ output = []
+ for line in lines:
+ if line:
+ # Clean the line.
+ line = line.strip()
+
+ # Build the tag.
+ open_tag = self.build_open_tag('p', attributes)
+ close_tag = '</p>'
+
+ # Pop the id because it must be unique.
+ if attributes.has_key('id'): del attributes['id']
+
+ # Break lines.
+ line = preg_replace(r'(<br />|\n)+', '<br />\n', line)
+
+ # Remove <br /> from inside broken HTML tags.
+ line = preg_replace(r'(<[^>]*)<br />\n(.*?>)', r'\1 \2', line)
+
+ # Inline formatting.
+ line = self.inline(line)
+
+ output.append(open_tag + line + close_tag)
+
+ return '\n\n'.join(output)
+
+
+ def pre(self, text, parameters=None, clear=None):
+ """Process pre-formatted text.
+
+ This function processes pre-formatted text into a <pre> tag.
+ No HTML is added for the lines, but @<@ and @>@ are translated into
+ HTML entities.
+
+ ---
+ h1. Pre-formatted text
+
+ Pre-formatted text can be specified using the @pre@ signature.
+ Inside a "pre" block, whitespace is preserved and @<@ and @>@ are
+ translated into HTML(HyperText Markup Language) entities
+ automatically.
+
+ Text in a "pre" block is _not processed_ with any inline rule.
+
+ Here's a simple example:
+
+ pre. pre. This text is pre-formatted.
+ Nothing interesting happens inside here...
+
+ Will become:
+
+ pre. <pre>
+ This text is pre-formatted.
+ Nothing interesting happens inside here...
+ </pre>
+ """
+
+ # Remove trailing whitespace.
+ text = text.rstrip()
+
+ # Get the attributes.
+ attributes = self.parse_params(parameters, clear)
+
+ # Build the tag.
+ #open_tag = self.build_open_tag('pre', attributes) + '\n'
+ open_tag = self.build_open_tag('pre', attributes)
+ close_tag = '\n</pre>'
+
+ # Replace < and >.
+ text = text.replace('<', '&lt;')
+ text = text.replace('>', '&gt;')
+
+ return open_tag + text + close_tag
+
+
+ def bc(self, text, parameters=None, clear=None):
+ """Process block code.
+
+ This function processes block code into a <code> tag inside a
+ <pre>. No HTML is added for the lines, but @<@ and @>@ are translated
+ into HTML entities.
+
+ ---
+ h1. Block code
+
+ A block code, specified by the @bc@ signature, is a block of
+ pre-formatted text which also receives a @<code></code>@ tag. As
+ with "pre", whitespace is preserved and @<@ and @>@ are translated
+ into HTML(HyperText Markup Language) entities automatically.
+
+ Text in a "bc" block is _not processed_ with the inline rules.
+
+ If you have "Twisted":http://www.twistedmatrix.com/ installed,
+ Textile can automatically colorize your Python code if you
+ specify its language as "Python":
+
+ pre. bc[python]. from twisted.python import htmlizer
+
+ This will become:
+
+ pre. <pre>
+ <code lang="python">
+ <span class="py-src-keyword">from</span> <span class="py-src-variable">twisted</span><span class="py-src-op">.</span><span class="py-src-variable">python</span> <span class="py-src-keyword">import</span> <span class="py-src-variable">htmlizer</span>
+ </code>
+ </pre>
+
+ The colors can be specified in your CSS(Cascading Style Sheets)
+ file. If you don't want to install Twisted, you can download just
+ the @htmlizer@ module "independently":http://dealmeida.net/code/htmlizer.py.txt.
+ """
+
+ # Get the attributes.
+ attributes = self.parse_params(parameters, clear)
+
+ # XHTML <code> can't have the attribute lang.
+ if attributes.has_key('lang'):
+ lang = attributes['lang']
+ del attributes['lang']
+ else:
+ lang = None
+
+ # Build the tag.
+ open_tag = '<pre>\n' + self.build_open_tag('code', attributes) + '\n'
+ close_tag = '\n</code>\n</pre>'
+
+ # Colorize Python code?
+ if htmlizer and lang == 'python':
+ text = _color(text)
+ else:
+ # Replace < and >.
+ text = text.replace('<', '&lt;')
+ text = text.replace('>', '&gt;')
+
+ return open_tag + text + close_tag
+
+
+ def dl(self, text, parameters=None, clear=None):
+ """Process definition list.
+
+ This function processes definition lists. The text inside
+ the <dt> and <dd> tags is processed for inline formatting.
+
+ ---
+ h1. Definition list
+
+ A definition list starts with the signature @dl@, and has
+ its items separated by a @:@. Here's a simple example:
+
+ pre. dl. name:Sir Lancelot of Camelot.
+ quest:To seek the Holy Grail.
+ color:Blue.
+
+ Becomes:
+
+ pre. <dl>
+ <dt>name</dt>
+ <dd>Sir Lancelot of Camelot.</dd>
+ <dt>quest</dt>
+ <dd>To seek the Holy Grail.</dd>
+ <dt>color</dt>
+ <dd>Blue.</dd>
+ </dl>
+ """
+ # Get the attributes.
+ attributes = self.parse_params(parameters, clear)
+
+ # Build the tag.
+ open_tag = self.build_open_tag('dl', attributes) + '\n'
+ close_tag = '\n</dl>'
+
+ lines = text.split('\n')
+ output = []
+ for line in lines:
+ if line.count(':'):
+ [dt, dd] = line.split(':', 1)
+ else:
+ dt,dd = line, ''
+
+ if dt: output.append('<dt>%s</dt>\n<dd>%s</dd>' % (dt, dd))
+
+ text = '\n'.join(output)
+
+ text = self.inline(text)
+
+ return open_tag + text + close_tag
+
+
+ def blockquote(self, text, parameters=None, cite=None, clear=None):
+ """Process block quote.
+
+ The block quote is inserted into a <blockquote> tag, and
+ processed as a paragraph. An optional cite attribute can
+ be given on the last line after two dashes (--), or right
+ after the signature's period, following a ':', for
+ compatibility with the Perl version.
+
+ ---
+ h1. Blockquote
+
+ A blockquote is denoted by the signature @bq@. The text in this
+ block will be enclosed in @<blockquote></blockquote>@ and @<p></p>@,
+ receiving the same formatting as a paragraph. For example:
+
+ pre. bq. This is a blockquote.
+
+ Becomes:
+
+ pre. <blockquote>
+ <p>This is a blockquote.</p>
+ </blockquote>
+
+ You can optionally specify the @cite@ attribute of the blockquote,
+ using the following syntax:
+
+ pre. bq.:http://example.com Some text.
+
+ pre. bq.:"John Doe" Some other text.
+
+ Becomes:
+
+ pre. <blockquote cite="http://example.com">
+ <p>Some text.</p>
+ </blockquote>
+
+ pre. <blockquote cite="John Doe">
+ <p>Some other text.</p>
+ </blockquote>
+
+ You can also specify the @cite@ using a pair of dashes on the
+ last line of the blockquote:
+
+ pre. bq. Some text.
+ -- http://example.com
+ """
+
+ # Get the attributes.
+ attributes = self.parse_params(parameters, clear)
+
+ if cite:
+ # Remove the quotes?
+ cite = cite.strip('"')
+ attributes['cite'] = cite
+ else:
+ # The citation should be on the last line.
+ text = text.split('\n')
+ if text[-1].startswith('-- '):
+ attributes['cite'] = text.pop()[3:]
+
+ text = '\n'.join(text)
+
+ # Build the tag.
+ open_tag = self.build_open_tag('blockquote', attributes) + '\n'
+ close_tag = '\n</blockquote>'
+
+ # Process the paragraph, passing the attributes.
+ # Does it make sense to pass the id, class, etc. to
+ # the paragraph instead of applying it to the
+ # blockquote tag?
+ text = self.paragraph(text)
+
+ return open_tag + text + close_tag
+
+
+ def header(self, text, parameters=None, header=1, clear=None):
+ """Process a header.
+
+ The header number is captured by the regular
+ expression and lives in header. If head_offset is
+ set, it is adjusted accordingly.
+
+ ---
+ h1. Header
+
+ A header is produced by the signature @hn@, where @n@ goes
+ from 1 to 6. You can adjust the relative output of the headers
+ by passing a @head_offset@ argument when calling @textile()@.
+
+ To make a header:
+
+ pre. h1. This is a header.
+
+ Becomes:
+
+ pre. <h1>This is a header.</h1>
+ """
+ # Get the attributes.
+ attributes = self.parse_params(parameters, clear)
+
+ # Get the header number and limit it between 1 and 6.
+ n = header
+ n = min(n,6)
+ n = max(n,1)
+
+ # Build the tag.
+ open_tag = self.build_open_tag('h%d' % n, attributes)
+ close_tag = '</h%d>' % n
+
+ text = self.inline(text)
+
+ return open_tag + text + close_tag
+
+
+ def footnote(self, text, parameters=None, footnote=1, clear=None):
+ """Process a footnote.
+
+ A footnote is formatted as a paragraph of class
+ 'footnote' and id 'fn%d', starting with the footnote
+ number in a <sup> tag. Here we just build the
+ attributes and pass them directly to self.paragraph().
+
+ ---
+ h1. Footnote
+
+ A footnote is produced by the signature @fn@ followed by
+ a number. Footnotes are paragraphs of a special CSS(Cascading Style Sheets)
+ class. An example:
+
+ pre. fn1. This is footnote number one.
+
+ Will produce this:
+
+ pre. <p class="footnote" id="fn1"><sup>1</sup> This is footnote number one.</p>
+
+ This footnote can be referenced anywhere on the text by the
+ following way:
+
+ pre. This is a reference[1] to footnote number one.
+
+ Which becomes:
+
+ pre. <p>This is a reference<sup class="footnote"><a href="#fn1" title="This is footnote number one.">1</a></sup> to footnote number 1.</p>
+
+ Note that the text from the footnote appears in the @title@ of the
+ link pointing to it.
+ """
+ # Get the number.
+ n = int(footnote)
+
+ # Build the attributes to the paragraph.
+ attributes = self.parse_params(parameters, clear)
+ attributes['class'] = 'footnote'
+ attributes['id'] = 'fn%d' % n
+
+ # Build the paragraph text.
+ text = ('<sup>%d</sup> ' % n) + text
+
+ # And return the paragraph.
+ return self.paragraph(text=text, attributes=attributes)
+
+
+ def build_li(self, items, liattributes):
+ """Build the list item.
+
+ This function builds the list items of an (un)ordered list. It
+ works by peeking at the next list item, and searching for a
+ multi-list. If a multi-list is found, it is processed and
+ appended inside the list item tags, as it should be.
+ """
+ lines = []
+ while len(items):
+ item = items.pop(0)
+
+ # Clean the line.
+ item = item.lstrip()
+ item = item.replace('\n', '<br />\n')
+
+ # Get list item attributes.
+ p = re.compile(r'''^%(liattr)s\s''' % self.res, re.VERBOSE)
+ m = p.match(item)
+ if m:
+ c = m.groupdict('')
+ liparameters = c['liparameters']
+ item = p.sub('', item)
+ else:
+ liparameters = ''
+
+ liattributes = liattributes or self.parse_params(liparameters)
+
+ # Build the item tag.
+ open_tag_li = self.build_open_tag('li', liattributes)
+
+ # Reset the attributes, which should be applied
+ # only to the first <li>.
+ liattributes = {}
+
+ # Build the closing tag.
+ close_tag_li = '</li>'
+
+ # Multi-list recursive routine.
+ # Here we check the _next_ items for a multi-list. If we
+ # find one, we extract all items of the multi-list and
+ # process them recursively.
+ if len(items):
+ inlist = []
+
+ # Grab all the items that start with # or *.
+ n_item = items.pop(0)
+
+ # Grab the <ol> parameters.
+ p = re.compile(r'''^%(olattr)s''' % self.res, re.VERBOSE)
+ m = p.match(n_item)
+ if m:
+ c = m.groupdict('')
+ olparameters = c['olparameters']
+ tmp = p.sub('', n_item)
+ else:
+ olparameters = ''
+
+ # Check for an ordered list inside this one.
+ if tmp.startswith('#'):
+ n_item = tmp
+ inlist.append(n_item)
+ while len(items):
+ # Peek into the next item.
+ n_item = items.pop(0)
+ if n_item.startswith('#'):
+ inlist.append(n_item)
+ else:
+ items.insert(0, n_item)
+ break
+
+ inlist = self.ol('\n'.join(inlist), olparameters=olparameters)
+ item = item + '\n' + inlist + '\n'
+
+ # Check for an unordered list inside this one.
+ elif tmp.startswith('*'):
+ n_item = tmp
+ inlist.append(n_item)
+ while len(items):
+ # Peek into the next item.
+ n_item = items.pop(0)
+ if n_item.startswith('*'):
+ inlist.append(n_item)
+ else:
+ items.insert(0, n_item)
+ break
+
+ inlist = self.ul('\n'.join(inlist), olparameters=olparameters)
+ item = item + '\n' + inlist + '\n'
+
+ # Otherwise we just put it back in the list.
+ else:
+ items.insert(0, n_item)
+
+ item = self.inline(item)
+
+ item = open_tag_li + item + close_tag_li
+ lines.append(item)
+
+ return '\n'.join(lines)
+
+
+ def ol(self, text, liparameters=None, olparameters=None, clear=None):
+ """Build an ordered list.
+
+ This function basically just sets the <ol></ol> with the
+ right attributes, and then passes everything inside to
+ build_li, which does the real tough recursive job.
+
+ ---
+ h1. Ordered lists
+
+ Ordered lists can be constructed this way:
+
+ pre. # Item number 1.
+ # Item number 2.
+ # Item number 3.
+
+ And you get:
+
+ pre. <ol>
+ <li>Item number 1.</li>
+ <li>Item number 2.</li>
+ <li>Item number 3.</li>
+ </ol>
+
+ If you want a list to "break" an extended block, you should
+ add a period after the hash. This is useful for writing
+ Python code:
+
+ pre.. bc[python].. #!/usr/bin/env python
+
+ # This is a comment, not an ordered list!
+ # So this won't break the extended "bc".
+
+ p. Lists can be nested:
+
+ pre. # Item number 1.
+ ## Item number 1a.
+ ## Item number 1b.
+ # Item number 2.
+ ## Item number 2a.
+
+ Textile will transform this to:
+
+ pre. <ol>
+ <li>Item number 1.
+ <ol>
+ <li>Item number 1a.</li>
+ <li>Item number 1b.</li>
+ </ol>
+ </li>
+ <li>Item number 2.
+ <ol>
+ <li>Item number 2a.</li>
+ </ol>
+ </li>
+ </ol>
+
+ You can also mix ordered and unordered lists:
+
+ pre. * To write well you need:
+ *# to read every day
+ *# to write every day
+ *# and X
+
+ You'll get this:
+
+ pre. <ul>
+ <li>To write well you need:
+ <ol>
+ <li>to read every day</li>
+ <li>to write every day</li>
+ <li>and X</li>
+ </ol>
+ </li>
+ </ul>
+
+ To style a list, the parameters should go before the hash if you want
+ to set the attributes on the @<ol>@ tag:
+
+ pre. (class#id)# one
+ # two
+ # three
+
+ If you want to customize the first @<li>@ tag, apply the parameters
+ after the hash:
+
+ pre. #(class#id) one
+ # two
+ # three
+ """
+ # Get the attributes.
+ olattributes = self.parse_params(olparameters, clear)
+ liattributes = self.parse_params(liparameters)
+
+ # Remove list depth.
+ if text.startswith('#'):
+ text = text[1:]
+
+ items = text.split('\n#')
+
+ # Build the open tag.
+ open_tag = self.build_open_tag('ol', olattributes) + '\n'
+
+ close_tag = '\n</ol>'
+
+ # Build the list items.
+ text = self.build_li(items, liattributes)
+
+ return open_tag + text + close_tag
+
+
+ def ul(self, text, liparameters=None, olparameters=None, clear=None):
+ """Build an unordered list.
+
+ This function basically just sets the <ul></ul> with the
+ right attributes, and then passes everything inside to
+ build_li, which does the real tough recursive job.
+
+ ---
+ h1. Unordered lists
+
+ Unordered lists behave exactly like the ordered lists, and are
+ defined using a star:
+
+ pre. * Python
+ * Perl
+ * PHP
+
+ Becomes:
+
+ pre. <ul>
+ <li>Python</li>
+ <li>Perl</li>
+ <li><span class="caps">PHP</span></li>
+ </ul>
+ """
+ # Get the attributes.
+ olattributes = self.parse_params(olparameters, clear)
+ liattributes = self.parse_params(liparameters)
+
+ # Remove list depth.
+ if text.startswith('*'):
+ text = text[1:]
+
+ items = text.split('\n*')
+
+ # Build the open tag.
+ open_tag = self.build_open_tag('ul', olattributes) + '\n'
+
+ close_tag = '\n</ul>'
+
+ # Build the list items.
+ text = self.build_li(items, liattributes)
+
+ return open_tag + text + close_tag
+
+
+ def table(self, text, parameters=None, clear=None):
+ """Build a table.
+
+ To build a table we split the text in lines to get the
+ rows, and split the rows between '|' to get the individual
+ cells.
+
+ ---
+ h1. Tables
+
+ Making a simple table is as easy as possible:
+
+ pre. |a|b|c|
+ |1|2|3|
+
+ Will be processed into:
+
+ pre. <table>
+ <tr>
+ <td>a</td>
+ <td>b</td>
+ <td>c</td>
+ </tr>
+ <tr>
+ <td>1</td>
+ <td>2</td>
+ <td>3</td>
+ </tr>
+ </table>
+
+ If you want to customize the @<table>@ tag, you must use the
+ @table@ signature:
+
+ pre. table(class#id)[en]. |a|b|c|
+ |1|2|3|
+
+ To customize a row, apply the modifier _before_ the first @|@:
+
+ pre. table. (class)<>|a|b|c|
+ |1|2|3|
+
+ Individual cells can be customized by adding the parameters _after_
+ the @|@, followed by a period and a space:
+
+ pre. |(#id). a|b|c|
+ |1|2|3|
+
+ The allowed modifiers are:
+
+ dl. {style rule}:A CSS(Cascading Style Sheets) style rule.
+ (class) or (#id) or (class#id):A CSS(Cascading Style Sheets) class and/or id attribute.
+ ( (one or more):Adds 1em of padding to the left for each '(' character.
+ ) (one or more):Adds 1em of padding to the right for each ')' character.
+ &lt;:Aligns to the left (floats to left for tables if combined with the ')' modifier).
+ &gt;:Aligns to the right (floats to right for tables if combined with the '(' modifier).
+ =:Aligns to center (sets left, right margins to 'auto' for tables).
+ &lt;&gt;:For cells only. Justifies text.
+ ^:For rows and cells only. Aligns to the top.
+ ~ (tilde):For rows and cells only. Aligns to the bottom.
+ _ (underscore):Can be applied to a table row or cell to indicate a header row or cell.
+ \\2 or \\3 or \\4, etc.:Used within cells to indicate a colspan of 2, 3, 4, etc. columns. When you see "\\", think "push forward".
+ /2 or /3 or /4, etc.:Used within cells to indicate a rowspan of 2, 3, 4, etc. rows. When you see "/", think "push downward".
+
+ When a cell is identified as a header cell and an alignment is
+ specified, that becomes the default alignment for cells below it.
+ You can always override this behavior by specifying an alignment
+ for one of the lower cells.
+ """
+ attributes = self.parse_params(parameters, clear, align_type='table')
+ #attributes['cellspacing'] = '0'
+
+ # Build the <table>.
+ open_tag = self.build_open_tag('table', attributes) + '\n'
+ close_tag = '</table>'
+
+ output = []
+ default_align = {}
+ rows = re.split(r'''\n+''', text)
+ for row in rows:
+ # Get the columns.
+ columns = row.split('|')
+
+ # Build the <tr>.
+ parameters = columns.pop(0)
+
+ rowattr = self.parse_params(parameters, align_type='table')
+ open_tr = self.build_open_tag('tr', rowattr) + '\n'
+ output.append(open_tr)
+
+ # Does the row define headers?
+ if parameters.count('_'):
+ td_tag = 'th'
+ else:
+ td_tag = 'td'
+
+ col = 0
+ for cell in columns[:-1]:
+ p = re.compile(r'''(?:%(tattr)s\.\s)?(?P<text>.*)''' % self.res, re.VERBOSE)
+ m = p.match(cell)
+ if m:
+ c = m.groupdict('')
+ cellattr = self.parse_params(c['parameters'], align_type='table')
+
+ # Get the width of this cell.
+ width = cellattr.get('colspan', 1)
+
+ # Is this a header?
+ if c['parameters'].count('_'):
+ td_tag = 'th'
+
+ # If it is a header, let's set the default alignment.
+ if td_tag == 'th':
+ # Set the default alignment for all cells below this one.
+ # This is a little tricky because this header can have
+ # a colspan set.
+ for i in range(col, col+width):
+ default_align[i] = cellattr.get('align', None)
+
+ else:
+ # Apply the default align, if any.
+ cellattr['align'] = cellattr.get('align', default_align.get(col, None))
+
+ open_td = self.build_open_tag(td_tag, cellattr)
+ close_td = '</%s>\n' % td_tag
+
+ #output.append(open_td + c['text'].strip() + close_td)
+ output.append(open_td + self.inline(c['text'].strip()) + close_td)
+
+ col += width
+
+ output.append('</tr>\n')
+
+ text = open_tag + ''.join(output) + close_tag
+
+ return text
+
+
+ def escape(self, text):
+ """Do nothing.
+
+ This is used to match escaped text. Nothing to see here!
+
+ ---
+ h1. Escaping
+
+ If you don't want Textile processing a block, you can simply
+ enclose it inside @==@:
+
+ pre. p. Regular paragraph
+
+ pre. ==
+ Escaped portion -- will not be formatted
+ by Textile at all
+ ==
+
+ pre. p. Back to normal.
+
+ This can also be used inline, disabling the formatting temporarily:
+
+ pre. p. This is ==*a test*== of escaping.
+ """
+ return text
+
+
+ def itex(self, text):
+ """Convert itex to MathML.
+
+ If the itex2mml binary is set, we use it to convert the
+ itex to MathML. Otherwise, the text is left unprocessed and
+ returned as is.
+
+ ---
+ h1. itex
+
+ Textile can automatically convert itex code to MathML(Mathematical Markup Language)
+ for you, if you have the itex2MML binary (you can download it
+ from the "Movable Type plugin":http://golem.ph.utexas.edu/~distler/blog/files/itexToMML.tar.gz).
+
+ Block equations should be enclosed between @\[@ and @\]@:
+
+ pre. \[ e^{i\pi} + 1 = 0 \]
+
+ Will be translated to:
+
+ pre. <math xmlns='http://www.w3.org/1998/Math/MathML' mode='display'>
+ <msup><mi>e</mi> <mrow><mi>i</mi>
+ <mi>&amp;pi;</mi></mrow></msup>
+ <mo>+</mo><mn>1</mn><mo>=</mo><mn>0</mn>
+ </math>
+
+ Equations can also be displayed inline:
+
+ pre. Euler's formula, $e^{i\pi}+1=0$, ...
+
+ (Note that if you want to display MathML(Mathematical Markup Language)
+ your content must be served as @application/xhtml+xml@, which is not
+ accepted by all browsers.)
+ """
+ if itex2mml:
+ try:
+ text = os.popen("echo '%s' | %s" % (text, itex2mml)).read()
+ except:
+ pass
+
+ return text
+
+
+ def about(self, text=None):
+ """Show PyTextile's functionalities.
+
+ An introduction to PyTextile. Can be called when running the
+ main script or if you write the following line:
+
+ 'tell me about textile.'
+
+ But keep it a secret!
+ """
+
+ about = []
+ about.append(textile('h1. This is Textile', head_offset=self.head_offset))
+ about.append(textile(__doc__.split('---', 1)[1], head_offset=self.head_offset))
+
+ functions = [(self.split_text, 1),
+ (self.paragraph, 2),
+ (self.pre, 2),
+ (self.bc, 2),
+ (self.blockquote, 2),
+ (self.dl, 2),
+ (self.header, 2),
+ (self.footnote, 2),
+ (self.escape, 2),
+ (self.itex, 2),
+ (self.ol, 2),
+ (self.ul, 2),
+ (self.table, 2),
+ (self.inline, 1),
+ (self.qtags, 2),
+ (self.glyphs, 2),
+ (self.macros, 2),
+ (self.acronym, 2),
+ (self.images, 1),
+ (self.links, 1),
+ (self.sanitize, 1),
+ ]
+
+ for function, offset in functions:
+ doc = function.__doc__.split('---', 1)[1]
+ doc = doc.split('\n')
+ lines = []
+ for line in doc:
+ line = line.strip()
+ lines.append(line)
+
+ doc = '\n'.join(lines)
+ about.append(textile(doc, head_offset=self.head_offset+offset))
+
+ about = '\n'.join(about)
+ about = about.replace('<br />', '')
+
+ return about
+
+
+ def acronym(self, text):
+ """Process acronyms.
+
+ Acronyms can have letters in upper and lower caps, or even numbers,
+ provided that the numbers and upper caps are the same in the
+ abbreviation and in the description. For example:
+
+ XHTML(eXtensible HyperText Markup Language)
+ OPeNDAP(Open source Project for a Network Data Access Protocol)
+ L94(Levitus 94)
+
+ are all valid acronyms.
+
+ ---
+ h1. Acronyms
+
+ You can define acronyms in your text the following way:
+
+ pre. This is XHTML(eXtensible HyperText Markup Language).
+
+ The resulting code is:
+
+ pre. <p><acronym title="eXtensible HyperText Markup Language"><span class="caps">XHTML</span></acronym></p>
+
+ Acronyms can have letters in upper and lower caps, or even numbers,
+ provided that the numbers and upper caps are the same in the
+ abbreviation and in the description. For example:
+
+ pre. XHTML(eXtensible HyperText Markup Language)
+ OPeNDAP(Open source Project for a Network Data Access Protocol)
+ L94(Levitus 94)
+
+ are all valid acronyms.
+ """
+ # Find the acronyms.
+ acronyms = r'''(?P<acronym>[\w]+)\((?P<definition>[^\(\)]+?)\)'''
+
+ # Check all acronyms.
+ for acronym, definition in re.findall(acronyms, text):
+ caps_acronym = ''.join(re.findall('[A-Z\d]+', acronym))
+ caps_definition = ''.join(re.findall('[A-Z\d]+', definition))
+ if caps_acronym and caps_acronym == caps_definition:
+ text = text.replace('%s(%s)' % (acronym, definition), '<acronym title="%s">%s</acronym>' % (definition, acronym))
+
+ text = html_replace(r'''(^|\s)([A-Z]{3,})\b(?!\()''', r'''\1<span class="caps">\2</span>''', text)
+
+ return text
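+
+ # Example (comment only): acronym('This is HTML(HyperText Markup Language).')
+ # should yield
+ # <acronym title="HyperText Markup Language"><span class="caps">HTML</span></acronym>,
+ # since the capitals in the abbreviation match those in the definition.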
+
+
+ def footnotes(self, text):
+ """Add titles to footnotes references.
+
+ This function searches for footnotes references like this [1], and
+ adds a title to the link containing the first paragraph of the
+ footnote.
+ """
+ # Search for footnotes.
+ p = re.compile(r'''<p class="footnote" id="fn(?P<n>\d+)"><sup>(?P=n)</sup>(?P<note>.*)</p>''')
+ for m in p.finditer(text):
+ n = m.group('n')
+ note = m.group('note').strip()
+
+ # Strip HTML from note.
+ note = re.sub('<.*?>', '', note)
+
+ # Add the title.
+ text = text.replace('<a href="#fn%s">' % n, '<a href="#fn%s" title="%s">' % (n, note))
+
+ return text
+
+
+ def macros(self, m):
+ """Quick macros.
+
+ This function replaces macros inside curly braces using a built-in
+ dictionary, falling back to Unicode character names when the key
+ doesn't exist.
+
+ ---
+ h1. Macros
+
+ Textile has support for character macros, which should be enclosed
+ in curly braces. A few useful ones are:
+
+ pre. {C=} or {=C}: euro sign
+ {+-} or {-+}: plus-minus sign
+ {L-} or {-L}: pound sign.
+
+ You can also make accented characters:
+
+ pre. Expos{e'}
+
+ Becomes:
+
+ pre. <p>Expos&amp;#233;</p>
+
+ You can also specify Unicode names like:
+
+ pre. {umbrella}
+ {white smiling face}
+ """
+ entity = m.group(1)
+
+ macros = {'c|': '&#162;', # cent sign
+ '|c': '&#162;', # cent sign
+ 'L-': '&#163;', # pound sign
+ '-L': '&#163;', # pound sign
+ 'Y=': '&#165;', # yen sign
+ '=Y': '&#165;', # yen sign
+ '(c)': '&#169;', # copyright sign
+ '<<': '&#171;', # left-pointing double angle quotation
+ '(r)': '&#174;', # registered sign
+ '+_': '&#177;', # plus-minus sign
+ '_+': '&#177;', # plus-minus sign
+ '>>': '&#187;', # right-pointing double angle quotation
+ '1/4': '&#188;', # vulgar fraction one quarter
+ '1/2': '&#189;', # vulgar fraction one half
+ '3/4': '&#190;', # vulgar fraction three quarters
+ 'A`': '&#192;', # latin capital letter a with grave
+ '`A': '&#192;', # latin capital letter a with grave
+ 'A\'': '&#193;', # latin capital letter a with acute
+ '\'A': '&#193;', # latin capital letter a with acute
+ 'A^': '&#194;', # latin capital letter a with circumflex
+ '^A': '&#194;', # latin capital letter a with circumflex
+ 'A~': '&#195;', # latin capital letter a with tilde
+ '~A': '&#195;', # latin capital letter a with tilde
+ 'A"': '&#196;', # latin capital letter a with diaeresis
+ '"A': '&#196;', # latin capital letter a with diaeresis
+ 'Ao': '&#197;', # latin capital letter a with ring above
+ 'oA': '&#197;', # latin capital letter a with ring above
+ 'AE': '&#198;', # latin capital letter ae
+ 'C,': '&#199;', # latin capital letter c with cedilla
+ ',C': '&#199;', # latin capital letter c with cedilla
+ 'E`': '&#200;', # latin capital letter e with grave
+ '`E': '&#200;', # latin capital letter e with grave
+ 'E\'': '&#201;', # latin capital letter e with acute
+ '\'E': '&#201;', # latin capital letter e with acute
+ 'E^': '&#202;', # latin capital letter e with circumflex
+ '^E': '&#202;', # latin capital letter e with circumflex
+ 'E"': '&#203;', # latin capital letter e with diaeresis
+ '"E': '&#203;', # latin capital letter e with diaeresis
+ 'I`': '&#204;', # latin capital letter i with grave
+ '`I': '&#204;', # latin capital letter i with grave
+ 'I\'': '&#205;', # latin capital letter i with acute
+ '\'I': '&#205;', # latin capital letter i with acute
+ 'I^': '&#206;', # latin capital letter i with circumflex
+ '^I': '&#206;', # latin capital letter i with circumflex
+ 'I"': '&#207;', # latin capital letter i with diaeresis
+ '"I': '&#207;', # latin capital letter i with diaeresis
+ 'D-': '&#208;', # latin capital letter eth
+ '-D': '&#208;', # latin capital letter eth
+ 'N~': '&#209;', # latin capital letter n with tilde
+ '~N': '&#209;', # latin capital letter n with tilde
+ 'O`': '&#210;', # latin capital letter o with grave
+ '`O': '&#210;', # latin capital letter o with grave
+ 'O\'': '&#211;', # latin capital letter o with acute
+ '\'O': '&#211;', # latin capital letter o with acute
+ 'O^': '&#212;', # latin capital letter o with circumflex
+ '^O': '&#212;', # latin capital letter o with circumflex
+ 'O~': '&#213;', # latin capital letter o with tilde
+ '~O': '&#213;', # latin capital letter o with tilde
+ 'O"': '&#214;', # latin capital letter o with diaeresis
+ '"O': '&#214;', # latin capital letter o with diaeresis
+ 'O/': '&#216;', # latin capital letter o with stroke
+ '/O': '&#216;', # latin capital letter o with stroke
+ 'U`': '&#217;', # latin capital letter u with grave
+ '`U': '&#217;', # latin capital letter u with grave
+ 'U\'': '&#218;', # latin capital letter u with acute
+ '\'U': '&#218;', # latin capital letter u with acute
+ 'U^': '&#219;', # latin capital letter u with circumflex
+ '^U': '&#219;', # latin capital letter u with circumflex
+ 'U"': '&#220;', # latin capital letter u with diaeresis
+ '"U': '&#220;', # latin capital letter u with diaeresis
+ 'Y\'': '&#221;', # latin capital letter y with acute
+ '\'Y': '&#221;', # latin capital letter y with acute
+ 'a`': '&#224;', # latin small letter a with grave
+ '`a': '&#224;', # latin small letter a with grave
+ 'a\'': '&#225;', # latin small letter a with acute
+ '\'a': '&#225;', # latin small letter a with acute
+ 'a^': '&#226;', # latin small letter a with circumflex
+ '^a': '&#226;', # latin small letter a with circumflex
+ 'a~': '&#227;', # latin small letter a with tilde
+ '~a': '&#227;', # latin small letter a with tilde
+ 'a"': '&#228;', # latin small letter a with diaeresis
+ '"a': '&#228;', # latin small letter a with diaeresis
+ 'ao': '&#229;', # latin small letter a with ring above
+ 'oa': '&#229;', # latin small letter a with ring above
+ 'ae': '&#230;', # latin small letter ae
+ 'c,': '&#231;', # latin small letter c with cedilla
+ ',c': '&#231;', # latin small letter c with cedilla
+ 'e`': '&#232;', # latin small letter e with grave
+ '`e': '&#232;', # latin small letter e with grave
+ 'e\'': '&#233;', # latin small letter e with acute
+ '\'e': '&#233;', # latin small letter e with acute
+ 'e^': '&#234;', # latin small letter e with circumflex
+ '^e': '&#234;', # latin small letter e with circumflex
+ 'e"': '&#235;', # latin small letter e with diaeresis
+ '"e': '&#235;', # latin small letter e with diaeresis
+ 'i`': '&#236;', # latin small letter i with grave
+ '`i': '&#236;', # latin small letter i with grave
+ 'i\'': '&#237;', # latin small letter i with acute
+ '\'i': '&#237;', # latin small letter i with acute
+ 'i^': '&#238;', # latin small letter i with circumflex
+ '^i': '&#238;', # latin small letter i with circumflex
+ 'i"': '&#239;', # latin small letter i with diaeresis
+ '"i': '&#239;', # latin small letter i with diaeresis
+ 'n~': '&#241;', # latin small letter n with tilde
+ '~n': '&#241;', # latin small letter n with tilde
+ 'o`': '&#242;', # latin small letter o with grave
+ '`o': '&#242;', # latin small letter o with grave
+ 'o\'': '&#243;', # latin small letter o with acute
+ '\'o': '&#243;', # latin small letter o with acute
+ 'o^': '&#244;', # latin small letter o with circumflex
+ '^o': '&#244;', # latin small letter o with circumflex
+ 'o~': '&#245;', # latin small letter o with tilde
+ '~o': '&#245;', # latin small letter o with tilde
+ 'o"': '&#246;', # latin small letter o with diaeresis
+ '"o': '&#246;', # latin small letter o with diaeresis
+ ':-': '&#247;', # division sign
+ '-:': '&#247;', # division sign
+ 'o/': '&#248;', # latin small letter o with stroke
+ '/o': '&#248;', # latin small letter o with stroke
+ 'u`': '&#249;', # latin small letter u with grave
+ '`u': '&#249;', # latin small letter u with grave
+ 'u\'': '&#250;', # latin small letter u with acute
+ '\'u': '&#250;', # latin small letter u with acute
+ 'u^': '&#251;', # latin small letter u with circumflex
+ '^u': '&#251;', # latin small letter u with circumflex
+ 'u"': '&#252;', # latin small letter u with diaeresis
+ '"u': '&#252;', # latin small letter u with diaeresis
+ 'y\'': '&#253;', # latin small letter y with acute
+ '\'y': '&#253;', # latin small letter y with acute
+ 'y"': '&#255', # latin small letter y with diaeresis
+ '"y': '&#255', # latin small letter y with diaeresis
+ 'OE': '&#338;', # latin capital ligature oe
+ 'oe': '&#339;', # latin small ligature oe
+ '*': '&#8226;', # bullet
+ 'Fr': '&#8355;', # french franc sign
+ 'L=': '&#8356;', # lira sign
+ '=L': '&#8356;', # lira sign
+ 'Rs': '&#8360;', # rupee sign
+ 'C=': '&#8364;', # euro sign
+ '=C': '&#8364;', # euro sign
+ 'tm': '&#8482;', # trade mark sign
+ '<-': '&#8592;', # leftwards arrow
+ '->': '&#8594;', # rightwards arrow
+ '<=': '&#8656;', # leftwards double arrow
+ '=>': '&#8658;', # rightwards double arrow
+ '=/': '&#8800;', # not equal to
+ '/=': '&#8800;', # not equal to
+ '<_': '&#8804;', # less-than or equal to
+ '_<': '&#8804;', # less-than or equal to
+ '>_': '&#8805;', # greater-than or equal to
+ '_>': '&#8805;', # greater-than or equal to
+ ':(': '&#9785;', # white frowning face
+ ':)': '&#9786;', # white smiling face
+ 'spade': '&#9824;', # black spade suit
+ 'club': '&#9827;', # black club suit
+ 'heart': '&#9829;', # black heart suit
+ 'diamond': '&#9830;', # black diamond suit
+ }
+
+ try:
+ # Try the key.
+ entity = macros[entity]
+ except KeyError:
+ try:
+ # Try a unicode entity.
+ entity = unicodedata.lookup(entity)
+ entity = entity.encode('ascii', 'xmlcharrefreplace')
+ except:
+ # Return the unmodified entity.
+ entity = '{%s}' % entity
+
+ return entity
+
+
+ def glyphs(self, text):
+ """Glyph formatting.
+
+ This function replaces quotation marks, dashes and a few other
+ symbols with numerical entities. The em/en dash definitions
+ come from http://alistapart.com/articles/emen/.
+
+ ---
+ h1. Glyphs
+
+ Textile replaces some of the characters in your text with their
+ equivalent numerical entities. These include:
+
+ * Replace single and double primes used as quotation marks with HTML(HyperText Markup Language) entities for opening and closing quotation marks in readable text, while leaving untouched the primes required within HTML(HyperText Markup Language) tags.
+ * Replace double hyphens (==--==) with an em-dash (&#8212;) entity.
+ * Replace triple hyphens (==---==) with two em-dash (&#8212;&#8212;) entities.
+ * Replace single hyphens surrounded by spaces with an en-dash (&#8211;) entity.
+ * Replace triplets of periods (==...==) with an ellipsis (&#8230;) entity.
+ * Convert many nonstandard characters to browser-safe entities corresponding to keyboard input.
+ * Convert ==(TM)==, ==(R)==, and ==(C)== to &#8482;, &#174;, and &#169;.
+ * Convert the letter x to a dimension sign: 2==x==4 to 2x4 and 8 ==x== 10 to 8x10.
+ """
+ glyphs = [(r'''"(?<!\w)\b''', r'''&#8220;'''), # double quotes
+ (r'''"''', r'''&#8221;'''), # double quotes
+ (r"""\b'""", r'''&#8217;'''), # single quotes
+ (r"""'(?<!\w)\b""", r'''&#8216;'''), # single quotes
+ (r"""'""", r'''&#8217;'''), # single single quote
+ (r'''(\b|^)( )?\.{3}''', r'''\1&#8230;'''), # ellipsis
+ (r'''\b---\b''', r'''&#8212;&#8212;'''), # double em dash
+ (r'''\s?--\s?''', r'''&#8212;'''), # em dash
+ (r'''(\d+)-(\d+)''', r'''\1&#8211;\2'''), # en dash (1954-1999)
+ (r'''(\d+)-(\W)''', r'''\1&#8212;\2'''), # em dash (1954--)
+ (r'''\s-\s''', r''' &#8211; '''), # en dash
+ (r'''(\d+) ?x ?(\d+)''', r'''\1&#215;\2'''), # dimension sign
+ (r'''\b ?(\((tm|TM)\))''', r'''&#8482;'''), # trademark
+ (r'''\b ?(\([rR]\))''', r'''&#174;'''), # registered
+ (r'''\b ?(\([cC]\))''', r'''&#169;'''), # copyright
+ (r'''([^\s])\[(\d+)\]''', #
+ r'''\1<sup class="footnote"><a href="#fn\2">\2</a></sup>'''),# footnote
+ ]
+
+ # Apply macros.
+ text = re.sub(r'''{([^}]+)}''', self.macros, text)
+
+ # LaTeX style quotes.
+ text = text.replace('\x60\x60', '&#8220;')
+ text = text.replace('\xb4\xb4', '&#8221;')
+
+ # Linkify URL and emails.
+ url = r'''(?=[a-zA-Z0-9./#]) # Must start correctly
+ ((?: # Match the leading part (proto://hostname, or just hostname)
+ (?:ftp|https?|telnet|nntp) # protocol
+ :// # ://
+ (?: # Optional 'username:password@'
+ \w+ # username
+ (?::\w+)? # optional :password
+ @ # @
+ )? #
+ [-\w]+(?:\.\w[-\w]*)+ # hostname (sub.example.com)
+ ) #
+ (?::\d+)? # Optional port number
+ (?: # Rest of the URL, optional
+ /? # Start with '/'
+ [^.!,?;:"'<>()\[\]{}\s\x7F-\xFF]* # Can't start with these
+ (?: #
+ [.!,?;:]+ # One or more of these
+ [^.!,?;:"'<>()\[\]{}\s\x7F-\xFF]+ # Can't finish with these
+ #'" # # or ' or "
+ )* #
+ )?) #
+ '''
+
+ email = r'''(?:mailto:)? # Optional mailto:
+ ([-\+\w]+ # username
+ \@ # at
+ [-\w]+(?:\.\w[-\w]*)+) # hostname
+ '''
+
+ # If there is no html, do a simple search and replace.
+ if not re.search(r'''<.*>''', text):
+ for glyph_search, glyph_replace in glyphs:
+ text = preg_replace(glyph_search, glyph_replace, text)
+
+ # Linkify.
+ text = re.sub(re.compile(url, re.VERBOSE), r'''<a href="\1">\1</a>''', text)
+ text = re.sub(re.compile(email, re.VERBOSE), r'''<a href="mailto:\1">\1</a>''', text)
+
+ else:
+ lines = []
+ # Else split the text into an array at <>.
+ for line in re.split('(<.*?>)', text):
+ if not re.match('<.*?>', line):
+ for glyph_search, glyph_replace in glyphs:
+ line = preg_replace(glyph_search, glyph_replace, line)
+
+ # Linkify.
+ line = re.sub(re.compile(url, re.VERBOSE), r'''<a href="\1">\1</a>''', line)
+ line = re.sub(re.compile(email, re.VERBOSE), r'''<a href="mailto:\1">\1</a>''', line)
+
+ lines.append(line)
+
+ text = ''.join(lines)
+
+ return text
+
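+ # Illustrative usage sketch (not part of the original code): applied on its
+ # own, this method maps plain punctuation to numerical entities, e.g.
+ #
+ #   Textiler('').glyphs('Textile(TM) -- so to speak...')
+ #   # -> roughly 'Textile&#8482;&#8212;so to speak&#8230;'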
+
+ def qtags(self, text):
+ """Quick tags formatting.
+
+ This function does the inline formatting of text, like
+ bold, italic, strong and also itex code.
+
+ ---
+ h1. Quick tags
+
+ Quick tags allow you to format your text, making it bold,
+ emphasized or small, for example. The quick tags operators
+ include:
+
+ dl. ==*strong*==:Translates into @<strong>strong</strong>@.
+ ==_emphasis_==:Translates into @<em>emphasis</em>@.
+ ==**bold**==:Translates into @<b>bold</b>@.
+ ==__italics__==:Translates into @<i>italics</i>@.
+ ==++bigger++==:Translates into @<big>bigger</big>@.
+ ==--smaller--==:Translates into: @<small>smaller</small>@.
+ ==-deleted text-==:Translates into @<del>deleted text</del>@.
+ ==+inserted text+==:Translates into @<ins>inserted text</ins>@.
+ ==^superscript^==:Translates into @<sup>superscript</sup>@.
+ ==~subscript~==:Translates into @<sub>subscript</sub>@.
+ ==%span%==:Translates into @<span>span</span>@.
+ ==@code@==:Translates into @<code>code</code>@.
+
+ Note that within a "==@==...==@==" section, @<@ and @>@ are
+ translated into HTML entities automatically.
+
+ Inline formatting operators accept the following modifiers:
+
+ dl. {style rule}:A CSS(Cascading Style Sheets) style rule.
+ [ll]:A language identifier (for a "lang" attribute).
+ (class) or (#id) or (class#id):For CSS(Cascading Style Sheets) class and id attributes.
+ """
+ # itex2mml.
+ text = re.sub('\$(.*?)\$', lambda m: self.itex(m.group()), text)
+
+ # Add span tags to upper-case words which don't have a description.
+ #text = preg_replace(r'''(^|\s)([A-Z]{3,})\b(?!\()''', r'''\1<span class="caps">\2</span>''', text)
+
+ # Quick tags.
+ qtags = [('**', 'b', {'qf': '(?<!\*)\*\*(?!\*)', 'cls': '\*'}),
+ ('__', 'i', {'qf': '(?<!_)__(?!_)', 'cls': '_'}),
+ ('??', 'cite', {'qf': '\?\?(?!\?)', 'cls': '\?'}),
+ ('-', 'del', {'qf': '(?<!\-)\-(?!\-)', 'cls': '-'}),
+ ('+', 'ins', {'qf': '(?<!\+)\+(?!\+)', 'cls': '\+'}),
+ ('*', 'strong', {'qf': '(?<!\*)\*(?!\*)', 'cls': '\*'}),
+ ('_', 'em', {'qf': '(?<!_)_(?!_)', 'cls': '_'}),
+ ('++', 'big', {'qf': '(?<!\+)\+\+(?!\+)', 'cls': '\+\+'}),
+ ('--', 'small', {'qf': '(?<!\-)\-\-(?!\-)', 'cls': '\-\-'}),
+ ('~', 'sub', {'qf': '(?<!\~)\~(?!(\\\/~))', 'cls': '\~'}),
+ ('@', 'code', {'qf': '(?<!@)@(?!@)', 'cls': '@'}),
+ ('%', 'span', {'qf': '(?<!%)%(?!%)', 'cls': '%'}),
+ ]
+
+ # Superscript.
+ text = re.sub(r'''(?<!\^)\^(?!\^)(.+?)(?<!\^)\^(?!\^)''', r'''<sup>\1</sup>''', text)
+
+ # This is from the perl version of Textile.
+ for qtag, htmltag, redict in qtags:
+ self.res.update(redict)
+ p = re.compile(r'''(?: #
+ ^ # Start of string
+ | #
+ (?<=[\s>'"]) # Whitespace, end of tag, quotes
+ | #
+ (?P<pre>[{[]) # Surrounded by [ or {
+ | #
+ (?<=%(punct)s) # Punctuation
+ ) #
+ %(qf)s # opening tag
+ %(qattr)s # attributes
+ (?P<text>[^%(cls)s\s].*?) # text
+ (?<=\S) # non-whitespace
+ %(qf)s #
+ (?: #
+ $ # End of string
+ | #
+ (?P<post>[\]}]) # Surrounded by ] or }
+ | #
+ (?=%(punct)s{1,2}|\s) # punctuation
+ ) #
+ ''' % self.res, re.VERBOSE)
+
+ def _replace(m):
+ c = m.groupdict('')
+
+ attributes = self.parse_params(c['parameters'])
+ open_tag = self.build_open_tag(htmltag, attributes)
+ close_tag = '</%s>' % htmltag
+
+ # Replace < and > inside <code></code>.
+ if htmltag == 'code':
+ c['text'] = c['text'].replace('<', '&lt;')
+ c['text'] = c['text'].replace('>', '&gt;')
+
+ return open_tag + c['text'] + close_tag
+
+ text = p.sub(_replace, text)
+
+ return text
+
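+ # Illustrative example (hedged, exact whitespace may differ): run through the
+ # module-level textile() helper defined at the end of this file, the quick
+ # tags documented above become their HTML counterparts, e.g.
+ #
+ #   textile('A *strong* and _emphasized_ word.')
+ #   # -> roughly '<p>A <strong>strong</strong> and <em>emphasized</em> word.</p>'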
+
+ def images(self, text):
+ """Process images.
+
+ This function processes image tags, with or without links. Images
+ can have vertical and/or horizontal alignment, and can be resized
+ (inefficiently) using width and height attributes.
+
+ ---
+ h1. Images
+
+ An image is generated by enclosing the image source in @!@:
+
+ pre. !/path/to/image!
+
+ You may optionally specify an alternative text for the image, which
+ will also be used as its title:
+
+ pre. !image.jpg (Nice picture)!
+
+ Becomes:
+
+ pre. <p><img src="image.jpg" alt="Nice picture" title="Nice picture" /></p>
+
+ If you want to make the image point to a link, simply append a
+ colon and the URL(Uniform Resource Locator) to the image:
+
+ pre. !image.jpg!:http://diveintopython.org
+
+ Images can also be resized. These are all equivalent:
+
+ pre. !image.jpg 10x20!
+ !image.jpg 10w 20h!
+ !image.jpg 20h 10w!
+
+ The image @image.jpg@ will be resized to width 10 and height 20.
+
+ Modifiers to the @<img>@ tag go after the opening @!@:
+
+ pre. !(class#id)^image.jpg!
+
+ Allowed modifiers include:
+
+ dl. &lt;:Align the image to the left (causes the image to float if CSS options are enabled).
+ &gt;:Align the image to the right (causes the image to float if CSS options are enabled).
+ - (dash):Aligns the image to the middle.
+ ^:Aligns the image to the top.
+ ~ (tilde):Aligns the image to the bottom.
+ {style rule}:Applies a CSS style rule to the image.
+ (class) or (#id) or (class#id):Applies a CSS class and/or id to the image.
+ ( (one or more):Pads 1em on the left for each '(' character.
+ ) (one or more):Pads 1em on the right for each ')' character.
+
+ Images receive the class "top" when using top alignment, "bottom"
+ for bottom alignment and "middle" for middle alignment.
+ """
+ # Compile the beast.
+ p = re.compile(r'''\! # Opening !
+ %(iattr)s # Image attributes
+ (?P<src>%(url)s) # Image src
+ \s? # Optional whitespace
+ ( #
+ \( #
+ (?P<alt>.*?) # Optional (alt) attribute
+ \) #
+ )? #
+ \s? # Optional whitespace
+ %(resize)s # Resize parameters
+ \! # Closing !
+ ( # Optional link
+ : # starts with ':'
+ (?P<link> #
+ %(url)s # link HREF
+ ) #
+ )? #
+ ''' % self.res, re.VERBOSE)
+
+ for m in p.finditer(text):
+ c = m.groupdict('')
+
+ # Build the parameters for the <img /> tag.
+ attributes = self.parse_params(c['parameters'], align_type='image')
+ attributes.update(c)
+ if attributes['alt']:
+ attributes['title'] = attributes['alt']
+
+ # Append height and width.
+ attributes['width'] = m.groups()[5] or m.groups()[7] or m.groups()[10]
+ attributes['height'] = m.groups()[6] or m.groups()[8] or m.groups()[9]
+
+ # Create the image tag.
+ tag = self.image(attributes)
+
+ text = text.replace(m.group(), tag)
+
+ return text
+
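+ # Illustrative example, combining the image and link markup documented above
+ # (attribute order may differ):
+ #
+ #   textile('!image.jpg (Nice picture)!:http://example.com')
+ #   # -> roughly '<p><a href="http://example.com"><img src="image.jpg"
+ #   #    alt="Nice picture" title="Nice picture" /></a></p>'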
+
+ def image(self, attributes):
+ """Process each image.
+
+ This method builds the <img> tag for each image in the text. It's
+ separated from the 'images' method so it can be easily overridden when
+ subclassing Textiler. Useful if you want to download and/or process
+ the images, for example.
+ """
+ link = attributes['link']
+ del attributes['link']
+ del attributes['parameters']
+
+ # Build the tag.
+ tag = self.build_open_tag('img', attributes, single=1)
+
+ if link:
+ href = preg_replace('&(?!(#|amp))', '&amp;', link)
+ tag = '<a href="%s">%s</a>' % (href, tag)
+
+ return tag
+
+
+ def links(self, text):
+ """Process links.
+
+ This function is responsible for processing links. It has
+ some nice shortcuts to Google, Amazon and IMDB queries.
+
+ ---
+ h1. Links
+
+ A link is written the following way:
+
+ pre. "This is the text link":http://example.com
+
+ The result from this markup is:
+
+ pre. <p><a href="http://example.com">This is the text link</a></p>
+
+ You can add an optional @title@ attribute:
+
+ pre. "This is the text link(This is the title)":http://example.com
+
+ The link can be customised as well:
+
+ pre. "(nospam)E-mail me please":mailto:someone@example.com
+
+ You can use either single or double quotes. They must be enclosed in
+ whitespace, punctuation or brackets:
+
+ pre. You["gotta":http://example.com]seethis!
+
+ If you are going to reference the same link a couple of times, you
+ can define a lookup list anywhere on your document:
+
+ pre. [python]http://www.python.org
+
+ Links to the Python website can then be defined the following way:
+
+ pre. "Check this":python
+
+ There are also shortcuts for Amazon, IMDB(Internet Movie DataBase) and
+ Google queries:
+
+ pre. "Has anyone seen this guy?":imdb:Stephen+Fry
+ "Really nice book":amazon:Goedel+Escher+Bach
+ "PyBlosxom":google
+ ["Using Textile and Blosxom with Python":google:python blosxom textile]
+
+ Becomes:
+
+ pre. <a href="http://www.imdb.com/Find?for=Stephen+Fry">Has anyone seen this guy?</a>
+ <a href="http://www.amazon.com/exec/obidos/external-search?index=blended&amp;keyword=Goedel+Escher+Bach">Really nice book</a>
+ <a href="http://www.google.com/search?q=PyBlosxom">PyBlosxom</a>
+ <a href="http://www.google.com/search?q=python+blosxom+textile">Using Textile and Blosxom with Python</a>
+ """
+ linkres = [r'''\[ # [
+ (?P<quote>"|') # Opening quotes
+ %(lattr)s # Link attributes
+ (?P<text>[^"]+?) # Link text
+ \s? # Optional whitespace
+ (?:\((?P<title>[^\)]+?)\))? # Optional (title)
+ (?P=quote) # Closing quotes
+ : # :
+ (?P<href>[^\]]+) # HREF
+ \] # ]
+ ''' % self.res,
+ r'''(?P<quote>"|') # Opening quotes
+ %(lattr)s # Link attributes
+ (?P<text>[^"]+?) # Link text
+ \s? # Optional whitespace
+ (?:\((?P<title>[^\)]+?)\))? # Optional (title)
+ (?P=quote) # Closing quotes
+ : # :
+ (?P<href>%(url)s) # HREF
+ ''' % self.res]
+
+ for linkre in linkres:
+ p = re.compile(linkre, re.VERBOSE)
+ for m in p.finditer(text):
+ c = m.groupdict('')
+
+ attributes = self.parse_params(c['parameters'])
+ attributes['title'] = c['title'].replace('"', '&quot;')
+
+ # Search lookup list.
+ link = self._links.get(c['href'], None) or c['href']
+
+ # Hyperlinks for Amazon, IMDB and Google searches.
+ parts = link.split(':', 1)
+ proto = parts[0]
+ if len(parts) == 2:
+ query = parts[1]
+ else:
+ query = c['text']
+
+ query = query.replace(' ', '+')
+
+ # Look for smart search.
+ if self.searches.has_key(proto):
+ link = self.searches[proto] % query
+
+ # Fix URL.
+ attributes['href'] = preg_replace('&(?!(#|amp))', '&amp;', link)
+
+ open_tag = self.build_open_tag('a', attributes)
+ close_tag = '</a>'
+
+ repl = open_tag + c['text'] + close_tag
+
+ text = text.replace(m.group(), repl)
+
+ return text
+
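+ # Illustrative example, following the docstring above:
+ #
+ #   textile('"Check this":http://www.python.org')
+ #   # -> roughly '<p><a href="http://www.python.org">Check this</a></p>'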
+
+ def format(self, text):
+ """Text formatting.
+
+ This function defines the order in which the
+ formatting is applied.
+ """
+ text = self.qtags(text)
+ text = self.images(text)
+ text = self.links(text)
+ text = self.acronym(text)
+ text = self.glyphs(text)
+
+ return text
+
+
+ def inline(self, text):
+ """Inline formatting.
+
+ This function calls the formatting on the inline text,
+ taking care to avoid the escaped parts.
+
+ ---
+ h1. Inline
+
+ Inline formatting is applied within a block of text.
+ """
+ if not re.search(r'''==(.*?)==''', text):
+ text = self.format(text)
+
+ else:
+ lines = []
+ # Else split the text into an array at the ==...== markers.
+ for line in re.split('(==.*?==)', text):
+ if not re.match('==.*?==', line):
+ line = self.format(line)
+ else:
+ line = line[2:-2]
+
+ lines.append(line)
+
+ text = ''.join(lines)
+
+ return text
+
+
+def textile(text, **args):
+ """This is Textile.
+
+ Generates XHTML from a simple markup developed by Dean Allen.
+
+ This function should be called like this:
+
+ textile(text, head_offset=0, validate=0, sanitize=0,
+ encoding='latin-1', output='ASCII')
+ """
+ return Textiler(text).process(**args)
+
+
+if __name__ == '__main__':
+ print textile('tell me about textile.', head_offset=1)
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textile.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textile.pyc
new file mode 100644
index 0000000000..baba6254d3
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textile.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textutil.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textutil.py
new file mode 100755
index 0000000000..edb2a5652d
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textutil.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+
+import sys, string, re, optparse
+import config, filetool, comment
+
+
+
+
+def convertMac2Unix(content):
+ return content.replace("\r", "\n")
+
+def convertMac2Dos(content):
+ return content.replace("\r", "\r\n")
+
+def convertDos2Unix(content):
+ return content.replace("\r\n", "\n")
+
+def convertDos2Mac(content):
+ return content.replace("\r\n", "\r")
+
+def convertUnix2Dos(content):
+ return content.replace("\n", "\r\n")
+
+def convertUnix2Mac(content):
+ return content.replace("\n", "\r")
+
+
+
+
+def any2Unix(content):
+ # DOS must be first, because it is a combination of Unix & Mac
+ return convertMac2Unix(convertDos2Unix(content))
+
+def any2Dos(content):
+ # to avoid doubling existing DOS breaks, convert to a
+ # single-character line ending (Unix) first
+ return convertUnix2Dos(any2Unix(content))
+
+def any2Mac(content):
+ # to avoid doubling existing DOS breaks, convert to a
+ # single-character line ending (Unix) first
+ return convertUnix2Mac(any2Unix(content))
+
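+# Illustrative example: mixed input is normalized by converting DOS breaks
+# first, so no break is counted twice:
+#
+#   any2Dos("a\nb\r\nc\r")   # -> "a\r\nb\r\nc\r\n"
+#   any2Unix("a\nb\r\nc\r")  # -> "a\nb\nc\n"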
+
+
+def getLineEndingName(content):
+ if "\r\n" in content:
+ return "dos"
+
+ if "\r" in content:
+ return "mac"
+
+ # defaults to unix
+ return "unix"
+
+def getLineEndingSequence(content):
+ if "\r\n" in content:
+ return "\r\n"
+
+ if "\r" in content:
+ return "\r"
+
+ # defaults to unix
+ return "\n"
+
+
+
+def tab2Space(content, spaces=2):
+ return content.replace("\t", " " * spaces)
+
+def spaces2Tab(content, spaces=2):
+ return content.replace(" " * spaces, "\t")
+
+
+
+def removeTrailingSpaces(content):
+ ending = getLineEndingSequence(content)
+ lines = content.split(ending)
+ length = len(lines)
+ pos = 0
+
+ while pos < length:
+ lines[pos] = lines[pos].rstrip()
+ pos += 1
+
+ return ending.join(lines)
+
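+# Illustrative example: trailing whitespace is stripped per line while the
+# file's own line ending sequence is preserved:
+#
+#   removeTrailingSpaces("a  \r\nb\t\r\n")  # -> "a\r\nb\r\n"
+#   tab2Space("\tfoo", spaces=2)            # -> "  foo"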
+
+
+
+
+
+
+
+
+
+
+
+
+
+def main():
+ allowed = [ "any2Dos", "any2Mac", "any2Unix", "convertDos2Mac", "convertDos2Unix", "convertMac2Dos", "convertMac2Unix", "convertUnix2Dos", "convertUnix2Mac", "spaces2Tab", "tab2Space" ]
+
+ parser = optparse.OptionParser()
+
+ parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=False, help="Quiet output mode.")
+ parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="Verbose output mode.")
+ parser.add_option("-c", "--command", dest="command", default="normalize", help="Normalize a file")
+ parser.add_option("--encoding", dest="encoding", default="utf-8", metavar="ENCODING", help="Defines the encoding expected for input files.")
+
+ (options, args) = parser.parse_args()
+
+ if not options.command in allowed:
+ print "Unallowed command: %s" % options.command
+ sys.exit(1)
+
+ if len(args) == 0:
+ print "Needs one or more arguments (files) to modify!"
+ sys.exit(1)
+
+ for fileName in args:
+ if options.verbose:
+ print " * Running %s on: %s" % (options.command, fileName)
+
+ origFileContent = filetool.read(fileName, options.encoding)
+ patchedFileContent = eval(options.command + "(origFileContent)")
+
+ if patchedFileContent != origFileContent:
+ filetool.save(fileName, patchedFileContent, options.encoding)
+
+
+
+
+
+if __name__ == '__main__':
+ try:
+ main()
+
+ except KeyboardInterrupt:
+ print
+ print " * Keyboard Interrupt"
+ sys.exit(1)
+ \ No newline at end of file
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textutil.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textutil.pyc
new file mode 100644
index 0000000000..7df9a3c20a
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/textutil.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tokenizer.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tokenizer.py
new file mode 100755
index 0000000000..2f8e40436b
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tokenizer.py
@@ -0,0 +1,349 @@
+#!/usr/bin/env python
+
+import sys, string, re, optparse
+import config, filetool, comment
+
+R_WHITESPACE = re.compile(r"(\s+)")
+R_NONWHITESPACE = re.compile("\S+")
+R_NUMBER = re.compile("^[0-9]+")
+R_NEWLINE = re.compile(r"(\n)")
+
+# Ideas from: http://www.regular-expressions.info/examplesprogrammer.html
+# Multicomment RegExp inspired by: http://ostermiller.org/findcomment.html
+
+# builds regexp strings
+S_STRING_A = "'[^'\\\n]*(\\.|\n[^'\\\n]*)*'"
+S_STRING_B = '"[^"\\\n]*(\\.|\n[^"\\\n]*)*"'
+
+S_FLOAT = "([0-9]+\.[0-9]+)"
+
+S_OPERATORS_2 = r"(==)|(!=)|(\+\+)|(--)|(-=)|(\+=)|(\*=)|(/=)|(%=)|(&&)|(\|\|)|(\>=)|(\<=)|(>>)|(<<)|(\^\|)|(\|=)|(\^=)|(&=)|(::)|(\.\.)"
+S_OPERATORS_3 = r"(===)|(!==)|(\<\<=)|(\>\>=)|(\>\>\>)"
+S_OPERATORS_4 = r"(\>\>\>=)"
+S_OPERATORS = "(" + S_OPERATORS_4 + "|" + S_OPERATORS_3 + "|" + S_OPERATORS_2 + ")"
+
+S_REGEXP = "(\/[^\t\n\r\f\v\/]+?\/[mgi]*)"
+S_REGEXP_A = "\.(match|search|split)\s*\(\s*\(*\s*" + S_REGEXP + "\s*\)*\s*\)"
+S_REGEXP_B = "\.(replace)\s*\(\s*\(*\s*" + S_REGEXP + "\s*\)*\s*?,?"
+S_REGEXP_C = "\s*\(*\s*" + S_REGEXP + "\)*\.(test|exec)\s*\(\s*"
+S_REGEXP_D = "(:|=|\?)\s*\(*\s*" + S_REGEXP + "\s*\)*"
+S_REGEXP_ALL = S_REGEXP_A + "|" + S_REGEXP_B + "|" + S_REGEXP_C + "|" + S_REGEXP_D
+
+S_ALL = "(" + comment.S_BLOCK_COMMENT + "|" + comment.S_INLINE_COMMENT + "|" + S_STRING_A + "|" + S_STRING_B + "|" + S_REGEXP_ALL + "|" + S_FLOAT + "|" + S_OPERATORS + ")"
+
+# compile regexp strings
+R_STRING_A = re.compile("^" + S_STRING_A + "$")
+R_STRING_B = re.compile("^" + S_STRING_B + "$")
+R_FLOAT = re.compile("^" + S_FLOAT + "$")
+R_OPERATORS = re.compile(S_OPERATORS)
+R_REGEXP = re.compile(S_REGEXP)
+R_REGEXP_A = re.compile(S_REGEXP_A)
+R_REGEXP_B = re.compile(S_REGEXP_B)
+R_REGEXP_C = re.compile(S_REGEXP_C)
+R_REGEXP_D = re.compile(S_REGEXP_D)
+R_ALL = re.compile(S_ALL)
+
+
+
+
+parseLine = 1
+parseColumn = 1
+parseUniqueId = ""
+
+
+
+def protectEscape(s):
+ return s.replace("\\\\", "__$ESCAPE0$__").replace("\\\"", "__$ESCAPE1$__").replace("\\\'", "__$ESCAPE2__").replace("\/", "__$ESCAPE3__").replace("\!", "__$ESCAPE4__")
+
+
+
+def recoverEscape(s):
+ return s.replace("__$ESCAPE0$__", "\\\\").replace("__$ESCAPE1$__", "\\\"").replace("__$ESCAPE2__", "\\'").replace("__$ESCAPE3__", "\/").replace("__$ESCAPE4__", "\!")
+
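+# Note (illustrative): protectEscape()/recoverEscape() form a reversible pair.
+# Escaped backslashes, quotes, slashes and bangs are swapped for placeholder
+# markers so the regular expressions below never match inside them; as long as
+# the __$ESCAPE*__ markers do not already occur in the source,
+#
+#   recoverEscape(protectEscape(source)) == source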
+
+
+def parseElement(element):
+ global parseUniqueId
+ global parseLine
+ global parseColumn
+
+ if config.JSPROTECTED.has_key(element):
+ # print "PROTECTED: %s" % PROTECTED[content]
+ obj = { "type" : "protected", "detail" : config.JSPROTECTED[element], "source" : element, "line" : parseLine, "column" : parseColumn, "id" : parseUniqueId }
+
+ elif element in config.JSBUILTIN:
+ # print "BUILTIN: %s" % content
+ obj = { "type" : "builtin", "detail" : "", "source" : element, "line" : parseLine, "column" : parseColumn, "id" : parseUniqueId }
+
+ elif R_NUMBER.search(element):
+ # print "NUMBER: %s" % content
+ obj = { "type" : "number", "detail" : "int", "source" : element, "line" : parseLine, "column" : parseColumn, "id" : parseUniqueId }
+
+ elif element.startswith("_"):
+ # print "PRIVATE NAME: %s" % content
+ obj = { "type" : "name", "detail" : "private", "source" : element, "line" : parseLine, "column" : parseColumn, "id" : parseUniqueId }
+
+ elif len(element) > 0:
+ # print "PUBLIC NAME: %s" % content
+ obj = { "type" : "name", "detail" : "public", "source" : element, "line" : parseLine, "column" : parseColumn, "id" : parseUniqueId }
+
+ parseColumn += len(element)
+
+ return obj
+
+
+def parsePart(part):
+ global parseUniqueId
+ global parseLine
+ global parseColumn
+
+ tokens = []
+ element = ""
+
+ for line in R_NEWLINE.split(part):
+ if line == "\n":
+ tokens.append({ "type" : "eol", "source" : "", "detail" : "", "line" : parseLine, "column" : parseColumn, "id" : parseUniqueId })
+ parseColumn = 1
+ parseLine += 1
+
+ else:
+ for item in R_WHITESPACE.split(line):
+ if item == "":
+ continue
+
+ if not R_NONWHITESPACE.search(item):
+ parseColumn += len(item)
+ continue
+
+ # print "ITEM: '%s'" % item
+
+ for char in item:
+ # work on single character tokens, otherwise concat to a bigger element
+ if config.JSTOKENS.has_key(char):
+ # convert existing element
+ if element != "":
+ if R_NONWHITESPACE.search(element):
+ tokens.append(parseElement(element))
+
+ element = ""
+
+ # add character to token list
+ tokens.append({ "type" : "token", "detail" : config.JSTOKENS[char], "source" : char, "line" : parseLine, "column" : parseColumn, "id" : parseUniqueId })
+ parseColumn += 1
+
+ else:
+ element += char
+
+ # convert remaining stuff to tokens
+ if element != "":
+ if R_NONWHITESPACE.search(element):
+ tokens.append(parseElement(element))
+
+ element = ""
+
+ return tokens
+
+
+
+def parseFragmentLead(content, fragment, tokens):
+ pos = content.find(fragment)
+
+ if pos > 0:
+ tokens.extend(parsePart(recoverEscape(content[0:pos])))
+
+ return content[pos+len(fragment):]
+
+
+
+def hasLeadingContent(tokens):
+ pos = len(tokens) - 1
+ while pos > 0:
+ if tokens[pos]["type"] == "eol":
+ break
+
+ else:
+ return True
+
+ return False
+
+
+
+
+
+def parseStream(content, uniqueId=""):
+ # make global variables available
+ global parseLine
+ global parseColumn
+ global parseUniqueId
+
+ # reset global stuff
+ parseColumn = 1
+ parseLine = 1
+ parseUniqueId = uniqueId
+
+ # prepare storage
+ tokens = []
+ content = protectEscape(content)
+
+ # print " * searching for patterns..."
+ all = R_ALL.findall(content)
+
+ # print " * structuring..."
+ for item in all:
+ fragment = item[0]
+
+ # print "Found: '%s'" % fragment
+
+ if comment.R_BLOCK_COMMENT.match(fragment):
+ source = recoverEscape(fragment)
+ format = comment.getFormat(source)
+ multiline = comment.isMultiLine(source)
+
+ # print "Type:MultiComment"
+ content = parseFragmentLead(content, fragment, tokens)
+
+ atBegin = not hasLeadingContent(tokens)
+ if re.compile("^\s*\n").search(content):
+ atEnd = True
+ else:
+ atEnd = False
+
+ # print "Begin: %s, End: %s" % (atBegin, atEnd)
+
+ # Fixing source content
+ if atBegin:
+ source = comment.outdent(source, parseColumn - 1)
+
+ source = comment.correct(source)
+
+ connection = "before"
+
+ if atEnd and not atBegin:
+ connection = "after"
+ else:
+ connection = "before"
+
+ tokens.append({ "type" : "comment", "detail" : format, "multiline" : multiline, "connection" : connection, "source" : source, "id" : parseUniqueId, "line" : parseLine, "column" : parseColumn, "begin" : atBegin, "end" : atEnd })
+ parseLine += len(fragment.split("\n")) - 1
+
+ elif comment.R_INLINE_COMMENT.match(fragment):
+ # print "Type:SingleComment"
+ source = recoverEscape(fragment)
+ content = parseFragmentLead(content, fragment, tokens)
+
+ atBegin = hasLeadingContent(tokens)
+ atEnd = True
+
+ if atBegin:
+ connection = "after"
+ else:
+ connection = "before"
+
+ source = comment.correct(source)
+
+ tokens.append({ "type" : "comment", "detail" : "inline", "multiline" : False, "connection" : connection, "source" : source, "id" : parseUniqueId, "line" : parseLine, "column" : parseColumn, "begin" : atBegin, "end" : atEnd })
+
+ elif R_STRING_A.match(fragment):
+ # print "Type:StringA: %s" % fragment
+ content = parseFragmentLead(content, fragment, tokens)
+ tokens.append({ "type" : "string", "detail" : "singlequotes", "source" : recoverEscape(fragment)[1:-1].replace("\\\n",""), "id" : parseUniqueId, "line" : parseLine, "column" : parseColumn })
+
+ elif R_STRING_B.match(fragment):
+ # print "Type:StringB: %s" % fragment
+ content = parseFragmentLead(content, fragment, tokens)
+ tokens.append({ "type" : "string", "detail" : "doublequotes", "source" : recoverEscape(fragment)[1:-1].replace("\\\n",""), "id" : parseUniqueId, "line" : parseLine, "column" : parseColumn })
+
+ elif R_FLOAT.match(fragment):
+ # print "Type:Float: %s" % fragment
+ content = parseFragmentLead(content, fragment, tokens)
+ tokens.append({ "type" : "number", "detail" : "float", "source" : fragment, "id" : parseUniqueId, "line" : parseLine, "column" : parseColumn })
+
+ elif R_OPERATORS.match(fragment):
+ # print "Type:Operator: %s" % fragment
+ content = parseFragmentLead(content, fragment, tokens)
+ tokens.append({ "type" : "token", "detail" : config.JSTOKENS[fragment], "source" : fragment, "id" : parseUniqueId, "line" : parseLine, "column" : parseColumn })
+
+ else:
+ fragresult = R_REGEXP.search(fragment)
+
+ if fragresult:
+ # print "Type:RegExp: %s" % fragresult.group(0)
+
+ if R_REGEXP_A.match(fragment) or R_REGEXP_B.match(fragment) or R_REGEXP_C.match(fragment) or R_REGEXP_D.match(fragment):
+ content = parseFragmentLead(content, fragresult.group(0), tokens)
+ tokens.append({ "type" : "regexp", "detail" : "", "source" : recoverEscape(fragresult.group(0)), "id" : parseUniqueId, "line" : parseLine, "column" : parseColumn })
+
+ else:
+ print "Bad regular expression: %s" % fragresult.group(0)
+
+ else:
+ print "Type:None!"
+
+ tokens.extend(parsePart(recoverEscape(content)))
+ tokens.append({ "type" : "eof", "source" : "", "detail" : "", "id" : parseUniqueId, "line" : parseLine, "column" : parseColumn })
+
+ return tokens
+
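+# Usage sketch (illustrative): every entry produced by parseStream() is a
+# plain dict carrying at least type/detail/source/line/column/id, and the
+# stream is always terminated by an "eof" token, e.g.
+#
+#   tokens = parseStream("var a = 1;", "demo")
+#   for token in tokens:
+#       print token["type"], repr(token["source"])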
+
+
+def parseFile(fileName, uniqueId="", encoding="utf-8"):
+ return parseStream(filetool.read(fileName, encoding), uniqueId)
+
+
+
+
+def convertTokensToString(tokens):
+ tokenizedString = ""
+
+ for token in tokens:
+ tokenizedString += "%s%s" % (token, "\n")
+
+ return tokenizedString
+
+
+
+
+
+def main():
+ parser = optparse.OptionParser()
+
+ parser.add_option("-w", "--write", action="store_true", dest="write", default=False, help="Writes file to incoming fileName + EXTENSION.")
+ parser.add_option("-e", "--extension", dest="extension", metavar="EXTENSION", help="The EXTENSION to use", default=".tokenized")
+ parser.add_option("--encoding", dest="encoding", default="utf-8", metavar="ENCODING", help="Defines the encoding expected for input files.")
+
+ (options, args) = parser.parse_args()
+
+ if len(args) == 0:
+ print "Needs one or more arguments (files) to tokenize!"
+ sys.exit(1)
+
+ for fileName in args:
+ if options.write:
+ print "Compiling %s => %s%s" % (fileName, fileName, options.extension)
+ else:
+ print "Compiling %s => stdout" % fileName
+
+ tokenString = convertTokensToString(parseFile(fileName, "", options.encoding))
+
+ if options.write:
+ filetool.save(fileName + options.extension, tokenString, options.encoding)
+
+ else:
+ try:
+ print tokenString
+
+ except UnicodeEncodeError:
+ print " * Could not encode result to ascii. Use '-w' instead."
+ sys.exit(1)
+
+
+
+
+if __name__ == '__main__':
+ try:
+ main()
+
+ except KeyboardInterrupt:
+ print
+ print " * Keyboard Interrupt"
+ sys.exit(1)
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tokenizer.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tokenizer.pyc
new file mode 100644
index 0000000000..b4b81bd063
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tokenizer.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tree.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tree.py
new file mode 100755
index 0000000000..c6147a18b3
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tree.py
@@ -0,0 +1,563 @@
+#!/usr/bin/env python
+
+class NodeAccessException (Exception):
+ def __init__ (self, msg, node):
+ Exception.__init__(self, msg)
+ self.node = node
+
+
+class Node:
+ def __init__ (self, type):
+ self.type = type
+
+
+
+
+
+ def hasAttributes(self):
+ return hasattr(self, "attributes")
+
+ def set(self, key, value):
+ """Sets an attribute"""
+ if not isinstance(value, (basestring, int, long, float, complex, bool)):
+ raise NodeAccessException("'value' is no string or number: " + str(value), self)
+ if not self.hasAttributes():
+ self.attributes = {}
+ self.attributes[key] = value
+ return self
+
+ def get(self, key, mandatory = True):
+ value = None
+ if hasattr(self, "attributes") and key in self.attributes:
+ value = self.attributes[key]
+
+ if value != None:
+ return value
+ elif mandatory:
+ raise NodeAccessException("Node " + self.type + " has no attribute " + key, self)
+
+ def remove(self, key):
+ del self.attributes[key]
+ if len(self.attributes) == 0:
+ del self.attributes
+
+
+
+
+
+
+
+ def hasParent(self):
+ return hasattr(self, "parent") and self.parent != None
+
+ def hasChildren(self, ignoreComments = False):
+ if not ignoreComments:
+ return hasattr(self, "children") and len(self.children) > 0
+ else:
+ if not hasattr(self, "children"):
+ return False
+
+ for child in self.children:
+ if child.type != "comment" and child.type != "commentsBefore" and child.type != "commentsAfter":
+ return True
+
+ def addChild(self, childNode, index = None):
+ if childNode:
+ if not self.hasChildren():
+ self.children = []
+
+ if childNode.hasParent():
+ childNode.parent.removeChild(childNode)
+
+ if index != None:
+ self.children.insert(index, childNode)
+ else:
+ self.children.append(childNode)
+ childNode.parent = self
+ return self
+
+ def removeChild(self, childNode):
+ if self.hasChildren():
+ self.children.remove(childNode)
+ childNode.parent = None
+ if len(self.children) == 0:
+ del self.children
+
+ def replaceChild(self, oldChild, newChild):
+ if self.hasChildren():
+ if newChild.hasParent():
+ newChild.parent.removeChild(newChild)
+
+ self.children.insert(self.children.index(oldChild), newChild)
+ newChild.parent = self
+ self.children.remove(oldChild)
+
+
+
+
+
+
+ def getChild(self, type, mandatory = True):
+ if self.hasChildren():
+ for child in self.children:
+ if child.type == type:
+ return child
+ if mandatory:
+ raise NodeAccessException("Node " + self.type + " has no child with type " + type, self)
+
+ def hasChildRecursive(self, type):
+ if isinstance(type, basestring):
+ if self.type == type:
+ return True
+ elif isinstance(type, list):
+ if self.type in type:
+ return True
+
+ if self.hasChildren():
+ for child in self.children:
+ if child.hasChildRecursive(type):
+ return True
+
+ return False
+
+ def hasChild(self, type):
+ if self.hasChildren():
+ for child in self.children:
+ if isinstance(type, basestring):
+ if child.type == type:
+ return True
+ elif isinstance(type, list):
+ if child.type in type:
+ return True
+
+ return False
+
+ def getChildrenLength(self, ignoreComments=False):
+ if self.hasChildren():
+ if ignoreComments:
+ counter = 0
+ for child in self.children:
+ if not child.type in [ "comment", "commentsBefore", "commentsAfter" ]:
+ counter += 1
+ return counter
+
+ else:
+ return len(self.children)
+
+ return 0
+
+
+
+ def makeComplex(self):
+ makeComplex = self.get("makeComplex", False)
+
+ if makeComplex != None:
+ return makeComplex
+
+ else:
+ makeComplex = False
+
+
+
+ if self.type == "comment":
+ makeComplex = True
+
+ elif self.type == "block":
+ if self.hasChildren():
+ counter = 0
+ for child in self.children:
+ if child.type != "commentsAfter":
+ counter += 1
+ if counter > 1:
+ makeComplex = True
+
+ elif self.type == "loop":
+ if self.get("loopType") == "IF" and self.hasParent() and self.parent.type == "elseStatement":
+ pass
+ else:
+ makeComplex = True
+
+ elif self.type == "function":
+ makeComplex = self.getChild("body").hasChild("block") and self.getChild("body").getChild("block").getChildrenLength() > 0
+
+ elif self.type in [ "loop", "switch" ]:
+ makeComplex = True
+
+ elif self.hasChild("commentsBefore"):
+ makeComplex = True
+
+
+
+ # Final test: Ask the children (slower)
+ if not makeComplex and not self.type in [ "comment", "commentsBefore", "commentsAfter" ]:
+ makeComplex = self.isComplex()
+
+
+ self.set("makeComplex", makeComplex)
+
+ # print "makeComplex: %s = %s" % (self.type, makeComplex)
+
+ return makeComplex
+
+
+
+ def isComplex(self):
+ isComplex = self.get("isComplex", False)
+
+ if isComplex != None:
+ return isComplex
+
+ else:
+ isComplex = False
+
+
+
+ if not self.hasChildren():
+ isComplex = False
+
+ elif self.type == "block":
+ counter = 0
+ if self.hasChildren():
+ for child in self.children:
+ if child.type != "commentsAfter":
+ counter += 1
+
+ if child.hasChild("commentsBefore"):
+ counter += 1
+
+ if counter > 1:
+ break
+
+ if counter > 1:
+ isComplex = True
+
+ else:
+ if self.getChildrenLength() == 0:
+ isComplex = False
+
+ # in else, try to find the mode of the previous if first
+ elif self.hasParent() and self.parent.type == "elseStatement":
+ isComplex = self.parent.parent.getChild("statement").hasComplexBlock()
+
+ # in if, try to find the mode of the parent if (if existent)
+ elif self.hasParent() and self.parent.type == "statement" and self.parent.parent.type == "loop" and self.parent.parent.get("loopType") == "IF":
+ if self.parent.parent.hasParent() and self.parent.parent.parent.hasParent():
+ if self.parent.parent.parent.parent.type == "loop":
+ isComplex = self.parent.parent.parent.parent.getChild("statement").hasComplexBlock()
+
+ # in catch/finally, try to find the mode of the try statement
+ elif self.hasParent() and self.parent.hasParent() and self.parent.parent.type in [ "catch", "finally" ]:
+ isComplex = self.parent.parent.parent.getChild("statement").hasComplexBlock()
+
+ elif self.type == "elseStatement":
+ if self.hasComplexBlock():
+ isComplex = True
+ elif self.hasChild("loop") and self.getChild("loop").getChild("statement").hasComplexBlock():
+ isComplex = True
+
+ elif self.type == "array" :
+ if self.getChildrenLength(True) > 5:
+ isComplex = True
+
+ elif self.type == "map" :
+ ml = self.getChildrenLength(True)
+ if ml > 1:
+ isComplex = True
+
+ # Final test: Ask the children (slower)
+ if not (self.type == "elseStatement" and self.hasChild("loop")):
+ if not isComplex and self.hasComplexChildren():
+ isComplex = True
+
+ # print self.type + " :: %s" % isComplex
+ self.set("isComplex", isComplex)
+
+ # print "isComplex: %s = %s" % (self.type, isComplex)
+
+ return isComplex
+
+
+
+ def hasComplexChildren(self):
+ if self.hasChildren():
+ for child in self.children:
+ if child.makeComplex():
+ return True
+
+ return False
+
+
+ def hasComplexBlock(self):
+ if self.hasChild("block"):
+ return self.getChild("block").isComplex()
+
+ return False
+
+
+ def hasBlockChildren(self):
+ if self.hasChild("block"):
+ return self.getChild("block").hasChildren()
+
+ return False
+
+
+ def getChildPosition(self, searchedChild, ignoreComments = False):
+ if self.hasChildren() and searchedChild in self.children:
+ if ignoreComments:
+ counter = 0
+ for child in self.children:
+ if child == searchedChild:
+ return counter
+
+ if not child.type in [ "comment", "commentsBefore", "commentsAfter" ]:
+ counter += 1
+
+ else:
+ return self.children.index(searchedChild)
+
+ return -1
+
+
+
+ def getChildByPosition(self, pos, mandatory = True, ignoreComments = False):
+ if self.hasChildren():
+ i = 0
+ for child in self.children:
+ if ignoreComments and child.type in [ "comment", "commentsBefore", "commentsAfter" ]:
+ continue
+
+ if i == pos:
+ return child
+
+ i += 1
+
+ if mandatory:
+ raise NodeAccessException("Node " + self.type + " has no child at position %s" % pos, self)
+
+
+
+ def getChildByAttribute(self, key, value, mandatory = True):
+ if self.hasChildren():
+ for child in self.children:
+ if child.get(key) == value:
+ return child
+
+ if mandatory:
+ raise NodeAccessException("Node " + self.type + " has no child with attribute " + key + " = " + value, self)
+
+ def getChildByTypeAndAttribute(self, type, key, value, mandatory = True):
+ if self.hasChildren():
+ for child in self.children:
+ if child.type == type and child.get(key) == value:
+ return child
+
+ if mandatory:
+ raise NodeAccessException("Node " + self.type + " has no child with type " + type + " and attribute " + key + " = " + value, self)
+
+ def getFirstChild(self, mandatory = True, ignoreComments = False):
+ if self.hasChildren():
+ for child in self.children:
+ if ignoreComments and child.type in [ "comment", "commentsBefore", "commentsAfter" ]:
+ continue
+
+ return child
+
+ if mandatory:
+ raise NodeAccessException("Node " + self.type + " has no children", self)
+
+ def getLastChild(self, mandatory = True, ignoreComments = False):
+ if self.hasChildren():
+ if not ignoreComments:
+ return self.children[-1]
+ else:
+ pos = len(self.children) - 1
+ while pos >= 0:
+ child = self.children[pos]
+
+ if ignoreComments and child.type in [ "comment", "commentsBefore", "commentsAfter" ]:
+ pos -= 1
+ continue
+
+ return child
+
+ if mandatory:
+ raise NodeAccessException("Node " + self.type + " has no children", self)
+
+ def getPreviousSibling(self, mandatory = True, ignoreComments = False):
+ if self.hasParent():
+ prev = None
+ for child in self.parent.children:
+
+ if ignoreComments and child.type in [ "comment", "commentsBefore", "commentsAfter" ]:
+ continue
+
+ if child == self:
+ if prev != None:
+ return prev
+ else:
+ break
+
+ prev = child
+
+ if mandatory:
+ raise NodeAccessException("Node " + self.type + " has no previous sibling", self)
+
+ def getFollowingSibling(self, mandatory = True, ignoreComments = False):
+ if self.hasParent():
+ prev = None
+
+ for child in self.parent.children:
+ if ignoreComments and child.type in [ "comment", "commentsBefore", "commentsAfter" ]:
+ continue
+
+ if prev != None:
+ return child
+
+ if child == self:
+ prev = child
+
+ if mandatory:
+ raise NodeAccessException("Node " + self.type + " has no following sibling", self)
+
+ def isFirstChild(self, ignoreComments = False):
+ if not self.hasParent():
+ return False
+
+ return self.parent.getFirstChild(False, ignoreComments) == self
+
+ def isLastChild(self, ignoreComments = False):
+ if not self.hasParent():
+ return False
+
+ return self.parent.getLastChild(False, ignoreComments) == self
+
+ def addListChild(self, listName, childNode):
+ listNode = self.getChild(listName, False)
+ if not listNode:
+ listNode = Node(listName)
+ self.addChild(listNode)
+ listNode.addChild(childNode)
+
+ def getListChildByAttribute(self, listName, key, value, mandatory = True):
+ listNode = self.getChild(listName, False)
+ if listNode:
+ return listNode.getChildByAttribute(key, value, mandatory)
+
+ if mandatory:
+ raise NodeAccessException("Node " + self.type + " has no child " + listName, self)
+
+ def getFirstListChild(self, listName, mandatory = True):
+ listNode = self.getChild(listName, False)
+ if listNode:
+ return listNode.getFirstChild(mandatory)
+
+ if mandatory:
+ raise NodeAccessException("Node " + self.type + " has no child " + listName, self)
+
+ def getAllChildrenOfType(self, type):
+ return self._getAllChildrenOfType(type, [])
+
+ def _getAllChildrenOfType(self, type, found=[]):
+ if self.hasChildren():
+ for child in self.children:
+ if child.type == type:
+ found.append(child)
+
+ child._getAllChildrenOfType(type, found)
+
+ return found
+
+
+
+
+def nodeToXmlString(node, prefix = "", childPrefix = " ", newLine="\n"):
+ hasText = False
+ asString = prefix + "<" + node.type
+ if node.hasAttributes():
+ for key in node.attributes:
+ if key == "text":
+ hasText = True
+ else:
+ asString += " " + key + "=\"" + escapeXmlChars(node.attributes[key], True) + "\""
+
+ if not node.hasChildren() and not hasText:
+ asString += "/>" + newLine
+ else:
+ asString += ">"
+
+ if hasText:
+ if node.hasChildren():
+ asString += newLine + prefix + childPrefix
+ else:
+ asString += newLine + prefix + childPrefix
+
+ asString += "<text>" + escapeXmlChars(node.attributes["text"], False) + "</text>" + newLine
+
+ if node.hasChildren():
+ asString += newLine
+ for child in node.children:
+ asString += nodeToXmlString(child, prefix + childPrefix, childPrefix, newLine)
+
+ asString += prefix + "</" + node.type + ">" + newLine
+
+ return asString
+
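+# Usage sketch (illustrative, not part of the original module):
+#
+#   root = Node("file")
+#   root.set("file", "demo.js")
+#   root.addChild(Node("function").set("name", "foo"))
+#   print nodeToXmlString(root)
+#
+# prints XML along the lines of
+#
+#   <file file="demo.js">
+#     <function name="foo"/>
+#   </file>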
+
+
+def nodeToJsonString(node, prefix = "", childPrefix = " ", newLine="\n"):
+ asString = prefix + '{type:"' + escapeJsonChars(node.type) + '"'
+
+ if node.hasAttributes():
+ asString += ',attributes:{'
+ firstAttribute = True
+ for key in node.attributes:
+ if not firstAttribute:
+ asString += ','
+ asString += '"' + key + '":"' + escapeJsonChars(node.attributes[key]) + '"'
+ firstAttribute = False
+ asString += '}'
+
+ if node.hasChildren():
+ asString += ',children:[' + newLine
+
+ firstChild = True
+ prefix = prefix + childPrefix
+ for child in node.children:
+ asString += nodeToJsonString(child, prefix, childPrefix, newLine) + ',' + newLine
+ firstChild = False
+
+ # NOTE We remove the ',\n' of the last child
+ if newLine == "":
+ asString = asString[:-1] + prefix + ']'
+ else:
+ asString = asString[:-2] + newLine + prefix + ']'
+
+ asString += '}'
+
+ return asString
+
+
+
+def escapeXmlChars(text, inAttribute):
+ if isinstance(text, basestring):
+ text = text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+ if inAttribute:
+ text = text.replace("\"", "&quot;")
+ elif isinstance(text, bool):
+ text = str(text).lower()
+ else:
+ text = str(text)
+
+ return text
+
+
+
+def escapeJsonChars(text):
+ if isinstance(text, basestring):
+ text = text.replace('"', '\\"').replace('\n', '\\n').replace('\r', '\\r')
+ elif isinstance(text, bool):
+ text = str(text).lower()
+ else:
+ text = str(text)
+
+ return text
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tree.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tree.pyc
new file mode 100644
index 0000000000..888c97c5e2
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/tree.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/treegenerator.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/treegenerator.py
new file mode 100755
index 0000000000..d473520e80
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/treegenerator.py
@@ -0,0 +1,1021 @@
+#!/usr/bin/env python
+
+import sys, optparse
+import tokenizer, tree, filetool, variableoptimizer
+
+
+SINGLE_LEFT_OPERATORS = [ "NOT", "BITNOT", "ADD", "SUB", "INC", "DEC" ]
+
+SINGLE_RIGHT_OPERATORS = [ "INC", "DEC" ]
+
+MULTI_TOKEN_OPERATORS = [ "HOOK", "ADD", "SUB", "MUL", "DIV", "MOD", \
+ "LT", "LE", "GT", "GE", "EQ", "NE", "SHEQ", "SHNE", \
+ "AND", "OR", "BITOR", "BITXOR", "BITAND", "POWEROF", \
+ "LSH", "RSH", "URSH" ]
+
+MULTI_PROTECTED_OPERATORS = [ "INSTANCEOF", "IN" ]
+
+ASSIGN_OPERATORS = [ "ASSIGN", "ASSIGN_ADD", "ASSIGN_SUB", "ASSIGN_MUL", \
+ "ASSIGN_DIV", "ASSIGN_MOD", "ASSIGN_BITOR", "ASSIGN_BITXOR", "ASSIGN_BITAND", \
+ "ASSIGN_LSH", "ASSIGN_RSH", "ASSIGN_URSH" ]
+
+LOOP_KEYWORDS = [ "WHILE", "IF", "FOR", "WITH" ]
+
+
+class TokenStream:
+ def __init__ (self, tokens):
+ self.tokens = tokens
+ self.commentsBefore = None
+ self.parsepos = -1
+ self.eolBefore = False
+
+ def curr (self):
+ """Returns the current token."""
+ return self.tokens[self.parsepos]
+
+ def currType (self):
+ return self.curr()["type"]
+
+ def currDetail (self):
+ return self.curr()["detail"]
+
+ def currSource (self):
+ return self.curr()["source"]
+
+ def currLine (self):
+ return self.curr()["line"]
+
+ def currColumn (self):
+ return self.curr()["column"]
+
+ def currMultiline (self):
+ return self.curr()["multiline"]
+
+ def currConnection (self):
+ return self.curr()["connection"]
+
+ def currIsType (self, tokenType, tokenDetail = None):
+ if self.currType() != tokenType:
+ return False
+ else:
+ if tokenDetail == None:
+ return True
+ elif type(tokenDetail) == list:
+ return self.currDetail() in tokenDetail
+ else:
+ return self.currDetail() == tokenDetail
+
+ def expectCurrType (self, tokenType, tokenDetail = None):
+ if not self.currIsType(tokenType, tokenDetail):
+ expectedDesc = tokenType
+ if type(tokenDetail) == str:
+ expectedDesc += "/" + tokenDetail
+ raiseSyntaxException(self.curr(), expectedDesc)
+
+ def finished (self):
+ # NOTE: the last token is end of file
+ return self.parsepos >= len(self.tokens) - 1
+
+ def next (self, item=None, after=False):
+ length = len(self.tokens)
+ self.eolBefore = False
+ self.breakBefore = False
+
+ token = None
+ while self.parsepos < length - 1:
+ self.parsepos += 1
+
+ token = self.tokens[self.parsepos]
+
+ if token["type"] == "eol":
+ if self.eolBefore:
+ self.breakBefore = True
+
+ self.eolBefore = True
+ # ignore end of line
+ pass
+
+ elif token["type"] == "comment":
+ # After current item
+ if token["connection"] == "after":
+ if not token.has_key("inserted") or not token["inserted"]:
+ if item:
+ commentNode = tree.Node("comment")
+ commentNode.set("line", token["line"])
+ commentNode.set("column", token["column"])
+ commentNode.set("text", token["source"])
+ commentNode.set("detail", token["detail"])
+ commentNode.set("multiline", token["multiline"])
+ commentNode.set("connection", token["connection"])
+ commentNode.set("begin", token["begin"])
+ commentNode.set("end", token["end"])
+
+ if after:
+ item.addListChild("commentsAfter", commentNode)
+ else:
+ item.addChild(commentNode)
+
+ self.eolBefore = False
+ self.breakBefore = False
+
+ else:
+ print "Found unresolved after comment in line %s, column %s" % (token["line"], token["column"])
+ print token["source"]
+ pass
+
+ # Documentation and Block comments of next item
+ else:
+ if not self.commentsBefore:
+ self.commentsBefore = []
+
+ commentNode = tree.Node("comment")
+ commentNode.set("line", token["line"])
+ commentNode.set("column", token["column"])
+ commentNode.set("text", token["source"])
+ commentNode.set("detail", token["detail"])
+ commentNode.set("multiline", token["multiline"])
+ commentNode.set("connection", token["connection"])
+ commentNode.set("begin", token["begin"])
+ commentNode.set("end", token["end"])
+
+ self.commentsBefore.append(commentNode)
+
+ self.eolBefore = False
+ self.breakBefore = False
+
+ else:
+ break
+
+ #print "next token: " + str(token)
+
+ if token == None:
+ # return end of file token
+ return self.tokens[length - 1]
+ else:
+ return token
+
+ # Alternative to next() for the case where we only want to check whether
+ # the next token is an "after" comment: if it is not, the stream position
+ # is left untouched (which next() cannot guarantee).
+ def comment (self, item, after=False):
+ length = len(self.tokens)
+
+ token = None
+ pos = self.parsepos
+
+ while pos < length - 1:
+ pos += 1
+ token = self.tokens[pos]
+
+ if token["type"] == "comment" and token["connection"] == "after" and (not token.has_key("inserted") or not token["inserted"]):
+ commentNode = tree.Node("comment")
+ commentNode.set("line", token["line"])
+ commentNode.set("column", token["column"])
+ commentNode.set("text", token["source"])
+ commentNode.set("detail", token["detail"])
+ commentNode.set("multiline", token["multiline"])
+ commentNode.set("connection", token["connection"])
+ commentNode.set("begin", token["begin"])
+ commentNode.set("end", token["end"])
+
+ token["inserted"] = True
+
+ if after:
+ item.addListChild("commentsAfter", commentNode)
+ else:
+ item.addChild(commentNode)
+
+ else:
+ break
+
+ def hadEolBefore(self):
+ return self.eolBefore
+
+ def hadBreakBefore(self):
+ return self.breakBefore
+
+ def clearCommentsBefore(self):
+ commentsBefore = self.commentsBefore
+ self.commentsBefore = None
+ return commentsBefore
+
+
+
+class SyntaxException (Exception):
+ pass
+
+
+
+def createItemNode(type, stream):
+ # print "CREATE %s" % type
+
+ node = tree.Node(type)
+ node.set("line", stream.currLine())
+ node.set("column", stream.currColumn())
+
+ commentsBefore = stream.clearCommentsBefore()
+ if commentsBefore:
+ for comment in commentsBefore:
+ node.addListChild("commentsBefore", comment)
+
+ return node
+
+
+
+def raiseSyntaxException (token, expectedDesc = None):
+ if expectedDesc:
+ msg = "Expected " + expectedDesc + " but found "
+ else:
+ msg = "Unexpected "
+
+ msg += token["type"]
+
+ if token["detail"]:
+ msg += "/" + token["detail"]
+
+ msg += ": '" + token["source"] + "'. file:" + \
+ token["id"] + ", line:" + str(token["line"]) + \
+ ", column:" + str(token["column"])
+
+ raise SyntaxException(msg)
+
+
+
+def createSyntaxTree (tokenArr):
+ """Creates a syntax tree from a token stream.
+
+ tokenArr: the token stream (list of token dicts)."""
+
+ stream = TokenStream(tokenArr)
+ stream.next()
+
+ rootBlock = tree.Node("file")
+ rootBlock.set("file", stream.curr()["id"])
+
+ while not stream.finished():
+ rootBlock.addChild(readStatement(stream))
+
+ return rootBlock
+
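+# Usage sketch (illustrative): the token array typically comes straight from
+# the tokenizer module that ships alongside this file, e.g.
+#
+#   tokens = tokenizer.parseStream("var a = 1;", "demo")
+#   root = createSyntaxTree(tokens)
+#   print tree.nodeToXmlString(root)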
+
+
+def readExpression (stream):
+ return readStatement(stream, True)
+
+
+
+def readStatement (stream, expressionMode = False, overrunSemicolon = True, inStatementList = False):
+ item = None
+
+ eolBefore = stream.hadEolBefore()
+ breakBefore = stream.hadBreakBefore()
+
+ # print "PROGRESS: %s - %s (%s) [expr=%s]" % (stream.currType(), stream.currDetail(), stream.currLine(), expressionMode)
+
+ if currIsIdentifier(stream, True):
+ # statement starts with an identifier
+ variable = readVariable(stream, True)
+ variable = readObjectOperation(stream, variable)
+
+ if stream.currIsType("token", ASSIGN_OPERATORS):
+ # This is an assignment
+ item = createItemNode("assignment", stream)
+ item.set("operator", stream.currDetail())
+ stream.next(item)
+
+ item.addListChild("left", variable)
+ item.addListChild("right", readExpression(stream))
+ elif stream.currIsType("token", "COLON") and not expressionMode:
+ # This is a label
+ item = variable
+ item.type = "label"
+ stream.next(variable)
+ else:
+ # Something else comes after the variable -> It's a sole variable
+ item = variable
+
+ # Any comments found for the variable belong to the extracted item
+ commentsChild = variable.getChild("commentsBefore", False)
+ if item and commentsChild != None:
+ variable.removeChild(commentsChild)
+ item.addChild(commentsChild, 0)
+ elif stream.currIsType("protected", "FUNCTION"):
+ item = createItemNode("function", stream)
+ stream.next(item)
+
+ # Read optional function name
+ if stream.currIsType("name"):
+ item.set("name", stream.currSource())
+ stream.next(item)
+
+ readParamList(item, stream)
+ item.addListChild("body", readBlock(stream))
+
+ # Check for direct execution: function() {}()
+ if stream.currIsType("token", "LP"):
+ # The function is executed directly
+ functionItem = item
+ item = createItemNode("call", stream)
+ item.addListChild("operand", functionItem)
+ readParamList(item, stream)
+ item = readObjectOperation(stream, item)
+ elif stream.currIsType("protected", "VOID"):
+ stream.next(item)
+ item = createItemNode("void", stream)
+ stream.next(item)
+ item.addChild(readStatement(stream, expressionMode))
+ stream.expectCurrType("token", "RP")
+ stream.next(item, True)
+ item = readObjectOperation(stream, item)
+ elif stream.currIsType("token", "LP"):
+ igroup = createItemNode("group", stream)
+ stream.next(igroup)
+ igroup.addChild(readStatement(stream, expressionMode))
+ stream.expectCurrType("token", "RP")
+ stream.next(igroup, True)
+ oper = readObjectOperation(stream, igroup)
+
+ # supports e.g. (this.editor.object || this.editor.iframe).style.marginTop = null;
+ if stream.currIsType("token", ASSIGN_OPERATORS):
+ # This is an assignment
+ item = createItemNode("assignment", stream)
+ item.set("operator", stream.currDetail())
+ stream.next(item)
+
+ item.addListChild("left", oper)
+ item.addListChild("right", readExpression(stream))
+ else:
+ # Something else comes after the variable -> It's a sole variable
+ item = oper
+
+ elif stream.currIsType("string"):
+ item = createItemNode("constant", stream)
+ item.set("constantType", "string")
+ item.set("value", stream.currSource())
+ item.set("detail", stream.currDetail())
+ stream.next(item, True)
+ # This is a member accessor (E.g. "bla.blubb")
+ item = readObjectOperation(stream, item)
+ elif stream.currIsType("number"):
+ item = createItemNode("constant", stream)
+ item.set("constantType", "number")
+ item.set("value", stream.currSource())
+ item.set("detail", stream.currDetail())
+ stream.next(item, True)
+ # This is a member accessor (E.g. "bla.blubb")
+ item = readObjectOperation(stream, item)
+ elif stream.currIsType("regexp"):
+ item = createItemNode("constant", stream)
+ item.set("constantType", "regexp")
+ item.set("value", stream.currSource())
+ stream.next(item, True)
+ # This is a member accessor (E.g. "bla.blubb")
+ item = readObjectOperation(stream, item)
+ elif expressionMode and (stream.currIsType("protected", "TRUE") or stream.currIsType("protected", "FALSE")):
+ item = createItemNode("constant", stream)
+ item.set("constantType", "boolean")
+ item.set("value", stream.currSource())
+ stream.next(item, True)
+ elif expressionMode and stream.currIsType("protected", "NULL"):
+ item = createItemNode("constant", stream)
+ item.set("constantType", "null")
+ item.set("value", stream.currSource())
+ stream.next(item, True)
+ elif expressionMode and stream.currIsType("token", "LC"):
+ item = readMap(stream)
+ elif expressionMode and stream.currIsType("token", "LB"):
+ item = readArray(stream)
+ elif stream.currIsType("token", SINGLE_LEFT_OPERATORS):
+ item = createItemNode("operation", stream)
+ item.set("operator", stream.currDetail())
+ item.set("left", True)
+ stream.next(item)
+ item.addListChild("first", readExpression(stream))
+ elif stream.currIsType("protected", "TYPEOF"):
+ item = createItemNode("operation", stream)
+ item.set("operator", "TYPEOF")
+ item.set("left", True)
+ stream.next(item)
+ item.addListChild("first", readExpression(stream))
+ elif stream.currIsType("protected", "NEW"):
+ item = readInstantiation(stream)
+ item = readObjectOperation(stream, item)
+ elif not expressionMode and stream.currIsType("protected", "VAR"):
+ item = createItemNode("definitionList", stream)
+ stream.next(item)
+ finished = False
+ while not finished:
+ if not currIsIdentifier(stream, False):
+ raiseSyntaxException(stream.curr(), "identifier")
+
+ childitem = createItemNode("definition", stream)
+ childitem.set("identifier", stream.currSource())
+ stream.next(childitem)
+ if stream.currIsType("token", "ASSIGN"):
+ assign = createItemNode("assignment", stream)
+ childitem.addChild(assign)
+ stream.next(assign)
+ assign.addChild(readExpression(stream))
+
+ item.addChild(childitem)
+
+ # Check whether another definition follows, e.g. "var a, b=1, c=4"
+ if stream.currIsType("token", "COMMA"):
+ stream.next(item)
+ else:
+ finished = True
+
+ stream.comment(item, True)
+
+ elif not expressionMode and stream.currIsType("protected", LOOP_KEYWORDS):
+ item = readLoop(stream)
+ elif not expressionMode and stream.currIsType("protected", "DO"):
+ item = readDoWhile(stream)
+ elif not expressionMode and stream.currIsType("protected", "SWITCH"):
+ item = readSwitch(stream)
+ elif not expressionMode and stream.currIsType("protected", "TRY"):
+ item = readTryCatch(stream)
+ elif not expressionMode and stream.currIsType("token", "LC"):
+ item = readBlock(stream)
+ elif not expressionMode and stream.currIsType("protected", "RETURN"):
+ item = createItemNode("return", stream)
+ stream.next(item)
+ # NOTE: The expression after the return keyword is optional
+ if not stream.currIsType("token", "SEMICOLON") and not stream.currIsType("token", "RC"):
+ item.addListChild("expression", readExpression(stream))
+ stream.comment(item, True)
+ elif not expressionMode and stream.currIsType("protected", "THROW"):
+ item = createItemNode("throw", stream)
+ stream.next(item)
+ item.addListChild("expression", readExpression(stream))
+ stream.comment(item, True)
+ elif not expressionMode and stream.currIsType("protected", "DELETE"):
+ item = createItemNode("delete", stream)
+ stream.next(item)
+ item.addListChild("expression", readExpression(stream))
+ stream.comment(item, True)
+ elif not expressionMode and stream.currIsType("protected", "BREAK"):
+ item = createItemNode("break", stream)
+ stream.next(item)
+ # NOTE: The label after the break keyword is optional
+ if not stream.hadEolBefore() and stream.currIsType("name"):
+ item.set("label", stream.currSource())
+ # As the label is stored as an attribute, following comments must go into "after"
+ # to differentiate between comments before and after the label
+ stream.next(item, True)
+ elif not expressionMode and stream.currIsType("protected", "CONTINUE"):
+ item = createItemNode("continue", stream)
+ stream.next(item)
+ # NOTE: The label after the continue keyword is optional
+ if not stream.hadEolBefore() and stream.currIsType("name"):
+ item.set("label", stream.currSource())
+ stream.next(item, True)
+
+ if not item:
+ if stream.currIsType("token", "SEMICOLON") and not expressionMode:
+ # This is an empty statement
+ item = createItemNode("emptyStatement", stream)
+ stream.next(item)
+ else:
+ if expressionMode:
+ expectedDesc = "expression"
+ else:
+ expectedDesc = "statement"
+ raiseSyntaxException(stream.curr(), expectedDesc)
+
+ # check whether this is an operation
+ if stream.currIsType("token", MULTI_TOKEN_OPERATORS) or stream.currIsType("protected", MULTI_PROTECTED_OPERATORS) or (stream.currIsType("token", SINGLE_RIGHT_OPERATORS) and not stream.hadEolBefore()):
+ # It's an operation -> We've already parsed the first operand (in item)
+ parsedItem = item
+
+ oper = stream.currDetail()
+
+ item = createItemNode("operation", stream)
+ item.addListChild("first", parsedItem)
+ item.set("operator", oper)
+ stream.next(item)
+
+ if oper in MULTI_TOKEN_OPERATORS or oper in MULTI_PROTECTED_OPERATORS:
+ # It's a multi operator -> There must be a second argument
+ item.addListChild("second", readExpression(stream))
+ if oper == "HOOK":
+ # It's a "? :" operation -> There must be a third argument
+ stream.expectCurrType("token", "COLON")
+ stream.next(item)
+ item.addListChild("third", readExpression(stream))
+
+ # Deep scan on single right operators e.g. if(i-- > 4)
+ if oper in SINGLE_RIGHT_OPERATORS and stream.currIsType("token", MULTI_TOKEN_OPERATORS) and expressionMode:
+ paroper = stream.currDetail()
+
+ paritem = createItemNode("operation", stream)
+ paritem.addListChild("first", item)
+ paritem.set("operator", paroper)
+ stream.next(item)
+
+ if paroper in MULTI_TOKEN_OPERATORS or paroper in MULTI_PROTECTED_OPERATORS:
+ # It's a multi operator -> There must be a second argument
+ paritem.addListChild("second", readExpression(stream))
+ if paroper == "HOOK":
+ # It's a "? :" operation -> There must be a third argument
+ stream.expectCurrType("token", "COLON")
+ stream.next(item)
+ paritem.addListChild("third", readExpression(stream))
+
+ # return parent item
+ item = paritem
+
+
+
+ # check whether this is a combined statement, e.g. "bla(), i++"
+ if not expressionMode and not inStatementList and stream.currIsType("token", "COMMA"):
+ statementList = createItemNode("statementList", stream)
+ statementList.addChild(item)
+ while stream.currIsType("token", "COMMA"):
+ stream.next(statementList)
+ statementList.addChild(readStatement(stream, False, False, True))
+ item = statementList
+
+ # Skip over the optional semicolon
+ if not expressionMode and overrunSemicolon and stream.currIsType("token", "SEMICOLON"):
+ stream.next(item, True)
+
+
+ item.set("eolBefore", eolBefore)
+ item.set("breakBefore", breakBefore)
+
+ return item
+
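+# Illustrative note (editor's addition, not part of the original source):
+# after the primary item has been read, the code above folds trailing binary
+# and ternary operators into an "operation" node (with "first", "second" and,
+# for HOOK, "third" list children) and comma-separated statements such as
+# "bla(), i++" into a "statementList" node, so the caller always receives a
+# single root item.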
+
+
+def currIsIdentifier (stream, allowThis):
+ det = stream.currDetail()
+ return stream.currIsType("name") or stream.currIsType("builtin") \
+ or (stream.currIsType("protected") and \
+ (det == "INFINITY" or det == "PROTOTYPE" or det == "CALL" or \
+ det == "APPLY" or (allowThis and det == "THIS")))
+
+
+
+def readVariable (stream, allowArrays):
+ # Note: keywords may be used as identifiers, too
+ item = createItemNode("variable", stream)
+
+ done = False
+ firstIdentifier = True
+ while not done:
+ if not currIsIdentifier(stream, firstIdentifier):
+ raiseSyntaxException(stream.curr(), "identifier")
+
+ identifier = createItemNode("identifier", stream)
+ identifier.set("name", stream.currSource())
+ stream.next(identifier)
+
+ if allowArrays:
+ while stream.currIsType("token", "LB"):
+ accessor = createItemNode("accessor", stream)
+ stream.next(accessor)
+ accessor.addListChild("identifier", identifier)
+ accessor.addListChild("key", readExpression(stream))
+
+ stream.expectCurrType("token", "RB")
+ stream.next(accessor, True)
+
+ identifier = accessor
+
+ item.addChild(identifier)
+
+ firstIdentifier = False
+
+ if stream.currIsType("token", "DOT"):
+ stream.next(item)
+ else:
+ done = True
+
+ return item
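+
+# Illustrative note (editor's addition, not part of the original source):
+# with allowArrays=True an input like "foo.bar[i]" is expected to yield
+# roughly:
+#
+#   variable
+#     identifier (name="foo")
+#     accessor
+#       identifier: identifier (name="bar")
+#       key:        expression read for "i"
+#
+# Without allowArrays, the "[i]" part is left in the stream for the caller
+# (typically readObjectOperation) to consume.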
+
+
+
+def readObjectOperation(stream, operand, onlyAllowMemberAccess = False):
+ if stream.currIsType("token", "DOT"):
+ # This is a member accessor (E.g. "bla.blubb")
+ item = createItemNode("accessor", stream)
+ stream.next(item)
+ item.addListChild("left", operand)
+
+ # Special case for constants: wrap the constant in the accessor first, then continue chaining
+ if operand.type == "constant":
+ item.addListChild("right", readVariable(stream, False))
+ item = readObjectOperation(stream, item)
+ else:
+ item.addListChild("right", readObjectOperation(stream, readVariable(stream, False)))
+
+ elif stream.currIsType("token", "LP"):
+ # This is a function call (E.g. "bla(...)")
+ item = createItemNode("call", stream)
+ item.addListChild("operand", operand)
+ readParamList(item, stream)
+ item = readObjectOperation(stream, item)
+ elif stream.currIsType("token", "LB"):
+ # This is an array access (E.g. "bla[...]")
+ item = createItemNode("accessor", stream)
+ stream.next(item)
+ item.addListChild("identifier", operand)
+ item.addListChild("key", readExpression(stream))
+
+ stream.expectCurrType("token", "RB")
+ stream.next(item, True)
+ item = readObjectOperation(stream, item)
+ else:
+ item = operand
+
+ # Any comments found for the operand belong to the item
+ if operand != item:
+ commentsChild = operand.getChild("commentsBefore", False)
+ if commentsChild != None:
+ operand.removeChild(commentsChild)
+ item.addChild(commentsChild, 0)
+
+ return item
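+
+# Illustrative note (editor's addition, not part of the original source):
+# readObjectOperation chains member access, calls and array access
+# recursively. With the variable "bla.blubb" already read as the operand,
+# the remaining "(1)[0]" is expected to become roughly:
+#
+#   accessor                        (the trailing "[0]")
+#     identifier: call              ("bla.blubb(1)")
+#       operand: variable "bla.blubb"
+#       params:  constant 1
+#     key: constant 0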
+
+
+
+def readParamList (node, stream):
+ stream.expectCurrType("token", "LP")
+
+ params = createItemNode("params", stream)
+ node.addChild(params)
+
+ stream.next(params)
+
+ firstParam = True
+ lastExpr = None
+ while not stream.currIsType("token", "RP"):
+ if firstParam:
+ firstParam = False
+ else:
+ stream.expectCurrType("token", "COMMA")
+ stream.next(lastExpr, True)
+
+ lastExpr = readExpression(stream)
+ params.addChild(lastExpr)
+
+ # The closing parenthesis was reached by the loop above,
+ # so all comments following belong after the params node
+ stream.next(params, True)
+
+
+def readBlock(stream):
+ stream.expectCurrType("token", "LC")
+ item = createItemNode("block", stream)
+
+ # Iterate through children
+ stream.next(item)
+ while not stream.currIsType("token", "RC"):
+ item.addChild(readStatement(stream))
+
+ # The closing brace was reached by the loop above,
+ # so all comments following belong after item
+ stream.next(item, True)
+
+ return item
+
+
+def readMap(stream):
+ stream.expectCurrType("token", "LC")
+
+ item = createItemNode("map", stream)
+ stream.next(item)
+
+ # NOTE: We use our own flag for checking whether the map already has entries
+ # and not item.hasChildren(), because item.hasChildren() is also true
+ # when there are comments before the map
+ hasEntries = False
+
+ while not stream.currIsType("token", "RC"):
+ if hasEntries:
+ stream.expectCurrType("token", "COMMA")
+ stream.next(item)
+
+ if not currIsIdentifier(stream, True) and not stream.currIsType("string") and not stream.currIsType("number"):
+ raiseSyntaxException(stream.curr(), "map key (identifier, string or number)")
+
+ keyvalue = createItemNode("keyvalue", stream)
+ keyvalue.set("key", stream.currSource())
+
+ if stream.currIsType("string"):
+ keyvalue.set("quote", stream.currDetail())
+
+ stream.next(keyvalue)
+ stream.expectCurrType("token", "COLON")
+ stream.next(keyvalue, True)
+ keyvalue.addListChild("value", readExpression(stream))
+
+ item.addChild(keyvalue)
+
+ hasEntries = True
+
+ # The closing brace was reached by the loop above,
+ # so all comments following belong after item
+ stream.next(item, True)
+
+ return item
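+
+# Illustrative note (editor's addition, not part of the original source):
+# a literal such as {a: 1, "b": 2} is expected to become a "map" node with
+# one "keyvalue" child per entry:
+#
+#   map
+#     keyvalue (key="a")
+#       value: constant 1
+#     keyvalue (key="b", quote=<detail of the string token>)
+#       value: constant 2
+#
+# The "quote" attribute is only set when the key was written as a string.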
+
+
+
+def readArray(stream):
+ stream.expectCurrType("token", "LB")
+
+ item = createItemNode("array", stream)
+ stream.next(item)
+
+ # NOTE: We use our own flag for checking whether the array already has entries
+ # and not item.hasChildren(), because item.hasChildren() is also true
+ # when there are comments before the array
+ hasEntries = False
+ while not stream.currIsType("token", "RB"):
+ if hasEntries:
+ stream.expectCurrType("token", "COMMA")
+ stream.next(item)
+
+ item.addChild(readExpression(stream))
+ hasEntries = True
+
+ # The closing bracket was reached by the loop above,
+ # so all comments following belong after item
+ stream.next(item, True)
+
+ # Support constructs like [ "foo", "bar" ].join("")
+ item = readObjectOperation(stream, item)
+
+ return item
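+
+# Illustrative note (editor's addition, not part of the original source):
+# because readArray feeds its result through readObjectOperation, an
+# expression like [ "foo", "bar" ].join("") ends up as an "accessor" node
+# whose "left" child is the array literal and whose "right" child is the
+# call to join, rather than as a bare "array" node.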
+
+
+
+def readInstantiation(stream):
+ stream.expectCurrType("protected", "NEW")
+
+ item = createItemNode("instantiation", stream)
+ stream.next(item)
+
+ # Could be a simple variable or an inline function declaration (closure),
+ # so read it as an expression
+ stmnt = readStatement(stream, True, False)
+ item.addListChild("expression", stmnt)
+
+ return item
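+
+# Illustrative note (editor's addition, not part of the original source):
+# for a hypothetical "new Foo(1, 2)" the "instantiation" node carries the
+# constructor call in its "expression" list child; the call itself is read
+# by readStatement in expression mode, so closures like
+# "new function() { ... }" work as well.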
+
+
+
+def readLoop(stream):
+ stream.expectCurrType("protected", LOOP_KEYWORDS)
+
+ loopType = stream.currDetail()
+
+ item = createItemNode("loop", stream)
+ item.set("loopType", loopType)
+
+ stream.next(item)
+ stream.expectCurrType("token", "LP")
+
+ if loopType == "FOR":
+ stream.next(item)
+
+ if not stream.currIsType("token", "SEMICOLON"):
+ # Read the optional first statement
+ first = createItemNode("first", stream)
+ item.addChild(first)
+ first.addChild(readStatement(stream, False, False))
+ stream.comment(first, True)
+
+ if stream.currIsType("token", "SEMICOLON"):
+ # It's a for (;;) loop
+ item.set("forVariant", "iter")
+
+ stream.next(item)
+ if not stream.currIsType("token", "SEMICOLON"):
+ # Read the optional second expression
+ second = createItemNode("second", stream)
+ item.addChild(second)
+ second.addChild(readExpression(stream))
+ stream.comment(second, True)
+
+ stream.expectCurrType("token", "SEMICOLON")
+ stream.next(item)
+
+ if not stream.currIsType("token", "RP"):
+ # Read the optional third statement
+ third = createItemNode("third", stream)
+ item.addChild(third)
+ third.addChild(readStatement(stream, False, False))
+ stream.comment(third, True)
+
+ elif stream.currIsType("token", "RP"):
+ # It's a for ( in ) loop
+ item.set("forVariant", "in")
+ pass
+
+ else:
+ raiseSyntaxException(stream.curr(), "semicolon or in")
+
+ stream.expectCurrType("token", "RP")
+
+ else:
+ expr = createItemNode("expression", stream)
+ stream.next(expr)
+ expr.addChild(readExpression(stream))
+ item.addChild(expr)
+ stream.comment(expr, True)
+ stream.expectCurrType("token", "RP")
+
+ # Comments should already be complete from the code above
+ stmnt = createItemNode("statement", stream)
+ item.addChild(stmnt)
+ stream.next()
+ stmnt.addChild(readStatement(stream))
+
+ if loopType == "IF" and stream.currIsType("protected", "ELSE"):
+ elseStmnt = createItemNode("elseStatement", stream)
+ item.addChild(elseStmnt)
+ stream.next(elseStmnt)
+ elseStmnt.addChild(readStatement(stream))
+
+ return item
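+
+# Illustrative note (editor's addition, not part of the original source):
+# every keyword in LOOP_KEYWORDS produces a "loop" node distinguished by its
+# "loopType" attribute; a classic for(;;) loop additionally gets
+# forVariant="iter", a for-in loop gets forVariant="in", and an IF statement
+# may carry an extra "elseStatement" child next to its "statement" child.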
+
+
+
+def readDoWhile(stream):
+ stream.expectCurrType("protected", "DO")
+
+ item = createItemNode("loop", stream)
+ item.set("loopType", "DO")
+ stream.next(item)
+
+ stmnt = createItemNode("statement", stream)
+ item.addChild(stmnt)
+ stmnt.addChild(readStatement(stream))
+
+ stream.expectCurrType("protected", "WHILE")
+ stream.next(item)
+
+ stream.expectCurrType("token", "LP")
+
+ expr = createItemNode("expression", stream)
+ item.addChild(expr)
+ stream.next(expr)
+
+ expr.addChild(readExpression(stream))
+
+ stream.expectCurrType("token", "RP")
+ stream.next(item, True)
+
+ return item
+
+
+def readSwitch(stream):
+ stream.expectCurrType("protected", "SWITCH")
+
+ item = createItemNode("switch", stream)
+ item.set("switchType", "case")
+
+ stream.next(item)
+ stream.expectCurrType("token", "LP")
+
+ expr = createItemNode("expression", stream)
+ stream.next(expr)
+ item.addChild(expr)
+ expr.addChild(readExpression(stream))
+
+ stream.expectCurrType("token", "RP")
+ stream.next(expr, True)
+
+ stream.expectCurrType("token", "LC")
+ stmnt = createItemNode("statement", stream)
+ item.addChild(stmnt)
+ stream.next(stmnt)
+
+ while not stream.currIsType("token", "RC"):
+ if stream.currIsType("protected", "CASE"):
+ caseItem = createItemNode("case", stream)
+ stream.next(caseItem)
+ caseItem.addListChild("expression", readExpression(stream))
+ stmnt.addChild(caseItem)
+
+ stream.expectCurrType("token", "COLON")
+ stream.next(caseItem, True)
+
+ elif stream.currIsType("protected", "DEFAULT"):
+ defaultItem = createItemNode("default", stream)
+ stmnt.addChild(defaultItem)
+ stream.next(defaultItem)
+
+ stream.expectCurrType("token", "COLON")
+ stream.next(defaultItem, True)
+
+ else:
+ raiseSyntaxException(stream.curr(), "case or default")
+
+ while not stream.currIsType("token", "RC") and not stream.currIsType("protected", "CASE") and not stream.currIsType("protected", "DEFAULT"):
+ stmnt.addChild(readStatement(stream))
+
+ stream.next(stmnt, True)
+
+ return item
+
+
+def readTryCatch(stream):
+ stream.expectCurrType("protected", "TRY")
+
+ item = createItemNode("switch", stream)
+ item.set("switchType", "catch")
+ stream.next(item)
+
+ item.addListChild("statement", readStatement(stream))
+
+ while stream.currIsType("protected", "CATCH"):
+ catchItem = createItemNode("catch", stream)
+ stream.next(catchItem)
+
+ stream.expectCurrType("token", "LP")
+
+ exprItem = createItemNode("expression", stream)
+ catchItem.addChild(exprItem)
+ stream.next(exprItem)
+ exprItem.addChild(readExpression(stream))
+
+ stream.expectCurrType("token", "RP")
+ stream.next(exprItem, True)
+
+ stmnt = createItemNode("statement", stream)
+ catchItem.addChild(stmnt)
+ stmnt.addChild(readStatement(stream))
+
+ item.addChild(catchItem)
+
+ if stream.currIsType("protected", "FINALLY"):
+ finallyItem = createItemNode("finally", stream)
+ stream.next(finallyItem)
+
+ stmnt = createItemNode("statement", stream)
+ finallyItem.addChild(stmnt)
+ stmnt.addChild(readStatement(stream))
+
+ item.addChild(finallyItem)
+
+ return item
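+
+# Illustrative note (editor's addition, not part of the original source):
+# try/catch/finally is modelled as a "switch" node with switchType="catch":
+# the try block is the "statement" list child, every catch clause becomes a
+# "catch" child with its own "expression" and "statement", and an optional
+# "finally" child wraps the finally block.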
+
+
+
+
+
+
+
+
+
+def main():
+ parser = optparse.OptionParser()
+
+ parser.add_option("-w", "--write", action="store_true", dest="write", default=False, help="Writes file to incoming fileName + EXTENSION.")
+ parser.add_option("-e", "--extension", dest="extension", metavar="EXTENSION", help="The EXTENSION to use", default=".compiled")
+ parser.add_option("--optimize-variables", action="store_true", dest="optimizeVariables", default=False, help="Optimize variables. Reducing size.")
+ parser.add_option("--encoding", dest="encoding", default="utf-8", metavar="ENCODING", help="Defines the encoding expected for input files.")
+
+ (options, args) = parser.parse_args()
+
+ if len(args) == 0:
+ print "Needs one or more arguments (files) to compile!"
+ sys.exit(1)
+
+ for fileName in args:
+ if options.write:
+ print "Generating tree of %s => %s%s" % (fileName, fileName, options.extension)
+ else:
+ print "Generating tree of %s => stdout" % fileName
+
+ restree = createSyntaxTree(tokenizer.parseFile(fileName, "", options.encoding))
+
+ if options.optimizeVariables:
+ variableoptimizer.search(restree, [], 0, "$")
+
+ compiledString = tree.nodeToXmlString(restree)
+ if options.write:
+ filetool.save(fileName + options.extension, compiledString)
+
+ else:
+ try:
+ print compiledString
+
+ except UnicodeEncodeError:
+ print " * Could not encode result to ascii. Use '-w' instead."
+ sys.exit(1)
+
+
+
+if __name__ == '__main__':
+ try:
+ main()
+
+ except KeyboardInterrupt:
+ print
+ print " * Keyboard Interrupt"
+ sys.exit(1)
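+
+# Programmatic usage (editor's sketch mirroring main() above, not part of
+# the original source):
+#
+#   import tokenizer, treegenerator, tree
+#
+#   tokens = tokenizer.parseFile("source.js", "", "utf-8")
+#   root = treegenerator.createSyntaxTree(tokens)
+#   print tree.nodeToXmlString(root)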
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/treegenerator.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/treegenerator.pyc
new file mode 100644
index 0000000000..6e142ea91e
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/treegenerator.pyc
Binary files differ
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/variableoptimizer.py b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/variableoptimizer.py
new file mode 100755
index 0000000000..6fa148dd5d
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/variableoptimizer.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+
+import tree, mapper
+
+def search(node, found, level=0, prefix="$", register=False, debug=False):
+ if node.type == "function":
+ if register:
+ name = node.get("name", False)
+ if name != None and not name in found:
+ # print "Name: %s" % name
+ found.append(name)
+
+ foundLen = len(found)
+ register = True
+
+ if debug:
+ print "\n%s<scope line='%s'>" % ((" " * level), node.get("line"))
+
+ # Function parameters, e.g. function(name1, name2) { ... }
+ elif register and node.type == "variable" and node.hasChildren() and len(node.children) == 1:
+ if node.parent.type == "params" and node.parent.parent.type != "call":
+ first = node.getFirstChild()
+
+ if first.type == "identifier":
+ name = first.get("name")
+
+ if not name in found:
+ found.append(name)
+
+ # e.g. var name1, name2 = "foo";
+ elif register and node.type == "definition":
+ name = node.get("identifier", False)
+
+ if name != None:
+ if not name in found:
+ found.append(name)
+
+ # Iterate over children
+ if node.hasChildren():
+ if node.type == "function":
+ for child in node.children:
+ search(child, found, level+1, prefix, register, debug)
+
+ else:
+ for child in node.children:
+ search(child, found, level, prefix, register, debug)
+
+ # Function closed
+ if node.type == "function":
+
+ # Debug
+ if debug:
+ for item in found:
+ print " %s<item>%s</item>" % ((" " * level), item)
+ print "%s</scope>" % (" " * level)
+
+ # Iterate over content
+ # Replace variables in current scope
+ update(node, found, prefix, debug)
+ del found[foundLen:]
+
+
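+# Editor's note (not part of the original source): search() collects the
+# parameter, variable and function names declared inside each function scope
+# and, once that scope is fully visited, calls update() to replace them with
+# short names built from the prefix and mapper.convert() of their index.
+# The other tools invoke it as:
+#
+#   variableoptimizer.search(restree, [], 0, "$")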
+
+def update(node, found, prefix="$", debug=False):
+ # Handle all identifiers
+ if node.type == "identifier":
+
+ isFirstChild = False
+ isVariableMember = False
+
+ if node.parent.type == "variable":
+ isVariableMember = True
+ varParent = node.parent.parent
+
+ if not (varParent.type == "right" and varParent.parent.type == "accessor"):
+ isFirstChild = node.parent.getFirstChild(True, True) == node
+
+ elif node.parent.type == "identifier" and node.parent.parent.type == "accessor":
+ isVariableMember = True
+ accessor = node.parent.parent
+ isFirstChild = accessor.parent.getFirstChild(True, True) == accessor
+
+ # Inside a variable parent, only the first member is considered for renaming
+ if not isVariableMember or isFirstChild:
+ idenName = node.get("name", False)
+
+ if idenName != None and idenName in found:
+ replName = "%s%s" % (prefix, mapper.convert(found.index(idenName)))
+ node.set("name", replName)
+
+ if debug:
+ print " - Replaced '%s' with '%s'" % (idenName, replName)
+
+ # Handle variable definition
+ elif node.type == "definition":
+ idenName = node.get("identifier", False)
+
+ if idenName != None and idenName in found:
+ replName = "%s%s" % (prefix, mapper.convert(found.index(idenName)))
+ node.set("identifier", replName)
+
+ if debug:
+ print " - Replaced '%s' with '%s'" % (idenName, replName)
+
+ # Handle function definition
+ elif node.type == "function":
+ idenName = node.get("name", False)
+
+ if idenName != None and idenName in found:
+ replName = "%s%s" % (prefix, mapper.convert(found.index(idenName)))
+ node.set("name", replName)
+
+ if debug:
+ print " - Replaced '%s' with '%s'" % (idenName, replName)
+
+ # Iterate over children
+ if node.hasChildren():
+ for child in node.children:
+ update(child, found, prefix, debug)
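+
+# Illustrative note (editor's addition, not part of the original source):
+# only the first identifier of a variable chain is a candidate for renaming;
+# for a collected local "req", "req.send()" is rewritten at "req" only, and
+# member names reached through a dot accessor (the "right" child of an
+# "accessor" node) are never touched, so public property names survive the
+# optimization.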
diff --git a/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/variableoptimizer.pyc b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/variableoptimizer.pyc
new file mode 100644
index 0000000000..5e31ca7bc0
--- /dev/null
+++ b/webapps/qooxdoo-0.6.3-sdk/frontend/framework/tool/modules/variableoptimizer.pyc
Binary files differ