node_modules/node-gyp/gyp/tools/README (new file, generated, vendored, +15 lines)
@@ -0,0 +1,15 @@
pretty_vcproj:
  Usage: pretty_vcproj.py "c:\path\to\vcproj.vcproj" [key1=value1] [key2=value2]

  The key/value pairs are used to resolve vsprops names.

  For example, if I want to diff the base.vcproj project:

  pretty_vcproj.py z:\dev\src-chrome\src\base\build\base.vcproj "$(SolutionDir)=z:\dev\src-chrome\src\chrome\\" "$(CHROMIUM_BUILD)=" "$(CHROME_BUILD_TYPE)=" > original.txt
  pretty_vcproj.py z:\dev\src-chrome\src\base\base_gyp.vcproj "$(SolutionDir)=z:\dev\src-chrome\src\chrome\\" "$(CHROMIUM_BUILD)=" "$(CHROME_BUILD_TYPE)=" > gyp.txt

  And you can use your favorite diff tool to see the changes.

  Note: In the case of base.vcproj, the original vcproj is one level above the generated one.
  I suggest you search for '"..\' and replace it with '"' in original.txt
  before you perform the diff.
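The two-command workflow above can also be scripted. Below is a minimal sketch (not part of gyp) that runs both pretty_vcproj.py invocations, applies the README's suggested '"..\' cleanup, and diffs the results with difflib; the vcproj paths and $(...) macro values are the README's own hypothetical examples.

import difflib
import subprocess
import sys

MACROS = [
    # Trailing backslash matters: it mirrors the README's "$(SolutionDir)=...\\" argument.
    r"$(SolutionDir)=z:\dev\src-chrome\src\chrome" + "\\",
    "$(CHROMIUM_BUILD)=",
    "$(CHROME_BUILD_TYPE)=",
]


def pretty(vcproj):
    """Run pretty_vcproj.py on one vcproj and return its output as a list of lines."""
    result = subprocess.run(
        [sys.executable, "pretty_vcproj.py", vcproj] + MACROS,
        capture_output=True, text=True, check=True,
    )
    return result.stdout.splitlines()


original = pretty(r"z:\dev\src-chrome\src\base\build\base.vcproj")
generated = pretty(r"z:\dev\src-chrome\src\base\base_gyp.vcproj")

# As the note above suggests, drop the leading "..\ from paths in the original output.
original = [line.replace('"..\\', '"') for line in original]

for line in difflib.unified_diff(original, generated, lineterm=""):
    print(line)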
node_modules/node-gyp/gyp/tools/Xcode/README (new file, generated, vendored, +5 lines)
@@ -0,0 +1,5 @@
The Specifications directory contains syntax formatters for Xcode 3. These do not appear to be supported yet on Xcode 4. To use these with Xcode 3, please install both the gyp.pbfilespec and gyp.xclangspec files in

~/Library/Application Support/Developer/Shared/Xcode/Specifications/

and restart Xcode.
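For reference, a minimal sketch (not part of gyp) that copies the two spec files into the per-user Specifications directory named above; it assumes gyp.pbfilespec and gyp.xclangspec sit in the current directory.

import os
import shutil

# Per-user Xcode 3 specifications directory from the README above.
SPEC_DIR = os.path.expanduser(
    "~/Library/Application Support/Developer/Shared/Xcode/Specifications"
)


def install_specs(source_dir="."):
    """Copy gyp.pbfilespec and gyp.xclangspec into SPEC_DIR."""
    os.makedirs(SPEC_DIR, exist_ok=True)
    for name in ("gyp.pbfilespec", "gyp.xclangspec"):
        shutil.copy(os.path.join(source_dir, name), SPEC_DIR)


if __name__ == "__main__":
    install_specs()
    print("Installed; restart Xcode to pick up the new specifications.")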
node_modules/node-gyp/gyp/tools/Xcode/Specifications/gyp.pbfilespec (new file, generated, vendored, +27 lines)
@@ -0,0 +1,27 @@
/*
    gyp.pbfilespec
    GYP source file spec for Xcode 3

    There is not much documentation available regarding the format
    of .pbfilespec files. As a starting point, see for instance the
    outdated documentation at:
    http://maxao.free.fr/xcode-plugin-interface/specifications.html
    and the files in:
    /Developer/Library/PrivateFrameworks/XcodeEdit.framework/Versions/A/Resources/

    Place this file in directory:
    ~/Library/Application Support/Developer/Shared/Xcode/Specifications/
*/

(
    {
        Identifier = sourcecode.gyp;
        BasedOn = sourcecode;
        Name = "GYP Files";
        Extensions = ("gyp", "gypi");
        MIMETypes = ("text/gyp");
        Language = "xcode.lang.gyp";
        IsTextFile = YES;
        IsSourceFile = YES;
    }
)
node_modules/node-gyp/gyp/tools/Xcode/Specifications/gyp.xclangspec (new file, generated, vendored, +226 lines)
@@ -0,0 +1,226 @@
/*
    Copyright (c) 2011 Google Inc. All rights reserved.
    Use of this source code is governed by a BSD-style license that can be
    found in the LICENSE file.

    gyp.xclangspec
    GYP language specification for Xcode 3

    There is not much documentation available regarding the format
    of .xclangspec files. As a starting point, see for instance the
    outdated documentation at:
    http://maxao.free.fr/xcode-plugin-interface/specifications.html
    and the files in:
    /Developer/Library/PrivateFrameworks/XcodeEdit.framework/Versions/A/Resources/

    Place this file in directory:
    ~/Library/Application Support/Developer/Shared/Xcode/Specifications/
*/

(

    {
        Identifier = "xcode.lang.gyp.keyword";
        Syntax = {
            Words = (
                "and",
                "or",
                "<!",
                "<",
            );
            Type = "xcode.syntax.keyword";
        };
    },

    {
        Identifier = "xcode.lang.gyp.target.declarator";
        Syntax = {
            Words = (
                "'target_name'",
            );
            Type = "xcode.syntax.identifier.type";
        };
    },

    {
        Identifier = "xcode.lang.gyp.string.singlequote";
        Syntax = {
            IncludeRules = (
                "xcode.lang.string",
                "xcode.lang.gyp.keyword",
                "xcode.lang.number",
            );
            Start = "'";
            End = "'";
        };
    },

    {
        Identifier = "xcode.lang.gyp.comma";
        Syntax = {
            Words = ( ",", );

        };
    },

    {
        Identifier = "xcode.lang.gyp";
        Description = "GYP Coloring";
        BasedOn = "xcode.lang.simpleColoring";
        IncludeInMenu = YES;
        Name = "GYP";
        Syntax = {
            Tokenizer = "xcode.lang.gyp.lexer.toplevel";
            IncludeRules = (
                "xcode.lang.gyp.dictionary",
            );
            Type = "xcode.syntax.plain";
        };
    },

    // The following rule returns tokens to the other rules
    {
        Identifier = "xcode.lang.gyp.lexer";
        Syntax = {
            IncludeRules = (
                "xcode.lang.gyp.comment",
                "xcode.lang.string",
                'xcode.lang.gyp.targetname.declarator',
                "xcode.lang.gyp.string.singlequote",
                "xcode.lang.number",
                "xcode.lang.gyp.comma",
            );
        };
    },

    {
        Identifier = "xcode.lang.gyp.lexer.toplevel";
        Syntax = {
            IncludeRules = (
                "xcode.lang.gyp.comment",
            );
        };
    },

    {
        Identifier = "xcode.lang.gyp.assignment";
        Syntax = {
            Tokenizer = "xcode.lang.gyp.lexer";
            Rules = (
                "xcode.lang.gyp.assignment.lhs",
                ":",
                "xcode.lang.gyp.assignment.rhs",
            );
        };

    },

    {
        Identifier = "xcode.lang.gyp.target.declaration";
        Syntax = {
            Tokenizer = "xcode.lang.gyp.lexer";
            Rules = (
                "xcode.lang.gyp.target.declarator",
                ":",
                "xcode.lang.gyp.target.name",
            );
        };
    },

    {
        Identifier = "xcode.lang.gyp.target.name";
        Syntax = {
            Tokenizer = "xcode.lang.gyp.lexer";
            Rules = (
                "xcode.lang.gyp.string.singlequote",
            );
            Type = "xcode.syntax.definition.function";
        };
    },

    {
        Identifier = "xcode.lang.gyp.assignment.lhs";
        Syntax = {
            Tokenizer = "xcode.lang.gyp.lexer";
            Rules = (
                "xcode.lang.gyp.string.singlequote",
            );
            Type = "xcode.syntax.identifier.type";
        };
    },

    {
        Identifier = "xcode.lang.gyp.assignment.rhs";
        Syntax = {
            Tokenizer = "xcode.lang.gyp.lexer";
            Rules = (
                "xcode.lang.gyp.string.singlequote?",
                "xcode.lang.gyp.array?",
                "xcode.lang.gyp.dictionary?",
                "xcode.lang.number?",
            );
        };
    },

    {
        Identifier = "xcode.lang.gyp.dictionary";
        Syntax = {
            Tokenizer = "xcode.lang.gyp.lexer";
            Start = "{";
            End = "}";
            Foldable = YES;
            Recursive = YES;
            IncludeRules = (
                "xcode.lang.gyp.target.declaration",
                "xcode.lang.gyp.assignment",
            );
        };
    },

    {
        Identifier = "xcode.lang.gyp.array";
        Syntax = {
            Tokenizer = "xcode.lang.gyp.lexer";
            Start = "[";
            End = "]";
            Foldable = YES;
            Recursive = YES;
            IncludeRules = (
                "xcode.lang.gyp.array",
                "xcode.lang.gyp.dictionary",
                "xcode.lang.gyp.string.singlequote",
            );
        };
    },

    {
        Identifier = "xcode.lang.gyp.todo.mark";
        Syntax = {
            StartChars = "T";
            Match = (
                "^\(TODO\(.*\):[ \t]+.*\)$", // include "TODO: " in the markers list
            );
            // This is the order of captures. All of the match strings above need the same order.
            CaptureTypes = (
                "xcode.syntax.mark"
            );
            Type = "xcode.syntax.comment";
        };
    },

    {
        Identifier = "xcode.lang.gyp.comment";
        BasedOn = "xcode.lang.comment"; // for text macros
        Syntax = {
            Start = "#";
            End = "\n";
            IncludeRules = (
                "xcode.lang.url",
                "xcode.lang.url.mail",
                "xcode.lang.comment.mark",
                "xcode.lang.gyp.todo.mark",
            );
            Type = "xcode.syntax.comment";
        };
    },
)
node_modules/node-gyp/gyp/tools/emacs/README (new file, generated, vendored, +12 lines)
@@ -0,0 +1,12 @@
How to install gyp-mode for emacs:

Add the following to your ~/.emacs (replace ... with the path to your gyp
checkout).

(setq load-path (cons ".../tools/emacs" load-path))
(require 'gyp)

Restart emacs (or eval-region the added lines) and you should be all set.

Please note that ert is required for running the tests; it is included in
Emacs 24 and available separately from https://github.com/ohler/ert
node_modules/node-gyp/gyp/tools/emacs/gyp-tests.el (new file, generated, vendored, +63 lines)
@@ -0,0 +1,63 @@
;;; gyp-tests.el - unit tests for gyp-mode.

;; Copyright (c) 2012 Google Inc. All rights reserved.
;; Use of this source code is governed by a BSD-style license that can be
;; found in the LICENSE file.

;; The recommended way to run these tests is to run them from the command-line,
;; with the run-unit-tests.sh script.

(require 'cl)
(require 'ert)
(require 'gyp)

(defconst samples (directory-files "testdata" t ".gyp$")
  "List of golden samples to check")

(defun fontify (filename)
  (with-temp-buffer
    (insert-file-contents-literally filename)
    (gyp-mode)
    (font-lock-fontify-buffer)
    (buffer-string)))

(defun read-golden-sample (filename)
  (with-temp-buffer
    (insert-file-contents-literally (concat filename ".fontified"))
    (read (current-buffer))))

(defun equivalent-face (face)
  "For the purposes of face comparison, we're not interested in the
   differences between certain faces. For example, the difference between
   font-lock-comment-delimiter and font-lock-comment-face."
  (cl-case face
    ((font-lock-comment-delimiter-face) font-lock-comment-face)
    (t face)))

(defun text-face-properties (s)
  "Extract the text properties from s"
  (let ((result (list t)))
    (dotimes (i (length s))
      (setq result (cons (equivalent-face (get-text-property i 'face s))
                         result)))
    (nreverse result)))

(ert-deftest test-golden-samples ()
  "Check that fontification produces the same results as the golden samples"
  (dolist (sample samples)
    (let ((golden (read-golden-sample sample))
          (fontified (fontify sample)))
      (should (equal golden fontified))
      (should (equal (text-face-properties golden)
                     (text-face-properties fontified))))))

(defun create-golden-sample (filename)
  "Create a golden sample by fontifying filename and writing out the printable
   representation of the fontified buffer (with text properties) to the
   FILENAME.fontified"
  (with-temp-file (concat filename ".fontified")
    (print (fontify filename) (current-buffer))))

(defun create-golden-samples ()
  "Recreate the golden samples"
  (dolist (sample samples) (create-golden-sample sample)))
node_modules/node-gyp/gyp/tools/emacs/gyp.el (new file, generated, vendored, +275 lines)
@@ -0,0 +1,275 @@
;;; gyp.el - font-lock-mode support for gyp files.

;; Copyright (c) 2012 Google Inc. All rights reserved.
;; Use of this source code is governed by a BSD-style license that can be
;; found in the LICENSE file.

;; Put this somewhere in your load-path and
;; (require 'gyp)

(require 'python)
(require 'cl)

(when (string-match "python-mode.el" (symbol-file 'python-mode 'defun))
  (error (concat "python-mode must be loaded from python.el (bundled with "
                 "recent emacsen), not from the older and less maintained "
                 "python-mode.el")))

(defadvice python-indent-calculate-levels (after gyp-outdent-closing-parens
                                                 activate)
  "De-indent closing parens, braces, and brackets in gyp-mode."
  (when (and (eq major-mode 'gyp-mode)
             (string-match "^ *[])}][],)}]* *$"
                           (buffer-substring-no-properties
                            (line-beginning-position) (line-end-position))))
    (setf (first python-indent-levels)
          (- (first python-indent-levels) python-continuation-offset))))

(defadvice python-indent-guess-indent-offset (around
                                              gyp-indent-guess-indent-offset
                                              activate)
  "Guess correct indent offset in gyp-mode."
  (or (and (not (eq major-mode 'gyp-mode))
           ad-do-it)
      (save-excursion
        (save-restriction
          (widen)
          (goto-char (point-min))
          ;; Find first line ending with an opening brace that is not a comment.
          (or (and (re-search-forward "\\(^[[{]$\\|^.*[^#].*[[{]$\\)")
                   (forward-line)
                   (/= (current-indentation) 0)
                   (set (make-local-variable 'python-indent-offset)
                        (current-indentation))
                   (set (make-local-variable 'python-continuation-offset)
                        (current-indentation)))
              (message "Can't guess gyp indent offset, using default: %s"
                       python-continuation-offset))))))

(define-derived-mode gyp-mode python-mode "Gyp"
  "Major mode for editing .gyp files. See http://code.google.com/p/gyp/"
  ;; gyp-parse-history is a stack of (POSITION . PARSE-STATE) tuples,
  ;; with greater positions at the top of the stack. PARSE-STATE
  ;; is a list of section symbols (see gyp-section-name and gyp-parse-to)
  ;; with most nested section symbol at the front of the list.
  (set (make-local-variable 'gyp-parse-history) '((1 . (list))))
  (gyp-add-font-lock-keywords))

(defun gyp-set-indentation ()
  "Hook function to configure python indentation to suit gyp mode."
  (set (make-local-variable 'python-indent-offset) 2)
  (set (make-local-variable 'python-continuation-offset) 2)
  (set (make-local-variable 'python-indent-guess-indent-offset) t)
  (python-indent-guess-indent-offset))

(add-hook 'gyp-mode-hook 'gyp-set-indentation)

(add-to-list 'auto-mode-alist '("\\.gyp\\'" . gyp-mode))
(add-to-list 'auto-mode-alist '("\\.gypi\\'" . gyp-mode))
(add-to-list 'auto-mode-alist '("/\\.gclient\\'" . gyp-mode))

;;; Font-lock support

(defconst gyp-dependencies-regexp
  (regexp-opt (list "dependencies" "export_dependent_settings"))
  "Regular expression to introduce 'dependencies' section")

(defconst gyp-sources-regexp
  (regexp-opt (list "action" "files" "include_dirs" "includes" "inputs"
                    "libraries" "outputs" "sources"))
  "Regular expression to introduce 'sources' sections")

(defconst gyp-conditions-regexp
  (regexp-opt (list "conditions" "target_conditions"))
  "Regular expression to introduce conditions sections")

(defconst gyp-variables-regexp
  "^variables"
  "Regular expression to introduce variables sections")

(defconst gyp-defines-regexp
  "^defines"
  "Regular expression to introduce 'defines' sections")

(defconst gyp-targets-regexp
  "^targets"
  "Regular expression to introduce 'targets' sections")

(defun gyp-section-name (section)
  "Map the sections we are interested in from SECTION to symbol.

   SECTION is a string from the buffer that introduces a section. The result is
   a symbol representing the kind of section.

   This allows us to treat (for the purposes of font-lock) several different
   section names as the same kind of section. For example, a 'sources section
   can be introduced by the 'sources', 'inputs', 'outputs' keyword.

   'other is the default section kind when a more specific match is not made."
  (cond ((string-match-p gyp-dependencies-regexp section) 'dependencies)
        ((string-match-p gyp-sources-regexp section) 'sources)
        ((string-match-p gyp-variables-regexp section) 'variables)
        ((string-match-p gyp-conditions-regexp section) 'conditions)
        ((string-match-p gyp-targets-regexp section) 'targets)
        ((string-match-p gyp-defines-regexp section) 'defines)
        (t 'other)))

(defun gyp-invalidate-parse-states-after (target-point)
  "Erase any parse information after target-point."
  (while (> (caar gyp-parse-history) target-point)
    (setq gyp-parse-history (cdr gyp-parse-history))))

(defun gyp-parse-point ()
  "The point of the last parse state added by gyp-parse-to."
  (caar gyp-parse-history))

(defun gyp-parse-sections ()
  "A list of section symbols holding at the last parse state point."
  (cdar gyp-parse-history))

(defun gyp-inside-dictionary-p ()
  "Predicate returning true if the parser is inside a dictionary."
  (not (eq (cadar gyp-parse-history) 'list)))

(defun gyp-add-parse-history (point sections)
  "Add parse state SECTIONS to the parse history at POINT so that parsing can be
   resumed instantly."
  (while (>= (caar gyp-parse-history) point)
    (setq gyp-parse-history (cdr gyp-parse-history)))
  (setq gyp-parse-history (cons (cons point sections) gyp-parse-history)))

(defun gyp-parse-to (target-point)
  "Parses from (point) to TARGET-POINT adding the parse state information to
   gyp-parse-state-history. Parsing stops if TARGET-POINT is reached or if a
   string literal has been parsed. Returns nil if no further parsing can be
   done, otherwise returns the position of the start of a parsed string, leaving
   the point at the end of the string."
  (let ((parsing t)
        string-start)
    (while parsing
      (setq string-start nil)
      ;; Parse up to a character that starts a sexp, or if the nesting
      ;; level decreases.
      (let ((state (parse-partial-sexp (gyp-parse-point)
                                       target-point
                                       -1
                                       t))
            (sections (gyp-parse-sections)))
        (if (= (nth 0 state) -1)
            (setq sections (cdr sections)) ; pop out a level
          (cond ((looking-at-p "['\"]") ; a string
                 (setq string-start (point))
                 (goto-char (scan-sexps (point) 1))
                 (if (gyp-inside-dictionary-p)
                     ;; Look for sections inside a dictionary
                     (let ((section (gyp-section-name
                                     (buffer-substring-no-properties
                                      (+ 1 string-start)
                                      (- (point) 1)))))
                       (setq sections (cons section (cdr sections)))))
                 ;; Stop after the string so it can be fontified.
                 (setq target-point (point)))
                ((looking-at-p "{")
                 ;; Inside a dictionary. Increase nesting.
                 (forward-char 1)
                 (setq sections (cons 'unknown sections)))
                ((looking-at-p "\\[")
                 ;; Inside a list. Increase nesting
                 (forward-char 1)
                 (setq sections (cons 'list sections)))
                ((not (eobp))
                 ;; other
                 (forward-char 1))))
        (gyp-add-parse-history (point) sections)
        (setq parsing (< (point) target-point))))
    string-start))

(defun gyp-section-at-point ()
  "Transform the last parse state, which is a list of nested sections and return
   the section symbol that should be used to determine font-lock information for
   the string. Can return nil indicating the string should not have any attached
   section."
  (let ((sections (gyp-parse-sections)))
    (cond
     ((eq (car sections) 'conditions)
      ;; conditions can occur in a variables section, but we still want to
      ;; highlight it as a keyword.
      nil)
     ((and (eq (car sections) 'list)
           (eq (cadr sections) 'list))
      ;; conditions and sources can have items in [[ ]]
      (caddr sections))
     (t (cadr sections)))))

(defun gyp-section-match (limit)
  "Parse from (point) to LIMIT returning by means of match data what was
   matched. The group of the match indicates what style font-lock should apply.
   See also `gyp-add-font-lock-keywords'."
  (gyp-invalidate-parse-states-after (point))
  (let ((group nil)
        (string-start t))
    (while (and (< (point) limit)
                (not group)
                string-start)
      (setq string-start (gyp-parse-to limit))
      (if string-start
          (setq group (cl-case (gyp-section-at-point)
                        ('dependencies 1)
                        ('variables 2)
                        ('conditions 2)
                        ('sources 3)
                        ('defines 4)
                        (nil nil)))))
    (if group
        (progn
          ;; Set the match data to indicate to the font-lock mechanism the
          ;; highlighting to be performed.
          (set-match-data (append (list string-start (point))
                                  (make-list (* (1- group) 2) nil)
                                  (list (1+ string-start) (1- (point)))))
          t))))

;;; Please see http://code.google.com/p/gyp/wiki/GypLanguageSpecification for
;;; canonical list of keywords.
(defun gyp-add-font-lock-keywords ()
  "Add gyp-mode keywords to font-lock mechanism."
  ;; TODO(jknotten): Move all the keyword highlighting into gyp-section-match
  ;; so that we can do the font-locking in a single font-lock pass.
  (font-lock-add-keywords
   nil
   (list
    ;; Top-level keywords
    (list (concat "['\"]\\("
                  (regexp-opt (list "action" "action_name" "actions" "cflags"
                                    "cflags_cc" "conditions" "configurations"
                                    "copies" "defines" "dependencies" "destination"
                                    "direct_dependent_settings"
                                    "export_dependent_settings" "extension" "files"
                                    "include_dirs" "includes" "inputs" "ldflags" "libraries"
                                    "link_settings" "mac_bundle" "message"
                                    "msvs_external_rule" "outputs" "product_name"
                                    "process_outputs_as_sources" "rules" "rule_name"
                                    "sources" "suppress_wildcard"
                                    "target_conditions" "target_defaults"
                                    "target_defines" "target_name" "toolsets"
                                    "targets" "type" "variables" "xcode_settings"))
                  "[!/+=]?\\)") 1 'font-lock-keyword-face t)
    ;; Type of target
    (list (concat "['\"]\\("
                  (regexp-opt (list "loadable_module" "static_library"
                                    "shared_library" "executable" "none"))
                  "\\)") 1 'font-lock-type-face t)
    (list "\\(?:target\\|action\\)_name['\"]\\s-*:\\s-*['\"]\\([^ '\"]*\\)" 1
          'font-lock-function-name-face t)
    (list 'gyp-section-match
          (list 1 'font-lock-function-name-face t t) ; dependencies
          (list 2 'font-lock-variable-name-face t t) ; variables, conditions
          (list 3 'font-lock-constant-face t t) ; sources
          (list 4 'font-lock-preprocessor-face t t)) ; preprocessor
    ;; Variable expansion
    (list "<@?(\\([^\n )]+\\))" 1 'font-lock-variable-name-face t)
    ;; Command expansion
    (list "<!@?(\\([^\n )]+\\))" 1 'font-lock-variable-name-face t)
    )))

(provide 'gyp)
node_modules/node-gyp/gyp/tools/emacs/run-unit-tests.sh (new file, generated, vendored, +7 lines)
@@ -0,0 +1,7 @@
#!/bin/sh
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
emacs --no-site-file --no-init-file --batch \
      --load ert.el --load gyp.el --load gyp-tests.el \
      -f ert-run-tests-batch-and-exit
node_modules/node-gyp/gyp/tools/emacs/testdata/media.gyp (new file, generated, vendored, +1105 lines)
(File diff suppressed because it is too large.)
node_modules/node-gyp/gyp/tools/emacs/testdata/media.gyp.fontified (new file, generated, vendored, +1107 lines)
(File diff suppressed because one or more lines are too long.)
node_modules/node-gyp/gyp/tools/graphviz.py (new file, generated, vendored, +102 lines)
@@ -0,0 +1,102 @@
#!/usr/bin/env python3

# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""


import collections
import json
import sys


def ParseTarget(target):
    target, _, suffix = target.partition("#")
    filename, _, target = target.partition(":")
    return filename, target, suffix


def LoadEdges(filename, targets):
    """Load the edges map from the dump file, and filter it to only
    show targets in |targets| and their depedendents."""

    file = open("dump.json")
    edges = json.load(file)
    file.close()

    # Copy out only the edges we're interested in from the full edge list.
    target_edges = {}
    to_visit = targets[:]
    while to_visit:
        src = to_visit.pop()
        if src in target_edges:
            continue
        target_edges[src] = edges[src]
        to_visit.extend(edges[src])

    return target_edges


def WriteGraph(edges):
    """Print a graphviz graph to stdout.
    |edges| is a map of target to a list of other targets it depends on."""

    # Bucket targets by file.
    files = collections.defaultdict(list)
    for src, dst in edges.items():
        build_file, target_name, toolset = ParseTarget(src)
        files[build_file].append(src)

    print("digraph D {")
    print(" fontsize=8")  # Used by subgraphs.
    print(" node [fontsize=8]")

    # Output nodes by file. We must first write out each node within
    # its file grouping before writing out any edges that may refer
    # to those nodes.
    for filename, targets in files.items():
        if len(targets) == 1:
            # If there's only one node for this file, simplify
            # the display by making it a box without an internal node.
            target = targets[0]
            build_file, target_name, toolset = ParseTarget(target)
            print(
                f' "{target}" [shape=box, label="{filename}\\n{target_name}"]'
            )
        else:
            # Group multiple nodes together in a subgraph.
            print(' subgraph "cluster_%s" {' % filename)
            print(' label = "%s"' % filename)
            for target in targets:
                build_file, target_name, toolset = ParseTarget(target)
                print(f' "{target}" [label="{target_name}"]')
            print(" }")

    # Now that we've placed all the nodes within subgraphs, output all
    # the edges between nodes.
    for src, dsts in edges.items():
        for dst in dsts:
            print(f' "{src}" -> "{dst}"')

    print("}")


def main():
    if len(sys.argv) < 2:
        print(__doc__, file=sys.stderr)
        print(file=sys.stderr)
        print("usage: %s target1 target2..." % (sys.argv[0]), file=sys.stderr)
        return 1

    edges = LoadEdges("dump.json", sys.argv[1:])

    WriteGraph(edges)
    return 0


if __name__ == "__main__":
    sys.exit(main())
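A short sketch (not part of gyp) of driving the functions above from Python rather than the command line. It assumes a dump.json produced by gyp's dump_dependency_json generator already exists in the working directory, that "all.gyp:All#target" is a target present in that dump, and that the Graphviz dot binary is installed; all three are assumptions.

import contextlib
import subprocess

import graphviz  # the tools/graphviz.py module shown above, not the PyPI package

# Hypothetical target name; substitute one that actually appears in your dump.json.
edges = graphviz.LoadEdges("dump.json", ["all.gyp:All#target"])

# WriteGraph prints DOT text to stdout; capture it into a file, then render it.
with open("deps.dot", "w") as out, contextlib.redirect_stdout(out):
    graphviz.WriteGraph(edges)

subprocess.run(["dot", "-Tpng", "deps.dot", "-o", "deps.png"], check=True)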
node_modules/node-gyp/gyp/tools/pretty_gyp.py (new file, generated, vendored, +156 lines)
@@ -0,0 +1,156 @@
#!/usr/bin/env python3

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Pretty-prints the contents of a GYP file."""


import sys
import re


# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r"\s*#.*")

# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)


def comment_replace(matchobj):
    return matchobj.group(1) + matchobj.group(2) + "#" * len(matchobj.group(3))


def mask_comments(input):
    """Mask the quoted strings so we skip braces inside quoted strings."""
    search_re = re.compile(r"(.*?)(#)(.*)")
    return [search_re.sub(comment_replace, line) for line in input]


def quote_replace(matchobj):
    return "{}{}{}{}".format(
        matchobj.group(1),
        matchobj.group(2),
        "x" * len(matchobj.group(3)),
        matchobj.group(2),
    )


def mask_quotes(input):
    """Mask the quoted strings so we skip braces inside quoted strings."""
    search_re = re.compile(r"(.*?)" + QUOTE_RE_STR)
    return [search_re.sub(quote_replace, line) for line in input]


def do_split(input, masked_input, search_re):
    output = []
    mask_output = []
    for (line, masked_line) in zip(input, masked_input):
        m = search_re.match(masked_line)
        while m:
            split = len(m.group(1))
            line = line[:split] + r"\n" + line[split:]
            masked_line = masked_line[:split] + r"\n" + masked_line[split:]
            m = search_re.match(masked_line)
        output.extend(line.split(r"\n"))
        mask_output.extend(masked_line.split(r"\n"))
    return (output, mask_output)


def split_double_braces(input):
    """Masks out the quotes and comments, and then splits appropriate
    lines (lines that matche the double_*_brace re's above) before
    indenting them below.

    These are used to split lines which have multiple braces on them, so
    that the indentation looks prettier when all laid out (e.g. closing
    braces make a nice diagonal line).
    """
    double_open_brace_re = re.compile(r"(.*?[\[\{\(,])(\s*)([\[\{\(])")
    double_close_brace_re = re.compile(r"(.*?[\]\}\)],?)(\s*)([\]\}\)])")

    masked_input = mask_quotes(input)
    masked_input = mask_comments(masked_input)

    (output, mask_output) = do_split(input, masked_input, double_open_brace_re)
    (output, mask_output) = do_split(output, mask_output, double_close_brace_re)

    return output


def count_braces(line):
    """keeps track of the number of braces on a given line and returns the result.

    It starts at zero and subtracts for closed braces, and adds for open braces.
    """
    open_braces = ["[", "(", "{"]
    close_braces = ["]", ")", "}"]
    closing_prefix_re = re.compile(r"(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$")
    cnt = 0
    stripline = COMMENT_RE.sub(r"", line)
    stripline = QUOTE_RE.sub(r"''", stripline)
    for char in stripline:
        for brace in open_braces:
            if char == brace:
                cnt += 1
        for brace in close_braces:
            if char == brace:
                cnt -= 1

    after = False
    if cnt > 0:
        after = True

    # This catches the special case of a closing brace having something
    # other than just whitespace ahead of it -- we don't want to
    # unindent that until after this line is printed so it stays with
    # the previous indentation level.
    if cnt < 0 and closing_prefix_re.match(stripline):
        after = True
    return (cnt, after)


def prettyprint_input(lines):
    """Does the main work of indenting the input based on the brace counts."""
    indent = 0
    basic_offset = 2
    for line in lines:
        if COMMENT_RE.match(line):
            print(line)
        else:
            line = line.strip("\r\n\t ")  # Otherwise doesn't strip \r on Unix.
            if len(line) > 0:
                (brace_diff, after) = count_braces(line)
                if brace_diff != 0:
                    if after:
                        print(" " * (basic_offset * indent) + line)
                        indent += brace_diff
                    else:
                        indent += brace_diff
                        print(" " * (basic_offset * indent) + line)
                else:
                    print(" " * (basic_offset * indent) + line)
            else:
                print("")


def main():
    if len(sys.argv) > 1:
        data = open(sys.argv[1]).read().splitlines()
    else:
        data = sys.stdin.read().splitlines()
    # Split up the double braces.
    lines = split_double_braces(data)

    # Indent and print the output.
    prettyprint_input(lines)
    return 0


if __name__ == "__main__":
    sys.exit(main())
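A small sketch (not part of gyp) showing the same pipeline main() uses, but on an in-memory snippet: mask and split with split_double_braces, then re-indent with prettyprint_input. It assumes pretty_gyp.py is importable from the current directory; the GYP snippet itself is made up.

import contextlib
import io

import pretty_gyp  # tools/pretty_gyp.py

# A deliberately ugly two-line GYP fragment with multiple braces per line.
raw = [
    "{'targets': [{'target_name': 'demo',",
    "  'type': 'none', }, ], }",
]

# prettyprint_input writes to stdout, so capture it into a buffer.
buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    pretty_gyp.prettyprint_input(pretty_gyp.split_double_braces(raw))
print(buf.getvalue())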
node_modules/node-gyp/gyp/tools/pretty_sln.py (new file, generated, vendored, +181 lines)
@@ -0,0 +1,181 @@
#!/usr/bin/env python3

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Prints the information in a sln file in a diffable way.

It first outputs each projects in alphabetical order with their
dependencies.

Then it outputs a possible build order.
"""


import os
import re
import sys
import pretty_vcproj

__author__ = "nsylvain (Nicolas Sylvain)"


def BuildProject(project, built, projects, deps):
    # if all dependencies are done, we can build it, otherwise we try to build the
    # dependency.
    # This is not infinite-recursion proof.
    for dep in deps[project]:
        if dep not in built:
            BuildProject(dep, built, projects, deps)
    print(project)
    built.append(project)


def ParseSolution(solution_file):
    # All projects, their clsid and paths.
    projects = dict()

    # A list of dependencies associated with a project.
    dependencies = dict()

    # Regular expressions that matches the SLN format.
    # The first line of a project definition.
    begin_project = re.compile(
        r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
        r'}"\) = "(.*)", "(.*)", "(.*)"$'
    )
    # The last line of a project definition.
    end_project = re.compile("^EndProject$")
    # The first line of a dependency list.
    begin_dep = re.compile(r"ProjectSection\(ProjectDependencies\) = postProject$")
    # The last line of a dependency list.
    end_dep = re.compile("EndProjectSection$")
    # A line describing a dependency.
    dep_line = re.compile(" *({.*}) = ({.*})$")

    in_deps = False
    solution = open(solution_file)
    for line in solution:
        results = begin_project.search(line)
        if results:
            # Hack to remove icu because the diff is too different.
            if results.group(1).find("icu") != -1:
                continue
            # We remove "_gyp" from the names because it helps to diff them.
            current_project = results.group(1).replace("_gyp", "")
            projects[current_project] = [
                results.group(2).replace("_gyp", ""),
                results.group(3),
                results.group(2),
            ]
            dependencies[current_project] = []
            continue

        results = end_project.search(line)
        if results:
            current_project = None
            continue

        results = begin_dep.search(line)
        if results:
            in_deps = True
            continue

        results = end_dep.search(line)
        if results:
            in_deps = False
            continue

        results = dep_line.search(line)
        if results and in_deps and current_project:
            dependencies[current_project].append(results.group(1))
            continue

    # Change all dependencies clsid to name instead.
    for project in dependencies:
        # For each dependencies in this project
        new_dep_array = []
        for dep in dependencies[project]:
            # Look for the project name matching this cldis
            for project_info in projects:
                if projects[project_info][1] == dep:
                    new_dep_array.append(project_info)
        dependencies[project] = sorted(new_dep_array)

    return (projects, dependencies)


def PrintDependencies(projects, deps):
    print("---------------------------------------")
    print("Dependencies for all projects")
    print("---------------------------------------")
    print("-- --")

    for (project, dep_list) in sorted(deps.items()):
        print("Project : %s" % project)
        print("Path : %s" % projects[project][0])
        if dep_list:
            for dep in dep_list:
                print(" - %s" % dep)
        print("")

    print("-- --")


def PrintBuildOrder(projects, deps):
    print("---------------------------------------")
    print("Build order ")
    print("---------------------------------------")
    print("-- --")

    built = []
    for (project, _) in sorted(deps.items()):
        if project not in built:
            BuildProject(project, built, projects, deps)

    print("-- --")


def PrintVCProj(projects):

    for project in projects:
        print("-------------------------------------")
        print("-------------------------------------")
        print(project)
        print(project)
        print(project)
        print("-------------------------------------")
        print("-------------------------------------")

        project_path = os.path.abspath(
            os.path.join(os.path.dirname(sys.argv[1]), projects[project][2])
        )

        pretty = pretty_vcproj
        argv = [
            "",
            project_path,
            "$(SolutionDir)=%s\\" % os.path.dirname(sys.argv[1]),
        ]
        argv.extend(sys.argv[3:])
        pretty.main(argv)


def main():
    # check if we have exactly 1 parameter.
    if len(sys.argv) < 2:
        print('Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0])
        return 1

    (projects, deps) = ParseSolution(sys.argv[1])
    PrintDependencies(projects, deps)
    PrintBuildOrder(projects, deps)

    if "--recursive" in sys.argv:
        PrintVCProj(projects)
    return 0


if __name__ == "__main__":
    sys.exit(main())
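A minimal sketch (not part of gyp) using the module as a library instead of a command-line tool; the solution path is a placeholder, and pretty_vcproj.py must sit next to pretty_sln.py so its import succeeds.

import pretty_sln  # tools/pretty_sln.py

# Placeholder path; any .sln produced by gyp's msvs generator will do.
projects, deps = pretty_sln.ParseSolution(r"c:\dev\chrome\src\build\all.sln")

# Same output as running `pretty_sln.py all.sln` on the command line.
pretty_sln.PrintDependencies(projects, deps)
pretty_sln.PrintBuildOrder(projects, deps)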
node_modules/node-gyp/gyp/tools/pretty_vcproj.py (new file, generated, vendored, +339 lines)
@@ -0,0 +1,339 @@
#!/usr/bin/env python3

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Make the format of a vcproj really pretty.

This script normalize and sort an xml. It also fetches all the properties
inside linked vsprops and include them explicitly in the vcproj.

It outputs the resulting xml to stdout.
"""


import os
import sys

from xml.dom.minidom import parse
from xml.dom.minidom import Node

__author__ = "nsylvain (Nicolas Sylvain)"
ARGUMENTS = None
REPLACEMENTS = dict()


def cmp(x, y):
    return (x > y) - (x < y)


class CmpTuple:
    """Compare function between 2 tuple."""

    def __call__(self, x, y):
        return cmp(x[0], y[0])


class CmpNode:
    """Compare function between 2 xml nodes."""

    def __call__(self, x, y):
        def get_string(node):
            node_string = "node"
            node_string += node.nodeName
            if node.nodeValue:
                node_string += node.nodeValue

            if node.attributes:
                # We first sort by name, if present.
                node_string += node.getAttribute("Name")

                all_nodes = []
                for (name, value) in node.attributes.items():
                    all_nodes.append((name, value))

                all_nodes.sort(CmpTuple())
                for (name, value) in all_nodes:
                    node_string += name
                    node_string += value

            return node_string

        return cmp(get_string(x), get_string(y))


def PrettyPrintNode(node, indent=0):
    if node.nodeType == Node.TEXT_NODE:
        if node.data.strip():
            print("{}{}".format(" " * indent, node.data.strip()))
        return

    if node.childNodes:
        node.normalize()
    # Get the number of attributes
    attr_count = 0
    if node.attributes:
        attr_count = node.attributes.length

    # Print the main tag
    if attr_count == 0:
        print("{}<{}>".format(" " * indent, node.nodeName))
    else:
        print("{}<{}".format(" " * indent, node.nodeName))

        all_attributes = []
        for (name, value) in node.attributes.items():
            all_attributes.append((name, value))
        all_attributes.sort(CmpTuple())
        for (name, value) in all_attributes:
            print('{} {}="{}"'.format(" " * indent, name, value))
        print("%s>" % (" " * indent))
    if node.nodeValue:
        print("{} {}".format(" " * indent, node.nodeValue))

    for sub_node in node.childNodes:
        PrettyPrintNode(sub_node, indent=indent + 2)
    print("{}</{}>".format(" " * indent, node.nodeName))


def FlattenFilter(node):
    """Returns a list of all the node and sub nodes."""
    node_list = []

    if node.attributes and node.getAttribute("Name") == "_excluded_files":
        # We don't add the "_excluded_files" filter.
        return []

    for current in node.childNodes:
        if current.nodeName == "Filter":
            node_list.extend(FlattenFilter(current))
        else:
            node_list.append(current)

    return node_list


def FixFilenames(filenames, current_directory):
    new_list = []
    for filename in filenames:
        if filename:
            for key in REPLACEMENTS:
                filename = filename.replace(key, REPLACEMENTS[key])
            os.chdir(current_directory)
            filename = filename.strip("\"' ")
            if filename.startswith("$"):
                new_list.append(filename)
            else:
                new_list.append(os.path.abspath(filename))
    return new_list


def AbsoluteNode(node):
    """Makes all the properties we know about in this node absolute."""
    if node.attributes:
        for (name, value) in node.attributes.items():
            if name in [
                "InheritedPropertySheets",
                "RelativePath",
                "AdditionalIncludeDirectories",
                "IntermediateDirectory",
                "OutputDirectory",
                "AdditionalLibraryDirectories",
            ]:
                # We want to fix up these paths
                path_list = value.split(";")
                new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
                node.setAttribute(name, ";".join(new_list))
            if not value:
                node.removeAttribute(name)


def CleanupVcproj(node):
    """For each sub node, we call recursively this function."""
    for sub_node in node.childNodes:
        AbsoluteNode(sub_node)
        CleanupVcproj(sub_node)

    # Normalize the node, and remove all extraneous whitespaces.
    for sub_node in node.childNodes:
        if sub_node.nodeType == Node.TEXT_NODE:
            sub_node.data = sub_node.data.replace("\r", "")
            sub_node.data = sub_node.data.replace("\n", "")
            sub_node.data = sub_node.data.rstrip()

    # Fix all the semicolon separated attributes to be sorted, and we also
    # remove the dups.
    if node.attributes:
        for (name, value) in node.attributes.items():
            sorted_list = sorted(value.split(";"))
            unique_list = []
            for i in sorted_list:
                if not unique_list.count(i):
                    unique_list.append(i)
            node.setAttribute(name, ";".join(unique_list))
            if not value:
                node.removeAttribute(name)

    if node.childNodes:
        node.normalize()

    # For each node, take a copy, and remove it from the list.
    node_array = []
    while node.childNodes and node.childNodes[0]:
        # Take a copy of the node and remove it from the list.
        current = node.childNodes[0]
        node.removeChild(current)

        # If the child is a filter, we want to append all its children
        # to this same list.
        if current.nodeName == "Filter":
            node_array.extend(FlattenFilter(current))
        else:
            node_array.append(current)

    # Sort the list.
    node_array.sort(CmpNode())

    # Insert the nodes in the correct order.
    for new_node in node_array:
        # But don't append empty tool node.
        if new_node.nodeName == "Tool":
            if new_node.attributes and new_node.attributes.length == 1:
                # This one was empty.
                continue
        if new_node.nodeName == "UserMacro":
            continue
        node.appendChild(new_node)


def GetConfiguationNodes(vcproj):
    # TODO(nsylvain): Find a better way to navigate the xml.
    nodes = []
    for node in vcproj.childNodes:
        if node.nodeName == "Configurations":
            for sub_node in node.childNodes:
                if sub_node.nodeName == "Configuration":
                    nodes.append(sub_node)

    return nodes


def GetChildrenVsprops(filename):
    dom = parse(filename)
    if dom.documentElement.attributes:
        vsprops = dom.documentElement.getAttribute("InheritedPropertySheets")
        return FixFilenames(vsprops.split(";"), os.path.dirname(filename))
    return []


def SeekToNode(node1, child2):
    # A text node does not have properties.
    if child2.nodeType == Node.TEXT_NODE:
        return None

    # Get the name of the current node.
    current_name = child2.getAttribute("Name")
    if not current_name:
        # There is no name. We don't know how to merge.
        return None

    # Look through all the nodes to find a match.
    for sub_node in node1.childNodes:
        if sub_node.nodeName == child2.nodeName:
            name = sub_node.getAttribute("Name")
            if name == current_name:
                return sub_node

    # No match. We give up.
    return None


def MergeAttributes(node1, node2):
    # No attributes to merge?
    if not node2.attributes:
        return

    for (name, value2) in node2.attributes.items():
        # Don't merge the 'Name' attribute.
        if name == "Name":
            continue
        value1 = node1.getAttribute(name)
        if value1:
            # The attribute exist in the main node. If it's equal, we leave it
            # untouched, otherwise we concatenate it.
            if value1 != value2:
                node1.setAttribute(name, ";".join([value1, value2]))
        else:
            # The attribute does not exist in the main node. We append this one.
            node1.setAttribute(name, value2)

        # If the attribute was a property sheet attributes, we remove it, since
        # they are useless.
        if name == "InheritedPropertySheets":
            node1.removeAttribute(name)


def MergeProperties(node1, node2):
    MergeAttributes(node1, node2)
    for child2 in node2.childNodes:
        child1 = SeekToNode(node1, child2)
        if child1:
            MergeProperties(child1, child2)
        else:
            node1.appendChild(child2.cloneNode(True))


def main(argv):
    """Main function of this vcproj prettifier."""
    global ARGUMENTS
    ARGUMENTS = argv

    # check if we have exactly 1 parameter.
    if len(argv) < 2:
        print(
            'Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
            "[key2=value2]" % argv[0]
        )
        return 1

    # Parse the keys
    for i in range(2, len(argv)):
        (key, value) = argv[i].split("=")
        REPLACEMENTS[key] = value

    # Open the vcproj and parse the xml.
    dom = parse(argv[1])

    # First thing we need to do is find the Configuration Node and merge them
    # with the vsprops they include.
    for configuration_node in GetConfiguationNodes(dom.documentElement):
        # Get the property sheets associated with this configuration.
        vsprops = configuration_node.getAttribute("InheritedPropertySheets")

        # Fix the filenames to be absolute.
        vsprops_list = FixFilenames(
            vsprops.strip().split(";"), os.path.dirname(argv[1])
        )

        # Extend the list of vsprops with all vsprops contained in the current
        # vsprops.
        for current_vsprops in vsprops_list:
            vsprops_list.extend(GetChildrenVsprops(current_vsprops))

        # Now that we have all the vsprops, we need to merge them.
        for current_vsprops in vsprops_list:
            MergeProperties(configuration_node, parse(current_vsprops).documentElement)

    # Now that everything is merged, we need to cleanup the xml.
    CleanupVcproj(dom.documentElement)

    # Finally, we use the prett xml function to print the vcproj back to the
    # user.
    # print dom.toprettyxml(newl="\n")
    PrettyPrintNode(dom.documentElement)
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv))
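A minimal sketch (not part of gyp) that calls main() directly with the same argument layout the tools/README describes; the paths and macro values are placeholders taken from that README.

import pretty_vcproj  # tools/pretty_vcproj.py

# argv[0] is ignored, argv[1] is the vcproj to pretty-print, and the remaining
# key=value pairs become REPLACEMENTS used to resolve vsprops macros.
pretty_vcproj.main([
    "pretty_vcproj.py",
    r"z:\dev\src-chrome\src\base\build\base.vcproj",
    r"$(SolutionDir)=z:\dev\src-chrome\src\chrome" + "\\",
    "$(CHROMIUM_BUILD)=",
    "$(CHROME_BUILD_TYPE)=",
])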