#!/usr/bin/perl
# Run perldoc on this file for documentation.
# For the benefit of HTML viewers:
#
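# A rough orientation sketch of the machinery defined just below (the 'note' and
# 'todo' names here are hypothetical examples, not attributes of this object):
#
#   %data holds every attribute keyed as 'type::name', e.g. $data{'note::todo'}.
#   meta::define_form 'note', sub {my ($name, $value) = @_; ...};          # how note:: attributes are evaluated at load time
#   meta::externalize 'todo', 'note::todo', sub {retrieve('note::todo')};  # exposes 'todo' as a shell-level command
#   meta::eval_in($code, 'note::todo');   # eval() whose errors name the attribute instead of '(eval N)'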
$|++; my %data; my %transient; my %externalized_functions; my %datatypes; my %locations; # Maps eval-numbers to attribute names sub meta::define_form { my ($namespace, $delegate) = @_; $datatypes{$namespace} = $delegate; *{"meta::${namespace}::implementation"} = $delegate; *{"meta::$namespace"} = sub { my ($name, $value, %options) = @_; chomp $value; $data{"${namespace}::$name"} = $value unless $options{no_binding}; $delegate->($name, $value) unless $options{no_delegate}}} sub meta::eval_in { my ($what, $where) = @_; # Obtain next eval-number and alias it to the designated location @locations{eval('__FILE__') =~ /\(eval (\d+)\)/} = ($where); my $result = eval $what; $@ =~ s/\(eval \d+\)/$where/ if $@; warn $@ if $@; $result} meta::define_form 'meta', sub { my ($name, $value) = @_; meta::eval_in($value, "meta::$name")}; meta::meta('configure', <<'__'); # A function to configure transients. Transients can be used to store any number of # different things, but one of the more common usages is type descriptors. sub meta::configure { my ($datatype, %options) = @_; $transient{$_}{$datatype} = $options{$_} for keys %options; } __ meta::meta('externalize', <<'__'); # Function externalization. Data types should call this method when defining a function # that has an external interface. sub meta::externalize { my ($name, $attribute, $implementation) = @_; $externalized_functions{$name} = $attribute; *{"::$name"} = $implementation || $attribute; } __ meta::meta('functor::editable', <<'__'); # An editable type. This creates a type whose default action is to open an editor # on whichever value is mentioned. This can be changed using different flags. sub meta::functor::editable { my ($typename, %options) = @_; meta::configure $typename, %options; meta::define_form $typename, sub { my ($name, $value) = @_; $options{on_bind} && &{$options{on_bind}}($name, $value); meta::externalize $options{prefix} . $name, "${typename}::$name", sub { my $attribute = "${typename}::$name"; my ($command, @new_value) = @_; return &{$options{default}}(retrieve($attribute)) if ref $options{default} eq 'CODE' and not defined $command; return edit($attribute) if $command eq 'edit' or $options{default} eq 'edit' and not defined $command; return associate($attribute, @new_value ? join(' ', @new_value) : join('', )) if $command eq '=' or $command eq 'import' or $options{default} eq 'import' and not defined $command; return retrieve($attribute)}}} __ meta::meta('type::alias', <<'__'); meta::configure 'alias', inherit => 0; meta::define_form 'alias', sub { my ($name, $value) = @_; meta::externalize $name, "alias::$name", sub { # Can't pre-tokenize because shell::tokenize doesn't exist until the library:: # namespace has been evaluated (which will be after alias::). shell::run(shell::tokenize($value), shell::tokenize(@_)); }; }; __ meta::meta('type::bootstrap', <<'__'); # Bootstrap attributes don't get executed. The reason for this is that because # they are serialized directly into the header of the file (and later duplicated # as regular data attributes), they will have already been executed when the # file is loaded. 
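# An illustration of the editable functor above (the 'journal' type is hypothetical,
# not part of this object; type::note below is the real analogue):
#
#   meta::functor::editable 'journal', extension => '.sdoc', inherit => 0, default => 'edit';
#
# This registers a 'journal' namespace whose externalized commands open $VISUAL/$EDITOR
# by default, accept '=' or 'import' to replace an attribute's value from arguments or
# standard input, and otherwise return the stored text via retrieve().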
meta::configure 'bootstrap', extension => '.pl', inherit => 1; meta::define_form 'bootstrap', sub {}; __ meta::meta('type::cache', <<'__'); meta::configure 'cache', inherit => 0; meta::define_form 'cache', \&meta::bootstrap::implementation; __ meta::meta('type::cached_dependency', <<'__'); meta::configure 'cached_dependency', inherit => 0, extension => ''; meta::define_form 'cached_dependency', \&meta::bootstrap::implementation; __ meta::meta('type::configuration', <<'__'); meta::functor::editable 'configuration', inherit => 0, extension => '.conf', default => sub { # Any lines starting with #, with or without leading whitespace, are treated as comments. # Comments are not parsed in option text; that is, you could specify an option that contained # a # and the # and following text would be considered part of that option. my ($data) = @_; my @options = grep /:/o && ! /^\h*#/o && ! /^\h*$/o, split(/\v+/o, $data); s/^\h+//o for @options; my @key_values = map split(/\s*:\s*/o, $_, 2), @options; $key_values[$_ << 1] and $key_values[$_ << 1] =~ s/\s/_/go for 0 .. @key_values >> 1; $key_values[$_ << 1] and $key_values[$_ << 1] = lc $key_values[$_ << 1] for 0 .. @key_values >> 1; @key_values; }; __ meta::meta('type::data', 'meta::functor::editable \'data\', extension => \'\', inherit => 0, default => \'cat\';'); meta::meta('type::function', <<'__'); meta::configure 'function', extension => '.pl', inherit => 1; meta::define_form 'function', sub { my ($name, $value) = @_; meta::externalize $name, "function::$name", meta::eval_in("sub {\n$value\n}", "function::$name"); }; __ meta::meta('type::hook', <<'__'); meta::configure 'hook', extension => '.pl', inherit => 0; meta::define_form 'hook', sub { my ($name, $value) = @_; *{"hook::$name"} = meta::eval_in("sub {\n$value\n}", "hook::$name"); }; __ meta::meta('type::inc', <<'__'); meta::configure 'inc', inherit => 1, extension => '.pl'; meta::define_form 'inc', sub { use File::Path 'mkpath'; use File::Basename qw/basename dirname/; my ($name, $value) = @_; my $tmpdir = basename($0) . '-' . $$; my $filename = "/tmp/$tmpdir/$name"; push @INC, "/tmp/$tmpdir" unless grep /^\/tmp\/$tmpdir$/, @INC; mkpath(dirname($filename)); unless (-e $filename) { open my $fh, '>', $filename; print $fh $value; close $fh; } }; __ meta::meta('type::internal_function', <<'__'); meta::configure 'internal_function', extension => '.pl', inherit => 1; meta::define_form 'internal_function', sub { my ($name, $value) = @_; *{$name} = meta::eval_in("sub {\n$value\n}", "internal_function::$name"); }; __ meta::meta('type::js', 'meta::functor::editable \'js\', extension => \'.js\', inherit => 1;'); meta::meta('type::library', <<'__'); meta::configure 'library', extension => '.pl', inherit => 1; meta::define_form 'library', sub { my ($name, $value) = @_; meta::eval_in($value, "library::$name"); }; __ meta::meta('type::message_color', <<'__'); meta::configure 'message_color', extension => '', inherit => 1; meta::define_form 'message_color', sub { my ($name, $value) = @_; terminal::color($name, $value); }; __ meta::meta('type::meta', <<'__'); # This doesn't define a new type. It customizes the existing 'meta' type # defined in bootstrap::initialization. Note that horrible things will # happen if you redefine it using the editable functor. 
meta::configure 'meta', extension => '.pl', inherit => 1; __ meta::meta('type::note', 'meta::functor::editable \'note\', extension => \'.sdoc\', inherit => 0, default => \'edit\';'); meta::meta('type::parent', <<'__'); meta::define_form 'parent', \&meta::bootstrap::implementation; meta::configure 'parent', extension => '', inherit => 1; __ meta::meta('type::retriever', <<'__'); meta::configure 'retriever', extension => '.pl', inherit => 1; meta::define_form 'retriever', sub { my ($name, $value) = @_; $transient{retrievers}{$name} = meta::eval_in("sub {\n$value\n}", "retriever::$name"); }; __ meta::meta('type::sdoc', <<'__'); # A meta-type for other types. So retrieve('js::main') will work if you have # the attribute 'sdoc::js::main'. The filename will be main.js.sdoc. meta::functor::editable 'sdoc', inherit => 1, extension => sub { extension_for(attribute($_[0])) . '.sdoc'; }; __ meta::meta('type::state', <<'__'); # Allows temporary or long-term storage of states. Nothing particularly insightful # is done about compression, so storing alternative states will cause a large # increase in size. Also, states don't contain other states -- otherwise the size # increase would be exponential. # States are created with the save-state function. meta::configure 'state', inherit => 0, extension => '.pl'; meta::define_form 'state', \&meta::bootstrap::implementation; __ meta::meta('type::template', <<'__'); meta::configure 'template', extension => '.pl', inherit => 1; meta::define_form 'template', sub { my ($name, $value) = @_; meta::externalize "template::$name", "template::$name", meta::eval_in("sub {\n$value\n}", "template::$name"); }; __ meta::meta('type::watch', 'meta::functor::editable \'watch\', prefix => \'watch::\', inherit => 1, extension => \'.pl\', default => \'cat\';'); meta::alias('cloc', 'loc js::core/caterwaul\\.'); meta::alias('e', 'edit sdoc::js::core/caterwaul'); meta::alias('ec', 'edit sdoc::js::core/caterwaul.compiler'); meta::alias('eg', 'edit sdoc::js::core/caterwaul.global'); meta::alias('ei', 'edit sdoc::js::core/caterwaul.init'); meta::alias('em', 'edit sdoc::js::core/caterwaul.macroexpander'); meta::alias('emo', 'edit sdoc::js::core/caterwaul.module'); meta::alias('eo', 'edit sdoc::js::core/caterwaul.composition'); meta::alias('ep', 'edit sdoc::js::core/caterwaul.parser'); meta::alias('epd', 'edit sdoc::js::core/caterwaul.parser-data'); meta::alias('eps', 'edit sdoc::js::core/caterwaul.precompile-support'); meta::alias('er', 'edit sdoc::js::core/caterwaul.precompile'); meta::alias('et', 'edit sdoc::js::core/caterwaul.tree'); meta::alias('eu', 'edit sdoc::js::core/caterwaul.utilities'); meta::alias('lsc', 'ls -a sdoc::js::core/'); meta::alias('lse', 'ls -a sdoc::js::extensions/'); meta::alias('lst', 'ls -a sdoc::js::.*test/.*'); meta::alias('me', 'macroexpand'); meta::alias('meopt', 'macroexpander-optimization'); meta::alias('rloc', 'loc js::(?!.*test|.*format|unit|minify|macroexpand|precompile|modules|core/debug)'); meta::bootstrap('html', <<'__'); __ meta::bootstrap('initialization', <<'__'); #!/usr/bin/perl # Run perldoc on this file for documentation. # For the benefit of HTML viewers: #
$|++; my %data; my %transient; my %externalized_functions; my %datatypes; my %locations; # Maps eval-numbers to attribute names sub meta::define_form { my ($namespace, $delegate) = @_; $datatypes{$namespace} = $delegate; *{"meta::${namespace}::implementation"} = $delegate; *{"meta::$namespace"} = sub { my ($name, $value, %options) = @_; chomp $value; $data{"${namespace}::$name"} = $value unless $options{no_binding}; $delegate->($name, $value) unless $options{no_delegate}}} sub meta::eval_in { my ($what, $where) = @_; # Obtain next eval-number and alias it to the designated location @locations{eval('__FILE__') =~ /\(eval (\d+)\)/} = ($where); my $result = eval $what; $@ =~ s/\(eval \d+\)/$where/ if $@; warn $@ if $@; $result} meta::define_form 'meta', sub { my ($name, $value) = @_; meta::eval_in($value, "meta::$name")}; __ meta::bootstrap('perldoc', <<'__'); =head1 Self-modifying Perl script =head2 Original implementation by Spencer Tipping L The prototype for this script is licensed under the terms of the MIT source code license. However, this script in particular may be under different licensing terms. To find out how this script is licensed, please contact whoever sent it to you. Alternatively, you may run it with the 'license' argument if they have specified a license that way. You should not edit this file directly. For information about how it was constructed, go to L. For quick usage guidelines, run this script with the 'usage' argument. =cut __ meta::cache('parent-identification', <<'__'); /home/spencertipping/bin/configuration aa772900bb5b925cb84346bd72a4249d /home/spencertipping/bin/node-base da62d84a9e81832f089520c172982c1a /home/spencertipping/bin/object 99aeabc9ec7fe80b1b39f5e53dc7e49e /home/spencertipping/bin/repository 05bc3036c343fdb8aec5b0be12a9b19e /home/spencertipping/conjectures/perl-objects/sdoc a1e8480e579614c01dabeecf0f963bcc notes a9e5975593ed5d90d943ad98405c71e5 object 99aeabc9ec7fe80b1b39f5e53dc7e49e preprocessor 70dae4b46eb4e06798ec6f38d17d4c7b __ meta::configuration('dependencies', <<'__'); # Named dependencies: #caterwaul.all.js: http://spencertipping.com/caterwaul/caterwaul.all.min.js #montenegro.server.js: http://spencertipping.com/montenegro/montenegro.server.js __ meta::data('author', 'Spencer Tipping'); meta::data('default-action', 'shell'); meta::data('edit::no-save', '1'); meta::data('libraries', <<'__'); # URLs of libraries to be downloaded into the lib/ directory. http://spencertipping.com/caterwaul/caterwaul.all.js http://spencertipping.com/montenegro/montenegro.server.js __ meta::data('license', <<'__'); MIT License Copyright (c) 2010 Spencer Tipping Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. __ meta::data('main', 'server.js'); meta::data('name', 'node-base'); meta::data('permanent-identity', '97d938428ee6c4d2a505f6f35bad0906'); meta::data('quiet', '1'); meta::data('watching', '1'); meta::function('alias', <<'__'); my ($name, @stuff) = @_; return ls('-a', '^alias::') unless defined $name; @stuff ? around_hook('alias', @_, sub {associate("alias::$name", join ' ', @stuff)}) : retrieve("alias::$name") || "Undefined alias $name"; __ meta::function('b', <<'__'); my @extensions = select_keys('--criteria' => "sdoc::js::extensions/(.*/)?$_[0]"); edit($extensions[0]); __ meta::function('cat', 'join "\\n", retrieve(@_);'); meta::function('cc', <<'__'); # Stashes a quick one-line continuation. (Used to remind me what I was doing.) @_ ? associate('data::current-continuation', hook('set-cc', join(' ', @_))) : retrieve('data::current-continuation'); __ meta::function('ccc', 'rm(\'data::current-continuation\');'); meta::function('child', <<'__'); around_hook('child', @_, sub { my ($child_name) = @_; clone($child_name); enable(); qx($child_name update-from $0 -n); disable()}); __ meta::function('cloc', 'loc(\'modules/caterwaul\\.(?!format)[^/]+$\', \'caterwaul$\')'); meta::function('clone', <<'__'); for (grep length, @_) { around_hook('clone', $_, sub { hypothetically(sub { rm('data::permanent-identity'); file::write($_, serialize(), noclobber => 1); chmod(0700, $_)})})} __ meta::function('cp', <<'__'); my $from = shift @_; my $value = retrieve($from); associate($_, $value) for @_; __ meta::function('create', <<'__'); my ($name, $value) = @_; around_hook('create', $name, $value, sub { return edit($name) if exists $data{$name}; associate($name, defined $value ? $value : ''); edit($name) unless defined $value}); __ meta::function('ct', 'create("sdoc::js::test/$_[0]");'); meta::function('current-state', <<'__'); my @valid_keys = grep ! /^state::/, sort keys %data; my @ordered_keys = (grep(/^meta::/, @valid_keys), grep(! /^meta::/, @valid_keys)); join "\n", map serialize_single($_), @ordered_keys; __ meta::function('disable', 'hook(\'disable\', chmod_self(sub {$_[0] & 0666}));'); meta::function('dupdate', <<'__'); # Update the repository based on the dependencies it lists. use LWP::Simple (); rm(grep /^cached_dependency::/, keys %data); my %dependencies = dependencies(); for (keys %dependencies) { terminal::info("Retrieving $dependencies{$_} as $_"); associate("cached_dependency::$_", LWP::Simple::get($dependencies{$_}))} __ meta::function('edit', <<'__'); my ($name, %options) = @_; my $extension = extension_for($name); die "$name is virtual or does not exist" unless exists $data{$name}; die "$name is inherited; use 'edit $name -f' to edit anyway" unless is($name, '-u') || is($name, '-d') || exists $options{'-f'}; around_hook('edit', @_, sub { associate($name, invoke_editor_on($data{$name} || "# Attribute $name", %options, attribute => $name, extension => $extension), execute => $name !~ /^bootstrap::/)}); save() unless $data{'data::edit::no-save'}; ''; __ meta::function('enable', 'hook(\'enable\', chmod_self(sub {$_[0] | $_[0] >> 2}));'); meta::function('export', <<'__'); # Exports data into a text file. # export attr1 attr2 attr3 ... 
file.txt my $name = pop @_; @_ or die 'Expected filename'; file::write($name, join "\n", retrieve(@_)); __ meta::function('extern', '&{$_[0]}(retrieve(@_[1 .. $#_]));'); meta::function('gjs', <<'__'); # Runs GJS on a collection of source files and arguments. The format is: # gjs([@source_strings], @process_args); my ($sources, @args) = @_; with_exported(@$sources, sub { hook('before-gjs', $_[0], @args); sh('gjs', $_[0], @args); hook('after-gjs', $_[0], @args); }); __ meta::function('grep', <<'__'); # Looks through attributes for a pattern. Usage is grep pattern [options], where # [options] is the format as provided to select_keys. my ($pattern, @args) = @_; my ($options, @criteria) = separate_options(@args); my @attributes = select_keys(%$options, '--criteria' => join('|', @criteria)); $pattern = qr/$pattern/; my @m_attributes; my @m_line_numbers; my @m_lines; for my $k (@attributes) { next unless length $k; my @lines = split /\n/, retrieve($k); for (0 .. $#lines) { next unless $lines[$_] =~ $pattern; push @m_attributes, $k; push @m_line_numbers, $_ + 1; push @m_lines, '' . ($lines[$_] // '')}} unless ($$options{'-C'}) { s/($pattern)/\033[1;31m\1\033[0;0m/g for @m_lines; s/^/\033[1;34m/o for @m_attributes; s/^/\033[1;32m/o && s/$/\033[0;0m/o for @m_line_numbers} table_display([@m_attributes], [@m_line_numbers], [@m_lines]); __ meta::function('hash', 'fast_hash(@_);'); meta::function('hook', <<'__'); my ($hook, @args) = @_; $transient{active_hooks}{$hook} = 1; dangerous('', sub {&$_(@args)}) for grep /^hook::${hook}::/, sort keys %data; @args; __ meta::function('hooks', 'join "\\n", sort keys %{$transient{active_hooks}};'); meta::function('identity', 'retrieve(\'data::permanent-identity\') || associate(\'data::permanent-identity\', fast_hash(rand() . name() . serialize()));'); meta::function('import', <<'__'); my $name = pop @_; associate($name, @_ ? join('', map(file::read($_), @_)) : join('', )); __ meta::function('import-bundle', <<'__'); eval join '', ; die $@ if $@; __ meta::function('initial-state', '$transient{initial};'); meta::function('is', <<'__'); my ($attribute, @criteria) = @_; my ($options, @stuff) = separate_options(@criteria); attribute_is($attribute, %$options); __ meta::function('line', <<'__'); # Prints a line with some context. This is useful when a test fails. my ($line, $part) = @_; my @lines = split /\n/, $part eq '-a' ? retrieve('pp::js::caterwaul.all') : retrieve('pp::js::caterwaul'); for ($line - 5 .. $line + 5) { print "\033[1;32m" if $_ == $line; printf "%04d: %s\n", $_, $lines[$_ - 1]; print "\033[0;0m" if $_ == $line; } __ meta::function('load-state', <<'__'); around_hook('load-state', @_, sub { my ($state_name) = @_; my $state = retrieve("state::$state_name"); terminal::state('saving current state into _...'); &{'save-state'}('_'); delete $data{$_} for grep ! 
/^state::/, keys %data; %externalized_functions = (); terminal::state("restoring state $state_name..."); meta::eval_in($state, "state::$state_name"); terminal::error(hook('load-state-failed', $@)) if $@; reload(); verify()}); __ meta::function('load-time', <<'__'); sub load { my ($runtime, $file) = @_; terminal::info("$runtime: loading $file"); bench(sub {sh("$runtime $file > /dev/null 2>&1")})} load 'gjs', 'build/caterwaul.core.js'; load 'gjs', 'build/caterwaul.js'; load 'gjs', 'build/caterwaul.pre.js'; load 'node', 'build/caterwaul.core.js'; load 'node', 'build/caterwaul.js'; load 'node', 'build/caterwaul.pre.js'; __ meta::function('loc', <<'__'); # Counts SLOC, whitespace, and total LOC in the codebase. hook('before-loc', @_); my $criteria = join '|', @_; my @attributes = grep s/^sdoc:://, select_keys('--criteria' => $criteria); my $tcomments = 0; my $twhitespace = 0; my $tsource = 0; my $line = sub { my ($source, $whitespace, $comments, $name) = @_; $source ||= 1; # Prevent divide-by-zero errors sprintf "%5d total, %4d SLOC, %5d[%4d%%] whitespace, %5d[%4d%%] comment [%s]", $source + $whitespace + $comments, $source, $whitespace, int($whitespace / $source * 100), $comments, int($comments / $source * 100), $name}; my $loc = sub { my @lines = map split(/\n/, $_), retrieve($_[0]); $tcomments += (my $comments = grep /^\s*\/\// || /^\s*#/, @lines); $twhitespace += (my $whitespace = grep /^\s*$/, @lines); $tsource += (my $source = @lines - $comments - $whitespace); &$line($source, $whitespace, $comments, $_[0])}; terminal::info(map &$loc($_), @attributes); terminal::info(&$line($tsource, $twhitespace, $tcomments, 'total')); hook('after-loc', @_); __ meta::function('lock', 'hook(\'lock\', chmod_self(sub {$_[0] & 0555}));'); meta::function('ls', <<'__'); my ($options, @criteria) = separate_options(@_); my ($external, $shadows, $sizes, $flags, $long, $hashes, $parent_hashes) = @$options{qw(-e -s -z -f -l -h -p)}; $sizes = $flags = $hashes = $parent_hashes = 1 if $long; return table_display([grep ! exists $data{$externalized_functions{$_}}, sort keys %externalized_functions]) if $shadows; my $criteria = join('|', @criteria); my @definitions = select_keys('--criteria' => $criteria, %$options); my %inverses = map {$externalized_functions{$_} => $_} keys %externalized_functions; my @externals = map $inverses{$_}, grep length, @definitions; my @internals = grep length $inverses{$_}, @definitions; my @sizes = map sprintf('%6d %6d', length(serialize_single($_)), length(retrieve($_))), @{$external ? \@internals : \@definitions} if $sizes; my @flags = map {my $k = $_; join '', map(is($k, "-$_") ? $_ : '-', qw(d i m u))} @definitions if $flags; my @hashes = map fast_hash(retrieve($_)), @definitions if $hashes; my %inherited = parent_attributes(grep /^parent::/o, keys %data) if $parent_hashes; my @parent_hashes = map $inherited{$_} || '-', @definitions if $parent_hashes; join "\n", map strip($_), split /\n/, table_display($external ? [grep length, @externals] : [@definitions], $sizes ? ([@sizes]) : (), $flags ? ([@flags]) : (), $hashes ? ([@hashes]) : (), $parent_hashes ? 
([@parent_hashes]) : ()); __ meta::function('ls-a', 'ls(\'-ad\', @_);'); meta::function('m', <<'__'); my @modules = select_keys('--criteria' => "sdoc::js::modules/caterwaul.$_[0]"); edit($modules[0]); __ meta::function('mv', <<'__'); my ($from, $to) = @_; die "'$from' does not exist" unless exists $data{$from}; associate($to, retrieve($from)); rm($from); __ meta::function('name', <<'__'); my $name = $0; $name =~ s/^.*\///; $name; __ meta::function('node', <<'__'); # Runs node on a collection of source files and arguments. The format is: # node([@source_strings], @process_args); my ($sources, @args) = @_; with_exported(@$sources, sub { hook('before-node', $_[0], @args); sh('node', $_[0], @args); hook('after-node', $_[0], @args); }); __ meta::function('node-custom', <<'__'); # Runs node on a collection of source files and arguments. The format is: # &{'node-custom'}([@source_strings], [@node_arguments], @process_args); my ($sources, $node_args, @args) = @_; with_exported(@$sources, sub { hook('before-node-custom', @$node_args, $_[0], @args); sh('node', @$node_args, $_[0], @args); hook('after-node-custom', @$node_args, $_[0], @args); }); __ meta::function('note', <<'__'); # Creates a note with a given name, useful for jotting things down. create("note::$_[0]"); __ meta::function('notes', 'ls(\'-a\', \'^note::\');'); meta::function('parents', 'join "\\n", grep s/^parent:://o, sort keys %data;'); meta::function('perl', <<'__'); my $result = eval(join ' ', @_); $@ ? terminal::error($@) : $result; __ meta::function('precompile', <<'__'); terminal::info("precompiling $_[0]"); node([qw|pp::js::caterwaul.all js::precompile|], @_); __ meta::function('preprocess', <<'__'); # Implements a simple preprocessing language. # Syntax follows two forms. One is the 'line form', which gives you a way to specify arguments inline # but not spanning multiple lines. The other is 'block form', which gives you access to both one-line # arguments and a block of lines. The line parameters are passed in verbatim, and the block is # indentation-adjusted and then passed in as a second parameter. (Indentation is adjusted to align # with the name of the command.) # # Here are the forms: # # - line arguments to function # # - block line arguments << eof # block contents # block contents # ... # - eof my ($string, %options) = @_; my $expansions = 0; my $old_string = ''; my $limit = $options{expansion_limit} || 100; my @pieces = (); sub adjust_spaces { my ($spaces, $string) = @_; $string =~ s/^$spaces //mg; chomp $string; $string; } while ($old_string ne $string and $expansions++ < $limit) { $old_string = $string; while ((my @pieces = split /(^(\h*)-\h \S+ \h* \V* <<\h*(\w+)$ \n .*? ^\2-\h\3$)/xms, $string) > 1 and $expansions++ < $limit) { $pieces[1 + ($_ << 2)] =~ /^ (\h*)-\h(\S+)\h*(\V*)<<\h*(\w+)$ \n(.*?) ^\1-\h\4 $/xms && $externalized_functions{"template::$2"} and $pieces[1 + ($_ << 2)] = &{"template::$2"}($3, adjust_spaces($1, $5)) for 0 .. $#pieces / 4; @pieces[2 + ($_ << 2), 3 + ($_ << 2)] = '' for 0 .. $#pieces / 4; $string = join '', @pieces; } if ((my @pieces = split /^(\h*-\h \S+ \h* .*)$/xom, $string) > 1) { $pieces[1 + ($_ << 1)] =~ /^ \h*-\h(\S+)\h*(.*)$/xom && $externalized_functions{"template::$1"} and $pieces[1 + ($_ << 1)] = &{"template::$1"}($2) for 0 .. $#pieces >> 1; $string = join '', @pieces; } } $string; __ meta::function('reload', 'around_hook(\'reload\', sub {execute($_) for grep ! 
/^bootstrap::/, keys %data});'); meta::function('render', <<'__'); hook('before-render'); hook('after-render'); __ meta::function('repl', 'node([\'pp::js::caterwaul.all\', \'id::require("repl").start("caterwaul> ").context.caterwaul = caterwaul\']);'); meta::function('repl-configuration', <<'__'); node(['id::var unique = function () {return 1};', 'pp::js::core/caterwaul.utilities', 'pp::js::core/caterwaul.module', 'pp::js::core/caterwaul.configuration', 'id::require("repl").start("caterwaul> ").context.module = module']); __ meta::function('repl-core', 'node([\'pp::js::caterwaul\', \'id::require("repl").start("caterwaul> ").context.caterwaul = caterwaul\']);'); meta::function('repl-module', 'node([\'id::var unique = function () {return 1};\', \'pp::js::core/caterwaul.utilities\', \'pp::js::core/caterwaul.module\', \'id::require("repl").start("caterwaul> ").context.module = module\']);'); meta::function('replc', 'node([\'pp::js::core/caterwaul\', \'id::require("repl").start("caterwaul core> ").context.caterwaul = caterwaul\']);'); meta::function('replp', 'node([\'pp::js::modules/caterwaul.all.pre\', \'id::require("repl").start("caterwaul> ").context.caterwaul = caterwaul\']);'); meta::function('rm', <<'__'); around_hook('rm', @_, sub { exists $data{$_} or terminal::warning("$_ does not exist") for @_; delete @data{@_}}); __ meta::function('run-forever', <<'__'); # Runs your application indefinitely, restarting each time it fails. # There's a one-second delay between restarts to prevent a tight loop. # Takes one argument, which is the function to run forever. my ($f, @args) = @_; hook('bin/before-run-forever'); &$f(@args) while sleep 0.1 && ! -f 'stop'; hook('bin/after-run-forever'); __ meta::function('save', 'around_hook(\'save\', sub {dangerous(\'\', sub {file::write($0, serialize()); $transient{initial} = state()}) if verify()});'); meta::function('save-state', <<'__'); # Creates a named copy of the current state and stores it. my ($state_name) = @_; around_hook('save-state', $state_name, sub { associate("state::$state_name", &{'current-state'}(), execute => 1)}); __ meta::function('sdoc', <<'__'); # Applies SDoc processing to a file or attribute. Takes the file or attribute # name as the first argument and returns the processed text. my %comments_for_extension = qw|c /*,*/ cpp // cc // h // java // py # rb # pl # pm # ml (*,*) js // hs -- sh # lisp ;;; lsp ;;; s ; scm ;;; sc ;;; as // html mli (*,*) cs // vim " elisp ; bas ' ada -- asm ; awk # bc # boo # tex % fss (*,*) erl % scala // hx // io // j NB. lua -- n // m % php // sql -- pov // pro % r # self "," tcl # texi @c tk # csh # vala // vbs ' v /*,*/ vhdl -- ss ;;; haml -# sass /*,*/ scss /*,*/ css /*,*/ fig /|; # No extension suggests a shebang line, which generally requires # to denote a comment. $comments_for_extension{''} = '#'; my $generated_string = 'Generated by SDoc'; sub is_code {map /^\s*[^A-Z\|\s]/o, @_} sub is_blank {map /^\n/o, @_} sub comment {my ($text, $s, $e) = @_; join "\n", map("$s $_$e", split /\n/, $text)} sub paragraphs {map split(/(\n{2,})/, $_), @_} my ($filename) = @_; # Two possibilities here. One is that the filename is an attribute, in which case # we want to look up the extension in the transients table. The other is that # it's a real filename. my ($extension) = $filename =~ /\.sdoc$/io ? 
$filename =~ /\.(\w+)\.sdoc$/igo : $filename =~ /\.(\w+)$/igo; my ($other_extension) = extension_for(attribute($filename)); $other_extension =~ s/^\.//o; my ($start, $end) = split /,/o, $comments_for_extension{lc($other_extension || $extension)}; join '', map(is_code($_) || is_blank($_) ? ($_ =~ /^\s*c\n(.*)$/so ? $1 : $_) : comment($_, $start, $end), paragraphs retrieve($filename)), "\n$start $generated_string $end\n"; __ meta::function('sdoc-html', <<'__'); # Converts SDoc to logically-structured HTML. Sections end up being nested, # and code sections and examples are marked as such. For instance, here is some # sample output: #
# <div class='section'>
#   <h1>Foo</h1>
#   <p>This is a paragraph...</p>
#   <p>This is another paragraph...</p>
#   <pre class='code'>int main () {return 0;}</pre>
#   <pre class='quoted'>int main () {return 0} // Won't compile</pre>
# </div>
# <div class='section'>
#   <h1>Bar</h1>
#   ...
# </div>
#
# It is generally good about escaping things that would interfere with HTML,
# but within text paragraphs it lets you write literal HTML. The heuristic is
# that known tags that are reasonably well-formed are allowed, but unknown ones
# are escaped.
my ($attribute) = @_;
my @paragraphs = split /\n(?:\s*\n)+/, retrieve($attribute);
my $known_tags = join '|', qw[html head body meta script style link title div a span input button textarea option select form label iframe blockquote code caption table tbody tr td th thead tfoot img h1 h2 h3 h4 h5 h6 li ol ul noscript p pre samp sub sup var canvas audio video];
my $section_level = 0;
my @markup;
my $indent = sub {' ' x ($_[0] || $section_level)};
my $unindent = sub {my $spaces = ' ' x ($section_level - 1); s/^$spaces//gm};
my $escape_all = sub {s/&/&amp;/g; s/</&lt;/g; s/>/&gt;/g};
my $escape_some = sub {s/&/&amp;/g; s/<(?!\/|($known_tags)[^>]*>.*<\/\1>)/&lt;/g};
my $code = sub {&$escape_all(); &$unindent(); push @markup, &$indent() . "<pre class='code'>$_</pre>"};
my $quoted = sub {&$escape_all(); &$unindent(); s/^\|\s?//; push @markup, &$indent() . "<pre class='quoted'>$_</pre>"};
my $paragraph = sub {&$escape_some(); push @markup, &$indent() . "<p>$_</p>"};
my $section = sub {my $h = $_[0] > 6 ? 6 : $_[0]; push @markup, &$indent($_[0] - 1) . "<div class='section'>", &$indent($_[0]) . "<h$h>$2</h$h>"};
my $close_section = sub {push @markup, &$indent($_[0]) . "</div>
"}; my $title = sub { my $indentation = (length($1) >> 1) + 1; &$close_section($section_level) while $section_level-- >= $indentation; &$section($indentation); $section_level = $indentation; }; for (@paragraphs) { &$code(), next unless /^\h*[A-Z|]/; &$quoted(), next if /^\h*\|/; &$title(), s/^.*\n// if /^(\s*)(\S.*)\.\n([^\n]+)/ and length("$1$2") - 10 < length($3); &$paragraph(); } &$close_section($section_level) while $section_level--; join "\n", @markup; __ meta::function('sdocp', <<'__'); # Renders an attribute as SDocP. This logic was taken directly from the sdoc script. my $attribute = retrieve($_[0]); sub escape {my @results = map {s/\\/\\\\/go; s/\n/\\n/go; s/'/\\'/go; $_} @_; wantarray ? @results : $results[0]} "sdocp('" . escape($_[0]) . "', '" . escape($attribute) . "');"; __ meta::function('serialize', <<'__'); my ($options, @criteria) = separate_options(@_); my $partial = $$options{'-p'}; my $criteria = join '|', @criteria; my @attributes = map serialize_single($_), select_keys(%$options, '-m' => 1, '--criteria' => $criteria), select_keys(%$options, '-M' => 1, '--criteria' => $criteria); my @final_array = @{$partial ? \@attributes : [retrieve('bootstrap::initialization'), @attributes, 'internal::main();', '', '__END__']}; join "\n", @final_array; __ meta::function('serialize_single', <<'__'); # Serializes a single attribute and optimizes for content. my $name = $_[0] || $_; my $contents = $data{$name}; my $meta_function = 'meta::' . namespace($name); my $invocation = attribute($name); my $escaped = $contents; $escaped =~ s/\\/\\\\/go; $escaped =~ s/'/\\'/go; return "$meta_function('$invocation', '$escaped');" unless $escaped =~ /\v/; my $delimiter = '__' . fast_hash($contents); my $chars = 2; ++$chars until $chars >= length($delimiter) || index("\n$contents", "\n" . substr($delimiter, 0, $chars)) == -1; $delimiter = substr($delimiter, 0, $chars); "$meta_function('$invocation', <<'$delimiter');\n$contents\n$delimiter"; __ meta::function('sh', 'system(@_);'); meta::function('shell', <<'__'); terminal::cc(retrieve('data::current-continuation')) if length $data{'data::current-continuation'}; shell::repl(); __ meta::function('size', <<'__'); my $size = 0; $size += length $data{$_} for keys %data; sprintf "% 7d % 7d", length(serialize()), $size; __ meta::function('snapshot', <<'__'); my ($name) = @_; file::write(my $finalname = temporary_name($name), serialize(), noclobber => 1); chmod 0700, $finalname; hook('snapshot', $finalname); __ meta::function('state', <<'__'); my @keys = sort keys %data; my $hash = fast_hash(fast_hash(scalar @keys) . join '|', @keys); $hash = fast_hash("$data{$_}|$hash") for @keys; $hash; __ meta::function('t', <<'__'); my @tests = select_keys('--criteria' => "sdoc::js::test/(.*\/)?$_[0]"); edit($tests[0]); __ meta::function('test', <<'__'); # Unit tests for caterwaul. # In the past this was done differently. It would run node once for each test, which involved a lot of startup overhead and took a while. Now the tests are all run in a single file, so node # gets run just once. This should be significantly faster. my ($options, @args) = separate_options(@_); my @tests = grep s/^sdoc::js::test\//pp::js::test\//, sort keys %data; render(); unless ($$options{'-p'}) { terminal::info('running ' . 
join(' ', @tests)); node([qw(caterwaul.js extensions/std.js extensions/dev.js), @tests]) unless $$options{'-N'}; gjs([qw(caterwaul.js extensions/std.js extensions/dev.js), @tests]) if $$options{'-a'}; } terminal::info('with precompiled caterwaul'); node([qw(caterwaul.js build/extensions/std.pre.js build/extensions/dev.pre.js), @tests]) unless $$options{'-N'}; gjs([qw(caterwaul.js build/extensions/std.pre.js build/extensions/dev.pre.js), @tests]) if $$options{'-a'}; __ meta::function('testq-pre', <<'__'); my ($options, @stuff) = separate_options(@_); my @tests = grep $_ !~ /^js::test\/lib/, grep s/^sdoc:://, select_keys('--criteria' => "^sdoc::js::.*\\.?test"); terminal::warning('running tests without rendering'); node([qw(caterwaul.all.pre.js test/lib/unit.js), @tests]) unless $$options{'-N'}; gjs([qw(caterwaul.all.pre.js test/lib/unit.js), @tests]) if $$options{'-a'}; __ meta::function('touch', 'associate($_, \'\') for @_;'); meta::function('unlock', 'hook(\'unlock\', chmod_self(sub {$_[0] | 0200}));'); meta::function('update', '&{\'update-from\'}(@_, grep s/^parent:://o, sort keys %data);'); meta::function('update-from', <<'__'); # Upgrade all attributes that aren't customized. Customization is defined when the data type is created, # and we determine it here by checking for $transient{inherit}{$type}. # Note that this assumes you trust the remote script. If you don't, then you shouldn't update from it. around_hook('update-from-invocation', separate_options(@_), sub { my ($options, @targets) = @_; my %parent_id_cache = cache('parent-identification'); my %already_seen; @targets or return; my @known_targets = grep s/^parent:://, parent_ordering(map "parent::$_", grep exists $data{"parent::$_"}, @targets); my @unknown_targets = grep ! exists $data{"parent::$_"}, @targets; @targets = (@known_targets, @unknown_targets); my $save_state = ! ($$options{'-n'} || $$options{'--no-save'}); my $no_parents = $$options{'-P'} || $$options{'--no-parent'} || $$options{'--no-parents'}; my $force = $$options{'-f'} || $$options{'--force'}; my $clobber_divergent = $$options{'-D'} || $$options{'--clobber-divergent'}; &{'save-state'}('before-update') if $save_state; for my $target (@targets) { dangerous("updating from $target", sub { around_hook('update-from', $target, sub { my $identity = $parent_id_cache{$target} ||= join '', qx($target identity); next if $already_seen{$identity}; $already_seen{$identity} = 1; my $attributes = join '', qx($target ls -ahiu); my %divergent; die "skipping unreachable $target" unless $attributes; for my $to_rm (split /\n/, retrieve("parent::$target")) { my ($name, $hash) = split(/\s+/, $to_rm); next unless exists $data{$name}; my $local_hash = fast_hash(retrieve($name)); if ($clobber_divergent or $hash eq $local_hash or ! defined $hash) {rm($name)} else {terminal::info("preserving local version of divergent attribute $name (use update -D to clobber it)"); $divergent{$name} = retrieve($name)}} associate("parent::$target", $attributes) unless $no_parents; dangerous('', sub {eval qx($target serialize -ipmu)}); dangerous('', sub {eval qx($target serialize -ipMu)}); map associate($_, $divergent{$_}), keys %divergent unless $clobber_divergent; reload()})})} cache('parent-identification', %parent_id_cache); if (verify()) {hook('update-from-succeeded', $options, @targets); terminal::info("Successfully updated. 
Run 'load-state before-update' to undo this change.") if $save_state} elsif ($force) {hook('update-from-failed', $options, @targets); terminal::warning('Failed to verify: at this point your object will not save properly, though backup copies will be created.', 'Run "load-state before-update" to undo the update and return to a working state.') if $save_state} else {hook('update-from-failed', $options, @targets); terminal::error('Verification failed after the upgrade was complete.'); terminal::info("$0 has been reverted to its pre-upgrade state.", "If you want to upgrade and keep the failure state, then run 'update-from $target --force'.") if $save_state; return &{'load-state'}('before-update') if $save_state}}); __ meta::function('usage', '"Usage: $0 action [arguments]\\nUnique actions (run \'$0 ls\' to see all actions):" . ls(\'-u\');'); meta::function('verify', <<'__'); file::write(my $other = $transient{temporary_filename} = temporary_name(), my $serialized_data = serialize()); chomp(my $observed = join '', qx|perl '$other' state|); unlink $other if my $result = $observed eq (my $state = state()); terminal::error("Verification failed; expected $state but got $observed from $other") unless $result; hook('after-verify', $result, observed => $observed, expected => $state); $result; __ meta::hook('after-render::base-files', <<'__'); file::write(attribute($_) . '.js', retrieve("pp::$_"), mkpath => 1) for grep s/^sdoc::js::/js::/ && /^js::(extensions|core)\//, sort keys %data; file::write('build/caterwaul.js', retrieve('pp::js::caterwaul'), mkpath => 1); file::write('build/caterwaul.node.js', retrieve('pp::js::caterwaul.node'), mkpath => 1); my @extensions = qw/std dev ui/; precompile("extensions/$_.js") for @extensions; mkdir 'build/extensions'; rename "extensions/$_.pre.js", "build/extensions/$_.pre.js" for @extensions; __ meta::hook('after-render::notes', <<'__'); # Render each note into an SDoc file. file::write('doc/notes/' . attribute($_) . '.sdoc', retrieve($_)) for grep /^note::/, keys %data; __ meta::hook('after-render::tests', 'file::write(\'build/\' . attribute($_) . \'.js\', retrieve("pp::$_"), mkpath => 1) for grep s/^sdoc::js::/js::/ && /^js::test\\//, sort keys %data;'); meta::hook('after-render::tools', 'file::write(\'build/tools/precompile.js\', retrieve(\'pp::js::tools/precompile\'), mkpath => 1);'); meta::internal_function('around_hook', <<'__'); # around_hook('hookname', @args, sub { # stuff; # }); # Invokes 'before-hookname' on @args before the sub runs, invokes the # sub on @args, then invokes 'after-hookname' on @args afterwards. # The after-hook is not invoked if the sub calls 'die' or otherwise # unwinds the stack. my $hook = shift @_; my $f = pop @_; hook("before-$hook", @_); my $result = &$f(@_); hook("after-$hook", @_); $result; __ meta::internal_function('associate', <<'__'); my ($name, $value, %options) = @_; die "Namespace does not exist" unless exists $datatypes{namespace($name)}; $data{$name} = $value; execute($name) if $options{'execute'}; $value; __ meta::internal_function('attribute', <<'__'); my ($name) = @_; $name =~ s/^[^:]*:://; $name; __ meta::internal_function('attribute_is', <<'__'); my ($a, %options) = @_; my %inherited = parent_attributes(grep /^parent::/o, sort keys %data) if grep exists $options{$_}, qw/-u -U -d -D/; my $criteria = $options{'--criteria'} || $options{'--namespace'} && "^$options{'--namespace'}::" || '.'; my %tests = ('-u' => sub {! 
$inherited{$a}}, '-d' => sub {$inherited{$a} && fast_hash(retrieve($a)) ne $inherited{$a}}, '-i' => sub {$transient{inherit}{namespace($a)}}, '-s' => sub {$a =~ /^state::/o}, '-m' => sub {$a =~ /^meta::/o}); return 0 unless scalar keys %tests == scalar grep ! exists $options{$_} || &{$tests{$_}}(), keys %tests; return 0 unless scalar keys %tests == scalar grep ! exists $options{uc $_} || ! &{$tests{$_}}(), keys %tests; $a =~ /$criteria/ __ meta::internal_function('bench', <<'__'); use Time::HiRes qw/gettimeofday tv_interval/; my ($f) = @_; my $start_time = [gettimeofday]; &$f(); my $duration = tv_interval($start_time); terminal::info("$duration seconds elapsed"); __ meta::internal_function('cache', <<'__'); my ($name, %pairs) = @_; if (%pairs) {associate("cache::$name", join "\n", map {$pairs{$_} =~ s/\n//g; "$_ $pairs{$_}"} sort keys %pairs)} else {map split(/\s/, $_, 2), split /\n/, retrieve("cache::$name")} __ meta::internal_function('chmod_self', <<'__'); my ($mode_function) = @_; my (undef, undef, $mode) = stat $0; chmod &$mode_function($mode), $0; __ meta::internal_function('dangerous', <<'__'); # Wraps a computation that may produce an error. my ($message, $computation) = @_; terminal::info($message) if $message; my @result = eval {&$computation()}; terminal::warning(translate_backtrace($@)), return undef if $@; wantarray ? @result : $result[0]; __ meta::internal_function('debug_trace', <<'__'); terminal::debug(join ', ', @_); wantarray ? @_ : $_[0]; __ meta::internal_function('dep', <<'__'); # A variadic function to prepend cached_dependency:: onto things. # Used like this: dep(qw/caterwaul.all.js montenegro.server.js/) map "cached_dependency::$_", @_; __ meta::internal_function('execute', <<'__'); my ($name, %options) = @_; my $namespace = namespace($name); eval {&{"meta::$namespace"}(attribute($name), retrieve($name))}; warn $@ if $@ && $options{'carp'}; __ meta::internal_function('exported', <<'__'); # Allocates a temporary file containing the concatenation of attributes you specify, # and returns the filename. The filename will be safe for deletion anytime. my $filename = temporary_name(); file::write($filename, cat(@_)); $filename; __ meta::internal_function('extension_for', <<'__'); my $extension = $transient{extension}{namespace($_[0])}; $extension = &$extension($_[0]) if ref $extension eq 'CODE'; $extension || ''; __ meta::internal_function('fast_hash', <<'__'); my ($data) = @_; my $piece_size = length($data) >> 3; my @pieces = (substr($data, $piece_size * 8) . length($data), map(substr($data, $piece_size * $_, $piece_size), 0 .. 7)); my @hashes = (fnv_hash($pieces[0])); push @hashes, fnv_hash($pieces[$_ + 1] . $hashes[$_]) for 0 .. 7; $hashes[$_] ^= $hashes[$_ + 4] >> 16 | ($hashes[$_ + 4] & 0xffff) << 16 for 0 .. 3; $hashes[0] ^= $hashes[8]; sprintf '%08x' x 4, @hashes[0 .. 3]; __ meta::internal_function('file::read', <<'__'); my $name = shift; open my($handle), "<", $name; my $result = join "", <$handle>; close $handle; $result; __ meta::internal_function('file::write', <<'__'); use File::Path 'mkpath'; use File::Basename 'dirname'; my ($name, $contents, %options) = @_; die "Choosing not to overwrite file $name" if $options{noclobber} and -f $name; mkpath(dirname($name)) if $options{mkpath}; open my($handle), $options{append} ? '>>' : '>', $name or die "Can't open $name for writing"; print $handle $contents; close $handle; __ meta::internal_function('fnv_hash', <<'__'); # A rough approximation to the Fowler-No Voll hash. 
It's been 32-bit vectorized # for efficiency, which may compromise its effectiveness for short strings. my ($data) = @_; my ($fnv_prime, $fnv_offset) = (16777619, 2166136261); my $hash = $fnv_offset; my $modulus = 2 ** 32; $hash = ($hash ^ ($_ & 0xffff) ^ ($_ >> 16)) * $fnv_prime % $modulus for unpack 'L*', $data . substr($data, -4) x 8; $hash; __ meta::internal_function('hypothetically', <<'__'); # Applies a temporary state and returns a serialized representation. # The original state is restored after this, regardless of whether the # temporary state was successful. my %data_backup = %data; my ($side_effect) = @_; my $return_value = eval {&$side_effect()}; %data = %data_backup; die $@ if $@; $return_value; __ meta::internal_function('internal::main', <<'__'); disable(); $SIG{'INT'} = sub {snapshot(); exit 1}; $transient{initial} = state(); chomp(my $default_action = retrieve('data::default-action')); my $function_name = shift(@ARGV) || $default_action || 'usage'; terminal::warning("unknown action: '$function_name'") and $function_name = 'usage' unless $externalized_functions{$function_name}; around_hook('main-function', $function_name, @ARGV, sub { dangerous('', sub { chomp(my $result = &$function_name(@ARGV)); print "$result\n" if $result})}); save() unless state() eq $transient{initial}; END { enable(); } __ meta::internal_function('invoke_editor_on', <<'__'); my ($data, %options) = @_; my $editor = $options{editor} || $ENV{VISUAL} || $ENV{EDITOR} || die 'Either the $VISUAL or $EDITOR environment variable should be set to a valid editor'; my $options = $options{options} || $ENV{VISUAL_OPTS} || $ENV{EDITOR_OPTS} || ''; my $attribute = $options{attribute}; $attribute =~ s/\//-/g; my $filename = temporary_name() . "-$attribute$options{extension}"; file::write($filename, $data); system("$editor $options '$filename'"); my $result = file::read($filename); unlink $filename; $result; __ meta::internal_function('is_locked', '!((stat($0))[2] & 0222);'); meta::internal_function('namespace', <<'__'); my ($name) = @_; $name =~ s/::.*$//; $name; __ meta::internal_function('parent_attributes', <<'__'); my $attributes = sub {my ($name, $value) = split /\s+/o, $_; $name => ($value || 1)}; map &$attributes(), split /\n/o, join("\n", retrieve(@_)); __ meta::internal_function('parent_ordering', <<'__'); # Topsorts the parents by dependency chain. The simplest way to do this is to # transitively compute the number of parents referred to by each parent. my @parents = @_; my %all_parents = map {$_ => 1} @parents; my %parents_of = map { my $t = $_; my %attributes = parent_attributes($_); $t => [grep /^parent::/, keys %attributes]} @parents; my %parent_count; my $parent_count; $parent_count = sub { my ($key) = @_; return $parent_count{$key} if exists $parent_count{$key}; my $count = 0; $count += $parent_count->($_) + exists $data{$_} for @{$parents_of{$key}}; $parent_count{$key} = $count}; my %inverses; push @{$inverses{$parent_count->($_)} ||= []}, $_ for @parents; grep exists $all_parents{$_}, map @{$inverses{$_}}, sort keys %inverses; __ meta::internal_function('retrieve', <<'__'); my @results = map defined $data{$_} ? $data{$_} : retrieve_with_hooks($_), @_; wantarray ? @results : $results[0]; __ meta::internal_function('retrieve_with_hooks', <<'__'); # Uses the hooks defined in $transient{retrievers}, and returns undef if none work. 
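# For example, a retriever attribute (the 'uppercased' namespace is hypothetical,
# not defined in this object) such as:
#
#   meta::retriever('uppercased', 'my ($name) = @_; $name =~ /^uppercased::(.*)$/ ? uc(retrieve($1)) : undef');
#
# is compiled into $transient{retrievers}{uppercased} by the retriever type, and
# retrieve() falls back to it here whenever an attribute is missing from %data.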
my ($attribute) = @_; my $result = undef; defined($result = &$_($attribute)) and return $result for map $transient{retrievers}{$_}, sort keys %{$transient{retrievers}}; return undef; __ meta::internal_function('select_keys', <<'__'); my %options = @_; grep attribute_is($_, %options), sort keys %data; __ meta::internal_function('separate_options', <<'__'); # Things with one dash are short-form options, two dashes are long-form. # Characters after short-form are combined; so -auv4 becomes -a -u -v -4. # Also finds equivalences; so --foo=bar separates into $$options{'--foo'} eq 'bar'. # Stops processing at the -- option, and removes it. Everything after that # is considered to be an 'other' argument. # The only form not supported by this function is the short-form with argument. # To pass keyed arguments, you need to use long-form options. my @parseable; push @parseable, shift @_ until ! @_ or $_[0] eq '--'; my @singles = grep /^-[^-]/, @parseable; my @longs = grep /^--/, @parseable; my @others = grep ! /^-/, @parseable; my @singles = map /-(.{2,})/ ? map("-$_", split(//, $1)) : $_, @singles; my %options; $options{$1} = $2 for grep /^([^=]+)=(.*)$/, @longs; ++$options{$_} for grep ! /=/, @singles, @longs; ({%options}, @others, @_); __ meta::internal_function('strip', 'wantarray ? map {s/^\\s*|\\s*$//g; $_} @_ : $_[0] =~ /^\\s*(.*?)\\s*$/ && $1;'); meta::internal_function('table_display', <<'__'); # Displays an array of arrays as a table; that is, with alignment. Arrays are # expected to be in column-major order. sub maximum_length_in { my $maximum = 0; length > $maximum and $maximum = length for @_; $maximum; } my @arrays = @_; my @lengths = map maximum_length_in(@$_), @arrays; my @row_major = map {my $i = $_; [map $$_[$i], @arrays]} 0 .. $#{$arrays[0]}; my $format = join ' ', map "%-${_}s", @lengths; join "\n", map strip(sprintf($format, @$_)), @row_major; __ meta::internal_function('temporary_name', <<'__'); use File::Temp 'tempfile'; my (undef, $temporary_filename) = tempfile("$0." . 'X' x 4, OPEN => 0); $temporary_filename; __ meta::internal_function('translate_backtrace', <<'__'); my ($trace) = @_; $trace =~ s/\(eval (\d+)\)/$locations{$1 - 1}/g; $trace; __ meta::internal_function('with_exported', <<'__'); # Like exported(), but removes the file after running some function. # Usage is with_exported(@files, sub {...}); my $f = pop @_; my $name = exported(@_); my $result = eval {&$f($name)}; terminal::warning("$@ when running with_exported()") if $@; unlink $name; $result; __ meta::library('shell', <<'__'); # Functions for shell parsing and execution. package shell; use Term::ReadLine; sub tokenize {grep length, split /\s+|("[^"\\]*(?:\\.)?")/o, join ' ', @_}; sub parse { my ($fn, @args) = @_; s/^"(.*)"$/\1/o, s/\\\\"/"/go for @args; {function => $fn, args => [@args]}} sub execute { my %command = %{$_[0]}; die "undefined command: $command{function}" unless exists $externalized_functions{$command{function}}; &{"::$command{function}"}(@{$command{args}})} sub run {execute(parse(tokenize(@_)))} sub prompt { my %options = @_; my $name = $options{name} // ::name(); my $state = $options{state} // ::state(); my $other = $state ne $transient{initial} ? 33 : 30; my $locked = ::is_locked() ? "\033[1;31mlocked\033[0;0m" : ''; my $cc = length ::retrieve('data::current-continuation') ? "\033[1;36mcc\033[0;0m" : ''; "\033[1;32m$name\033[1;${other}m" . substr($state, 0, 4) . 
"\033[0;0m$cc$locked\033[1;34m$options{stuff}\033[0;0m "} sub repl { my %options = @_; my $term = new Term::ReadLine "$0 shell"; $term->ornaments(0); my $attribs = $term->Attribs; $attribs->{completion_entry_function} = $attribs->{list_completion_function}; my $autocomplete = $options{autocomplete} || sub {[sort keys %data, sort keys %externalized_functions]}; my $prompt = $options{prompt} || \&prompt; my $parse = $options{parse} || sub {parse(tokenize(@_))}; my $command = $options{command} || sub {my ($command) = @_; ::around_hook('shell-command', $command, sub {print ::dangerous('', sub {execute($command)}), "\n"})}; &$command(&$parse($_)) while ($attribs->{completion_word} = &$autocomplete(), defined($_ = $term->readline(&$prompt())))} __ meta::library('terminal', <<'__'); # Functions for nice-looking terminal output. package terminal; my $process = ::name(); sub message {print STDERR "[$_[0]] $_[1]\n"} sub color { my ($name, $color) = @_; *{"terminal::$name"} = sub {chomp($_), print STDERR "\033[1;30m$process(\033[1;${color}m$name\033[1;30m)\033[0;0m $_\n" for map join('', $_), @_}} my %preloaded = (info => 32, progress => 32, state => 34, debug => 34, warning => 33, error => 31); color $_, $preloaded{$_} for keys %preloaded; __ meta::message_color('cc', '36'); meta::message_color('state', 'purple'); meta::message_color('states', 'yellow'); meta::message_color('test', 'purple'); meta::message_color('watch', 'blue'); meta::note('core-caterwaul-simplicity', <<'__'); Absence of a core object system. At first I tried to write caterwaul using an object system to represent its replicative nature, but this turned out to be a major pain. I guess I'm not very good at designing object systems. In any case, now my goal is to make the caterwaul global as simple as possible. Here's the new configuration interface: | var my_caterwaul = caterwaul(function (code) { return this.macroexpand(code, this.standard_macros, this.html_macros); }); Because this is such a common use case you can abbreviate it: | var my_caterwaul = caterwaul.macroexpander('standard_macros', 'html_macros'); But that's it. No more named configurations, no tconfigure(), nothing like that. This has the advantage that no heavy-lifting object system is necessary. __ meta::note('macroexpander-optimization', <<'__'); Optimization constraints for macroexpansion. This is a challenging problem. It looks like the total complexity of loading all modules is close to 7000, which isn't too bad by itself. However, there are more than 100 macros during some of these configurations, and that increases the amount of work by a factor of 100. It isn't possible to achieve sublinear macroexpansion time in the number of syntax nodes, but I think it is possible to achieve it in the number of macros. I'm not quite sure how this should work, but probably some kind of indexing strategy is in order. Masked indexing. This is probably the most obvious way to solve the problem. The idea is that macroexpand() first builds an index of nodes, then consults the index to determine which macros should be invoked. This should be roughly O(n) in the size of the tree. The implementation is made challenging by the fact that syntax node patterns aren't necessarily easy to index. The only way I can think of to go about this sensibly is to compile some kind of mixed mask from all of the macro patterns, and then hash each syntax node based on that mask. Another possibility is to arrange macro patterns by common factors. 
Maybe Caterwaul's macroexpander knows how to do things like identify structurally identical trees and dispatch on some hash of the data, or some such. This involves some fairly advanced decision-making on Caterwaul's part, but I'm fairly confident that a closed-form optimization strategy exists. Problem statement. Formally speaking, the problem is to minimize the number of pattern-node match operations. Scanning through all of the nodes up-front is a definite possibility, though each node visit is somewhat expensive. Match operations are worse, however, since they may have to dig down several layers before rejecting a match, and in the meantime they will have consed up and concatenated many arrays to build the partial match data. Potential algorithms for indexing. There are a lot of commonalities among patterns used for macros, and deriving the structure may be prohibitive. However, I'd like to try anyway. So to state it more clearly, there are a few possibilities: | 1. We use an algorithm that is invariant on its input. This is optimal if no structure can be derived from syntax patterns, which obviously isn't the case. 2. We use an algorithm that has heuristics about macros. This may work, but it breaks down for non-Javascript use cases (and it would be cool if Caterwaul were general-purpose). 3. We use an adaptive optimizer to find the best indexing strategy. We can either predict it or test it empirically. I'm axing (1) right off the bat just because it's no fun. This leaves (2) and (3). Let's see where (3) takes us. Adaptive optimization. In this case we know two things ahead of time. First, we know the list of macros that we'll need to apply to the syntax tree, and second we have a fixed number of scans over the tree (ideally just one) to compute whatever we want to about them. For the sake of comparison, the number to beat is 750000. This means that we can do up to 100 operations per syntax node and still beat the naive solution. Ideally we do better, but that's a high limit. (Hopefully whatever adaptation needs to happen will happen on the macro end, not the syntax node end -- there will be at most around 1000 macros, and that would be a pathological case.) Pattern hashing. It's fairly straightforward to identify high-information hash regions within the pattern space. All we have to do then is to build a table and do a very fast lookup on each node. However, what happens when we have two different schemas? For example: | ([] (let _ _)) ([] (foo _ _)) ([] (bar _ _)) ([] (bif _ _)) This is the first schema, where the path [1].data provides a perfect hash (reduces each tree to at most one match, and from there recursive descent is the only option). However, suppose we also have these macros: | ([] (let bif _)) ([] (let baz _)) ([] (let bok _)) Now the first scheme is perfect only for a subset of inputs. Because of the overlap (which there is in the real world too), we need a multi-stage algorithm to handle failover scenarios like this one. One important idea here is that we want to generalize to a /family/ of macros, not construct a perfect hash. It's acceptable to have to try a couple of different things; we just need to bound the number. Ideally we do the math to find the optimum; i.e. generate more hashes until the overhead of hashing each node outweighs the overhead of each test in a subset of cases. (There may not be such a moment, for example if two macros have the same pattern.) Along these lines, it's worth coming up with some simplifications. 
Pattern matching is transitive; if A matches pattern B and B matches pattern C, then A matches pattern C. This means that we can remove earlier macros that are matched by later macro-patterns. (Here the earlier macro is B, the later one is A, and C is the syntax tree we'll be matching.) This should simplify the hashing by removing all duplicate cases, and is O(n^2) in the number of macros. (In fact we can treat macro patterns as some kind of fully-ordered space to get O(n log n).) Syntactic heterogeneity. How heterogeneous is the syntax? Half of the nodes must be operators or other punctuation, and this is chosen out of a set of 20. The other half must be identifiers of some sort. There's probably a curve that dictates how likely each identifier is to be seen elsewhere, but I'm also guessing that the pattern is fractal; that is, each subtree has about the same amount of information density. If this is the case then it's reasonable to use this as a limit for how much information can be derived. Interrupting thought: Actually, it doesn't matter how much information is present in the syntax tree. The reason is that the macro patterns will end up dictating that; all we need to do is distinguish among them, and we're given at least that much information. Constraints. Because of terrible string-concat performance from some Javascript runtimes, it's important not to allocate new strings during this hashing process. So any hashing that's done will either (1) have to be on a single field, or (2) have to use numbers rather than strings. (2) is probably a more robust solution. Interning strings -> integers. This has some cost initially, and depending on the JS runtime it may or may not make sense. The idea is to unify symbols into hashable integers rather than leaving them as strings. It has the advantage of using little extra space and allowing strings to be used in low-overhead arithmetic ways (by their bijection onto numbers). Because we don't want to keep the global index table around forever, it's important to use only local indexes. This has important consequences, perhaps most significantly that we'll have to regenerate the index for each macroexpansion cycle. I don't consider this too much of a problem. It's nice to have optimizations not pervade the overall system design too much. Node ordering (irrelevant -- see below). If we want to skip the post-indexing traversal step altogether (which sounds attractive to me), then we need to know the right order to expand stuff. It goes outside-in and left-to-right, so a pre-order tree is required. The rule will be that as we're constructing the indexes we also maintain a counter that gets incremented for each node. (Conveniently, the depth-first traversal built in to nodes does what we need here.) Then each list of nodes to be transformed will be sorted by this counter. While the asymptotic complexity is worse than a regular depth-first search, it should be much faster in practice. Interrupting thought: Why not just transform the nodes as soon as we hit them? No need to build these lists at all. Therefore node ordering is irrelevant to the optimization problem. __ meta::note('math', <<'__'); Mathematical constructs in caterwaul 1.0. Caterwaul is no longer just about transforming syntax in convenient ways. It's also about doing totally off the hook stuff with macros and adding structure to its extensions. | 1. Macros should have standard forms. One common one is the "adverb", which includes things like when[], unless[], where[], se[], re[], etc. 2. 
Expressions should have well-defined destructuring semantics (see below). Destructuring. The lvalue() macro needs to become significantly more capable. Here's an example: | Foo = fc[x, y][this.x = x, this.y = y]; Bar = fc[x, y][this.x = x, this.y = y]; f(new Foo(x, y)) = x + y; f(new Bar(x, y)) = x * y; f(new Foo(3, 4)) // returns 7 f(new Bar(3, 4)) // returns 12 | f(x + 1) = Number(x) * 2 f(4) // returns 6 f('41') // returns 8 I'm not 100% sure how to do the failover for pattern matching because it requires knowledge of previous cases. The generated code would have to end up looking like this: | Foo = ... Bar = ... f = function (gs_value) { var gs_bindings = null; if (gs_value.constructor === Foo && Foo.gs_caterwaul_invert && (gs_bindings = Foo.gs_caterwaul_invert(gs_value))) return (function (x, y) {return x + y}).call(this, gs_bindings.x, gs_bindings.y); else if (gs_value.constructor === Bar && Bar.gs_caterwaul_invert && (gs_bindings = Bar.gs_caterwaul_invert(gs_value))) return (function (x, y) {return x * y}).call(this, gs_bindings.x, gs_bindings.y); else return undefined; }; Implementing function inversion. It's possible to invert a function. All we have to do is assign it combinatory semantics and then invert each combinator in turn. For instance: | f(x, y) = {foo: x, bar: y} // If this definition is made... f.gs_caterwaul_invert(o) = o && {x: o.foo, y: o.bar} // this one will be too. Inversion can happen even over closures: | f(g)(x, y) = {foo: x, bar: y, bif: g}; f(g).gs_caterwaul_invert(o) = o && o.bif === g && {x: o.foo, y: o.bar}; f.gs_caterwaul_invert(g)(o) = o && o.bif === g && {x: o.foo, y: o.bar}; Failover. This is really tricky. We have to know in advance whether f has been defined. For example: | f(x, y) = x + y; f(new Foo(bar)) = bar.x + bar.y; What does the generated code look like? (Especially considering that each of these is an imperative statement.) Interesting case: | f(x, y) = x + y; f(3, 4); // should return 7 f(x + 1, y) = x * y; f(3, 4); // should return 8 Actually, we don't care whether f is already defined; we can assume it: | f(x, y) = x + y f(new Foo(bar)) = bar.x * bar.y; Here's the code: | f = function (x, y) {return x + y}; // No modifiers; gets inlined into an erasing assignment f = (function (old_f) { var this_case = function (x, y) {return x * y}; return function (gensym) { var gensym_result = Foo && Foo.gensym_caterwaul_invert && Foo.gensym_caterwaul_invert(gensym); if (gensym_result) return this_case.call(this, gensym_result.x, gensym_result.y); else return old_f.apply(this, arguments); }; }).call(this, typeof f === 'undefined' ? function () {throw new Error('no matching case for f')} : f); It's true that this imposes a linear-time overhead in the number of cases, but it's the best we can do for dynamic assignment. Consequences of ubiquitous inversion. One nice side-effect of assigning an inverse to every function is that you can have pattern-matching functions invert each other: | f(x, y) = x + y g(f(x, 1)) = x * 2 g(10) // -> 18 Perhaps more interestingly: | modifier(m, x) = m + ' ' + x; variable(n, v) = n + ' = ' + v; g(modifier('var', variable('x', value))) = alert('x is assigned ' + value); We'd have to detect the presence of a constant in the destructuring pattern and propagate it through the combinatory logic somehow. (This is computationally expensive!) However, it's also really important to be able to do. Anonymous destructuring functions. There isn't a good reason fn[] couldn't take on destructuring semantics. 
The only difficulty is having multiple alternatives. | fn[new Foo(x, y)][x + y] fn[x + 1][x * (x + 1)] Ultimately the problem is that Javascript doesn't have a way to represent parameter-based multiple dispatch. So pattern matching is something that gets superimposed at the last minute. __ meta::note('q', 'Object system is now implemented, so at this point the priority is to get stuff working with it.'); meta::note('wrappers', <<'__'); Dealing with Javascript suckage. Javascript's builtin types are a mess. There isn't a reasonable hierarchy for things that are ordered (though < is magic enough to work properly most of the time), arrays aren't really extensible, objects are a disaster, etc. Part of this is due to the crossover between the method table and the key-value pairing present on all references. But in any case, it would be really nice to have a better interface for these things. Tempting as it is to rebuild all of these types using real data structures, caterwaul isn't trying to solve that problem. So caterwaul internally and externally uses regular old Javascript types whenever practical. (A notable exception is the global caterwaul function, which is an instance of itself under the caterwaul object system. But it produces and accepts standard Javascript arrays and objects in general.) It should be very easy to wrap standard data types however. So caterwaul provides a series of monads __ meta::parent('/home/spencertipping/bin/configuration', <<'__'); meta::type::configuration d67e10a128e6b1d958c5b9d3bbe25aa4 parent::/home/spencertipping/bin/object 4e114ee5933f8a10ce856c55da4aea34 __ meta::parent('/home/spencertipping/bin/node-base', <<'__'); function::loc 36e0cabf1fe1c1bcaa3c8c708bd82ca0 function::node 3522fde8f76947a5f70bca33f1ee0016 function::node-custom c2f4063798c997ec7f78a3543b1240b3 function::render 4962b4a2e6bc800fd5c11b97550b623e function::run-forever 76175932a2d2692fc802856c28d0848d internal_function::dep bad9b934374b176318ed2295b63130bc message_color::test 14e993fdf2c62df353613c243dc9053b meta::type::js f7a0080116fecebf7abb030b0156f44d parent::/home/spencertipping/bin/repository 3e7c06920f556c7ca97aa67b18470f03 parent::/home/spencertipping/conjectures/perl-objects/sdoc d13d5c65b3faaef717d55f9a59e6e842 __ meta::parent('/home/spencertipping/bin/object', <<'__'); bootstrap::html 9b839f4c0c8d6bb18bf6d9d0e670e497 bootstrap::initialization b39f25c213ceb0418551084d1bddeb20 bootstrap::perldoc c63395cbc6f7160b603befbb2d9b6700 function::alias 28744564997657da45ab16cd5b441104 function::cat a19fdfda461f9a0aa01978dff2e2c2f7 function::cc c4d52b1d8f52a480f07b81b93c3aac7b function::ccc 2351344fc688518c75aa4ab3acec1c4a function::child f69646398c3123d3d939a7f2b3156606 function::clone 54e00ff2103e54423d4c9febb97ce063 function::cp e5fee448a74ecbf4ae215e6b43dfc048 function::create 090c342a2dc304b39c643d53350474a0 function::current-state d83ae43551c0f58d1d0ce576402a315a function::disable 49db26fcd680ca34c1f52629a7375d2f function::edit c31d73791820a10ec910af043f67d4eb function::enable 37af2d2e603e2455be227ae3c5a42c3a function::export 388e0cc60507443cb1c0cc3e2658cfef function::extern 6a9fa8c2a8a4eaae9fe8d38e402145e4 function::grep 3ad630ca30f1abae3cf28ef6293f0e00 function::hash 7a903f90f2c8ed27af7e030f407d9f7b function::hook d74d8e2b611342af6a0897e0bd62e6e6 function::hooks 230122bdc8929884e45b2f78a7743e2e function::identity 37106ce13a0200af001d361ce7e81e57 function::import ac86cbe9c9fb12fc8cef2cc88e80c01e function::initial-state e21ba3519838b221a7b2d4e8a7544e7f function::is 
1d3401965e4720ed972470b54b447db0 function::load-state ea18db867bd62a294e067f60e6975dcf function::lock 9bf21fee2f0f809131d43553bde82fa5 function::ls 61a247f0a582f63d85b5d5a963a46893 function::mv 52e95180e3c7019116bd798e0da0fdda function::name 6848cbc257e4b6d7441b25acb04e23c9 function::parents f94c3a5addbc92fe7f90d198fa701484 function::perl 986a274c013b77fe08d29726ce3799fe function::reload c57ff432c3ffd91a5506cd3eb8bf50c9 function::rm 7cecfb1691a7bf86741f00058bcc54ca function::save 181da4858ac39c157dbf38e6bac7a0d2 function::save-state 863e4d9fa75ca198ef7a755248d1002a function::serialize 5148e8ca46eeb3e297f76d098e496bcf function::serialize_single e7a22f806b580d5c63982ffc7f218a1a function::sh 9647fa9227bef6c139a79d0dd4acc8b4 function::shell dc8238ad70b1e02eaf230c4f1bd21690 function::size 140728e163286a0f3f514f468c475cfc function::snapshot d3d84a364524eeb8ee90623f545187e8 function::state 119111f84c3e32a5536838ac84bc6f10 function::touch 819878bc64df094d3323c7050f2c3e97 function::unlock 8fc9bd69f3466f0b54ee2c6965f68cea function::update 4de1a6a4085836590a3b1ef997f9d5ea function::update-from beead29d50b0b204bc9a8dc6f07d0003 function::usage b36ead828ad566c8e3919f3b40fb99e6 function::verify d31b85fffd464ddf516d2afeb63dcbde internal_function::around_hook e1cd17b80d4e8165df9c94facd9f239b internal_function::associate fc4f785bcf3ffe3225a73a1fdd314703 internal_function::attribute 62efb9f22157835940af1d5feae98d98 internal_function::attribute_is c88e0f87cf807ab0af73ca10032b54b2 internal_function::cache c119f9d7ea9a147c6d526a6283fb119a internal_function::chmod_self b13487447c65f2dc790bd6b21dde89dd internal_function::dangerous 4b8343178d6d4d1b760d61b1cfda008c internal_function::debug_trace 77644ab45a770a6e172680f659911507 internal_function::execute 4b4efc33bc6767a7aade7f427eedf83f internal_function::exported 27414e8f2ceeaef3555b9726e690eb0f internal_function::extension_for 65e48f50f20bc04aa561720b03bf494c internal_function::fast_hash ac70f469e697725cfb87629833434ab1 internal_function::file::read 186bbcef8f6f0dd8b72ba0fdeb1de040 internal_function::file::write eb7b1efebe0db73378b0cce46681788d internal_function::fnv_hash 8d001a3a7988631bab21a41cee559758 internal_function::hypothetically 33ee2e1595d3877bd1d9accaa72305c8 internal_function::internal::main 435a9e83ac803960745d9aa5aac6c75f internal_function::invoke_editor_on 1448132d5294a4b8390b4a684d8a78f9 internal_function::is_locked 42c3c89625863a31105d1df49a2a762f internal_function::namespace 93213d60cafb9627e0736b48cd1f0760 internal_function::parent_attributes 480776f176ab728c6b7450287be5782a internal_function::parent_ordering 87bee77390de5b9e9c6e07f9af9eb70a internal_function::retrieve 0b6f4342009684fdfa259f45ac75ae37 internal_function::retrieve_with_hooks 5186a0343624789d08d1cc2084550f3d internal_function::select_keys 1cd075b241905d9ab3199e680e86dced internal_function::separate_options d47e8ee23fe55e27bb523c9fcb2f5ca1 internal_function::strip 4af6a470effeed94c0dd9800d01f7d66 internal_function::table_display 8a6897e093f36bf05477a3889b84a61d internal_function::temporary_name 0fb1402061581b69822f913631b4a9d9 internal_function::translate_backtrace 06fad3d85833a6484e426401b95e0206 internal_function::with_exported fc4f32c46d95c6deed0414364d1c7410 library::shell 528f486cc4d9eb390e4c350b8727c751 library::terminal c52308d05ebb4ff61c5fc36e6d9c7a8a message_color::cc 6249446f73b3c5af2404c322c150e57b message_color::state 14e993fdf2c62df353613c243dc9053b message_color::states 152a940086f7cee6110528a09af7dd78 meta::configure 25976e07665878d3fae18f050160343f meta::externalize 
9141b4e8752515391385516ae94b23b5 meta::functor::editable e3d2ede6edf65ffe2123584b2bd5dab7 meta::type::alias 28fe15dd61f4902ed5180d8604d15d97 meta::type::bootstrap 297d03fb32df03b46ea418469fc4e49e meta::type::cache 52eac0d7b550a358cc86803fe1a9d921 meta::type::data 58d8027f20099b28a159eaac67314051 meta::type::function d93b3cc15693707dac518e3d6b1f5648 meta::type::hook f55a3f728ddfb90204dff3fe5d86845c meta::type::inc c95915391b969734305f2f492d5ca8e3 meta::type::internal_function 34abb44c67c7e282569e28ef6f4d62ab meta::type::library b6dd78120e6d787acdb5c1629f7f1896 meta::type::message_color 794bf137c425293738f07636bcfb5c55 meta::type::meta 640f25635ce2365b0648962918cf9932 meta::type::parent 607e9931309b1b595424bedcee5dfa45 meta::type::retriever 6e847a9d205e4a5589765a3366cdd115 meta::type::state c1f29670be26f1df6100ffe4334e1202 retriever::file b8e7aefc98b8341260d91f21dc61d749 retriever::id a791a5735e9b4f2cb8b99fd39dc17bc3 __ meta::parent('/home/spencertipping/bin/repository', <<'__'); function::dupdate 1c0273217c5b9f2756bb14a4a00aa7e2 meta::type::cached_dependency e9455b403cbff27bbcc41d917fef482f parent::/home/spencertipping/bin/configuration 4217fa0ccd97c9eecfbb342e8f740271 __ meta::parent('/home/spencertipping/conjectures/perl-objects/sdoc', <<'__'); function::sdoc 060cfa349e629eb90a82b87a8ba00c1d function::sdoc-html 5fffe6139a81ff67ee063ebcbfadad2a function::sdocp 8b7ed5bbd537234ae53c0691b6d02c97 meta::type::sdoc 392c65eddae300e2aa67014b85884979 parent::/home/spencertipping/bin/object 4e114ee5933f8a10ce856c55da4aea34 retriever::html-sdoc 54d22ab83d9f3f2e27ef51033b4817a7 retriever::sdoc 6de8ec1f1436fb8e7477b533c321081b retriever::sdocp fef74cd94fa8761618662802f0bfc171 __ meta::parent('notes', <<'__'); function::note bcbfeac6dd2112f47296265444570a6e function::notes 15b3aca0cba3b327a984928154eda2b5 meta::type::note 7ca6b375a66ecbfd14bef5bcefeb6643 parent::object 4e114ee5933f8a10ce856c55da4aea34 __ meta::parent('object', <<'__'); bootstrap::html 9b839f4c0c8d6bb18bf6d9d0e670e497 bootstrap::initialization b39f25c213ceb0418551084d1bddeb20 bootstrap::perldoc c63395cbc6f7160b603befbb2d9b6700 function::alias 28744564997657da45ab16cd5b441104 function::cat a19fdfda461f9a0aa01978dff2e2c2f7 function::cc c4d52b1d8f52a480f07b81b93c3aac7b function::ccc 2351344fc688518c75aa4ab3acec1c4a function::child f69646398c3123d3d939a7f2b3156606 function::clone 54e00ff2103e54423d4c9febb97ce063 function::cp e5fee448a74ecbf4ae215e6b43dfc048 function::create 090c342a2dc304b39c643d53350474a0 function::current-state d83ae43551c0f58d1d0ce576402a315a function::disable 49db26fcd680ca34c1f52629a7375d2f function::edit c31d73791820a10ec910af043f67d4eb function::enable 37af2d2e603e2455be227ae3c5a42c3a function::export 388e0cc60507443cb1c0cc3e2658cfef function::extern 6a9fa8c2a8a4eaae9fe8d38e402145e4 function::grep 3ad630ca30f1abae3cf28ef6293f0e00 function::hash 7a903f90f2c8ed27af7e030f407d9f7b function::hook d74d8e2b611342af6a0897e0bd62e6e6 function::hooks 230122bdc8929884e45b2f78a7743e2e function::identity 37106ce13a0200af001d361ce7e81e57 function::import ac86cbe9c9fb12fc8cef2cc88e80c01e function::initial-state e21ba3519838b221a7b2d4e8a7544e7f function::is 1d3401965e4720ed972470b54b447db0 function::load-state ea18db867bd62a294e067f60e6975dcf function::lock 9bf21fee2f0f809131d43553bde82fa5 function::ls 61a247f0a582f63d85b5d5a963a46893 function::mv 52e95180e3c7019116bd798e0da0fdda function::name 6848cbc257e4b6d7441b25acb04e23c9 function::parents f94c3a5addbc92fe7f90d198fa701484 function::perl 986a274c013b77fe08d29726ce3799fe 
function::reload c57ff432c3ffd91a5506cd3eb8bf50c9 function::rm 7cecfb1691a7bf86741f00058bcc54ca function::save 181da4858ac39c157dbf38e6bac7a0d2 function::save-state 863e4d9fa75ca198ef7a755248d1002a function::serialize 5148e8ca46eeb3e297f76d098e496bcf function::serialize_single e7a22f806b580d5c63982ffc7f218a1a function::sh 9647fa9227bef6c139a79d0dd4acc8b4 function::shell dc8238ad70b1e02eaf230c4f1bd21690 function::size 140728e163286a0f3f514f468c475cfc function::snapshot d3d84a364524eeb8ee90623f545187e8 function::state 119111f84c3e32a5536838ac84bc6f10 function::touch 819878bc64df094d3323c7050f2c3e97 function::unlock 8fc9bd69f3466f0b54ee2c6965f68cea function::update 4de1a6a4085836590a3b1ef997f9d5ea function::update-from beead29d50b0b204bc9a8dc6f07d0003 function::usage b36ead828ad566c8e3919f3b40fb99e6 function::verify d31b85fffd464ddf516d2afeb63dcbde internal_function::around_hook e1cd17b80d4e8165df9c94facd9f239b internal_function::associate fc4f785bcf3ffe3225a73a1fdd314703 internal_function::attribute 62efb9f22157835940af1d5feae98d98 internal_function::attribute_is c88e0f87cf807ab0af73ca10032b54b2 internal_function::cache c119f9d7ea9a147c6d526a6283fb119a internal_function::chmod_self b13487447c65f2dc790bd6b21dde89dd internal_function::dangerous 4b8343178d6d4d1b760d61b1cfda008c internal_function::debug_trace 77644ab45a770a6e172680f659911507 internal_function::execute 4b4efc33bc6767a7aade7f427eedf83f internal_function::exported 27414e8f2ceeaef3555b9726e690eb0f internal_function::extension_for 65e48f50f20bc04aa561720b03bf494c internal_function::fast_hash ac70f469e697725cfb87629833434ab1 internal_function::file::read 186bbcef8f6f0dd8b72ba0fdeb1de040 internal_function::file::write eb7b1efebe0db73378b0cce46681788d internal_function::fnv_hash 8d001a3a7988631bab21a41cee559758 internal_function::hypothetically 33ee2e1595d3877bd1d9accaa72305c8 internal_function::internal::main 435a9e83ac803960745d9aa5aac6c75f internal_function::invoke_editor_on 1448132d5294a4b8390b4a684d8a78f9 internal_function::is_locked 42c3c89625863a31105d1df49a2a762f internal_function::namespace 93213d60cafb9627e0736b48cd1f0760 internal_function::parent_attributes 480776f176ab728c6b7450287be5782a internal_function::parent_ordering 87bee77390de5b9e9c6e07f9af9eb70a internal_function::retrieve 0b6f4342009684fdfa259f45ac75ae37 internal_function::retrieve_with_hooks 5186a0343624789d08d1cc2084550f3d internal_function::select_keys 1cd075b241905d9ab3199e680e86dced internal_function::separate_options d47e8ee23fe55e27bb523c9fcb2f5ca1 internal_function::strip 4af6a470effeed94c0dd9800d01f7d66 internal_function::table_display 8a6897e093f36bf05477a3889b84a61d internal_function::temporary_name 0fb1402061581b69822f913631b4a9d9 internal_function::translate_backtrace 06fad3d85833a6484e426401b95e0206 internal_function::with_exported fc4f32c46d95c6deed0414364d1c7410 library::shell 528f486cc4d9eb390e4c350b8727c751 library::terminal c52308d05ebb4ff61c5fc36e6d9c7a8a message_color::cc 6249446f73b3c5af2404c322c150e57b message_color::state 14e993fdf2c62df353613c243dc9053b message_color::states 152a940086f7cee6110528a09af7dd78 meta::configure 25976e07665878d3fae18f050160343f meta::externalize 9141b4e8752515391385516ae94b23b5 meta::functor::editable e3d2ede6edf65ffe2123584b2bd5dab7 meta::type::alias 28fe15dd61f4902ed5180d8604d15d97 meta::type::bootstrap 297d03fb32df03b46ea418469fc4e49e meta::type::cache 52eac0d7b550a358cc86803fe1a9d921 meta::type::data 58d8027f20099b28a159eaac67314051 meta::type::function d93b3cc15693707dac518e3d6b1f5648 meta::type::hook 
f55a3f728ddfb90204dff3fe5d86845c meta::type::inc c95915391b969734305f2f492d5ca8e3 meta::type::internal_function 34abb44c67c7e282569e28ef6f4d62ab meta::type::library b6dd78120e6d787acdb5c1629f7f1896 meta::type::message_color 794bf137c425293738f07636bcfb5c55 meta::type::meta 640f25635ce2365b0648962918cf9932 meta::type::parent 607e9931309b1b595424bedcee5dfa45 meta::type::retriever 6e847a9d205e4a5589765a3366cdd115 meta::type::state c1f29670be26f1df6100ffe4334e1202 retriever::file b8e7aefc98b8341260d91f21dc61d749 retriever::id a791a5735e9b4f2cb8b99fd39dc17bc3 __ meta::parent('preprocessor', <<'__'); function::preprocess 66e539d29e9afa903569efad0eb7c886 meta::type::template 25f4d6eafb1d3eea6d5d3d9a71a5623e parent::object 4e114ee5933f8a10ce856c55da4aea34 retriever::pp f4a8c288d69963d6ebc5ce0bf7794777 template::comment 7f8e9be5cd7c865fa64efe3123f62b38 template::eval eb0b1058649eb2d833f348540516b358 template::failing_conditional 5c593329b434a7044f68cec4b77e8ed9 template::include e0624844a65ae41e0217dd871fc0dbfb template::pinclude 5ba61c3034a4b183881936aec30d2be9 __ meta::retriever('file', '-f $_[0] ? file::read($_[0]) : undef;'); meta::retriever('html-sdoc', <<'__'); my ($attribute) = @_; return undef unless $attribute =~ s/^html::/sdoc::/ and exists $data{$attribute}; &{'sdoc-html'}($attribute); __ meta::retriever('id', '$_[0] =~ /^id::/ ? substr($_[0], 4) : undef;'); meta::retriever('pp', <<'__'); return undef unless namespace($_[0]) eq 'pp'; my $attr = retrieve(attribute($_[0])); defined $attr ? preprocess($attr) : undef; __ meta::retriever('sdoc', 'exists $data{"sdoc::$_[0]"} ? sdoc("sdoc::$_[0]") : undef;'); meta::retriever('sdocp', <<'__'); my $attribute = attribute($_[0]); exists $data{"sdoc::$attribute"} ? sdocp("sdoc::$attribute") : undef; __ meta::sdoc('js::caterwaul', <<'__'); Core caterwaul build with version ID | Spencer Tipping Licensed under the terms of the MIT source code license - pinclude pp::js::core/caterwaul - eval "caterwaul.version('" . fast_hash(retrieve('pp::js::core/caterwaul')) . "');" __ meta::sdoc('js::caterwaul.all', <<'__'); This file isn't rendered -- it's just used internally for node testing. - pinclude pp::js::caterwaul - pinclude pp::js::extensions/std - pinclude pp::js::extensions/dev - pinclude pp::js::extensions/ui __ meta::sdoc('js::caterwaul.node', <<'__'); CommonJS-compatible Caterwaul build | Spencer Tipping Licensed under the terms of the MIT source code license - pinclude pp::js::caterwaul caterwaul.merge(exports, caterwaul); __ meta::sdoc('js::core/caterwaul', <<'__'); Introduction. Caterwaul is a Javascript-to-Javascript compiler. Visit http://spencertipping.com/caterwaul/caterwaul.html for information about how and why you might use it. (function (f) {return f(f, (function (x) {return function () {return ++x}})(0))})(function (initializer, unique, undefined) { - pinclude pp::js::core/caterwaul.utilities - pinclude pp::js::core/caterwaul.global - pinclude pp::js::core/caterwaul.parser-data - pinclude pp::js::core/caterwaul.tree - pinclude pp::js::core/caterwaul.parser - pinclude pp::js::core/caterwaul.compiler - pinclude pp::js::core/caterwaul.macroexpander - pinclude pp::js::core/caterwaul.precompile-support - pinclude pp::js::core/caterwaul.init return caterwaul = caterwaul_global = caterwaul_global.clone()}); __ meta::sdoc('js::core/caterwaul.compiler', <<'__'); Environment-dependent compilation. It's possible to bind variables from 'here' (i.e. this runtime environment) inside a compiled function. 
The way we do it is to create a closure using a gensym. (Another reason that gensyms must really be unique.) Here's the idea. We use the Function constructor to create an outer function, bind a bunch of variables directly within that scope, and return the function we're compiling. The variables correspond to gensyms placed in the code, so the code will have closure over those variables. An optional second parameter 'environment' can contain a hash of variable->value bindings. These will be defined as locals within the compiled function. New in caterwaul 0.6.5 is the ability to specify a 'this' binding to set the context of the expression being evaluated. caterwaul_global.compile = function (tree, environment) { var vars = [], values = [], bindings = merge({}, this._environment || {}, environment || {}, tree.bindings()), s = gensym(); for (var k in bindings) if (own.call(bindings, k)) vars.push(k), values.push(bindings[k]); var variable_definitions = map(function (v) {return v === 'this' ? '' : 'var ' + v + '=' + s + '.' + v}, vars).join(';'), return_statement = 'return(' + tree.toString() + ')', function_body = variable_definitions + ';' + return_statement; try {return (new Function(s, function_body)).call(bindings['this'], bindings)} catch (e) {throw new Error(e + ' while compiling ' + function_body)}}; __ meta::sdoc('js::core/caterwaul.global', <<'__'); Global caterwaul variable. Caterwaul creates a global symbol, caterwaul. Like jQuery, there's a mechanism to get the original one back if you don't want to replace it. You can call caterwaul.deglobalize() to return caterwaul and restore the global that was there when Caterwaul was loaded (might be useful in the unlikely event that someone else named their library Caterwaul). Note that deglobalize() is available only on the global caterwaul() function. var calls_init = function () {var f = function () {return f.init.apply(f, arguments)}; return f}, original_global = typeof caterwaul === 'undefined' ? undefined : caterwaul, caterwaul_global = calls_init(); caterwaul_global.deglobalize = function () {caterwaul = original_global; return caterwaul_global}; Version management and reinitialization. There's an interesting case that comes up when loading a global caterwaul. If we detect that the caterwaul we just loaded has the same version as the one that's already there, we revert back to the original. This is very important for precompilation and the reason for it is subtle. Precompilation relies on tracing to determine the compiled form of each function handed to caterwaul, so if that caterwaul is replaced for any reason then the traces won't happen. A very common setup is something like this: | Often you'll want to precompile the whole bundle, since caterwaul.js includes behaviors that aren't necessarily precompiled and you might get better minification. To do this, it's tempting to precompile the whole bundle of caterwaul, the extensions, and your code. Without version checking, however, the traces would be lost and nothing would happen. There is, of course, a pathological failure case in all of this. If you load three caterwauls [why?] and the second of the three has a different version than the other two, then you'll still get precompiled erasure. I personally don't care about this case. You'd have to be insane to do crazy stuff like this and expect precompilation to work. caterwaul_global.version = function (v) {return v ? (this._version = v, original_global && original_global.version() === v ? 
this.deglobalize() : this) : this._version}; caterwaul_global.reinitialize = function (transform) {var c = (transform || function (x) {return x})(this.initializer); return c(c, this.unique).version(this.version())}; Utility methods. These are available for use by compiler functions or the end user. merge(caterwaul_global, { merge: merge, map: map, rmap: rmap, flatten: flatten, gensym: gensym, unique: unique, initializer: initializer, variadic: function (f) {return function () {for (var r = [], i = 0, l = arguments.length; i < l; ++i) r.push(f.call(this, arguments[i])); return r}}, right_variadic: function (f) {return function () {for (var r = [], i = 0, l = arguments.length - 1, x = arguments[l]; i < l; ++i) r.push(f.call(this, arguments[i], x)); return r}}}); __ meta::sdoc('js::core/caterwaul.init', <<'__'); Init method. This runs a compilation stage. If the input is a function or string, then the function or string is parsed, run through the macroexpander, and compiled. caterwaul_global.clone = function (f) {return se(this.merge(calls_init(), this, this.instance_methods(), {constructor: this}), function () { delete this._id, delete this._macros, delete this._environment})}; Compiler instance methods/attributes. These are installed on each generated compiler function. You can change some of them if you know what you're doing (for instance, you can create a compiler for a different programming language by changing the 'parse' function to handle different input). Unlike caterwaul < 1.0 there is no support for cloning a compiler function. However, you can compose things nicely by doing stuff like this: | var my_caterwaul = caterwaul(function (code) {...}); var other_caterwaul = caterwaul(my_caterwaul); other_caterwaul.parse = function (x) {...}; In this example, other_caterwaul delegates its macroexpansion to my_caterwaul, but it uses a custom parse function. caterwaul_global.instance_methods = function () { return {compile: this.compile, parse: this.parse, macroexpand: this.macroexpand, syntax: this.syntax, ref: this.ref, id: this.syntax_common.id, init_function: this.init_function || this.macroexpand, instance_methods: this.instance_methods, ensure_syntax: this.ensure_syntax, ensure_pattern: this.ensure_pattern, ensure_expander: this.ensure_expander, environment: function (e) {return arguments.length ? (this._environment = e, this) : this._environment}, macros: function () {return arguments.length ? (this._macros = this.flatten.apply(this, arguments), this) : this._macros}, toString: function () {return '[caterwaul instance ' + this.id() + ']'}, init: function (f, environment) {return this.is_precompiled(f) || this.init_not_precompiled(f, environment)}, init_not_precompiled: function (f, environment) {return f.constructor === this.syntax ? this.init_function(f) : this.compile(this(this.parse(f)), environment)}}}; __ meta::sdoc('js::core/caterwaul.macroexpander', <<'__'); Macroexpansion. Caterwaul's main purpose is to transform your code, and the easiest way to transform things is through macroexpansion. The idea is to locate syntax nodes with a given pattern and rewrite them somehow. For example, suppose we wanted to define a macro to enable the postfix /log modifier. Here's how it might look: | x /log -> (function (it) {console.log(it); return it})(x) The macro needs to first identify things of the form '_something /log' and transform them accordingly. 
Here's a macro to do that: | var m = caterwaul.macro('_something /log', '(function (it) {console.log(it); return it})(_something)'); Building macros. Caterwaul gives you several ways to build macros. The simplest is to use caterwaul.macro() as shown above. It will parse each string, using the first as a pattern and the second as a template. It then fills in the values on the right from the ones on the left and re-expands the result. In place of each string, caterwaul.macro() can take either a syntax tree or a function. The function on the left should take a syntax tree, try to match the pattern against it, and return either a match object or false. The function on the right should take a match object and return a new syntax tree. (It won't be invoked if the left function returned false.) Macro function internals. Caterwaul's macros are just functions from syntax to syntax. They return false if they don't match a particular node. So, for example, the macro '_x * 2' -> '_x << 1' would return 'a << 1' on 'a * 2', but would return false on 'a * 3'. The macroexpander knows to descend into child nodes when a macroexpander returns false. If a macroexpander returns a value then that value is taken and no further expansion is performed. (This is necessary if we want to implement literal macros -- that is, literal(x) -> x and x isn't macroexpanded further.) If a macro wants to re-expand stuff it should use 'this.expand', which invokes the macroexpander on a tree. Most of the time macros will do this, and it's done automatically by caterwaul.macro() when you use a string or a syntax tree as the expansion. You'll have to call this.expand() if you're using a function as an expander. caterwaul_global.ensure_syntax = function (thing) {return thing && thing.constructor === String ? this.parse(thing) : thing}; caterwaul_global.ensure_pattern = function (pattern) {return pattern.constructor === String ? this.ensure_pattern(this.parse(pattern)) : pattern.constructor === this.syntax ? function (tree) {return pattern.match(tree)} : pattern}; caterwaul_global.ensure_expander = function (expander) {return expander.constructor === String ? this.ensure_expander(this.parse(expander)) : expander.constructor === this.syntax ? function (match) {return this.expand(expander.replace(match))} : expander}; caterwaul_global.macro = caterwaul_global.right_variadic(function (pattern, expander) {var new_pattern = this.ensure_pattern(pattern), new_expander = this.ensure_expander(expander); return se(function (tree) {var match = new_pattern.call(this, tree); return match && new_expander.call(this, match)}, function () {this.pattern = pattern, this.expander = expander})}); Macroexpander logic. This behaves just like the pre-1.0 macroexpander, except that the patterns and expanders are now fused. The macro functions are also evaluated under a different context; rather than being bound to the caterwaul function they came from, they are bound to a context object that gives them a way to re-expand stuff under the same set of macros. It also provides the caterwaul function that is performing the expansion. (Though you shouldn't modify the macro list from inside a macro -- this pre-1.0 feature is now removed.) Just like previous versions of caterwaul the macros are matched last-to-first. This means that the /last/ matching macro is used, allowing you to easily override stuff. 
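As a quick usage sketch (not part of the compiler source -- just an illustration of the
last-to-first rule, reusing the /log example from above and the extra-argument form of
macroexpand() described below):

| var noisy = caterwaul.macro('_x /log', '(function (it) {console.log(it); return it})(_x)'),
      quiet = caterwaul.macro('_x /log', '_x');

  // Extra macros passed to macroexpand() are appended to the macro list, so 'quiet' is
  // consulted first and overrides 'noisy'.
  caterwaul.macroexpand('f(x) /log', noisy, quiet).toString()   // should give back 'f(x)'
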
Also, the macroexpand() function takes optional extra parameters; these are either macros or arrays of macros to be added to the macro list stored on the caterwaul function. caterwaul_global.macroexpand = function (tree) {for (var macros = arguments.length ? [].concat(this._macros || []) : this._macros || [], i = 1, l = arguments.length, x; i < l; ++i) (x = arguments[i]) instanceof Array ? macros.push.apply(macros, x) : macros.push(x); var context = {caterwaul: this, macros: macros, expand: function (tree) { return tree.rmap(function (node) { for (var new_node = null, i = macros.length - 1; i >= 0; --i) if (new_node = macros[i].call(context, node)) return new_node})}}; return context.expand(this.ensure_syntax(tree))}; __ meta::sdoc('js::core/caterwaul.parser', <<'__'); Parsing. There are two distinct parts to parsing Javascript. One is parsing the irregular statement-mode expressions such as 'if (condition) {...}' and 'function f(x) {...}'; the other is parsing expression-mode stuff like arithmetic operators. In Rebase I tried to model everything as an expression, but that failed sometimes because it required that each operator have fixed arity. In particular this was infeasible for keywords such as 'break', 'continue', 'return', and some others (any of these can be nullary or unary). It also involved creating a bizarre hack for 'case x:' inside a switch block. This hack made the expression passed in to 'case' unavailable, as it would be buried in a ':' node. Caterwaul fixes these problems by using a proper context-free grammar. However, it's much looser than most grammars because it doesn't need to validate anything. Correspondingly, it can be much faster as well. Instead of guessing and backtracking as a recursive-descent parser would, it classifies many different branches into the same basic structure and fills in the blanks. One example of this is the () {} pair, which occurs in a bunch of different constructs, including function () {}, if () {}, for () {}, etc. In fact, any time a () group is followed by a {} group we can grab the token that precedes () (along with perhaps one more in the case of function f () {}), and group that under whichever keyword is responsible. Syntax folding. The first thing to happen is that parenthetical, square bracket, and braced groups are folded up. This happens in a single pass that is linear in the number of tokens, and other foldable tokens (including unary and binary operators) are indexed by associativity. The following pass runs through these indexes from high to low precedence and folds tokens into trees. By this point all of the parentheticals have been replaced by proper nodes (here I include ?: groups in parentheticals, since they behave the same way). Finally, high-level rules are applied to the remaining keywords, which are bound last. This forms a complete parse tree. Doing all of this efficiently requires a linked list rather than an array. This gets built during the initial paren grouping stage. Arrays are used for the indexes, which are left-to-right and are later processed in the order indicated by the operator associativity. That is, left-associative operators are processed 0 .. n and right associative are processed n .. 0. Keywords are categorized by behavior and folded after all of the other operators. Semicolons are folded last, from left to right. There are some corner cases due to Javascript's questionable heritage from C-style syntax. For example, most constructs take either syntax blocks or semicolon-delimited statements. 
Ideally, else, while, and catch are associated with their containing if, do, and try blocks, respectively. This can be done easily, as the syntax is folded right-to-left. Another corner case would come up if there were any binary operators with equal precedence and different associativity. Javascript doesn't have them however, and it wouldn't make much sense to; it would render expressions such as 'a op1 b op2 c' ambiguous if op1 and op2 shared precedence but each wanted to bind first. (I mention this because at first I was worried about it, but now I realize it isn't an issue.) Notationally (for easier processing later on), a distinction is made between invocation and grouping, and between dereferencing and array literals. Dereferencing and function invocation are placed into their own operators, where the left-hand side is the thing being invoked or dereferenced and the right-hand side is the paren-group or bracket-group that is responsible for the operation. Also, commas inside these groups are flattened into a single variadic (possibly nullary) comma node so that you don't have to worry about the tree structure. This is the case for all left-associative operators; right-associative operators preserve their hierarchical folding. Parse/lex shared logic. Lexing Javascript is not entirely straightforward, primarily because of regular expression literals. The first implementation of the lexer got things right 99% of the time by inferring the role of a / by its preceding token. The problem comes in when you have a case like this: | if (condition) /foo/.test(x) In this case, (condition) will be incorrectly inferred to be a regular expression (since the close-paren terminates an expression, usually), and /foo/ will be interpreted as division by foo. We mark the position before a token and then just increment the position. The token, then, can be retrieved by taking a substring from the mark to the position. This eliminates the need for intermediate concatenations. In a couple of cases I've gone ahead and done them anyway -- these are for operators, where we grab the longest contiguous substring that is defined. I'm not too worried about the O(n^2) complexity due to concatenation; they're bounded by four characters. OK, so why use charAt() instead of regular expressions? It's a matter of asymptotic performance. V8 implements great regular expressions (O(1) in the match length for the (.*)$ pattern), but the substring() method is O(n) in the number of characters returned. Firefox implements O(1) substring() but O(n) regular expression matching. Since there are O(n) tokens per document of n characters, any O(n) step makes lexing quadratic. So I have to use the only reliably constant-time method provided by strings, charAt() (or in this case, charCodeAt()). Of course, building strings via concatenation is also O(n^2), so I also avoid that for any strings that could be long. This is achieved by using a mark to indicate where the substring begins, and advancing i independently. The span between mark and i is the substring that will be selected, and since each substring both requires O(n) time and consumes n characters, the lexer as a whole is O(n). (Though perhaps with a large constant.) Parse function. As mentioned earlier, the parser and lexer aren't distinct. The lexer does most of the heavy lifting; it matches parens and brackets, arranges tokens into a hierarchical linked list, and provides an index of those tokens by their fold order. 
It does all of this by streaming tokens into a micro-parser whose language is grouping and that knows about the oddities required to handle regular expression cases. In the same function, though as a distinct case, the operators are folded and the syntax is compiled into a coherent tree form. The input to the parse function can be anything whose toString() produces valid Javascript code. caterwaul_global.parse = function (input) { Lex variables. s, obviously, is the string being lexed. mark indicates the position of the stream, while i is used for lookahead. The difference is later read into a token and pushed onto the result. c is a temporary value used to store the current character code. re is true iff a slash would begin a regular expression. esc is a flag indicating whether the next character in a string or regular expression literal is escaped. exp indicates whether we've seen the exponent marker in a number. close is used for parsing single and double quoted strings; it contains the character code of the closing quotation mark. t is the token to be processed. Parse variables. grouping_stack and gs_top are used for paren/brace/etc. matching. head and parent mark two locations in the linked syntax tree; when a new group is created, parent points to the opener (i.e. (, [, ?, or {), while head points to the most recently added child. (Hence the somewhat complex logic in push().) indexes[] determines reduction order, and contains references to the nodes in the order in which they should be folded. invocation_nodes is an index of the nodes that will later need to be flattened. The push() function manages the mechanics of adding a node to the initial linked structure. There are a few cases here; one is when we've just created a paren group and have no 'head' node; in this case we append the node as 'head'. Another case is when 'head' exists; in that case we update head to be the new node, which gets added as a sibling of the old head. var s = input.toString(), mark = 0, c = 0, re = true, esc = false, dot = false, exp = false, close = 0, t = '', i = 0, l = s.length, cs = function (i) {return s.charCodeAt(i)}, grouping_stack = [], gs_top = null, head = null, parent = null, indexes = map(function () {return []}, parse_reduce_order), invocation_nodes = [], all_nodes = [empty], new_node = function (n) {return all_nodes.push(n), n}, push = function (n) {return head ? head._sibling(head = n) : (head = n._append_to(parent)), new_node(n)}, syntax_node = this.syntax; Main lex loop. This loop takes care of reading all of the tokens in the input stream. At the end, we'll have a linked node structure with paren groups. At the beginning, we set the mark to the current position (we'll be incrementing i as we read characters), munch whitespace, and reset flags. while ((mark = i) < l) { while (lex_space[c = cs(i)] && i < l) mark = ++i; esc = exp = dot = t = false; Miscellaneous lexing. This includes bracket resetting (the top case, where an open-bracket of any sort triggers regexp mode) and comment removal. Both line and block comments are removed by comparing against lex_slash, which represents /, and lex_star, which represents *. if (lex_bracket[c]) {t = !! ++i; re = lex_opener[c]} else if (c === lex_slash && cs(i + 1) === lex_star && (i += 2)) {while (++i < l && cs(i) !== lex_slash || cs(i - 1) !== lex_star); t = ! ++i} else if (c === lex_slash && cs(i + 1) === lex_slash) {while (++i < l && ! lex_eol[cs(i)]); t = false} Regexp and string literal lexing. These both take more or less the same form. 
The idea is that we have an opening delimiter, which can be ", ', or /; and we look for a closing delimiter that follows. It is syntactically illegal for a string to occur anywhere that a slash would indicate division (and it is also illegal to follow a string literal with extra characters), so reusing the regular expression logic for strings is not a problem. (This follows because we know ahead of time that the Javascript is valid.) else if (lex_quote[c] && (close = c) && re && ! (re = ! (t = s.charAt(i)))) {while (++i < l && (c = cs(i)) !== close || esc) esc = ! esc && c === lex_back; while (++i < l && lex_regexp_suffix[cs(i)]) ; t = true} Numeric literal lexing. This is far more complex than the above cases. Numbers have several different formats, each of which requires some custom logic. The reason we need to parse numbers so exactly is that it influences how the rest of the stream is lexed. One example is '0.5.toString()', which is perfectly valid Javascript. What must be output here, though, is '0.5', '.', 'toString', '(', ')'; so we have to keep track of the fact that we've seen one dot and stop lexing the number on the second. Another case is exponent-notation: 3.0e10. The hard part here is that it's legal to put a + or - on the exponent, which normally terminates a number. Luckily we can safely skip over any character that comes directly after an E or e (so long as we're really in exponent mode, which I'll get to momentarily), since there must be at least one digit after an exponent. The final case, which restricts the logic somewhat, is hexadecimal numbers. These also contain the characters 'e' and 'E', but we cannot safely skip over the following character, and any decimal point terminates the number (since '0x5.toString()' is also valid Javascript). The same follows for octal numbers; the leading zero indicates that there will be no decimal point, which changes the lex mode (for example, '0644.toString()' is valid). So, all this said, there are different logic branches here. One handles guaranteed integer cases such as hex/octal, and the other handles regular numbers. The first branch is triggered whenever a number starts with zero and is followed by 'x' or a digit (for conciseness I call 'x' a digit), and the second case is triggered when '.' is followed by a digit, or when a digit starts. A trivial change, using regular expressions, would reduce this logic significantly. I chose to write it out longhand because (1) it's more fun that way, and (2) the regular expression approach has theoretically quadratic time in the length of the numbers, whereas this approach keeps things linear. Whether or not that actually makes a difference I have no idea. Finally, in response to a recently discovered failure case, a period must be followed by a digit if it starts a number. The failure is the string '.end', which will be lexed as '.en', 'd' if it is assumed to be a floating-point number. (In fact, any method or property beginning with 'e' will cause this problem.) else if (c === lex_zero && lex_integer[cs(i + 1)]) {while (++i < l && lex_integer[cs(i)]); re = ! (t = true)} else if (lex_float[c] && (c !== lex_dot || lex_decimal[cs(i + 1)])) {while (++i < l && (lex_decimal[c = cs(i)] || (dot ^ (dot |= c === lex_dot)) || (exp ^ (exp |= lex_exp[c] && ++i)))); while (i < l && lex_decimal[cs(i)]) ++i; re = ! (t = true)} Operator lexing. The 're' flag is reused here. 
Some operators have both unary and binary modes, and as a heuristic (which happens to be accurate) we can assume that anytime we expect a regular expression, a unary operator is intended. The only exception are ++ and --, which are always unary but sometimes are prefix and other times are postfix. If re is true, then the prefix form is intended; otherwise, it is postfix. For this reason I've listed both '++' and 'u++' (same for --) in the operator tables; the lexer is actually doing more than its job here by identifying the variants of these operators. The only exception to the regular logic happens if the operator is postfix-unary. (e.g. ++, --.) If so, then the re flag must remain false, since expressions like 'x++ / 4' can be valid. else if (lex_punct[c] && (t = re ? 'u' : '', re = true)) {while (i < l && lex_punct[cs(i)] && has(lex_op, t + s.charAt(i))) t += s.charAt(i++); re = ! has(lex_postfix_unary, t)} Identifier lexing. If nothing else matches, then the token is lexed as a regular identifier or Javascript keyword. The 're' flag is set depending on whether the keyword expects a value. The nuance here is that you could write 'x / 5', and it is obvious that the / means division. But if you wrote 'return / 5', the / would be a regexp delimiter because return is an operator, not a value. So at the very end, in addition to assigning t, we also set the re flag if the word turns out to be an operator. else {while (++i < l && lex_ident[cs(i)]); re = has(lex_op, t = s.substring(mark, i))} Token unification. t will contain true, false, or a string. If false, no token was lexed; this happens when we read a comment, for example. If true, the substring method should be used. (It's a shorthand to avoid duplicated logic.) For reasons that are not entirely intuitive, the lexer sometimes produces the artifact 'u;'. This is never useful, so I have a case dedicated to removing it. if (i === mark) throw new Error('Caterwaul lex error at "' + s.substr(mark, 40) + '" with leading context "' + s.substr(mark - 40, 40) + '" (probably a Caterwaul bug)'); if (t === false) continue; t = t === true ? s.substring(mark, i) : t === 'u;' ? ';' : t; Grouping and operator indexing. Now that we have a token, we need to see whether it affects grouping status. There are a couple of possibilities. If it's an opener, then we create a new group; if it's a matching closer then we close the current group and pop out one layer. (We don't check for matching here. Any code provided to Caterwaul will already have been parsed by the host Javascript interpreter, so we know that it is valid.) All operator indexing is done uniformly, left-to-right. Note that the indexing isn't strictly by operator. It's by reduction order, which is arguably more important. That's what the parse_inverse_order table does: it maps operator names to parse_reduce_order subscripts. (e.g. 'new' -> 2.) t === gs_top ? (grouping_stack.pop(), gs_top = grouping_stack[grouping_stack.length - 1], head = head ? head.p : parent, parent = null) : (has(parse_group, t) ? (grouping_stack.push(gs_top = parse_group[t]), parent = push(new_node(new syntax_node(t))), head = null) : push(new_node(new syntax_node(t))), has(parse_inverse_order, t) && indexes[parse_inverse_order[t]].push(head || parent)); // <- This is where the indexing happens Regexp flag special cases. Normally a () group wraps an expression, so a following / would indicate division. 
The only exception to this is when we have a block construct; in this case, the next token appears in statement-mode, which means that it begins, not modifies, a value. We'll know that we have such a case if (1) the immediately-preceding token is a close-paren, and (2) a block-accepting syntactic form occurs to its left. With all this trouble over regular expressions, I had to wonder whether it was possible to do it more cleanly. I don't think it is, unfortunately. Even lexing the stream backwards fails to resolve the ambiguity: | for (var k in foo) /foo/g.test(k) && bar(); In this case we won't know it's a regexp until we hit the 'for' keyword (or perhaps 'var', if we're being clever -- but a 'with' or 'if' would require complete lookahead). A perfectly valid alternative parse, minus the 'for' and 'var', is this: | ((k in foo) / (foo) / (g.test(k))) && bar(); The only case where reverse-lexing is useful is when the regexp has no modifiers. re |= t === ')' && head.l && has(parse_r_until_block, head.l.data)} Operator fold loop. This is the second major part of the parser. Now that we've completed the lex process, we can fold operators and syntax, and take care of some exception cases. First step: functions, calls, dots, and dereferences. I'm treating this differently from the generalized operator folding because of the syntactic inference required for call and dereference detection. Nothing has been folded at this point (with the exception of paren groups, which is appropriate), so if the node to the left of any ( or [ group is an operator, then the ( or [ is really a paren group or array literal. If, on the other hand, it is another value, then the group is a function call or a dereference. This folding goes left-to-right. The reason we also process dot operators is that they share the same precedence as calls and dereferences. Here's what a () or [] transform looks like: | quux <--> foo <--> ( <--> bar quux <--> () <--> bar \ / \ <-- This can be done by saying _.l.wrap(new node('()')).p.fold_r(). bif <--> , <--> baz --> foo ( _.l.wrap() returns l again, .p gets the wrapping node, and fold_r adds a child to it. \ bif <--> , <--> baz This is actually merged into the for loop below, even though it happens before other steps do (see 'Ambiguous parse groups'). Second step: fold operators. Now we can go through the list of operators, folding each according to precedence and associativity. Highest to lowest precedence here, which is just going forwards through the indexes[] array. The parse_index_forward[] array indicates which indexes should be run left-to-right and which should go right-to-left. for (var i = 0, l = indexes.length, forward, _; _ = indexes[i], forward = parse_index_forward[i], i < l; ++i) for (var j = forward ? 0 : _.length - 1, lj = _.length, inc = forward ? 1 : -1, node, data; node = _[j], data = node && node.data, forward ? j < lj : j >= 0; j += inc) Binary node behavior. The most common behavior is binary binding. This is the usual case for operators such as '+' or ',' -- they grab one or both of their immediate siblings regardless of what they are. Operators in this class are considered to be 'fold_lr'; that is, they fold first their left sibling, then their right. if (has(parse_lr, data)) node._fold_lr(); Ambiguous parse groups. As mentioned above, we need to determine whether grouping constructs are invocations or real groups. 
This happens to take place before other operators are parsed (which is good -- that way it reflects the precedence of dereferencing and invocation). The only change we need to make is to discard the explicit parenthetical or square-bracket grouping for invocations or dereferences, respectively. It doesn't make much sense to have a doubly-nested structure, where we have a node for invocation and another for the group on the right-hand side of that invocation. Better is to modify the group in-place to represent an invocation. We can't solve this problem here, but we can solve it after the parse has finished. I'm pushing these invocation nodes onto an index for the end. else if (has(parse_ambiguous_group, data) && node.l && (node.l.data === '.' || ! (has(lex_op, node.l.data) || has(parse_not_a_value, node.l.data)))) invocation_nodes.push(node.l._wrap(new_node(new syntax_node(data + parse_group[data]))).p._fold_r()); Unary left and right-fold behavior. Unary nodes have different fold directions. In this case, it just determines which side we grab the node from. I'm glad that Javascript doesn't allow stuff like '++x++', which would make the logic here actually matter. Because there isn't that pathological case, exact rigidity isn't required. else if (has(parse_l, data)) node._fold_l(); else if (has(parse_r, data)) node._fold_r(); Ternary operator behavior. This is kind of interesting. If we have a ternary operator, then it will be treated first as a group; just like parentheses, for example. This is the case because the ternary syntax is unambiguous for things in the middle. So, for example, '3 ? 4 : 5' initially parses out as a '?' node whose child is '4'. Its siblings are '3' and '5', so folding left and right is an obvious requirement. The only problem is that the children will be in the wrong order. Instead of (3) (4) (5), we'll have (4) (3) (5). So after folding, we do a quick swap of the first two to set the ordering straight. else if (has(parse_ternary, data)) {node._fold_lr(); var temp = node[1]; node[1] = node[0]; node[0] = temp} Grab-until-block behavior. Not quite as simple as it sounds. This is used for constructs such as 'if', 'function', etc. Each of these constructs takes the form ' [identifier] () {}', but they can also have variants that include ' () {}', ' () statement;', and most problematically ' () ;'. Some of these constructs also have optional child components; for example, 'if () {} else {}' should be represented by an 'if' whose children are '()', '{}', and 'else' (whose child is '{}'). The tricky part is that 'if' doesn't accept another 'if' as a child (e.g. 'if () {} if () {}'), nor does it accept 'for' or any number of other things. This discrimination is encoded in the parse_accepts table. There are some weird edge cases, as always. The most notable is what happens when we have nesting without blocks: | if (foo) bar; else bif; In this case we want to preserve the semicolon on the 'then' block -- that is, 'bar;' should be its child; so the semicolon is required. But the 'bif' in the 'else' case shouldn't have a semicolon, since that separates top-level statements. Because desperate situations call for desperate measures, there's a hack specifically for this in the syntax tree serialization. One more thing. Firefox rewrites syntax trees, and one of the optimizations it performs on object literals is removing quotation marks from regular words. This means that it will take the object {'if': 4, 'for': 1, etc.} and render it as {if: 4, for: 1, etc.}. 
As you can imagine, this becomes a big problem as soon as the word 'function' is present in an object literal. To prevent this from causing problems, I only collapse a node if it is not followed by a colon. (And the only case where any of these would legally be followed by a colon is as an object key.) else if (has(parse_r_until_block, data) && node.r && node.r.data !== ':') {for (var count = 0, limit = parse_r_until_block[data]; count < limit && node.r && ! has(parse_block, node.r.data); ++count) node._fold_r(); node.r && node.r.data !== ';' && node._fold_r(); if (has(parse_accepts, data) && parse_accepts[data] === (node.r && node.r.r && node.r.r.data)) node._fold_r().pop()._fold_r(); else if (has(parse_accepts, data) && parse_accepts[data] === (node.r && node.r.data)) node._fold_r()} Optional right-fold behavior. The return, throw, break, and continue keywords can each optionally take an expression. If the token to the right is an expression, then we take it, but if the token to the right is a semicolon then the keyword should be nullary. else if (has(parse_r_optional, data)) node.r && node.r.data !== ';' && node._fold_r(); Third step. Find all elements with right-pointers and wrap them with semicolon nodes. This is necessary because certain constructs at the statement level don't use semicolons; they use brace syntax instead. (e.g. 'if (foo) {bar} baz()' is valid, even though no semicolon precedes 'baz()'.) By this point everything else will already be folded. Note that this does some weird things to associativity; in general, you can't make assumptions about the exact layout of semicolon nodes. Fortunately semicolon is associative, so it doesn't matter in practice. And just in case, these nodes are 'i;' rather than ';', meaning 'inferred semicolon' -- that way it's clear that they aren't original. (They also won't appear when you call toString() on the syntax tree.) for (var i = all_nodes.length - 1, _; i >= 0; --i) (_ = all_nodes[i]).r && _._wrap(new_node(new syntax_node('i;'))).p._fold_r(); Fourth step. Flatten out all of the invocation nodes. As explained earlier, they are nested such that the useful data on the right is two levels down. We need to grab the grouping construct on the right-hand side and remove it so that only the invocation or dereference node exists. During the parse phase we built an index of all of these invocation nodes, so we can iterate through just those now. I'm preserving the 'p' pointers, though they're probably not useful beyond here. for (var i = 0, l = invocation_nodes.length, _, child; i < l; ++i) (child = (_ = invocation_nodes[i])[1] = _[1][0] || empty) && (child.p = _); while (head.p) head = head.p; Fifth step. Prevent a space leak by clearing out all of the 'p', 'l', and 'r' pointers. for (var i = all_nodes.length - 1, _; i >= 0; --i) delete (_ = all_nodes[i]).p, delete _.l, delete _.r; return head}; __ meta::sdoc('js::core/caterwaul.parser-data', <<'__'); Shared parser data. This data is used both for parsing and for serialization, so it's made available to all pieces of caterwaul. Precomputed table values. The lexer uses several character lookups, which I've optimized by using integer->boolean arrays. The idea is that instead of using string membership checking or a hash lookup, we use the character codes and index into a numerical array. This is guaranteed to be O(1) for any sensible implementation, and is probably the fastest JS way we can do this. For space efficiency, only the low 256 characters are indexed.
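As a standalone illustration of the technique (this is not the lex_table defined below, just the same idea in miniature), each membership test becomes a single array read: | var char_table = function (s) {
    for (var xs = [], i = 0; i < 256; ++i) xs.push(false);
    for (var i = 0, l = s.length; i < l; ++i) xs[s.charCodeAt(i)] = true;
    return xs};
  var digit = char_table('0123456789');
  digit['7'.charCodeAt(0)];   // true
  digit['a'.charCodeAt(0)];   // false -- one array read; no regexp, string scan, or hash lookup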
High characters will trigger sparse arrays, which may degrade performance. Also, this parser doesn't handle Unicode characters properly; it assumes lower ASCII only. The lex_op table indicates which elements trigger regular expression mode. Elements that trigger this mode cause a following / to delimit a regular expression, whereas other elements would cause a following / to indicate division. By the way, the operator ! must be in the table even though it is never used. The reason is that it is a substring of !==; without it, !== would fail to parse. var lex_op = hash('. new ++ -- u++ u-- u+ u- typeof u~ u! ! * / % + - << >> >>> < > <= >= instanceof in == != === !== & ^ | && || ? = += -= *= /= %= &= |= ^= <<= >>= >>>= : , ' + 'return throw case var const break continue void else u; ;'), lex_table = function (s) {for (var i = 0, xs = [false]; i < 8; ++i) xs.push.apply(xs, xs); for (var i = 0, l = s.length; i < l; ++i) xs[s.charCodeAt(i)] = true; return xs}, lex_float = lex_table('.0123456789'), lex_decimal = lex_table('0123456789'), lex_integer = lex_table('0123456789abcdefABCDEFx'), lex_exp = lex_table('eE'), lex_space = lex_table(' \n\r\t'), lex_bracket = lex_table('()[]{}'), lex_opener = lex_table('([{'), lex_punct = lex_table('+-*/%&|^!~=<>?:;.,'), lex_eol = lex_table('\n\r'), lex_regexp_suffix = lex_table('gims'), lex_quote = lex_table('\'"/'), lex_slash = '/'.charCodeAt(0), lex_star = '*'.charCodeAt(0), lex_back = '\\'.charCodeAt(0), lex_x = 'x'.charCodeAt(0), lex_dot = '.'.charCodeAt(0), lex_zero = '0'.charCodeAt(0), lex_postfix_unary = hash('++ --'), lex_ident = lex_table('$_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'), Parse data. The lexer and parser aren't entirely separate, nor can they be considering the complexity of Javascript's grammar. The lexer ends up grouping parens and identifying block constructs such as 'if', 'for', 'while', and 'with'. The parser then folds operators and ends by folding these block-level constructs. parse_reduce_order = map(hash, ['function', '( [ . [] ()', 'new delete', 'u++ u-- ++ -- typeof u~ u! u+ u-', '* / %', '+ -', '<< >> >>>', '< > <= >= instanceof in', '== != === !==', '&', '^', '|', '&&', '||', 'case', '?', '= += -= *= /= %= &= |= ^= <<= >>= >>>=', ':', ',', 'return throw break continue void', 'var const', 'if else try catch finally for switch with while do', ';']), parse_associates_right = hash('= += -= *= /= %= &= ^= |= <<= >>= >>>= ~ ! new typeof u+ u- -- ++ u-- u++ ? if else function try catch finally for switch case with while do'), parse_inverse_order = (function (xs) {for (var o = {}, i = 0, l = xs.length; i < l; ++i) for (var k in xs[i]) has(xs[i], k) && (o[k] = i); return annotate_keys(o)})(parse_reduce_order), parse_index_forward = (function (rs) {for (var xs = [], i = 0, l = rs.length, _ = null; _ = rs[i], xs[i] = true, i < l; ++i) for (var k in _) if (has(_, k) && (xs[i] = xs[i] && ! has(parse_associates_right, k))) break; return xs})(parse_reduce_order), parse_lr = hash('[] . () * / % + - << >> >>> < > <= >= instanceof in == != === !== & ^ | && || = += -= *= /= %= &= |= ^= <<= >>= >>>= , : ;'), parse_r_until_block = annotate_keys({'function':2, 'if':1, 'do':1, 'catch':1, 'try':1, 'for':1, 'while':1, 'with':1, 'switch':1}), parse_accepts = annotate_keys({'if':'else', 'do':'while', 'catch':'finally', 'try':'catch'}), parse_invocation = hash('[] ()'), parse_r_optional = hash('return throw break continue else'), parse_r = hash('u+ u- u! 
u~ u++ u-- new typeof finally case var const void delete'), parse_block = hash('; {'), parse_invisible = hash('i;'), parse_l = hash('++ --'), parse_group = annotate_keys({'(':')', '[':']', '{':'}', '?':':'}), parse_ambiguous_group = hash('[ ('), parse_ternary = hash('?'), parse_not_a_value = hash('function if for while catch'), parse_also_expression = hash('function'); __ meta::sdoc('js::core/caterwaul.precompile-support', <<'__'); Precompilation support. This makes caterwaul precompilation-aware ahead of time. I'm doing this so that you can precompile caterwaul itself, which used to be responsible for a fair amount of loading time. var precompiled_internal_table = {}; caterwaul_global.precompiled_internal = function (f) {var k = gensym(); return precompiled_internal_table[k] = f, k}; caterwaul_global.is_precompiled = function (f) {return f.constructor === String && precompiled_internal_table[f]}; __ meta::sdoc('js::core/caterwaul.tree', <<'__'); Syntax data structures. There are two data structures used for syntax trees. At first, paren-groups are linked into doubly-linked lists, described below. These are then folded into immutable array-based specific nodes. At the end of folding there is only one child per paren-group. Doubly-linked paren-group lists. When the token stream is grouped into paren groups it has a hierarchical linked structure that conceptually has these pointers: | +--------+ +------ | node | ------+ | +-> | | <--+ | first | | +--------+ | | last | | parent parent | | V | | V +--------+ +--------+ | node | --- r --> | node | --- r ---/ /--- l --- | | <-- l --- | | +--------+ +--------+ The primary operation performed on this tree, at least initially, is repeated folding. So we have a chain of linear nodes, and one by one certain nodes fold their siblings underneath them, breaking the children's links and linking instead to the siblings' neighbors. For example, if we fold node (3) as a binary operator: | (1) <-> (2) <-> (3) <-> (4) <-> (5) (1) <--> (3) <--> (5) / \ / \ / \ / \ / \ --> / \ / \ / \ / \ (2) (4) <- No link between children / \ / \ (see 'Fold nodes', below) Fold nodes. Once a node has been folded (e.g. (3) in the diagram above), none of its children will change and it will gain no more children. The fact that none of its children will change can be shown inductively: suppose you've decided to fold the '+' in 'x + y' (here x and y are arbitrary expressions). This means that x and y are comprised of higher-precedence operators. Since there is no second pass back to high-precedence operators, x and y will not change nor will they interact with one another. The fact that a folded node never gains more children arrives from the fact that it is folded only once; this is by virtue of folding by index instead of by tree structure. (Though a good tree traversal algorithm also wouldn't hit the same node twice -- it's just less obvious when the tree is changing.) Anyway, the important thing about fold nodes is that their children don't change. This means that an array is a completely reasonable data structure to use for the children; it certainly makes the structure simpler. It also means that the only new links that must be added to nodes as they are folded are links to new children (via the array), and links to the new siblings. Once we have the array-form of fold nodes, we can build a query interface similar to jQuery, but designed for syntactic traversal. This will make routine operations such as macro transformation and quasiquoting far simpler later on. 
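Here is the fold operation in miniature, using a throwaway node representation rather than the real syntax nodes; the point is just that a fold moves the two siblings into a child array and splices the operator into their place in the linked list: | var node    = function (data) {return {data: data, children: [], l: null, r: null}};
  var link    = function (a, b) {a.r = b; b.l = a; return b};
  var fold_lr = function (n) {
    var l = n.l, r = n.r;
    n.children.push(l, r);               // siblings become children
    n.l = l.l; if (l.l) l.l.r = n;       // splice n into l's old position
    n.r = r.r; if (r.r) r.r.l = n;       // splice n into r's old position
    l.l = l.r = r.l = r.r = null;        // folded children lose their sibling links
    return n};
  var x = node('x'), plus = node('+'), y = node('y');
  link(x, plus); link(plus, y);
  fold_lr(plus);                         // plus.children is [x, y]; plus.l and plus.r are now null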
Both grouping and fold nodes are represented by the same data structure. In the case of grouping, the 'first' pointer is encoded as [0] -- that is, the first array element. It doesn't contain pointers to siblings of [0]; these are still accessed by their 'l' and 'r' pointers. As the structure is folded, the number of children of each paren group should be reduced to just one. At this point the remaining element's 'l' and 'r' pointers will both be null, which means that it is in hierarchical form instead of linked form. After the tree has been fully generated and we have the root node, we have no further use for the parent pointers. This means that we can use subtree sharing to save memory. Once we're past the fold stage, push() should be used instead of append(). append() works in a bidirectionally-linked tree context (much like the HTML DOM), whereas push() works like it does for arrays (i.e. no parent pointer). Syntax node functions. These functions are common to various pieces of syntax nodes. Not all of them will always make sense, but the prototypes of the constructors can be modified independently later on if it turns out to be an issue. var syntax_common = caterwaul_global.syntax_common = { Mutability. These functions let you modify nodes in-place. They're used during syntax folding and shouldn't really be used after that (hence the underscores). _replace: function (n) {return (n.l = this.l) && (this.l.r = n), (n.r = this.r) && (this.r.l = n), this}, _append_to: function (n) {return n && n._append(this), this}, _reparent: function (n) {return this.p && this.p[0] === this && (this.p[0] = n), this}, _fold_l: function (n) {return this._append(this.l && this.l._unlink(this) || empty)}, _append: function (n) {return (this[this.length++] = n) && (n.p = this), this}, _fold_r: function (n) {return this._append(this.r && this.r._unlink(this) || empty)}, _sibling: function (n) {return n.p = this.p, (this.r = n).l = this}, _fold_lr: function () {return this._fold_l()._fold_r()}, _fold_rr: function () {return this._fold_r()._fold_r()}, _wrap: function (n) {return n.p = this._replace(n).p, this._reparent(n), delete this.l, delete this.r, this._append_to(n)}, _unlink: function (n) {return this.l && (this.l.r = this.r), this.r && (this.r.l = this.l), delete this.l, delete this.r, this._reparent(n)}, These methods are OK for use after the syntax folding stage is over (though because syntax nodes are shared it's generally dangerous to go modifying them): pop: function () {return --this.length, this}, push: function (x) {return this[this.length++] = x || empty, this}, Identification. You can request that a syntax node identify itself, in which case it will give you an identifier if it hasn't already. The identity is not determined until the first time it is requested, and after that it is stable. As of Caterwaul 0.7.0 the mechanism works differently (i.e. isn't borked) in that it replaces the prototype definition with an instance-specific closure the first time it gets called. This may reduce the number of decisions in the case that the node's ID has already been computed. id: function () {var id = gensym(); return (this.id = function () {return id})()}, Traversal functions. each() is the usual side-effecting shallow traversal that returns 'this'. map() distributes a function over a node's children and returns the array of results, also as usual. Two variants, reach and rmap, perform the process recursively. reach is non-consing; it returns the original as a reference. 
rmap, on the other hand, follows some rules to cons a new tree. If the function passed to rmap() returns the node verbatim then its children are traversed. If it returns a distinct node, however, then traversal doesn't descend into the children of the newly returned tree but rather continues as if the original node had been a leaf. For example: | parent Let's suppose that a function f() has these mappings: / \ node1 node2 f(parent) = parent f(node1) = q / \ | f(node2) = node2 c1 c2 c3 In this example, f() would be called on parent, node1, node2, and c3 in that order. c1 and c2 are omitted because node1 was replaced by q -- and there is hardly any point in going through the replaced node's previous children. (Nor is there much point in forcibly iterating over the new node's children, since presumably they are already processed.) If a mapping function returns something falsy, it will have exactly the same effect as returning the node without modification. Using the old s() to do gensym-safe replacement requires that you invoke it only once, and this means that for complex macroexpansion you'll have a long array of values. This isn't ideal, so syntax trees provide a replace() function that handles replacement more gracefully: | qs[(foo(_foo), _before_bar + bar(_bar))].replace({_foo: qs[x], _before_bar: qs[3 + 5], _bar: qs[foo.bar]}) each: function (f) {for (var i = 0, l = this.length; i < l; ++i) f(this[i], i); return this}, map: function (f) {for (var n = new this.constructor(this), i = 0, l = this.length; i < l; ++i) n.push(f(this[i], i) || this[i]); return n}, reach: function (f) {f(this); this.each(function (n) {n.reach(f)}); return this}, rmap: function (f) {var r = f(this); return ! r || r === this ? this.map(function (n) {return n.rmap(f)}) : r.rmap === undefined ? new this.constructor(r) : r}, clone: function () {return this.rmap(function () {return false})}, collect: function (p) {var ns = []; this.reach(function (n) {p(n) && ns.push(n)}); return ns}, replace: function (rs) {return this.rmap(function (n) {if (! own.call(rs, n.data)) return n; var replacement = rs[n.data]; return replacement && replacement.constructor === String ? new n.constructor(replacement, Array.prototype.slice.call(n)) : replacement})}, Alteration. These functions let you make "changes" to a node by returning a modified copy. repopulated_with: function (xs) {return new this.constructor(this.data, xs)}, change: function (i, x) {return se(new this.constructor(this.data, Array.prototype.slice.call(this)), function (n) {n[i] = x})}, compose_single: function (i, f) {return this.change(i, f(this[i]))}, General-purpose traversal. This is a SAX-style traversal model, useful for analytical or scope-oriented tree traversal. You specify a callback function that is invoked in pre-post-order on the tree (you get events for entering and exiting each node, including leaves). Each time a node is entered, the callback is invoked with an object of the form {entering: node}, where 'node' is the syntax node being entered. Each time a node is left, the callback is invoked with an object of the form {exiting: node}. The return value of the function is not used. Any null nodes are not traversed, since they would fail any standard truthiness tests for 'entering' or 'exiting'. I used to have a method to perform scope-annotated traversal, but I removed it for two reasons. First, I had no use for it (and no tests, so I had no reason to believe that it worked). Second, Caterwaul is too low-level to need such a method. 
That would be more appropriate for an analysis extension. traverse: function (f) {f({entering: this}); f({exiting: this.each(function (n) {n.traverse(f)})}); return this}, Structural transformation. Having nested syntax trees can be troublesome. For example, suppose you're writing a macro that needs a comma-separated list of terms. It's a lot of work to dig through the comma nodes, each of which is binary. Javascript is better suited to using a single comma node with an arbitrary number of children. (This also helps with the syntax tree API -- we can use .map() and .each() much more effectively.) Any binary operator can be transformed this way, and that is exactly what the flatten() method does. (flatten() returns a new tree; it doesn't modify the original.) The tree flattening operation looks like this for a left-associative binary operator: | (+) / \ (+) (+) z -> / | \ / \ x y z x y This flatten() method returns the nodes along the chain of associativity, always from left to right. It is shallow, since generally you only need a localized flat tree. That is, it doesn't descend into the nodes beyond the one specified by the flatten() call. It takes an optional parameter indicating the operator to flatten over; if the operator in the tree differs, then the original node is wrapped in a unary node of the specified operator. The transformation looks like this: | (,) (+) | / \ .flatten(',') -> (+) x y / \ x y Because ',' is a binary operator, a ',' tree with just one operand will be serialized exactly as its lone operand would be. This means that plurality over a binary operator such as comma or semicolon degrades gracefully for the unary case (this sentence makes more sense in the context of macro definitions; see in particular 'let' and 'where' in std.bind). The unflatten() method performs the inverse transformation. It doesn't delete a converted unary operator in the tree case, but if called on a node with more than two children it will nest according to associativity. flatten: function (d) {d = d || this.data; return d !== this.data ? this.as(d) : ! (has(parse_lr, d) && this.length) ? this : has(parse_associates_right, d) ? se(new this.constructor(d), bind(function (n) {for (var i = this; i && i.data === d; i = i[1]) n.push(i[0]); n.push(i)}, this)) : se(new this.constructor(d), bind(function (n) {for (var i = this, ns = []; i.data === d; i = i[0]) i[1] && ns.push(i[1]); ns.push(i); for (i = ns.length - 1; i >= 0; --i) n.push(ns[i])}, this))}, unflatten: function () {var t = this, right = has(parse_associates_right, this.data); return this.length <= 2 ? this : se(new this.constructor(this.data), function (n) { if (right) for (var i = 0, l = t.length - 1; i < l; ++i) n = n.push(t[i]).push(i < l - 2 ? new t.constructor(t.data) : t[i])[1]; else for (var i = t.length - 1; i >= 1; --i) n = n.push(i > 1 ? new t.constructor(t.data) : t[0]).push(t[i])[0]})}, Wrapping. Sometimes you want your syntax tree to have a particular operator, and if it doesn't have that operator you want to wrap it in a node that does. Perhaps the most common case of this is when you have a possibly-plural node representing a variable or expression -- often the case when you're dealing with argument lists -- and you want to be able to assume that it's wrapped in a comma node. Calling node.as(',') will return the node if it's a comma, and will return a new comma node containing the original one if it isn't. as: function (d) {return this.data === d ? this : new this.constructor(d).push(this)}, Type detection and retrieval. 
These methods are used to detect the literal type of a node and to extract that value if it exists. You should use the as_x methods only once you know that the node does represent an x; otherwise you will get misleading results. (For example, calling as_boolean on a non-boolean will always return false.) Other methods are provided to tell you higher-level things about what this node does. For example, is_contextualized_invocation() tells you whether the node represents a call that can't be eta-reduced (if it were, then the 'this' binding would be lost). Wildcards are used for pattern matching and are identified by beginning with an underscore. This is a very frequently-called method, so I'm using a very inexpensive numeric check rather than a string comparison. The ASCII value for underscore is 95. is_string: function () {return /['"]/.test(this.data.charAt(0))}, as_escaped_string: function () {return this.data.substr(1, this.data.length - 2)}, is_number: function () {return /^-?(0x|\d|\.\d+)/.test(this.data)}, as_number: function () {return Number(this.data)}, is_boolean: function () {return this.data === 'true' || this.data === 'false'}, as_boolean: function () {return this.data === 'true'}, is_regexp: function () {return /^\/./.test(this.data)}, as_escaped_regexp: function () {return this.data.substring(1, this.data.lastIndexOf('/'))}, is_wildcard: function () {return this.data.charCodeAt(0) === 95}, has_grouped_block: function () {return has(parse_r_until_block, this.data)}, is_block: function () {return has(parse_block, this.data)}, is_blockless_keyword: function () {return has(parse_r_optional, this.data)}, is_null_or_undefined: function () {return this.data === 'null' || this.data === 'undefined'}, is_constant: function () {return this.is_number() || this.is_string() || this.is_boolean() || this.is_regexp() || this.is_null_or_undefined()}, left_is_lvalue: function () {return /=$/.test(this.data) || /\+\+$/.test(this.data) || /--$/.test(this.data)}, is_empty: function () {return !this.length}, has_parameter_list: function () {return this.data === 'function' || this.data === 'catch'}, has_lvalue_list: function () {return this.data === 'var' || this.data === 'const'}, is_dereference: function () {return this.data === '.' || this.data === '[]'}, is_invocation: function () {return this.data === '()'}, is_contextualized_invocation: function () {return this.is_invocation() && this[0].is_dereference()}, is_invisible: function () {return has(parse_invisible, this.data)}, is_binary_operator: function () {return has(parse_lr, this.data)}, is_prefix_unary_operator: function () {return has(parse_r, this.data)}, is_postfix_unary_operator: function () {return has(parse_l, this.data)}, is_unary_operator: function () {return this.is_prefix_unary_operator() || this.is_postfix_unary_operator()}, accepts: function (e) {return has(parse_accepts, this.data) && parse_accepts[this.data] === (e.data || e)}, Value construction. Syntax nodes sometimes represent hard references to values instead of just syntax. (See 'References' for more information.) In order to compile a syntax tree in the right environment you need a mapping of symbols to these references, which is what the bindings() method returns. (It also collects references for all descendant nodes.) It takes an optional argument to populate, in case you already had a hash set aside for bindings -- though it always returns the hash. A bug in Caterwaul 0.5 and earlier failed to bind falsy values. 
This is no longer the case; nodes which bind values should indicate that they do so by setting a binds_a_value attribute (ref nodes do this on the prototype), indicating that their value should be read from the 'value' property. (This allows other uses of a 'value' property while making it unambiguous whether a particular node intends to bind something.) bindings: function (hash) {var result = hash || {}; this.reach(function (n) {if (n.binds_a_value) result[n.data] = n.value}); return result}, Matching. Any syntax tree can act as a matching pattern to destructure another one. It's often much more fun to do things this way than it is to try to pick it apart by hand. For example, suppose you wanted to determine whether a node represents a function that immediately returns, and to know what it returns. The simplest way to do it is like this: | var tree = ... var match = caterwaul.parse('function (_) {return _value}').match(tree); if (match) { var value = match._value; ... } The second parameter 'variables' stores a running total of match data. You don't provide this; match() creates it for you on the toplevel invocation. The entire original tree is available as a match variable called '_'; for example: t.match(u)._ === u if u matches t. match: function (target, variables) {target = caterwaul_global.ensure_syntax(target); variables || (variables = {_: target}); if (this.is_wildcard()) return variables[this.data] = target, variables; else if (this.length === target.length && this.data === target.data) {for (var i = 0, l = this.length; i < l; ++i) if (! this[i].match(target[i], variables)) return null; return variables}}, Inspection and syntactic serialization. Syntax nodes can be both inspected (producing a Lisp-like structural representation) and serialized (producing valid Javascript code). Each representation captures stray links via the 'r' pointer. In the serialized representation, it is shown as a comment /* -> */ containing the serialization of whatever is to the right. This has the property that it will break tests but won't necessarily break code (though if it happens in the field then it's certainly a bug). Block detection is required for multi-level if/else statements. Consider this code: | if (foo) for (...) {} else bif; A naive approach (the one I was using before version 0.6) would miss the fact that the 'for' was trailed by a block, and insert a spurious semicolon, which would break compilation: | if (foo) for (...) {}; // <- note! else bif; What we do instead is dig through the tree and find out whether the last thing in the 'if' case ends with a block. If so, then no semicolon is inserted; otherwise we insert one. This algorithm makes serialization technically O(n^2), but nobody nests if/else blocks to such an extent that it would matter. ends_with_block: function () {var block = this[parse_r_until_block[this.data]]; return this.data === '{' || has(parse_r_until_block, this.data) && (this.data !== 'function' || this.length === 3) && block && block.ends_with_block()}, There's a hack here for single-statement if-else statements. (See 'Grab-until-block behavior' in the parsing code below.) Basically, for various reasons the syntax tree won't munch the semicolon and connect it to the expression, so we insert one automatically whenever the second node in an if, else, while, etc. isn't a block. Update for Caterwaul 0.6.6: I had removed mandatory spacing for unary prefix operators, but now it's back. 
The reason is to help out the host Javascript lexer, which can misinterpret postfix increment/decrement: x + +y will be serialized as x++y, which is invalid Javascript. The fix is to introduce a space in front of the second plus: x+ +y, which is unambiguous. Update for caterwaul 1.0: The serialize() method is now aggressively optimized for common cases. It also uses a flattened array-based concatenation strategy rather than the deeply nested approach from before. Optimized serialization cases. We can tell a lot about how to serialize a node based on just a few properties. For example, if the node has zero length then its serialization is simply its data. This is the leaf case, which is likely to be half of the total number of nodes in the whole syntax tree. If a node has length 1, then we assume a prefix operator unless we identify it as postfix. Otherwise we break it down by the kind of operator that it is. Nodes might be flattened, so we can't assume any upper bound on the arity regardless of what kind of operator it is. Realistically you shouldn't hand flattened nodes over to the compile() function, but it isn't the end of the world if you do. toString: function () {var xs = ['']; this.serialize(xs); return xs.join('')}, serialize: function (xs) {var l = this.length, d = this.data, semi = ';\n', push = function (x) {if (lex_ident[xs[xs.length - 1].charCodeAt(0)] === lex_ident[x.charCodeAt(0)]) xs.push(' ', x); else xs.push(x)}; switch (l) {case 0: if (has(parse_r_optional, d)) return push(d.replace(/^u/, '')); else if (has(parse_group, d)) return push(d), push(parse_group[d]); else return push(d); case 1: if (has(parse_r, d) || has(parse_r_optional, d)) return push(d.replace(/^u/, '')), this[0].serialize(xs); else if (has(parse_group, d)) return push(d), this[0].serialize(xs), push(parse_group[d]); else if (has(parse_lr, d)) return push('/* unary ' + d + ' node */'), this[0].serialize(xs); else return this[0].serialize(xs), push(d); case 2: if (has(parse_invocation, d)) return this[0].serialize(xs), push(d.charAt(0)), this[1].serialize(xs), push(d.charAt(1)); else if (has(parse_r_until_block, d)) return push(d), this[0].serialize(xs), this[1].serialize(xs); else if (has(parse_invisible, d)) return this[0].serialize(xs), this[1].serialize(xs); else if (d === ';') return this[0].serialize(xs), push(semi), this[1].serialize(xs); else return this[0].serialize(xs), push(d), this[1].serialize(xs); default: if (has(parse_ternary, d)) return this[0].serialize(xs), push(d), this[1].serialize(xs), push(':'), this[2].serialize(xs); else if (has(parse_r_until_block, d)) return this.accepts(this[2]) && ! this[1].ends_with_block() ? (push(d), this[0].serialize(xs), this[1].serialize(xs), push(semi), this[2].serialize(xs)) : (push(d), this[0].serialize(xs), this[1].serialize(xs), this[2].serialize(xs)); else return this.unflatten().serialize(xs)}}}; References. You can drop references into code that you're compiling. This is basically variable closure, but a bit more fun. For example: | caterwaul.compile(qs[fn_[_ + 1]].replace({_: caterwaul.ref(3)}))() // -> 4 What actually happens is that caterwaul.compile runs through the code replacing refs with gensyms, and the function is evaluated in a scope where those gensyms are bound to the values they represent. This gives you the ability to use a ref even as an lvalue, since it's really just a variable. References are always leaves on the syntax tree, so the prototype has a length of 0. 
caterwaul_global.ref = function (value) {if (value instanceof this.constructor) this.value = value.value, this.data = value.data; else this.value = value, this.data = gensym()}; merge(caterwaul_global.ref.prototype, syntax_common, {binds_a_value: true, length: 0}); Syntax node constructor. Here's where we combine all of the pieces above into a single function with a large prototype. Note that the 'data' property is converted from a variety of types; so far we support strings, numbers, and booleans. Any of these can be added as children. Also, I'm using an instanceof check rather than (.constructor ===) to allow array subclasses such as Caterwaul finite sequences to be used. caterwaul_global.syntax = function (data) {if (data instanceof this.constructor) this.data = data.data, this.length = 0; else {this.data = data && data.toString(); this.length = 0; for (var i = 1, l = arguments.length, _; _ = arguments[i], i < l; ++i) for (var j = 0, lj = _.length, it, c; _ instanceof Array ? (it = _[j], j < lj) : (it = _, ! j); ++j) this._append((c = it.constructor) === String || c === Number || c === Boolean ? new this.constructor(it) : it)}}; merge(caterwaul_global.syntax.prototype, syntax_common); var empty = caterwaul_global.empty = new caterwaul_global.syntax(''); __ meta::sdoc('js::core/caterwaul.utilities', <<'__'); Utility methods. Gensym is used to support qs[]. When we quote syntax, what we really intend to do is grab a syntax tree representing something; this entails creating a let-binding with the already-evaluated tree. (Note: Don't go and modify these qs[]-generated trees; you only get one for each qs[].) The ultimate code ends up looking like this (see 'Environment-dependent compilation' some distance below): | (function (a_gensym) { var v1 = a_gensym.gensym_1; var v2 = a_gensym.gensym_2; ... return ; }) ({gensym_1: v1, gensym_2: v2, ..., gensym_n: vn}); A note about gensym uniqueness. Gensyms are astronomically unlikely to collide, but there are some compromises made to make sure of this. First, gensyms are not predictable; the first one is randomized. This means that if you do have a collision, it may be intermittent (and that is probably a non-feature). Second, and this is a good thing, you can load Caterwaul multiple times without worrying about gensyms colliding between them. Each instance of Caterwaul uses its own system time and random number to seed the gensym generation, and the system time remains stable while the random number gets incremented. It is very unlikely that any collisions would happen. Bind() is the usual 'bind this function to some value' function. The only difference is that it supports rebinding; that is, if you have a function you've already bound to X, you can call bind on that function and some new value Y and get the original function bound to Y. The bound function has two attributes, 'original' and 'binding', that let bind() achieve this rebinding. Map() is an array map function, fairly standard really. I include it because IE doesn't provide Array.prototype.map. hash() takes a string, splits it on whitespace, and returns an object that maps each element to true. It's useful for defining sets. extend() takes a constructor function and zero or more extension objects, merging each extension object into the constructor function's prototype. The constructor function is then returned. It's a shorthand for defining classes. 
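For example, a standalone version of that shorthand looks roughly like this: | var extend = function (ctor) {
    for (var i = 1, l = arguments.length; i < l; ++i)
      for (var k in arguments[i])
        if (Object.prototype.hasOwnProperty.call(arguments[i], k)) ctor.prototype[k] = arguments[i][k];
    return ctor};
  var Point = extend(function (x, y) {this.x = x; this.y = y},
                     {norm: function () {return Math.sqrt(this.x * this.x + this.y * this.y)}});
  new Point(3, 4).norm();    // 5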
Se() stands for 'side-effect', and its purpose is to take a value and a function, pass the value into the function, and return either whatever the function returned or the value you gave it. It's used to initialize things statefully; for example: | return se(function () {return 5}, function (f) { f.sourceCode = 'return 5'; }); var qw = function (x) {return x.split(/\s+/)}, se = function (x, f) {return f && f.call(x, x) || x}, fail = function (m) {throw new Error(m)}, genval = (function (n, m, u) {return function () {return [u, n, ++m]}})(+new Date(), Math.random() * (1 << 30) >>> 0, unique()), gensym = function () {var v = genval(); return ['gensym', v[0].toString(36), v[1].toString(36), v[2].toString(36)].join('_')}, flatten = function () {for (var r = [], i = 0, l = arguments.length, x; i < l; ++i) (x = arguments[i]) instanceof Array ? r = r.concat(flatten.apply(this, x)) : r.push(x); return r}, map = function (f, xs) {for (var i = 0, ys = [], l = xs.length; i < l; ++i) ys.push(f(xs[i], i)); return ys}, rmap = function (f, xs) {return map(function (x) {return x instanceof Array ? rmap(f, x) : f(x)}, xs)}, hash = function (s) {for (var i = 0, xs = qw(s), o = {}, l = xs.length; i < l; ++i) o[xs[i]] = true; return annotate_keys(o)}, merge = function (o) {for (var i = 1, l = arguments.length, _; i < l; ++i) if (_ = arguments[i]) for (var k in _) has(_, k) && (o[k] = _[k]); return o}, Optimizations. The parser and lexer each assume valid input and do no validation. This is possible because any function passed in to caterwaul will already have been parsed by the Javascript interpreter; syntax errors would have caused an error there. This enables a bunch of optimization opportunities in the parser, ultimately making it not in any way recursive and requiring only three linear-time passes over the token stream. (An approximate figure; it actually does about 19 fractional passes, but not all nodes are reached.) Also, I'm not confident that all Javascript interpreters are smart about hash indexing. Particularly, suppose a hashtable has 10 entries, the longest of whose keys is 5 characters. If we throw a 2K string at it, it might very well hash that whole thing just to find that, surprise, the entry doesn't exist. That's a big performance hit if it happens very often. To prevent this kind of thing, I'm keeping track of the longest string in the hashtable by using the 'annotate_keys' function. 'has()' knows how to look up the maximum length of a hashtable to verify that the candidate is in it, resulting in the key lookup being only O(n) in the longest key (generally this ends up being nearly O(1), since I don't like to type long keys), and average-case O(1) regardless of the length of the candidate. As of Caterwaul 0.7.0 the _max_length property has been replaced by a gensym. This basically guarantees uniqueness, so the various hacks associated with working around the existence of the special _max_length key are no longer necessary. max_length_key = gensym(), annotate_keys = function (o) {var max = 0; for (var k in o) own.call(o, k) && (max = k.length > max ? k.length : max); o[max_length_key] = max; return o}, has = function (o, p) {return p != null && ! (p.length > o[max_length_key]) && own.call(o, p)}, own = Object.prototype.hasOwnProperty; __ meta::sdoc('js::core/debug/profile.start', <<'__'); Profiling extensions for performance tuning | Spencer Tipping Licensed under the terms of the MIT source code license This is the code used to start profiling.
You'll also need to include js::core/debug/profile.stop to print the results. var all_profiled_functions = {}; var recursion = {}; Function.prototype.profiled = function (name, no_recursion) { var f = this; recursion[name] || (recursion[name] = no_recursion ? NaN : 0); return function () { var k = name + ' (' + ++recursion[name] + ')'; var data = all_profiled_functions[k] || (all_profiled_functions[k] = {invocations: 0, total: 0, max: 0}); var t1 = +new Date(); var result = f.apply(this, arguments); var time = +new Date() - t1; ++data.invocations; data.total += time; if (recursion[name] > 1) all_profiled_functions[name + ' (' + (recursion[name] - 1) + ')'].total -= time; time > data.max && (data.max = time); --recursion[name]; return result; }; }; __ meta::sdoc('js::core/debug/profile.stop', <<'__'); Profile end code var fs = []; for (var k in all_profiled_functions) all_profiled_functions[k].average = (all_profiled_functions[k].total / all_profiled_functions[k].invocations).toFixed(3), all_profiled_functions[k].name = k, fs.push(all_profiled_functions[k]); fs.sort(function (x, y) {return x.total - y.total}); typeof console === 'undefined' ? print(fs) : console.log(fs); __ meta::sdoc('js::extensions/dev', <<'__'); Caterwaul development extensions | Spencer Tipping Licensed under the terms of the MIT source code license Process extensions. These apply to the development process somehow. Precompilation is here because it's something that you'd do at dev-time (pre-deploy) but not something that you have to include in the standard library. (The core caterwaul support for precompilation is rudimentary and very small.) - pinclude pp::js::extensions/dev/precompile Development tooling. These assist the development process. Tracing is useful for finding bugs (you normally wouldn't use it in production code), and unit testing has obvious benefits. - pinclude pp::js::extensions/dev/trace - pinclude pp::js::extensions/dev/unit __ meta::sdoc('js::extensions/dev/precompile', <<'__'); Caterwaul precompiler | Spencer Tipping Licensed under the terms of the MIT source code license Precompilation logic. Even though Caterwaul operates as a runtime library, most of the time it will be used in a fairly static context. Precompilation can be done to bypass parsing, macroexpansion, and serialization of certain functions, significantly accelerating Caterwaul's loading speed. caterwaul.js_base()(function ($) { Precompiled output format. The goal of precompilation is to produce code whose behavior is identical to the original. Caterwaul can do this by taking a function whose behavior we want to emulate. It then executes the function with an annotated copy of the caterwaul compiler, tracing calls to compile(). It assumes, incorrectly in pathological cases, that the macroexpansion step does not side-effect against caterwaul or other escaping values. If it does, the precompiled code won't reflect those side-effects. (Though local side-effects, like defmacro[], will apply within the scope of the function being compiled -- as they normally would, in other words.) The output function performs side-effects necessary to emulate the behavior of your macroexpanded code. All other behavior performed by the precompiled function will be identical to the original. 
Here's an example of how it is used: | var f = caterwaul.precompile(function () { alert('hi'); caterwaul.js.base(function () { console.log(x /given.y, qs[foo]); })(); return 10; }); After this statement, f.toString() will look something like this (except all mashed together, because Caterwaul doesn't format generated code): | function () { alert('hi'); caterwaul.js.base(caterwaul.precompiled_internal((function () { var gensym_1 = new caterwaul.syntax('foo'); return function () { console.log(function (y) {return x}, gensym_1); })()))(); return 10; } The precompiled_internal() function returns a reference that will inform Caterwaul not to operate on the function in question. You should (almost) never use this method! It will break all kinds of stuff if you artificially mark functions as being precompiled when they are not. There are some very important things to keep in mind when precompiling things: | 1. Precompiling a function executes that function at compile time! This has some important consequences, perhaps most importantly that if you do something global, you could bork your precompiling environment. The other important consequence is that if some code paths aren't run, those paths won't be precompiled. Caterwaul can only precompile paths that it has traced. 2. Precompilation doesn't macroexpand the function being precompiled, even if the caterwaul function performing the precompilation has macros defined. 3. Most syntax tree refs can't be precompiled! If Caterwaul bumps into one it will throw an error. The only refs that it knows how to handle are (1) itself, and (2) references to syntax trees that don't contain other refs. If you want it to handle other refs, you'll need to write a macro that transforms them into something else before the precompiler sees them. (Actually, the standard library contains a fair amount of this kind of thing to avoid this very problem. Instead of using refs to generated values, it installs values onto the caterwaul global and generates indirect references to them.) 4. Caterwaul assumes that compilation is completely deterministic. Any nondeterminism won't be reflected. This generally isn't a problem, it just means that your code may have un-precompiled segments if the precompilation test run didn't cover all of those cases. For most code these concerns won't be a problem at all. But if you're doing anything especially dynamic you might run into one of them. Silliness of runtime precompilation. Obviously it doesn't do much good to precompile stuff at runtime -- the point of precompilation is to save time, but it's too late if the code is already running on an end-user system. Fortunately, precompilation is separable: | // Inside the precompiler: var f = caterwaul.precompile(first_code_chunk); var g = caterwaul.precompile(second_code_chunk); // Later, in end-user code: f(); g(); As a result, individual Javascript files can be precompiled separately, loaded separately, and run in their original order to perform their original behavior (minus pathological caveats above). $.precompile(f) = this.compile(remove_gensyms(traced.references, perform_substitution(traced.references, traced.annotated))) -where[traced = trace_execution(this, f)] -where[ Tracing function destinations. This is more subtle than you might think. We need to construct a custom traced caterwaul function to pass into the function being precompiled. This caterwaul function delegates macroexpansion to the original one but lets us know when anything is compiled. 
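Stripped of the precompiler's bookkeeping, the 'traced caterwaul' idea amounts to something like this sketch (hypothetical variable names; the real wrapped_compile appears below): | var traced = caterwaul.clone();                   // a private copy whose compile() we can override
  var original_compile = traced.compile, compiled_trees = [];
  traced.compile = function (tree, environment) {
    compiled_trees.push(tree);                                 // record each tree that gets compiled...
    return original_compile.call(this, tree, environment)};    // ...then defer to the original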
When a parse() call happens, we'll have a reference to the function being parsed. We can identify which function it came from (in the original syntax tree that is) by marking each of the initial functions with a unique gensym on the end of the parameter list: | function (x, y, z, gensym_foo_bar_bif) {...} This serves as a no-op that lets us track the function from its original parse tree into its final compiled state. Next the function may be macroexpanded. If so, we make sure the gensym tag is on the macroexpanded output (if the output of macroexpansion isn't a function, then it's a side-effect and we can't track it). Finally, the function will be compiled within some environment. This is where we go through the compilation bindings, serializing each one with the function. We then wrap this in an immediately-invoked anonymous function (to create a new scope and to simulate the one created by compile()), and this becomes the output. Note that for these patterns we need to use parse() because Spidermonkey optimizes away non-side-effectful function bodies. nontrivial_function_pattern = $.parse('function (_args) {_body}'), trivial_function_pattern = $.parse('function () {_body}'), nontrivial_function_gensym_template = $.parse('function (_args, _gensym) {_body}'), trivial_function_gensym_template = $.parse('function (_gensym) {_body}'), nontrivial_gensym_detection_pattern = nontrivial_function_gensym_template, trivial_gensym_detection_pattern = trivial_function_gensym_template, annotate_macro_generator(template)(references)(match) = result -effect[references[s] = {tree: result}] -where[s = $.gensym(), result = template.replace({_args: match._args, _gensym: s, _body: annotate_functions_in(match._body, references)})], mark_nontrivial_function_macro = annotate_macro_generator(nontrivial_function_gensym_template), mark_trivial_function_macro = annotate_macro_generator(trivial_function_gensym_template), Macroexpansion for function origins. The function annotation is done by a macro that matches against each embedded function. Only one level of precompilation is applied; if you have invocations of caterwaul from inside transformed functions, these sub-functions won't be identified and thus won't be precompiled. (It's actually impossible to precompile them in the general case since we don't ultimately know what part of the code they came from.) Note that the ordering of trivial and nontrivial cases here is important. Later macros take precedence over earlier ones, so we use the most specific case last and let it fall back to the more generic case. annotate_functions_in(tree, references) = $.macroexpand(tree, $.macro(trivial_function_pattern, mark_trivial_function_macro(references)), $.macro(nontrivial_function_pattern, mark_nontrivial_function_macro(references))), Also, an interesting failure case has to do with duplicate compilation: | var f = function () {...}; caterwaul.tconfiguration('std', 'foo', f); caterwaul.tconfiguration('macro', 'bar', f); In this example, f() will be compiled twice under two different configurations. But because the replacement happens against the original function (!) due to lack of flow analysis, we won't be able to substitute just one new function for the old one. In this case an error is thrown (see below). Compilation wrapper. Functions that get passed into compile() are assumed to be fully macroexpanded. If the function contains a gensym marker that we're familiar with, then we register the compiled function as the final form of the original. 
Once the to-be-compiled function returns, we'll have a complete table of marked functions to be converted. We can then do a final pass over the original source, replacing the un-compiled functions with compiled ones. function_key(tree) = matches._gensym.data -when.matches -where[matches = nontrivial_gensym_detection_pattern.match(tree) || trivial_gensym_detection_pattern .match(tree)], mark_as_compiled(references, k, tree, environment) = references[k] -effect- wobbly[new Error('detected multiple compilations of #{references[k].tree.serialize()}')] /when[references[k].compiled] -effect[references[k].compiled = tree, references[k].environment = environment] -when[k && references[k]], wrapped_compile(original, references)(tree, environment) = original.call(this, tree, environment) -effect- mark_as_compiled(references, function_key(tree), tree, $.merge({}, this._environment || {}, environment)), Generating compiled functions. This involves a few steps, including (1) signaling to the caterwaul function that the function is precompiled and (2) reconstructing the list of syntax refs. Already-compiled signaling. We don't necessarily know /why/ a particular function is being compiled, so it's beyond the scope of this module to try to produce a method call that bypasses this step. Rather, we just inform caterwaul that a function is going to be compiled ahead-of-time, and all caterwaul functions will bypass the compilation step automatically. To do this, we use the dangerous precompiled_internal() method, which returns a placeholder. signal_already_compiled(tree) = qs[caterwaul.precompiled_internal(_x)].replace({_x: tree}), Syntax ref serialization. This is the trickiest part. We have to identify ref nodes whose values we're familiar with and pull them out into their own gensym variables. We then create an anonymous scope for them, along with the compiled function, to simulate the closure capture performed by the compile() function. closure_template = $.parse('(function () {_vars; return (_value)}).call(this)'), closure_variable_template = $.parse('var _var = _value'), closure_null_template = $.parse('null'), escape_string(s) = '\'' + s.replace(/\\/g, '\\\\').replace(/\n/g, '\\n').replace(/'/g, '\\\'') + '\'', Detecting serializable values. Because it's so trivial to handle falsy things (they're all primitives), I've included that case here. Also, the pre-1.0 standard library apparently depends on it somehow. There's a nice optimization we can make here. Rather than using parse() to reconstruct syntax trees, we can actually go a step further and build the constructor invocations that will build them up from scratch. This should end up being just a bit faster than parsing, at the expense of larger code. (That said, the code should pack very well under gzip and/or minification.) Another advantage of this optimization is that you can change caterwaul's parse() function without causing problems. This lets you use a caterwaul function as a cross-compiler from another language without breaking native Javascript quotation. serialize_syntax(value) = value.length === 0 ? qs[new caterwaul.syntax(_name)].replace({_name: escape_string(value.data)}) : qs[new caterwaul.syntax(_name, _children)].replace({_name: escape_string(value.data), _children: children}) -where[children = new $.syntax(',', serialize_syntax(it) -over.value).unflatten()], serialize_ref(value, name, seen) = ! value ? '#{value}' : value.constructor === $.syntax ? 
seen[value.id()] || (seen[value.id()] = name) -returning- serialize_syntax(value) : wobbly[new Error('syntax ref value is not serializable: #{value}')], Variable table generation. Now we just dive through the syntax tree, find everything that binds a value, and install a variable for it. single_variable(name, value) = closure_variable_template.replace({_var: name, _value: value}), names_and_values_for(environment) = single_variable(it, environment[it]) -over_keys.environment, tree_variables(tree) = vars -effect- tree.reach(given.n in vars.push(single_variable(n.data, serialize_ref(n.value, n.data, seen))) -when[n && n.binds_a_value]) -where[vars = [], seen = {}], variables_for(tree, environment) = bind[all_variables = names_and_values_for(environment).concat(tree_variables(tree))] [all_variables.length ? new $.syntax(';', all_variables) : closure_null_template], Closure state generation. This is where it all comes together. Given an original function, we construct a replacement function that has been marked by caterwaul as being precompiled. precompiled_closure(tree, environment) = closure_template.replace({_vars: variables_for(tree, environment), _value: tree}), precompiled_function(tree, environment) = signal_already_compiled(precompiled_closure(tree, environment)), Substitution. Once the reference table is fully populated, we perform a final macroexpansion pass against the initial source tree. This time, rather than annotating functions, we replace them with their precompiled versions. The substitute_precompiled() function returns a closure that expects to be used as a macroexpander whose pattern is gensym_detection_pattern. substitute_precompiled(references)(match) = precompiled_function(ref.compiled, ref.environment) -when[ref && ref.compiled] -where[ref = references[match._gensym.data]], perform_substitution(references, tree) = $.macroexpand(tree, $.macro(trivial_gensym_detection_pattern, nontrivial_gensym_detection_pattern, substitute_precompiled(references))), Gensym removal. After we're done compiling we should nuke all of the gensyms we introduced to mark the functions. The remove_gensyms() function does this. reconstruct_original(references, match) = bind[new_match = {_body: remove_gensyms(references, match._body), _args: match._args}] [match._args ? nontrivial_function_pattern.replace(new_match) : trivial_function_pattern.replace(new_match)], remove_referenced_gensyms(references)(match) = reconstruct_original(references, match) -when[ref && ref.tree] -where[ref = references[match._gensym.data]], remove_gensyms(references, tree) = $.macroexpand(tree, $.macro(trivial_gensym_detection_pattern, nontrivial_gensym_detection_pattern, remove_referenced_gensyms(references))), Tracing. This is where we build the references hash. To do this, we first annotate the functions, build a traced caterwaul, and then run the function that we want to precompile. The traced caterwaul builds references for us. annotated_caterwaul(caterwaul, references) = caterwaul.clone() -effect[it.compile = wrapped_compile(it.compile, references)], trace_execution(caterwaul, f) = {references: references, annotated: annotated} -effect- caterwaul.compile(annotated, {caterwaul: annotated_caterwaul(caterwaul, references)})() -where[references = {}, annotated = annotate_functions_in($.parse(f), references)]]})(caterwaul); __ meta::sdoc('js::extensions/dev/trace', <<'__'); Code trace behavior | Spencer Tipping Licensed under the terms of the MIT source code license Introduction. 
The 'tracer' function constructs a caterwaul compiler that invokes hooks before and after each expression. You can inspect the value after it is computed and can measure how long it takes to return (or whether it fails to return due to an exception being thrown). For example, here's a very simple profiler (it doesn't account for 'own time', just 'total time'): | var trace = caterwaul.tracer(function (expression) {timings[expression.id()] = timings[expression.id()] || 0; timers.push(+new Date())}, function (expression, value) {timings[expression.id()] += +new Date() - timers.pop()}); var timings = {}; var timers = []; var profiled = trace(function () {...}); // Annotations inserted during macroexpansion profiled(); // This run is measured Interface details. Tracing things involves modifying the generated expressions in a specific way. First, the tracer marks that an expression will be evaluated. This is done by invoking a 'start' function, which then alerts the before-evaluation listener. Then the tracer evaluates the original expression, capturing its output and alerting the listener in the process. Listeners are free to use and/or modify this value, but doing so may change how the program runs. (Note that value types are immutable, so in this case no modification will be possible.) There is currently no way to catch errors generated by the code. This requires a more aggressive and much slower method of tracing, and most external Javascript debuggers can give you a reasonable stack trace. (You can also deduce the error location by observing the discrepancy between before and after events.) Here is the basic transformation applied to each expression in the code (where qs[] indicates a reference to the syntax tree): | some_expression -> (before_hook(qs[some_expression]), after_hook(qs[some_expression], some_expression)) Note that when you're building up a caterwaul function you'll probably want to trace the code last. The reason is that traced code isn't much like the code going in due to the way the transformation works. Another side-effect of tracing is that some of the functions you're tracing will have transformed source code. For example, suppose you're tracing this: | xs.map(function (x) {return x + 1}); The sequence of values will include the trace-annotated version of function (x) {return x + 1}, and this function will be seriously gnarly. I've thought of ways to try to fix this, but I haven't come up with any good ones yet. If anyone finds one let me know. The hard part. If Javascript were any kind of sane language this module would be trivial to implement. Unfortunately, however, it is fairly challenging, primarily because of two factors. One is the role of statement-mode constructs, which can't be wrapped directly inside function calls. The other is method invocation binding, which requires either (1) no record of the value of the method itself, or (2) caching of the object. In this case I've written a special function to handle the caching to reduce the complexity of the generated code. caterwaul.js_base()(function ($) { $.tracer(before, after) = $.clone().macros(expression_macros, statement_macros, hook_macros) -effect [it.init_function(tree) = this.macroexpand(anon('S[_x]').replace({_x: tree}))] Expression-mode transformations. Assuming that we're in expression context, here are the transforms that apply. 
Notationally, H[] means 'hook this', D[] means 'hook this direct method call', I[] means 'hook this indirect method call', E[] means 'trace this expression recursively', and S[] means 'trace this statement recursively'. It's essentially a simple context-free grammar over tree expressions. -where [anon = $.anonymizer('E', 'S', 'H', 'D', 'I'), rule(p, e) = $.macro(anon(p), e.constructor === Function ? given.match in this.expand(e.call(this, match)) : anon(e)), expression_macros = [rule('E[_x]', 'H[_, _x]'), // Base case: identifier, literal, or empty function rule('E[]', 'null'), // Base case: oops, descended into nullary something assignment_operator(it) -over- qw('= += -= *= /= %= &= |= ^= <<= >>= >>>='), binary_operator(it) -over- qw('() [] + - * / % < > <= >= == != === !== in instanceof ^ & | && ||'), unary_operator(it) -over- qw('+ - ! ~'), rule('E[(_x)]', '(E[_x])'), // Destructuring of parens rule('E[++_x]', 'H[_, ++_x]'), rule('E[--_x]', 'H[_, --_x]'), // Increment/decrement (can't trace original value) rule('E[_x++]', 'H[_, _x++]'), rule('E[_x--]', 'H[_, _x--]'), rule('E[_x, _y]', 'E[_x], E[_y]'), // Preserve commas -- works in an argument list rule('E[_x._y]', 'H[_, E[_x]._y]'), // No tracing for constant attributes rule('E[_o._m(_xs)]', 'D[_, E[_o], _m, [E[_xs]]]'), // Use D[] to indicate direct method binding rule('E[_o[_m](_xs)]', 'I[_, E[_o], E[_m], [E[_xs]]]'), // Use I[] to indicate indirect method binding rule('E[typeof _x]', 'H[_, typeof _x]'), // No tracing for typeof since the value may not exist rule('E[void _x]', 'H[_, void E[_x]]'), // Normal tracing rule('E[delete _x]', 'H[_, delete _x]'), // Lvalue, so no tracing for the original rule('E[delete _x._y]', 'H[_, delete E[_x]._y]'), rule('E[delete _x[_y]]', 'H[_, delete E[_x][E[_y]]]'), rule('E[new _x(_y)]', 'H[_, new H[_x](E[_y])]'), // Hook the constructor to prevent method-handling from happening rule('E[{_ps}]', 'H[_, {E[_ps]}]'), // Hook the final object and distribute across k/v pairs (more) rule('E[_k: _v]', '_k: E[_v]'), // Ignore keys (which are constant) rule('E[[_xs]]', 'H[_, [E[_xs]]]'), // Hook the final array and distribute across elements rule('E[_x ? _y : _z]', 'H[_, E[_x] ? E[_y] : E[_z]]'), rule('E[function (_xs) {_body}]', 'H[_, function (_xs) {S[_body]}]')] // Trace body in statement mode rather than expression mode -where [assignment_operator(op) = [rule('E[_x = _y]', 'H[_, _x = E[_y]]'), rule('E[_x[_y] = _z]', 'H[_, E[_x][E[_y]] = E[_z]]'), rule('E[_x._y = _z]', 'H[_, E[_x]._y = E[_z]]')] -where [t(x) = anon(x).replace({'=': op}), rule(x, y) = $.macro(t(x), t(y))], binary_operator(op) = $.macro(anon('E[_x + _y]').replace({'+': op}), anon('H[_, E[_x] + E[_y]]').replace({'+': op})), unary_operator(op) = $.macro(anon('E[+_x]').replace({'u+': 'u#{op}'}), anon('H[_, +E[_x]]').replace({'u+': 'u#{op}'})), qw(s) = s.split(/\s+/)], Statement-mode transformations. A lot of the time this will drop back into expression mode. However, there are a few cases where we need disambiguation. One is the var statement, where we can't hook the result of the assignment. Another is the {} construct, which can be either a block or an object literal. There's some interesting stuff going on with = and commas. The reason is that sometimes you have var definitions, and they contain = and , trees that can't be traced quite the same way that they are in expressions. 
For example consider this: | var x = 5, y = 6; In this case we can't evaluate 'x = 5, y = 6' in expression context; if we did, it would produce H[x = H[5]], H[y = H[6]], and this is not valid Javascript within a var statement. Instead, we have to produce x = H[5], y = H[6]. The statement-mode comma and equals rules do exactly that. Note that we don't lose anything by doing this because in statement context the result of an assignment is never used anyway. statement_macros = [rule('S[_x]', 'E[_x]'), rule('S[for (_x) _y]', 'for (S[_x]) S[_y]'), rule('S[{_x}]', '{S[_x]}'), rule('S[for (_x; _y; _z) _body]', 'for (S[_x]; E[_y]; E[_z]) S[_body]'), rule('S[_x; _y]', 'S[_x]; S[_y]'), rule('S[while (_x) _y]', 'while (E[_x]) S[_y]'), rule('S[do _x; while (_y)]', 'do S[_x]; while (E[_y])'), rule('S[_x, _y]', 'S[_x], S[_y]'), rule('S[do {_x} while (_y)]', 'do {S[_x]} while (E[_y])'), rule('S[_x = _y]', '_x = E[_y]'), rule('S[var _xs]', 'var S[_xs]'), rule('S[try {_x} catch (_e) {_y}]', 'try {S[_x]} catch (_e) {S[_y]}'), rule('S[const _xs]', 'const S[_xs]'), rule('S[try {_x} catch (_e) {_y} finally {_z}]', 'try {S[_x]} catch (_e) {S[_y]} finally {S[_z]}'), rule('S[try {_x} finally {_y}]', 'try {S[_x]} finally {S[_y]}'), rule('S[return _x]', 'return E[_x]'), rule('S[function _f(_args) {_body}]', 'function _f(_args) {S[_body]}'), rule('S[return]', 'return'), rule('S[throw _x]', 'throw E[_x]'), rule('S[break _label]', 'break _label'), rule('S[if (_x) _y]', 'if (E[_x]) S[_y]'), rule('S[break]', 'break'), rule('S[if (_x) _y; else _z]', 'if (E[_x]) S[_y]; else S[_z]'), rule('S[continue _label]', 'continue _label'), rule('S[if (_x) {_y} else _z]', 'if (E[_x]) {S[_y]} else S[_z]'), rule('S[continue]', 'continue'), rule('S[switch (_c) {_body}]', 'switch (E[_c]) {S[_body]}'), rule('S[_label: _stuff]', '_label: S[_stuff]'), rule('S[with (_x) _y]', 'with (E[_x]) S[_y]')], Hook generation. Most of the actual hook generation code is fairly routine for JIT stuff. The patterns here don't actually expand into other state marker patterns; H, D, and I are all terminal. The [1] subscript is a hack. We want to grab the un-annotated tree, but all of the patterns have state markers on them. So we subscript by [1] to get the child of that state annotation. 
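Concretely, here is roughly what the two passes do to a tiny expression, ignoring the statement-mode and method-call cases (a sketch in the qs[] notation used in the introduction; the generated code actually carries opaque refs to the hooks and trees rather than these literal names):
| x + y
  E[x + y]  ->  H[_, E[x] + E[y]]
            ->  (before_hook(qs[x + y]), after_hook(qs[x + y], (before_hook(qs[x]), after_hook(qs[x], x)) + (before_hook(qs[y]), after_hook(qs[y], y))))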
hook_macros = [rule('H[_tree, _x]', expression_hook (match._tree[1], match._x) -given.match), rule('D[_tree, _object, _method, [_parameters]]', direct_method_hook (match._tree[1], match) -given.match), rule('I[_tree, _object, _method, [_parameters]]', indirect_method_hook(match._tree[1], match) -given.match)] -where [before_hook(tree) = before(tree) /when.before, after_hook(tree, value) = after(tree, value) /when.after -returning- value, after_method_hook(tree, object, method, parameters) = before_hook(tree[0]) -then- after_hook(tree[0], resolved) -then- after_hook(tree, resolved.apply(object, parameters)) -where[resolved = object[method]], before_hook_ref = new $.ref(before_hook), after_hook_ref = new $.ref(after_hook), after_method_hook_ref = new $.ref(after_method_hook), quote_method_name(node) = '"#{node.data.replace(/(")/g, "\\$1")}"', expression_hook_template = $.parse('(_before(_tree), _after(_tree, _expression))'), indirect_method_hook_template = $.parse('(_before(_tree), _after(_tree, _object, _method, [_parameters]))'), expression_hook(original, tree) = expression_hook_template.replace({_before: before_hook_ref, _after: after_hook_ref, _tree: new $.ref(original), _expression: tree.as('(')}), method_hook(tree, object, method, parameters) = indirect_method_hook_template.replace({_before: before_hook_ref, _after: after_method_hook_ref, _tree: new $.ref(tree), _object: object, _method: method, _parameters: parameters}), direct_method_hook(tree, match) = method_hook(tree, match._object, quote_method_name(match._method), match._parameters), indirect_method_hook(tree, match) = method_hook(tree, match._object, match._method, match._parameters)]]})(caterwaul); __ meta::sdoc('js::extensions/dev/unit', <<'__'); Unit/integration testing behavior | Spencer Tipping Licensed under the terms of the MIT source code license Introduction. This behavior provides words that are useful for unit testing. It also creates functions on caterwaul to define and handle unit tests. For example, using the unit testing library you can do stuff like this: | caterwaul.test(function () { 'foo'.length -should_be- 3; 'foo' -should_not_be- 'bar'; // etc }); caterwaul.js_base()(function ($) { $.assert(condition, message) = condition || wobbly[new Error(message)]; $.assertions = {should_be: given[a, b, statement] in $.assert(a === b, '#{statement.toString()}: #{a} !== #{b}'), should_not_be: given[a, b, statement] in $.assert(a !== b, '#{statement.toString()}: #{a} === #{b}')}; $.test_case_gensym = $.gensym(); $.test_words(language) = $.map(assertion_method, ['should_be', 'should_not_be']) -where [assertion_method(name) = language.parameterized_modifier(name, given.match in qs[caterwaul.assertions._name(_expression, _parameters, _ref)]. replace({_expression: match._expression, _parameters: match._parameters, _name: name, _ref: new $.ref(match._)}))]; $.test_base() = this.clone() -effect- it.macros((it.macros() || []).concat(it.test_words(it.js()))); $.test(f) = this.test_base()(f)()})(caterwaul); __ meta::sdoc('js::extensions/std', <<'__'); Caterwaul standard library | Spencer Tipping Licensed under the terms of the MIT source code license Internal libraries. These operate on caterwaul in some way, but don't necessarily have an effect on generated code. - pinclude pp::js::extensions/std/anonymize Language specializations. These provide configurations that specialize caterwaul to operate well with a given programming language. 
This is relevant because not all languages compile to Javascript the same way, and caterwaul should be able to adapt to the syntactic limitations of generated code (and thus be usable with non-Javascript languages like Coffeescript). Also included is a standard set of words that can be combined with the Javascript forms to produce useful macros. Together these form a base language that is used by other parts of the standard library. - pinclude pp::js::extensions/std/words - pinclude pp::js::extensions/std/js caterwaul.js_base = function () {var js = this.js(); return this.clone().macros(this.word_macros(js), js.macros)}; Libraries. These apply more advanced syntactic transforms to the code and can depend on everything above. - pinclude pp::js::extensions/std/inversion - pinclude pp::js::extensions/std/seq caterwaul.js_all = function () {var js = this.js(); return this.clone().macros(this.word_macros(js), js.macros, this.seq_macro(js))}; __ meta::sdoc('js::extensions/std/anonymize', <<'__'); Symbol anonymization | Spencer Tipping Licensed under the terms of the MIT source code license Introduction. A recurring pattern in previous versions of caterwaul was to clone the global caterwaul function and set it up as a DSL processor by defining a macro that manually dictated tree traversal semantics. This was often difficult to implement because any context had to be encoded bottom-up and in terms of searching rather than top-down inference. This library tries to solve the problem by implementing a grammar-like structure for tree traversal. Use cases. One fairly obvious use case is code tracing. When we trace some code, we need to keep track of whether it should be interpreted in sequence or expression context. Although there are only two states here, it still is too complex for a single-layer macroexpander to handle gracefully; so we create two separate caterwaul functions that delegate control to one another. We then create a set of annotations to indicate which state or states should be chosen next. For example, here are some expansions from the tracing behavior: | E[_x = _y] -> H[_x = E[_y]] S[_x = _y] -> _x = E[_y] It's straightforward enough to define macros this way; all that needs to be done is to mark the initial state and put state information into the macro patterns. The hard part is making sure that the markers don't interfere with the existing syntax. This requires that all of the markers be replaced by gensyms before the macroexpansion happens. Gensym anonymizing. Replacing symbols in macro patterns is trivial with the replace() method. The only hard part is performing this same substitution on the macroexpansions. (In fact, this is impossible to do transparently given Turing-complete macros.) In order to work around this, strings are automatically expanded (because it's easy to do), but functions must call translate_state_markers() on any patterns they intend to use. This call must happen before substituting syntax into the patterns (!) because otherwise translate_state_markers() may rewrite code that happens to contain markers, thus reintroducing the collision problem that all of this renaming is intended to avoid. Usage. To anonymize a set of macros you first need to create an anonymizer. 
This is easy; you just give it a list of symbols to anonymize and then use that anonymizer to transform a series of macros (this process is non-destructive): | var anonymize = caterwaul.anonymizer('X', 'Y', 'Z'); var m = caterwaul.macro(anonymize('X[foo]'), ...); // Matches against gensym_1_aj49Az0_885nr1q[foo] Each anonymizer uses a separate symbol table. This means that two anonymizers that match against 'A' (or any other macro pattern) will always map them to different gensyms. (function ($) {$.anonymizer = function () {for (var translation_table = {}, i = 0, l = arguments.length; i < l; ++i) translation_table[arguments[i]] = $.gensym(); return function (node) {return $.ensure_syntax(node).replace(translation_table)}}})(caterwaul); __ meta::sdoc('js::extensions/std/inversion', <<'__'); Inversion behavior | Spencer Tipping Licensed under the terms of the MIT source code license Introduction. Enabling this behavior results in two interesting things. First, every function will be automatically annotated with an inverse, which is stored as a gensym-encoded attribute on the function. Second, the lvalue behavior will be extended to allow functional and expression destructuring. It isn't possible to assign into a complex expression in JS grammar, so only parameters can be bound this way. Inversion isn't guaranteed to be accurate in the general case. All it guarantees is that it is accurate under the function being inverted. That is, if f is an invertible function and fi is its inverse, then x === fi(f(x)) isn't true in general. However, f(x) === f(fi(f(x))) generally is. Combinatory inversion. Each kind of expression has certain inversion semantics. Some of them perform runtime type detection to figure out how best to invert something. For example, the + operator is overloaded across strings and numbers, so we have to do a type check on the arguments before knowing which inversion to use. Also, different cases are taken depending on which operand is a constant. (Most binary operators fail with two variables.) Information gets lost when you invert stuff, as most operators are closed within a finite type. For example, suppose x | 3 = 7. We now don't know the lowest two bits of x, so we arbitrarily set them to zero for the purposes of destructuring. (Also, if x | 3 = 6, we reject the match because we know something about the bits set by |.) Inversion never degenerates into nondeterminism. That is, ambiguous multivariate cases are rejected immediately rather than explored. So, for example, if f(x, y) = x + y, you can't match against f(x, y) and expect it to work. You could match against f(x, 1) or f(5, y), though, since once the constants are propagated through the expression you will end up with an unambiguous way to invert the + operator. In some cases nondeterminism is eliminated through default behavior: if f(x, y) = x && y, then matching against f(x, y) = X will result in x = true, y = X when X is truthy, and x = X, y = undefined when X is falsy. || behaves similarly; x || y = X results in x = X, y = undefined when X is true, and x = false, y = X when X is falsy. Constructor inversion. Constructors are a bizarre case of function application, and it's possible to invert them with some accuracy. Basically, we track the assignment of parameters into named 'this' properties and construct the inverse based on corresponding properties of the object being matched against. For example, the constructor fc[x, y][this.x = x, this.y = y] is invertible by pulling .x and .y from the object. 
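As a sketch of what that synthesized inverse amounts to (a hypothetical name, written in the same destructuring shorthand used for power_of_two_inverse below; the real inverse is stored as a gensym-encoded attribute on the constructor rather than as a named function):
| fc_inverse(o) = {x: o.x, y: o.y}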
Decisional inversion. This isn't a joke; it's actually possible to invert a decisional sometimes. However, it may end up taking every branch. The idea is that you try the first branch; if it succeeds, then we assume the condition variable was true and return. If it fails, then we try the second branch and assume that the condition variable was false. So, for example: | f(cond, x, y) = cond ? {foo: x} : {bar: y}; g(f(b, x, y)) = 'got ' + b + ' with ' + [x, y]; g({foo: 10}) // returns 'got true with 10,undefined' g({bar: 10}) // returns 'got false with undefined,10' It's important to have decisional inversion because we might want to invert a pattern-matching function. For example: | foo('foo' + bar) = 'got a foo: ' + bar foo('bif' + bar) = 'got a bif: ' + bar g(foo(x)) = x g('got a foo: bar') // returns 'foobar' g('got a bif: bar') // returns 'bifbar' Recursive inversion. This also isn't a joke, though you can cause an infinite loop if you're not careful. You shouldn't really use this, but it's a natural side-effect of the way I'm representing inversions anyway. Here's an example: | power_of_two(n) = n === 0 ? 1 : 2 * power_of_two(n - 1); g(power_of_two(x)) = x; g(1) // -> 0 g(2) // -> 1 g(4) // -> 2 Here's what the inverse function looks like (modulo formatting, error checking, etc): | power_of_two_inverse(x) = x === 1 ? {n: 0} : {n: 1 + power_of_two_inverse(x / 2).n}; Don't use this feature! It's slow, it may infinite-loop, and it doesn't work for most recursive functions because of the nondeterminism limitation. I'm also not even going to guarantee that it works correctly in trivial cases like this, though if it doesn't it's probably because of a bug. __ meta::sdoc('js::extensions/std/js', <<'__'); Javascript-specific macros | Spencer Tipping Licensed under the terms of the MIT source code license (function ($) { Structured forms in Javascript. These aren't macros, but forms. Each language has its own ways of expressing certain idioms; in Javascript we can set up some sensible defaults to make macros more consistent. For example, caterwaul pre-1.0 had the problem of wildly divergent macros. The fn[] macro was always prefix and required parameters, whereas /se[] was always postfix and had a single optional parameter. /cps[] was similarly postfix, which was especially inappropriate considering that it could theoretically handle multiple parameters. In caterwaul 1.0, the macro author's job is reduced to specifying which words have which behavior; the language driver takes care of the rest. For instance, rather than specifying the full pattern syntax, you just specify a word and its definition with respect to an opaque expression and perhaps set of modifiers. 
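For a concrete taste of how little the macro author specifies, here is the definition of the 'when' word exactly as it appears in the words extension later in this file, along with a couple of the surface forms the driver generates from it:
| language.parameterized_modifier('when', '((_parameters) && (_expression))')
  x /when[y]   ->  ((y) && (x))
  x -when- y   ->  ((y) && (x))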
Here are the standard Javascript macro forms: $.js = function () { var macro = function (name, expander) {return function (template) {return $.macro ($.parse(template).replace({_modifiers: $.parse(name)}), expander)}}; var macros = function (name, expander) {return function (template) {return result.modifier($.parse(template).replace({_modifiers: $.parse(name)}), expander)}}; var result = {modifier: this.right_variadic(function (name, expander) { return $.map(macro(name, expander), ['_expression /_modifiers', '_expression -_modifiers', '_expression |_modifiers', '_expression._modifiers', '_modifiers[_expression]', '_modifiers in _expression'])}), parameterized_modifier: this.right_variadic(function (name, expander) { return [$.map(macros(name, expander), ['_modifiers[_parameters]', '_modifiers._parameters']), $.map(macro(name, expander), ['_expression <_modifiers> _parameters', '_expression -_modifiers- _parameters'])]}), Javascript-specific shorthands. Javascript has some syntactic weaknesses that it's worth correcting. These don't relate to any structured macros, but are hacks designed to make JS easier to use. macros: [ Javascript intrinsic verbs. These are things that you can do in statement mode but not expression mode. this.macro('wobbly[_x]', '(function () {throw _x}).call(this)'), String interpolation. Javascript normally doesn't have this, but it's straightforward enough to add. This macro implements Ruby-style interpolation; that is, "foo#{bar}" becomes "foo" + bar. A caveat (though not bad one in my experience) is that single and double-quoted strings are treated identically. This is because Spidermonkey rewrites all strings to double-quoted form. This version of string interpolation is considerably more sophisticated than the one implemented in prior versions of caterwaul. It still isn't possible to reuse the same quotation marks used on the string itself, but you can now include balanced braces in the interpolated text. For example, this is now valid: | 'foo #{{bar: "bif"}.bar}' There are some caveats; if you have unbalanced braces (even in substrings), it will get confused and misread the boundary of your text. So stuff like this won't work properly: | 'foo #{"{" + bar}' // won't find the ending properly and will try to compile the closing brace function (node) { var s = node.data, q = s.charAt(0), syntax = $.syntax; if (q !== '\'' && q !== '"' || ! /#\{[^\}]+\}/.test(s)) return false; // DeMorgan's applied to (! ((q === ' || q === ") && /.../test(s))) for (var pieces = [], i = 1, l = s.length - 1, brace_depth = 0, got_hash = false, start = 1, c; i < l; ++i) if (brace_depth) if ((c = s.charAt(i)) === '}') --brace_depth || pieces.push(s.substring(start, i)) && (start = i + 1), got_hash = false; else brace_depth += c === '{'; else if ((c = s.charAt(i)) === '#') got_hash = true; else if (c === '{' && got_hash) pieces.push(s.substring(start, i - 1)), start = i + 1, ++brace_depth; else got_hash = false; pieces.push(s.substring(start, l)); for (var quoted = new RegExp('\\\\' + q, 'g'), i = 0, l = pieces.length; i < l; ++i) pieces[i] = i & 1 ? $.parse(pieces[i].replace(quoted, q)).as('(') : new syntax(q + pieces[i] + q); return new syntax('+', pieces).unflatten().as('(')}, Destructuring function creation. This is a beautiful hack made possible by Internet Explorer. We can intercept cases of assigning into a function and rewrite them to create a function body. For example, f(x) = y becomes the regular assignment f = function (x) {return y}. 
Because this macro is repeatedly applied we get currying for free. There's a special case. You can grab the whole arguments array by setting something equal to it. For example, f(xs = arguments) = xs[0] + xs[1]. This makes it easy to use binding constructs inside the body of the function without worrying whether you'll lose the function context. this.macro('_left(_args) = _right', '_left = (function (_args) {return _right})'), this.macro('_left(_var = arguments) = _right', '_left = (function () {var _var = arguments; return _right})')]}; return result}})(caterwaul); __ meta::sdoc('js::extensions/std/seq', <<'__'); Sequence comprehensions | Spencer Tipping Licensed under the terms of the MIT source code license Introduction. Caterwaul pre-1.0 had a module called 'seq' that provided a finite and an infinite sequence class and localized operator overloading to make them easier to use. Using wrapper classes was both unnecessary (since most sequence operations were done inside the seq[] macro anyway) and problematic, as it required the user to remember to cast sequences back into arrays and such. It also reduced runtime performance and created a lot of unnecessary copying. Caterwaul 1.0 streamlines the seq[] macro by removing the sequence classes and operating directly on arrays or array-like things. Not everything in Javascript is an array, but I'm going to pretend that everything is (or at least looks like one) and rely on the [i] and .length properties. This allows the sequence library to (1) have a very thin design, and (2) compile down to tight loops without function calls. Notation. The notation is mostly a superset of the pre-1.0 sequence notation. Operators that have the same functionality as before (others are reserved for future meanings, but probably won't do what they used to): | * = map e.g. [1, 2, 3] *[x + 1] |seq -> [2, 3, 4] *! = each e.g. [1, 2, 3] *![console.log(x)] |seq -> [1, 2, 3] (and logs 1, 2, 3) / = foldl e.g. [1, 2, 3] /[x - next] |seq -> -4 /! = foldr e.g. [1, 2, 3] /![x - next] |seq -> 2 % = filter e.g. [1, 2, 3] %[x & 1] |seq -> [1, 3] %! = filter-not e.g. [1, 2, 3] %![x & 1] |seq -> [2] + = concatenate e.g. [1, 2, 3] + [4, 5] |seq -> [1, 2, 3, 4, 5] - = cartesian product e.g. [1, 2] - [3, 4] |seq -> [[1, 3], [1, 4], [2, 3], [2, 4]] ^ = zip e.g. [1, 2, 3] ^ [4, 5, 6] |seq -> [[1, 4], [2, 5], [3, 6]] | = exists e.g. [1, 2, 3] |[x === 2] |seq -> true & = forall e.g. [1, 2, 3] &[x < 3] |seq -> false Note that ^ has higher precedence than |, so we can use it in a sequence comprehension without interfering with the |seq macro (so long as the |seq macro is placed on the right). Modifiers. Modifiers are unary operators that come after the primary operator. These have the same (or similar) functionality as before: | ~ = interpret something in sequence context e.g. [[1], [2], [3]] *~[x *[x + 1]] |seq -> [[2], [3], [4]] x = rename the variable from 'x' e.g. [1, 2, 3] *y[y + 1] |seq -> [2, 3, 4] Here, 'x' means any identifier. Caterwaul 1.0 introduces some new stuff. The map function now has a new variant, *~!. Filter also supports this variant. Like other operators, they support variable renaming and sequence context. You can do this by putting those modifiers after the *~!; for instance, xs *~!~[exp] interprets 'exp' in sequence context. Similarly, *~!y[exp] uses 'y' rather than 'x'. | *~! = flatmap e.g. [1, 2, 3] *~![[x, x + 1]] |seq -> [1, 2, 2, 3, 3, 4] %~! = map/filter e.g. [1, 2, 3] %~![x & 1 && x + 1] |seq -> [2, 4] Variables. 
All of the variables from before are still available and the naming is still mostly the same. Each block has access to 'x', which is the immediate element. 'xi' is the index, and 'x0' is the alternative element for folds. Because all sequences are finite, a new variable 'xl' is available -- this is the total number of elements in the source sequence. The sequence object is no longer accessible because there may not be a concrete sequence. (I'm leaving room for cross-operation optimizations in the future.) The renaming is done exactly as before: | [1, 2, 3] *[x + 1] |seq -> [2, 3, 4] [1, 2, 3] *y[y + 1] |seq -> [2, 3, 4] [1, 2, 3] *[xi] |seq -> [0, 1, 2] [1, 2, 3] *foo[fooi] |seq -> [0, 1, 2] Word operators. Some operators are designed to work with objects, just like in prior versions. However, the precedence has been changed to improve ergonomics. For example, it's uncommon to use objects as an intermediate form because all of the sequence operators are built around arrays. Similarly, it's very common to unpack objects immediately before using them. Therefore the unpack operators should be very high precedence and the pack operator should have very low precedence: | {foo: 'bar'} /keys |seq -> ['foo'] {foo: 'bar'} /values |seq -> ['bar'] {foo: 'bar'} /pairs |seq -> [['foo', 'bar']] {foo: 'bar'} /pairs |object |seq -> {foo: 'bar'} Note that unlike regular modifiers you can't use a variety of operators with each word. Each one is defined for just one form. I may change this in the future, but I'm reluctant to start with it because it would remove a lot of syntactic flexibility. Numbers. Caterwaul 1.0 removes support for the infinite stream of naturals (fun though it was), since all sequences are now assumed to be finite and are strictly evaluated. So the only macro available is n[], which generates finite sequences of evenly-spaced numbers: | n[1, 10] |seq -> [1, 2, 3, 4, 5, 6, 7, 8, 9] n[10] |seq -> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] n[0, 10, 2] |seq -> [0, 2, 4, 6, 8] Generated code. Previously the code was factored into separate methods that took callback functions. (Basically the traditional map/filter/each arrangement in functional languages.) However, now the library optimizes the methods out of the picture. This means that now we manage all of the dataflow between the different sequence operators. I thought about allocating gensym variables -- one for each temporary result -- but this means that the temporary results won't be garbage-collected until the entire sequence comprehension is complete. So instead it generates really gnarly code, with each dependent sequence listed in the for-loop variable initialization. Luckily this won't matter because, like, there aren't any bugs or anything ;) Portability. The seq library is theoretically portable to syntaxes besides JS, but you'll probably want to do some aggressive preprocessing if you do this. It assumes a lot about operator precedence and such (from a design perspective). caterwaul.js_base()(function ($) { $.seq_macro(language) = language.modifier('seq', this.expand(seq_expand(tree._expression)) -given.tree -where [seq_expand = $.seq()]); $.seq() = $.clone().macros(operator_macros, word_macros) -effect [it.init_function(tree) = this.macroexpand(anon('S[_x]').replace({_x: tree}))] -where [anon = $.anonymizer('S'), rule(p, e) = $.macro(anon(p), e.constructor === Function ? 
given.match in this.expand(e.call(this, match)) : anon(e)), operator_macros = [rule('S[_x]', '_x'), operator_pattern('|', exists), rule('S[_x, _y]', 'S[_x], S[_y]'), operator_pattern('&', forall), operator_pattern('*', map, each, flatmap), binary_operator('+', concat), operator_pattern('%', filter, filter_not, map_filter), binary_operator('-', cross), operator_pattern('/', foldl, foldr), binary_operator('^', zip)] -where [operator_pattern(op, normal, bang, tbang) = [] -effect- it.push(trule('S[_xs +[_f]]', normal), trule('S[_xs +_var[_f]]', normal)) -effect- it.push(trule('S[_xs +![_f]]', bang), trule('S[_xs +!_var[_f]]', bang)) /when.bang -effect- it.push(trule('S[_xs +~![_f]]', tbang), trule('S[_xs +~!_var[_f]]', tbang)) /when.tbang -returning- it.concat(context_conversions) -where [template(p) = anon(p).replace({'+': op}), trule(p, e) = rule(template(p), e.constructor === Function ? e : template(e)), context_conversions = [ trule('S[_xs +~[_f]]', 'S[_xs +[S[_f]]]'), trule('S[_xs +~_var[_f]]', 'S[_xs +_var[S[_f]]]'), trule('S[_xs +!~[_f]]', 'S[_xs +![S[_f]]]'), trule('S[_xs +!~_var[_f]]', 'S[_xs +!_var[S[_f]]]'), trule('S[_xs +~!~[_f]]', 'S[_xs +~![S[_f]]]'), trule('S[_xs +~!~_var[_f]]', 'S[_xs +~!_var[S[_f]]]')]], binary_operator(op, f) = rule(t('S[_xs + _ys]'), f) -where [t(pattern) = anon(pattern).replace({'+': op})], scope = $.parse('(function () {_body}).call(this)'), scoped(tree) = scope.replace({_body: tree}), loop_anon = $.anonymizer('xs', 'ys', 'x', 'y', 'i', 'j', 'l', 'lj'), loop_form(x) = scoped(loop_anon(anon(x))), op_form(pattern) = bind [form = loop_form(pattern)] in form.replace(variables_for(match)) /given.match, map = op_form('for (var xs = S[_xs], ys = [], _xi = 0, _xl = xs.length, _x; _xi < _xl; ++_xi) _x = xs[_xi], ys.push((_f)); return ys'), each = op_form('for (var xs = S[_xs], _xi = 0, _xl = xs.length, _x; _xi < _xl; ++_xi) _x = xs[_xi], (_f); return xs'), flatmap = op_form('for (var xs = S[_xs], ys = [], _xi = 0, _xl = xs.length, _x; _xi < _xl; ++_xi) _x = xs[_xi], ys.push.apply(ys, (_f)); return ys'), filter = op_form('for (var xs = S[_xs], ys = [], _xi = 0, _xl = xs.length, _x; _xi < _xl; ++_xi) _x = xs[_xi], (_f) && ys.push(_x); return ys'), filter_not = op_form('for (var xs = S[_xs], ys = [], _xi = 0, _xl = xs.length, _x; _xi < _xl; ++_xi) _x = xs[_xi], (_f) || ys.push(_x); return ys'), map_filter = op_form('for (var xs = S[_xs], ys = [], _xi = 0, _xl = xs.length, _x, _y; _xi < _xl; ++_xi) _x = xs[_xi], (_y = (_f)) && ys.push(_y); return ys'), foldl = op_form('for (var xs = S[_xs], _x = xs[0], _xi = 1, _xl = xs.length, _x0; _xi < _xl; ++_xi) _x0 = xs[_xi], _x = (_f); return _x'), foldr = op_form('for (var xs = S[_xs], _xi = 0, _xl = xs.length - 1, _x0 = xs[_xl], _x; _xi >= 0; --_xi) _x = xs[_xi], _x0 = (_f); return _x'), exists = op_form('for (var xs = S[_xs], _x = xs[0], _xi = 0, _xl = xs.length, x; _xi < _xl; ++_xi) {_x = xs[_xi]; if (y = (_f)) return y} return false'), forall = op_form('for (var xs = S[_xs], _x = xs[0], _xi = 0, _xl = xs.length; _xi < _xl; ++_xi) {_x = xs[_xi]; if (! 
(_f)) return false} return true'), concat = op_form('return (S[_xs]).concat(S[_ys])'), zip = op_form('for (var xs = S[_xs], ys = S[_ys], pairs = [], i = 0, l = xs.length; i < l; ++i) pairs.push([xs[i], ys[i]]); return pairs'), cross = op_form('for (var xs = S[_xs], ys = S[_ys], pairs = [], i = 0, l = xs.length, lj = ys.length; i < l; ++i) ' + 'for (var j = 0; j < lj; ++j) pairs.push([xs[i], ys[j]]);' + 'return pairs'), variables_for(m) = $.merge({}, m, prefixed_hash(m._var)), prefixed_hash(p) = {_x: name, _xi: '#{name}i', _xl: '#{name}l', _x0: '#{name}0'} -where[name = p || 'x']], word_macros = [rule('S[n[_upper]]', n), rule('S[_o /keys]', keys), rule('S[n[_lower, _upper]]', n), rule('S[_o /values]', values), rule('S[n[_lower, _upper, _step]]', n), rule('S[_o /pairs]', pairs), rule('S[_xs |object]', object)] -where [n(match) = n_pattern.replace($.merge({_lower: '0', _step: '1'}, match)), n_pattern = anon('(function () {for (var r = [], i = _lower, u = _upper; i < u; i += _step) r.push(i); return r})()'), scope = $.parse('(function () {_body}).call(this)'), scoped(t) = scope.replace({_body: t}), form(p) = scoped(anon(p)).replace(match) /given.match, keys = form('var ks = [], o = S[_o]; for (var k in o) Object.prototype.hasOwnProperty.call(o, k) && ks.push(k); return ks'), values = form('var vs = [], o = S[_o]; for (var k in o) Object.prototype.hasOwnProperty.call(o, k) && vs.push(o[k]); return vs'), pairs = form('var ps = [], o = S[_o]; for (var k in o) Object.prototype.hasOwnProperty.call(o, k) && ps.push([k, o[k]]); return ps'), object = form('for (var o = {}, xs = S[_xs], i = 0, l = xs.length, x; i < l; ++i) x = xs[i], o[x[0]] = x[1]; return o')]]})(caterwaul); __ meta::sdoc('js::extensions/std/words', <<'__'); Common adjectives and adverbs | Spencer Tipping Licensed under the terms of the MIT source code license Introduction. This behavior installs a bunch of common words and sensible behaviors for them. The goal is to handle most Javascript syntactic cases by using words rather than Javascript primitive syntax. For example, constructing lambdas can be done with 'given' rather than the normal function() construct: | [1, 2, 3].map(x + 1, given[x]) // -> [1, 2, 3].map(function (x) {return x + 1}) In this case, given[] is registered as a postfix binary adverb. Any postfix binary adverb forms added later will extend the possible uses of given[]. (function ($) { var loop_anon = $.anonymizer('i', 'l', 'xs', 'result'); $.word_macros = function (language) { return [ Quotation. qs[] comes from pre-1.0 caterwaul; this lets you quote a piece of syntax, just like quote in Lisp. The idea is that qs[something] returns 'something' as a syntax tree. qse[] is a variant that macroexpands the syntax tree before returning it; this used to be there for performance reasons (now irrelevant with the introduction of precompilation) but is also useful for macro reuse. language.modifier('qs', function (match) {return new $.ref(match._expression)}), language.modifier('qse', function (match) {return new $.ref(this.expand(match._expression))}), Scoping and referencing. These all impact scope or references somehow -- in other words, they create variable references but don't otherwise impact the nature of evaluation. Function words. These define functions in some form. given[] and bgiven[] are postfix adverbs to turn an expression into a function; given[] creates a regular closure while bgiven[] preserves the closure binding. They're aliased to the more concise fn[] and fb[] for historical and ergonomic reasons. 
For example: | var f = fn[x] in x + 1 var f = x + 1 |given[x]; var f = x + 1 |given.x; language.parameterized_modifier('given', 'from', 'fn', '(function (_parameters) {return _expression})'), language.parameterized_modifier('bgiven', 'bfrom', 'fb', '(function (t, f) {return (function () {return f.apply(t, arguments)})})(this, (function (_parameters) {return _expression}))'), Side-effecting. The goal here is to take an existing value, modify it somehow, and then return it without allocating an actual variable. This can be done using the /effect[] adverb, also written as /se[]. Older versions of caterwaul bound the variable as _; version 1.0 changes this convention to bind the variable to 'it'. For example: | hash(k, v) = {} /effect[it[k] = v]; compose(f, g)(x) = g(x) -then- f(it); language.parameterized_modifier('effect', 'se', '(function (it) {return (_parameters), it}).call(this, (_expression))'), language.parameterized_modifier('then', 're', 'returning', '(function (it) {return (_parameters)}).call(this, (_expression))'), Scoping. You can create local variables by using the where[] and bind[] adverbs. If you do this, the locals can all see each other since they're placed into a 'var' statement. For example: | where[x = 10][alert(x)] alert(x), where[x = 10] bind[f(x) = x + 1] in alert(f(10)) language.parameterized_modifier('where', 'bind', '(function () {var _parameters; return (_expression)}).call(this)'), Control flow modifiers. These impact how something gets evaluated. Conditionals. These impact whether an expression gets evaluated. x /when[y] evaluates to x when y is true, and y when y is false. Similarly, x /unless[y] evaluates to x when y is false, and !y when y is true. A final option 'otherwise' is like || but can have different precedence: | x = x /otherwise.y + z; language.parameterized_modifier('when', '((_parameters) && (_expression))'), language.parameterized_modifier('unless', '(! (_parameters) && (_expression))'), language.parameterized_modifier('otherwise', '((_expression) || (_parameters))'), language.parameterized_modifier('when_defined', '((_parameters) != null && (_expression))'), language.parameterized_modifier('unless_defined', '((_parameters) == null && (_expression))'), Collection-based loops. These are compact postfix forms of common looping constructs. Rather than assuming a side-effect, each modifier returns an array of the results of the expression. | console.log(it), over[[1, 2, 3]] // logs 1, then 2, then 3 console.log(it), over_keys[{foo: 'bar'}] // logs foo console.log(it), over_values[{foo: 'bar'}] // logs bar language.parameterized_modifier('over', loop_anon('(function () {for (var xs = (_parameters), result = [], i = 0, l = xs.length, it; i < l; ++i)' + 'it = xs[i], result.push(_expression); return result}).call(this)')), language.parameterized_modifier('over_keys', loop_anon('(function () {var x = (_parameters), result = []; ' + 'for (var it in x) Object.prototype.hasOwnProperty.call(x, it) && result.push(_expression); return result}).call(this)')), language.parameterized_modifier('over_values', loop_anon('(function () {var x = (_parameters), result = [], it; ' + 'for (var k in x) Object.prototype.hasOwnProperty.call(x, k) && (it = x[k], result.push(_expression));' + 'return result}).call(this)')), Condition-based loops. These iterate until something is true or false, collecting the results of the expression and returning them as an array. 
For example: | console.log(x), until[++x >= 10], where[x = 0] // logs 1, 2, 3, 4, 5, 6, 7, 8, 9
language.parameterized_modifier('until', loop_anon('(function () {var result = []; while (! (_parameters)) result.push(_expression); return result}).call(this)'))]}})(caterwaul); __ meta::sdoc('js::extensions/ui', <<'__'); Caterwaul UI macros | Spencer Tipping Licensed under the terms of the MIT source code license DOM libraries. Right now I've only got a set of combinators for jQuery. - pinclude pp::js::extensions/ui/dom.jquery caterwaul.js_ui = function (existing) {var js = this.js(); return this.clone().macros(existing.macros(), this.jquery_macro(js))}; __ meta::sdoc('js::extensions/ui/dom.jquery', <<'__'); JQuery DOM combinators | Spencer Tipping Licensed under the terms of the MIT source code license Introduction. DOM drivers are macro systems that transform HAML-like markup into underlying DOM calls. For instance: | div.foo /jquery                      ->  $('<div>').addClass('foo')
  table(tr(td('hi')), tbody) /jquery   ->  $('<table>').append($('<tr>').append($('<td>').append('hi')).add($('<tbody>')))
None of the macroexpansions here rely on opaque syntax refs, so they can all be precompiled. Also, the generated code refers to jQuery rather than $ -- this gives you more flexibility about setting noConflict(). If you need to set noConflict(true) (which removes the global jQuery variable), you can bind it locally to make the DOM stuff work: | div.foo /jquery -where [jQuery = stashed_copy_of_jquery] Notation. Caterwaul didn't previously have a DOM plugin in its core distribution. The html[] macro in previous versions of caterwaul came from montenegro, a web framework I was developing in tandem with caterwaul. However, it's useful to have DOM functionality so I'm including it in the main caterwaul distribution. Most of the syntax is copied from the html[] macro in montenegro:
| element.class            -> $('<element>').addClass('class')
  element *foo('bar')      -> $('<element>').attr('foo', 'bar')
  element *!foo('bar')     -> $('<element>').data('foo', 'bar')      <- new!
  element /foo('bar')      -> $('<element>').foo('bar')
  element /!foo(bar)       -> $('<element>').bind('foo', bar)        <- new!
  +element                 -> element                                <- new!
  element %foo             -> foo($('<element>'))
  element(child)           -> $('<element>').append(child /jquery)   <- here the /jquery marker indicates that 'child' will be re-expanded
  element(child1, child2)  -> $('<element>').append((child1 /jquery).add((child2 /jquery)))
  element[child]           -> $('<element>').append(child)           <- no re-expansion here
  element[child1, child2]  -> $('<element>').append(child1.add(child2))
  element > child          -> $('<element>').append(child /jquery)
  element >= child         -> $('<element>').append(child)
  element1, element2       -> (element1 /jquery).add((element2 /jquery))
There's also some new syntax to make certain things easier. In particular, I didn't like the way nesting worked in previous versions, so this driver supports some new operators to make it more intuitive: | element1 + element2 -> (element1 /jquery).add((element2 /jquery)) The result of this operator is that you have options as far as nesting is concerned:
| div.foo > span.first + span.second,   ->  <div class='foo'><span class='first'></span><span class='second'></span></div>
  div.bar > span.third + span.fourth        <div class='bar'><span class='third'></span><span class='fourth'></span></div>
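In terms of generated calls, applying the rules above mechanically gives roughly this (a sketch; the driver's actual parenthesization may differ). Because + binds tighter than > in Javascript, the two spans group as a unit under each >, and the comma joins the two divs with .add():
| ($('<div>').addClass('foo').append(($('<span>').addClass('first')).add($('<span>').addClass('second'))))
   .add($('<div>').addClass('bar').append(($('<span>').addClass('third')).add($('<span>').addClass('fourth'))))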
Also, you can now dig through the DOM using HTML selectors. Here's what that looks like:
| element >> div.foo              -> element.filter('div.foo')
  element >> _.foo                -> element.filter('*.foo')
  element >>> div.foo             -> element.find('div.foo')
  element << div.foo              -> element.parents('div.foo')
  element >> div.foo /first       -> element.filter('div.foo:first')
  element >> div.foo /contains(x) -> element.filter('div.foo:contains("#{x}")')
  element >> div.foo + div.bar    -> element.filter('div.foo, div.bar')
  element >> (span >> p)          -> element.filter('span p')
  element >> (span >>> p)         -> element.filter('span p')
  element >> (span > p)           -> element.filter('span > p')
  element >> span[foo]            -> element.filter('span[foo]')
  element >> span[data_bar]       -> element.filter('span[data-bar]')   <- note conversion of _ to -
  element >> span[foo=x]          -> element.filter('span[foo="#{string_escape(x)}"]')
Note that this isn't really intended to be a replacement for jQuery's builtin methods; it's just an easy way to do some simple selection. I highly recommend using native jQuery selectors if you need something more powerful. You shouldn't try to get too elaborate with these; I'm not sure how much stuff jQuery's CSS parser can handle. Also note that CSS3's operator precedence differs from Javascript's. In particular, doing things like div > span + div > code is incorrect because it will be parsed as 'div > (span, div) > code' (though it may render properly as a CSS pattern). It's a good idea to parenthesize in this case, just to communicate your intent to whoever's reading your code. Caterwaul removes the parentheses to make it a valid CSS selector. Unlike the montenegro html[] macro, this one doesn't do any autodetection. The good part about this is that you can create custom HTML elements this way. For example: | my_element /jquery -> $('<my-element>') <- note the conversion of _ to -; this happens in class and attribute names too caterwaul.js_base()(function ($) { $.jquery_macro(language, options) = language.modifier('jquery', this.expand(jquery_expand(match._expression)) -given.match -where [jquery_expand = $.jquery(options)]); $.jquery(options) = $.clone().macros(jquery_macros, string_macros, search_macros) -effect [it.init_function(tree) = this.macroexpand(anon('J[_x]').replace({_x: tree}))] Transforms. There are a lot of stages here, but most of them are fairly trivial. The first, J[], is used to indicate that something needs to be expanded under the jquery grammar. This is responsible for turning elements into jQuery calls, dot operators into classes, etc, and it does most of the heavy lifting. The other large stage is P[], which converts the pattern language into a jQuery CSS selector. The small stages are S[], which just turns something into a string with underscore-to-dash conversion; TS[], which turns something into a tag-string (e.g. TS[foo] = "<foo>"); and PS[], which quotes a compiled pattern. -where [jq = options && options.jquery_name || 'jQuery', anon = $.anonymizer('J', 'TS', 'S', 'P', 'PS'), rule(p, e) = $.macro(anon(p), e.constructor === Function ?
this.expand(e.call(this, match)) -given.match : anon(e)), hyphenate(s) = s.replace(/_/g, '-'), p = bind [p_pattern = anon('P[_thing]')] in p_pattern.replace({_thing: node}) -given.node, jquery_macros = [rule('J[_element]', '#{jq}(TS[_element])'), rule('J[_element._class]', '(J[_element]).addClass(S[_class])'), rule('J[_element *_attr(_val)]', '(J[_element]).attr(S[_attr], _val)'), rule('J[_element *!_name(_val)]', '(J[_element]).data(S[_name], _val)'), rule('J[_element /_method(_args)]', '(J[_element])._method(_args)'), rule('J[_element /!_event(_args)]', '(J[_element]).bind(S[_event], _args)'), rule('J[_element %_function]', '_function((J[_element]))'), rule('J[_element(_children)]', '(J[_element]).append((J[_children]))'), rule('J[_element[_children]]', '(J[_element]).append(_children)'), rule('J[_element > _child]', '(J[_element]).append((J[_child]))'), rule('J[_element >= _child]', '(J[_element]).append(_child)'), rule('J[_element1, _element2]', '(J[_element1]).add((J[_element2]))'), rule('J[_element1 + _element2]', '(J[_element1]).add((J[_element2]))'), rule('J[_element >> _pattern]', '(J[_element]).filter((PS[_pattern]))'), rule('J[_element >>> _pattern]', '(J[_element]).find((PS[_pattern]))'), rule('J[_element << _pattern]', '(J[_element]).parents((PS[_pattern]))'), rule('J[(_element)]', '(J[_element])'), rule('J[[_element]]', '[J[_element]]'), rule('J[+_expression]', '_expression')], string_macros = [rule('TS[_identifier]', string('<#{hyphenate(match._identifier.data)}>') -given.match), rule('S[_identifier]', string( hyphenate(match._identifier.data)) -given.match), rule('PS[_identifier]', string(this.expand(p(match._identifier)).data) -given.match)] -where [string(s) = new $.syntax('"' + s.replace(/\\/g, '\\\\').replace(/"/g, '\\"') + '"')], search_macros = [rule('P[_element]', new $.syntax(hyphenate(match._element.data -re [it === '_' ? '*' : it])) -given.match), rule('P[_element._class]', new $.syntax('#{this.expand(p(match._element)).data}.#{hyphenate(match._class.data)}') -given.match), rule('P[_element[_attributes]]', new $.syntax('#{this.expand(p(match._element)).data}[#{this.expand(p(match._attributes))}]') -given.match), rule('P[_attribute = _value]', new $.syntax('#{this.expand(p(match._attribute)).data}="#' + '{#{interpolated(match._value)}}"') -given.match), rule('P[(_element)]', 'P[_element]'), // No paren support rule('P[_element1 + _element2]', binary(', ')), rule('P[_element1, _element2]', binary(', ')), rule('P[_element1 >> _element2]', binary(' ')), rule('P[_element1 >>> _element2]', binary(' ')), rule('P[_element1 > _element2]', binary(' > ')), rule('P[_element1(_element2)]', binary(' > ')), rule('P[_element /_selector]', new $.syntax('#{this.expand(p(match._element)).data}:#{hyphenate(match._selector.data)}') -given.match), rule('P[_element /_selector(_value)]', new $.syntax('#{this.expand(p(match._element)).data}:#{hyphenate(match._selector.data)}("#' + '{#{interpolated(match._value)}")') -given.match)] -where [interpolated(node) = '(#{node.toString()}).replace(/(\\)/g, "$1$1").replace(/(")/g, "\\$1")', binary(op)(match) = new $.syntax('#{this.expand(p(match._element1)).data}#{op}#{this.expand(p(match._element2)).data}')]]})(caterwaul); __ meta::sdoc('js::precompile', <<'__'); Offline precompilation. Uses caterwaul's precompile() method. 
var code = require('fs').readFileSync(process.argv[2], 'utf8'); require('fs').writeFileSync(process.argv[2].replace(/\.js$/, '.pre.js'), caterwaul.precompile('function () {' + code + '}').toString().replace(/^[\s\n]*function\s*\([^)]*\)[\s\n]*\{((?:.|\n)*)\}[\s\n]*$/, '$1'), 'utf8'); __ meta::sdoc('js::test/sanity-check', <<'__'); caterwaul.test(function () { var threw = false; 3 -should_be- 3; try {3 -should_be- 4} catch (e) {threw = true} threw -should_be- true; }); __ meta::sdoc('js::test/trace', <<'__'); Trace tests. Because tracing is done at runtime we can easily write a test for it. These are some fairly trivial tests to make sure nothing major is broken. caterwaul.test(function () { var observed_trees = []; var observed_values = []; var tree_stack = []; var last = function (xs) {return xs[xs.length - 1]}; var t = caterwaul.tracer(function (t) {tree_stack.push(t); observed_trees.push(t)}, function (t, v) {tree_stack.pop() -should_be- t; observed_values.push(v)}); tree_stack.length -should_be- 0; var f = function () {return 5}; var traced_f = t(f); traced_f(); tree_stack.length -should_be- 0; observed_values[0] -should_be- traced_f; observed_values[1] -should_be- 5; observed_trees[0].toString() -should_be- caterwaul.parse(f).toString(); observed_trees[1].toString() -should_be- caterwaul.parse('5').toString(); observed_values.length -should_be- 2; observed_trees.length -should_be- 2; }); __ meta::sdoc('js::tools/precompile', <<'__'); Caterwaul precompiler | Spencer Tipping Licensed under the terms of the MIT source code license Usage: node precompile.js file.js This will produce a precompiled file called 'file.pre.js'. - include pp::js::caterwaul - include pp::js::extensions/std - include pp::js::extensions/dev - include pp::js::precompile __ meta::template('comment', '\'\'; # A mechanism for line or block comments.'); meta::template('eval', <<'__'); my $result = eval $_[0]; terminal::warning("Error during template evaluation: $@") if $@; $result; __ meta::template('failing_conditional', <<'__'); my ($commands) = @_; my $should_return = $commands =~ / if (.*)$/ && ! eval $1; terminal::warning("eval of template condition failed: $@") if $@; $should_return; __ meta::template('include', <<'__'); my ($commands) = @_; return '' if template::failing_conditional($commands); join "\n", map retrieve($_), split /\s+/, $commands; __ meta::template('pinclude', <<'__'); # Just like the regular include, but makes sure to insert paragraph boundaries # (this is required for SDoc to function properly). my ($commands) = @_; return '' if template::failing_conditional($commands); my $text = join "\n\n", map retrieve($_), split /\s+/, $commands; "\n\n$text\n\n"; __ internal::main(); __END__