diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..3a14d300 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +bin +lib +*.o +*.a +*.class +*.out diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..e786589e --- /dev/null +++ b/Dockerfile @@ -0,0 +1,34 @@ +FROM alpine:3.17 AS base + +LABEL maintainer="boyland@uwm.edu" +LABEL version="1.0.0" + +ARG SCALA_VERSION="2.12.13" +ARG BISON_VERSION="3.5.1" +ARG CLASSHOME="/usr/local" + +WORKDIR /usr/lib + +# Install basics +RUN apk add --no-cache bash make flex gcompat gcc g++ openjdk11 build-base m4 git ncurses \ + && apk add --no-cache --virtual=build-dependencies wget ca-certificates + +# Download Scala +RUN wget -q "https://downloads.lightbend.com/scala/${SCALA_VERSION}/scala-${SCALA_VERSION}.tgz" -O - | gunzip | tar x + +ENV SCALAHOME="/usr/lib/scala-$SCALA_VERSION" +ENV PATH="$SCALAHOME/bin:$PATH" + +# Build and install Bison +RUN wget -q "https://ftp.gnu.org/gnu/bison/bison-${BISON_VERSION}.tar.gz" -O - | gunzip | tar x \ + && cd "bison-${BISON_VERSION}" \ + && ./configure \ + && make \ + && make install + +COPY . . + +ENV APSHOME="/usr/lib/bin" +ENV PATH="$APSHOME:$PATH" + +WORKDIR /usr/local diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..f8247c80 --- /dev/null +++ b/Makefile @@ -0,0 +1,38 @@ +SUBDIRS= utilities parse analyze aps2scala apscpp +DOCKERTAG= boylanduwm/aps + +install: + -mkdir -p lib bin + for d in ${SUBDIRS}; do \ + (cd $$d; ${MAKE} install); \ + done + +clean: + for d in ${SUBDIRS}; do \ + (cd $$d; ${MAKE} clean); \ + done + +# This cleans after an install: it does a realclean +# which gets rid of generated .h files as well. +realclean: + for d in ${SUBDIRS}; do \ + (cd $$d; ${MAKE} realclean); \ + done + +dockerbuild: install + sudo docker build . -t ${DOCKERTAG} + +cs854-dockerbuild: + sudo docker build . 
-t ${DOCKERTAG} -f cs854.Dockerfile + +dockerpush: + docker push ${DOCKERTAG} + +distclean: realclean + rm -rf bin lib + +cs854.tgz : distclean + tar cvf - utilities parse analyze aps2scala codegen cool/cool-tree.aps cool/cool-symbol.aps Makefile README | gzip > $@ + +.PHONY: install clean realclean distclean + diff --git a/README b/README new file mode 100644 index 00000000..7c70a0f1 --- /dev/null +++ b/README @@ -0,0 +1,2 @@ +This APS system is copyright John Boyland 1996-2019. +You may freely use this software provided you don't claim authorship. diff --git a/analyze/.gitignore b/analyze/.gitignore new file mode 100644 index 00000000..2f38c6ce --- /dev/null +++ b/analyze/.gitignore @@ -0,0 +1,2 @@ +apsc +aps-ag.a diff --git a/analyze/Makefile b/analyze/Makefile index aee09949..c360f23e 100644 --- a/analyze/Makefile +++ b/analyze/Makefile @@ -4,15 +4,17 @@ RANLIB= ar -qs PROJECT-DIR=.. APSAGOBJS= aps-bind.o aps-read.o aps-info.o aps-ag-util.o \ aps-fiber.o aps-cond.o aps-dnc.o aps-cycle.o aps-oag.o \ - aps-analyze.o aps-debug.o aps-type.o table.o alist.o + aps-analyze.o aps-debug.o aps-type.o table.o alist.o \ + canonical-signature.o canonical-type.o aps-scc.o aps-schedule.o APSCOBJS= apsc.o ${APSAGOBJS} APSCLIB= ${PROJECT-DIR}/lib/aps-lib.o +UTILITIESLIB= ${PROJECT-DIR}/utilities/utilities.o all : apsc aps-ag.a -CFLAGS= -I${PROJECT-DIR}/parse -g +CFLAGS= -I${PROJECT-DIR}/parse -I${PROJECT-DIR}/utilities -g apsc : ${APSCOBJS} ${APSCLIB} - ${CC} ${CFLAGS} -o apsc ${APSCOBJS} ${APSCLIB} + ${CC} ${CFLAGS} -o apsc ${APSCOBJS} ${APSCLIB} ${UTILITIESLIB} aps-ag.a : ${APSAGOBJS} ${AR} -cr aps-ag.a ${APSAGOBJS} @@ -24,6 +26,10 @@ aps-fiber.o aps-fiber-callsite.o: aps-fiber.h aps-dnc.h aps-fiber-callsite.h aps-oag.o : aps-oag.h aps-cycle.o aps-dnc.o : aps-cycle.h aps-debug.o : aps-debug.h +canonical-type.o : canonical-type.h +canonical-signature.o : canonical-type.o canonical-signature.h +aps-scc.o : aps-dnc.o aps-scc.h +aps-schedule.o : aps-dnc.o aps-cycle.o aps-scc.o 
aps-schedule.h install: apsc aps-ag.a mv apsc ../bin/apssched diff --git a/analyze/ag.h b/analyze/ag.h index eccc052f..91eed50b 100644 --- a/analyze/ag.h +++ b/analyze/ag.h @@ -155,3 +155,5 @@ struct use_expr { struct ref_expr { EXPR super; } + +#endif diff --git a/analyze/alist.c b/analyze/alist.c index c31950d9..4d935074 100644 --- a/analyze/alist.c +++ b/analyze/alist.c @@ -28,6 +28,7 @@ void *rassoc(void *value, ALIST alist) { if (alist->value == value) return alist->key; alist=alist->next; } + return NULL; } BOOL setassoc(void *key, void *value, ALIST alist) { diff --git a/analyze/aps-ag-util.h b/analyze/aps-ag-util.h index b794f0c2..03eb5d03 100644 --- a/analyze/aps-ag-util.h +++ b/analyze/aps-ag-util.h @@ -1,3 +1,6 @@ +#ifndef APS_AG_UTILS_H +#define APS_AG_UTILS_H + /** We require that top-level matches have the form * match ?name:Type=constructor(?arg1,?arg2,...) * and provide the following functions for accessing the pieces. @@ -12,5 +15,4 @@ extern Declaration match_constructor_decl(Match); extern Declaration match_first_rhs_decl(Match); extern Declaration next_rhs_decl(Declaration); - - +#endif diff --git a/analyze/aps-ag.h b/analyze/aps-ag.h index ce0b0fa3..da66f700 100644 --- a/analyze/aps-ag.h +++ b/analyze/aps-ag.h @@ -16,6 +16,10 @@ #include "aps-oag.h" #include "aps-analyze.h" #include "aps-debug.h" +#include "../utilities/utilities.h" +#include "canonical-type.h" +#include "canonical-signature.h" +#include "aps-scc.h" extern char *aps_yyfilename; extern void aps_error(const void *tnode, const char *fmt, ...); diff --git a/analyze/aps-analyze.c b/analyze/aps-analyze.c index 14fd3686..10791186 100644 --- a/analyze/aps-analyze.c +++ b/analyze/aps-analyze.c @@ -2,6 +2,7 @@ #include #include "aps-ag.h" +#include "aps-schedule.h" /* Several phases: @@ -14,39 +15,93 @@ Several phases: 7> diagnostic output */ +// Boolean indicating whether to use SCC chunk scheduling +bool static_scc_schedule = false; +bool anc_analysis = false; + static void 
*analyze_thing(void *ignore, void *node) { STATE *s; - if (ABSTRACT_APS_tnode_phylum(node) == KEYDeclaration) { + DEPENDENCY d; + if (ABSTRACT_APS_tnode_phylum(node) == KEYDeclaration) + { Declaration decl = (Declaration)node; - switch (Declaration_KEY(decl)) { + switch (Declaration_KEY(decl)) + { + default: break; case KEYmodule_decl: - s = compute_dnc(decl); - switch (analysis_state_cycle(s)) { - default: - aps_error(decl,"Cycle detected; Attribute grammar is not DNC"); - break; - case indirect_control_fiber_dependency: - case control_fiber_dependency: - case indirect_fiber_dependency: - case fiber_dependency: - printf("Fiber cycle detected; cycle being removed\n"); - break_fiber_cycles(decl,s); - /* fall through */ - case no_dependency: - compute_oag(decl,s); - switch (analysis_state_cycle(s)) { - case no_dependency: - break; - default: - aps_error(decl,"Cycle detected; Attribute grammar is not OAG"); - break; - } - break; - } - if (cycle_debug & PRINT_CYCLE) { - print_cycles(s,stdout); + s = compute_dnc(decl, anc_analysis); + if (anc_analysis) { + if (cycle_debug & PRINT_CYCLE) + { + print_cycles(s, stdout); + } + d = (s->original_state_dependency = analysis_state_cycle(s)); + s->loop_required = !(d & DEPENDENCY_MAYBE_SIMPLE); + } else { + if (!(d = (s->original_state_dependency = analysis_state_cycle(s)))) + { + // Do nothing; no cycle to remove + s->loop_required = false; + } + else if (!(d & DEPENDENCY_MAYBE_SIMPLE) || !(d & DEPENDENCY_NOT_JUST_FIBER)) + { + printf("Fiber cycle detected (%d); cycle being removed\n", d); + if (cycle_debug & PRINT_CYCLE) + { + print_cycles(s, stdout); + } + break_fiber_cycles(decl, s, d); + s->loop_required = !(d & DEPENDENCY_MAYBE_SIMPLE); + d = no_dependency; // clear dependency + } + else + { + aps_error(decl, "Unable to handle dependency (%d); Attribute grammar is not DNC", d); + return NULL; + } + + // If SCC scheduling is in-progress + if (static_scc_schedule) + { + // Pure fiber cycles should have been broken when 
reaching this step + // If there is a non-monotone cycle, then scheduling is no longer possible + if (d & DEPENDENCY_MAYBE_SIMPLE) + { + if (cycle_debug & PRINT_CYCLE) + { + print_cycles(s, stdout); + } + + aps_error(decl, "Non-monotone Cycle detected (%d); Attribute grammar is not COAG", d); + return NULL; + } + + // SCC chunk scheduling supports CRAG without conditional cycles + compute_static_schedule(s); + } + else + { + if (!d) + { + // RAG scheduling with conditional cycles + compute_oag(decl, s); // calculate OAG if grammar is DNC + d = analysis_state_cycle(s); // check again for type-3 errors + } + + if (d) + { + if (cycle_debug & PRINT_CYCLE) + { + print_cycles(s, stdout); + } + + aps_error(decl, "Cycle detected (%d); Attribute grammar is not OAG", d); + return NULL; + } + } } + Declaration_info(decl)->analysis_state = s; break; } @@ -57,5 +112,8 @@ static void *analyze_thing(void *ignore, void *node) void analyze_Program(Program p) { + char *saved_filename = aps_yyfilename; + aps_yyfilename = (char *)program_name(p); traverse_Program(analyze_thing,p,p); + aps_yyfilename = saved_filename; } diff --git a/analyze/aps-analyze.h b/analyze/aps-analyze.h index 96790cf2..b1166f85 100644 --- a/analyze/aps-analyze.h +++ b/analyze/aps-analyze.h @@ -1,4 +1,14 @@ +#ifndef APS_ANALYZE_H +#define APS_ANALYZE_H + +#include + #define MODULE_DECL_ANALYSIS_STATE(md) \ (STATE*)(Declaration_info(md)->analysis_state) +extern bool static_scc_schedule; +extern bool anc_analysis; + extern void analyze_Program(Program); /* decorate modules with STATE */ + +#endif diff --git a/analyze/aps-bind.c b/analyze/aps-bind.c index c9163b37..5b6b0daf 100644 --- a/analyze/aps-bind.c +++ b/analyze/aps-bind.c @@ -142,6 +142,8 @@ static SCOPE add_env_item(SCOPE old, Declaration d) { return new_entry; } } + default: + break; } return old; } @@ -188,6 +190,19 @@ static SCOPE add_ext_sig(SCOPE old, Declaration tdecl, Signature sig) { tnode_line_number(cl)); } } + /* FALLTHROUGH */ + case 
KEYsig_use: + { + Declaration sig_use_decl = USE_DECL(sig_use_use(sig)); + switch (Declaration_KEY(sig_use_decl)) + { + case KEYsome_class_decl: + aps_error(sig, "Missing actuals for %s in the signature", decl_name(sig_use_decl)); + break; + default: + break; + } + } default: fatal_error("%d: signature too complicated",tnode_line_number(sig)); } @@ -262,6 +277,27 @@ static SCOPE inst_services(TypeEnvironment use_type_env, * * services = signature_services(tdecl,psig,services); */ + Declaration td = some_class_decl_result_type(class_decl); + if (Declaration_KEY(td) == KEYtype_decl) { + Type ty = type_decl_type(td); + switch (Type_KEY(ty)) + { + case KEYtype_inst: + { + Module m = type_inst_module(ty); + Use u = module_use_use(m); + Declaration mdecl = USE_DECL(u); + // Cannot use type_inst_type_actuals(ty) here, we need to use + // tacts for the "actuals" to be replaced with real type_inst actuals. + TypeActuals tas = tacts; + services = inst_services(use_type_env, mdecl, td, tas, services); + break; + } + default: + break; + } + } + traverse_Block(get_public_bindings,&services, some_class_decl_contents(class_decl)); pop_type_contour(); /* not actually necessary */ @@ -302,6 +338,8 @@ static SCOPE signature_services(Declaration tdecl, Signature sig, SCOPE services case KEYfixed_sig: case KEYno_sig: break; + default: + break; } return services; } @@ -392,6 +430,8 @@ static SCOPE type_services(Type t) case KEYfunction_type: case KEYno_type: break; + default: + break; } Type_info(t)->binding_temporary = services; return services; @@ -419,6 +459,10 @@ static void bind_Use(Use u, int namespaces, SCOPE scope) { case KEYqual_use: do_bind(scope,qual_use_from(u)); { + Type type = qual_use_from(u); + if (qual_use_name(u) == intern_symbol("Result")) { + aps_error(u, "Result is a private member and cannot be accessed directly"); + } SCOPE s = type_services(qual_use_from(u)); bind_Use_by_name(u,qual_use_name(u),namespaces,s); if (USE_DECL(u) == 0) { @@ -433,6 +477,8 @@ static 
void bind_Use(Use u, int namespaces, SCOPE scope) { } } break; + default: + break; } } @@ -460,8 +506,12 @@ static void *get_bindings(void *scopep, void *node) { bind_Program(p); traverse_Program(get_public_bindings,scopep,p); } + default: + break; } } + default: + break; } return scopep; } @@ -481,8 +531,12 @@ static void *get_public_bindings(void *scopep, void *node) { } } return NULL; + default: + break; } } + default: + break; } return scopep; } @@ -547,13 +601,42 @@ static void *do_bind(void *vscope, void *node) { TYPE_FORMAL_EXTENSION_FLAG; new_scope=add_ext_sig(new_scope,rdecl,some_type_formal_sig(tf)); break; + default: + break; } } } break; + default: + break; } } traverse_Block(do_bind,new_scope,module_decl_contents(d)); } + { + Declaration type_formal = first_Declaration(module_decl_type_formals(d)); + for (;type_formal != NULL; type_formal = DECL_NEXT(type_formal)) + { + Signature sig = some_type_formal_sig(type_formal); + switch (Signature_KEY(sig)) + { + case KEYsig_use: + { + Declaration use_decl = USE_DECL(sig_use_use(sig)); + switch (Declaration_KEY(use_decl)) + { + case KEYsome_class_decl: + aps_error(sig, "Missing actuals for %s in the signature", decl_name(use_decl)); + break; + default: + break; + } + break; + } + default: + break; + } + } + } break; case KEYattribute_decl: { Type ftype = attribute_decl_type(d); @@ -605,18 +688,27 @@ static void *do_bind(void *vscope, void *node) { switch (Signature_KEY((Signature)node)) { case KEYsig_use: bind_Use(sig_use_use((Signature)node),NAME_SIGNATURE,scope); + break; + default: + break; } break; case KEYType: switch (Type_KEY((Type)node)) { case KEYtype_use: bind_Use(type_use_use((Type)node),NAME_TYPE,scope); + break; + default: + break; } break; case KEYPattern: switch (Pattern_KEY((Pattern)node)) { case KEYpattern_use: bind_Use(pattern_use_use((Pattern)node),NAME_PATTERN,scope); + break; + default: + break; } break; case KEYExpression: @@ -632,6 +724,8 @@ static void *do_bind(void *vscope, void *node) 
{ new_scope = bind_Declaration(new_scope,controlled_formal(e)); traverse_Expression(do_bind,new_scope,controlled_expr(e)); return NULL; + default: + break; } } break; @@ -645,6 +739,8 @@ static void *do_bind(void *vscope, void *node) { } } break; + default: + break; } return vscope; } @@ -716,6 +812,8 @@ static Expression actuals_set_next_actual(Actuals actuals, Expression next) { Actuals more = append_Actuals_l2(actuals); Expression middle = actuals_set_next_actual(more,next); return actuals_set_next_actual(some,middle); } + default: + break; } fatal_error("control reached end of actuals_set_next_actual"); return NULL; @@ -1044,6 +1142,8 @@ static void *activate_pragmas(void *ignore, void *node) { Declaration_info(d)->decl_flags |= FIELD_DECL_CYCLIC_FLAG; } } + default: + break; } break; case KEYtype_value: @@ -1062,12 +1162,16 @@ static void *activate_pragmas(void *ignore, void *node) { Declaration_info(d)->decl_flags |= SELF_MANAGED_FLAG; } } + default: + break; } break; } } } break; + default: + break; } } return ignore; diff --git a/analyze/aps-bind.h b/analyze/aps-bind.h index 19013258..82ee8fa4 100644 --- a/analyze/aps-bind.h +++ b/analyze/aps-bind.h @@ -1,3 +1,6 @@ +#ifndef APS_BIND_H +#define APS_BIND_H + extern void bind_Program(Program); extern Unit first_Unit(Units); extern Declaration first_Declaration(Declarations); @@ -29,3 +32,5 @@ extern int decl_namespaces(Declaration d); extern int bind_debug; #define PRAGMA_ACTIVATION 1 + +#endif diff --git a/analyze/aps-cond.h b/analyze/aps-cond.h index 9edf899f..dfdb6809 100644 --- a/analyze/aps-cond.h +++ b/analyze/aps-cond.h @@ -1,3 +1,6 @@ +#ifndef APS_COND_H +#define APS_COND_H + typedef struct condition { unsigned positive; unsigned negative; @@ -6,3 +9,7 @@ typedef struct condition { enum CONDcompare { CONDeq, CONDgt, CONDlt, CONDcomp, CONDnone }; extern enum CONDcompare cond_compare(CONDITION *, CONDITION *); + +extern void print_condition(CONDITION *cond, FILE *stream); + +#endif diff --git 
a/analyze/aps-cycle.c b/analyze/aps-cycle.c old mode 100644 new mode 100755 index 9fc04ac1..be95f67a --- a/analyze/aps-cycle.c +++ b/analyze/aps-cycle.c @@ -10,6 +10,7 @@ #include "aps-ag.h" int cycle_debug = 0; +static const int BUFFER_SIZE = 1000; /* We use a union-find algorithm to detect strongly connected components. * We use a dynamically allocated array to hold the pointers, @@ -22,6 +23,12 @@ static int *parent_index; /* initializes to pi[i] = i */ static int *constructor_instance_start; static int *phylum_instance_start; + +#define UP_DOWN_DIRECTION(v, direction) (direction ? v : !v) +#define IS_JUST_FIBER_DEPENDENCY(d) (((d) & DEPENDENCY_MAYBE_SIMPLE)) +#define UP_DOWN (true) +#define DOWN_UP (false) + static void init_indices(STATE *s) { int num = 0; int i = 0; @@ -55,11 +62,8 @@ static int get_set(int index) { } } -static int merge_sets(int index1, int index2) { - if (parent_index[index1] == -1) /* non normally cyclic */ - parent_index[index1] = get_set(index2); - else - parent_index[get_set(index1)] = get_set(index2); +static void merge_sets(int index1, int index2) { + parent_index[get_set(index1)] = get_set(index2); } typedef VECTOR(int) SETS; @@ -68,7 +72,8 @@ static void get_fiber_cycles(STATE *s) { int i,j,k; int num_sets = 0; for (i=0; i < num_instances; ++i) { - if (parent_index[i] == i) ++num_sets; + if (parent_index[i] < 0) continue; + if (get_set(i) == i) ++num_sets; } VECTORALLOC(s->cycles,CYCLE,num_sets); num_sets=0; @@ -77,6 +82,7 @@ static void get_fiber_cycles(STATE *s) { INSTANCE *iarray; int count = 0; CYCLE *cyc = &s->cycles.array[num_sets++]; + DEPENDENCY kind = no_dependency; cyc->internal_info = i; for (j=0; j < num_instances; ++j) { if (parent_index[j] >= 0 && get_set(j) == i) ++count; @@ -87,9 +93,11 @@ static void get_fiber_cycles(STATE *s) { for (j=0; j < s->phyla.length; ++j) { int phylum_index = phylum_instance_start[j]; PHY_GRAPH *phy = &s->phy_graphs[j]; - for (k = 0; k < phy->instances.length; ++k) { + int n = 
phy->instances.length; + for (k = 0; k < n; ++k) { if (parent_index[phylum_index+k] == i) { iarray[count++] = phy->instances.array[k]; + kind = dependency_join(kind,phy->mingraph[k*n+k]); } } } @@ -99,12 +107,16 @@ static void get_fiber_cycles(STATE *s) { (j == s->match_rules.length) ? &s->global_dependencies : &s->aug_graphs[j]; - for (k = 0; k < aug_graph->instances.length; ++k) { + int n = aug_graph->instances.length; + for (k = 0; k < n; ++k) { if (parent_index[constructor_index+k] == i) { iarray[count++] = aug_graph->instances.array[k]; + DEPENDENCY k1 = edgeset_kind(aug_graph->graph[k*n+k]); + kind = dependency_join(kind,k1); } } } + cyc->kind = kind; if (count != cyc->instances.length) { fatal_error("Counted %d instances in cycle, now have %d\n", cyc->instances.length,count); @@ -115,7 +127,7 @@ static void get_fiber_cycles(STATE *s) { printf("%d independent fiber cycle%s found\n",num_sets,num_sets>1?"s":""); for (i=0; i < s->cycles.length; ++i) { CYCLE *cyc = &s->cycles.array[i]; - printf("Cycle %d:\n",i); + printf("Cycle %d (%d):\n",i,cyc->kind); for (j = 0; j < cyc->instances.length; ++j) { printf(" "); print_instance(&cyc->instances.array[j],stdout); @@ -125,8 +137,7 @@ static void get_fiber_cycles(STATE *s) { } } - -/*** determing strongly connected sets of attributes ***/ +/*** determining strongly connected sets of attributes ***/ static void make_augmented_cycles_for_node(AUG_GRAPH *aug_graph, int constructor_index, @@ -262,6 +273,24 @@ static void make_augmented_cycles(AUG_GRAPH *aug_graph, int constructor_index) } +/* true if dependencies can flow over a serial composition */ +static BOOL can_trans(EDGESET es1, EDGESET es2) { + EDGESET e1; + EDGESET e2; + for (e1 = es1; e1 != NULL; e1=e1->rest) { + for (e2 = es2; e2 != NULL; e2=e2->rest) { + CONDITION* cond1 = &e1->cond; + CONDITION* cond2 = &e2->cond; + CONDITION cond; + cond.positive = cond1->positive|cond2->positive; + cond.negative = cond1->negative|cond2->negative; + if (cond.positive & 
cond.negative) continue; + return TRUE; + } + } + return FALSE; +} + static void make_cycles(STATE *s) { int i,j,k; /* summary cycles */ @@ -303,7 +332,10 @@ static void make_cycles(STATE *s) { if (k != j && edgeset_kind(aug_graph->graph[j*n+k]) != no_dependency && edgeset_kind(aug_graph->graph[k*n+j]) != no_dependency) { - merge_sets(constructor_index+j,constructor_index+k); + /* check to make sure conditions are compatible */ + if (can_trans(aug_graph->graph[j*n+k],aug_graph->graph[k*n+j])) { + merge_sets(constructor_index+j,constructor_index+k); + } } } } @@ -313,314 +345,610 @@ static void make_cycles(STATE *s) { } -/*** Add new instances and redo dependency graphs ***/ +/*** Break cycles and redo dependency graphs ***/ -SYMBOL make_up_down_name(const char *n, int num,BOOL up) { - char name[80]; - sprintf(name,"%s[%s]-%d",up?"UP":"DOWN",n,num); - /* printf("Creating symbol %s\n",name); */ - return intern_symbol(name); +bool instance_is_up(INSTANCE *i) { + return (fibered_attr_direction(&i->fibered_attr)) == instance_outward; } -static char danger[1000]; +/** + * Removes edgeset between two instances at the indices. + * @param index1 source index + * @param index2 sink index + * @param n width of matrix + * @param array instance array + * @param aug_graph augmented dependency graph + */ +static void remove_edgeset(int index1, int index2, int n, INSTANCE *array, AUG_GRAPH *aug_graph) +{ + INSTANCE *attr1 = (&array[index1]); + INSTANCE *attr2 = (&array[index2]); + if (cycle_debug & DEBUG_UP_DOWN) { + printf(" Removing up/down: "); + print_instance(attr1, stdout); + printf(" -> "); + print_instance(attr2, stdout); + printf("\n"); + } + free_edgeset(aug_graph->graph[index1 * n + index2], aug_graph); + aug_graph->graph[index1 * n + index2] = NULL; +} -static const char *phylum_to_string(Declaration d) +/** + * Add edge between two instances to reflect the up/down construction. 
+ * @param index1 source index + * @param index2 sink index + * @param n width of matrix + * @param array instance array + * @param dep dependency to join + * @param cond condition to join + * @param aug_graph augmented dependency graph + */ +static void add_up_down_edge(int index1, int index2, int n, INSTANCE *array, DEPENDENCY dep, CONDITION *cond, AUG_GRAPH *aug_graph) { - switch (Declaration_KEY(d)) { - default: - return decl_name(d); - case KEYpragma_call: - sprintf(danger,"%s:%d",symbol_name(pragma_call_name(d)), - tnode_line_number(d)); - return danger; - case KEYif_stmt: - sprintf(danger,"if:%d",tnode_line_number(d)); - return danger; + INSTANCE *attr1 = &array[index1]; + INSTANCE *attr2 = &array[index2]; + + if (cycle_debug & DEBUG_UP_DOWN) + { + printf(" Adding up/down: "); + print_instance(attr1, stdout); + printf(" -> "); + print_instance(attr2, stdout); + printf("\n"); } + add_edge_to_graph(attr1, attr2, cond, dep, aug_graph); } -static void add_up_down_attributes(STATE *s) { - int i,j,k,l; - CONDITION cond; - cond.positive=0; - cond.negative=0; - for (i=0; i < s->cycles.length; ++i) { +/** + * Combines dependencies for edgeset + * @param es + * @param acc_dependency + * @param acc_cond + */ +static void edgeset_combine_dependencies(EDGESET es, DEPENDENCY* acc_dependency, CONDITION* acc_cond) +{ + for (; es != NULL; es = es->rest) + { + *acc_dependency |= es->kind; + acc_cond->positive |= es->cond.positive; + acc_cond->negative |= es->cond.negative; + } +} + +#define UP_DOWN_DIRECTION(v, direction) (direction ? v : !v) + +/** + * In phylum graph (and in aug graph) + * For every instance i: + * If the instance is NOT in the cycle: + * "OR" the condition/kind for the dependency from i to any instance of the cycle. 
+ * If result is not False, 0 + * Create a dependency from i to *every* instance in the cycle with this condition and kind + * + * "OR" the condition/kind for any instance of the cycle to i + * If result is not False, 0 + * Create a dependency from *every* instance in the cycle to i with this condition and kind + * + * If the instance is in the cycle and an UP attr: + * "OR" the condition/kind for the dependency from i to any instance of the cycle + * If the result is not False, 0 + * Create a dependency from i to all the DOWN instances in the cycle + * Remove all dependencies from this attribute to any other UP instance in the same cycle + * + * If the instance is in the cycle and an DOWN attr: + * Remove all dependencies from this attribute to any other instance in the same cycle + * @param s analysis STATE + * @param direction true: UP_DOWN and false DOWN_UP + */ +static void add_up_down_attributes(STATE *s, bool direction) +{ + int i, j, k, l, m; + DEPENDENCY acc_dependency; + CONDITION acc_cond; + + // Forall cycles in the graph + for (i = 0; i < s->cycles.length; i++) + { CYCLE *cyc = &s->cycles.array[i]; - for (j=0; j < s->phyla.length; ++j) { - int found = 0; + if (cycle_debug & DEBUG_UP_DOWN) printf("Breaking Cycle #%d\n",i); + + // Forall phylum in the phylum_graph + for (j = 0; j < s->phyla.length; j++) + { PHY_GRAPH *phy = &s->phy_graphs[j]; int n = phy->instances.length; int phylum_index = phylum_instance_start[j]; INSTANCE *array = phy->instances.array; - int upindex, downindex; - Declaration upattr = - attribute_decl(def(make_up_down_name(phylum_to_string(s->phyla.array[j]), - i,TRUE), - FALSE,FALSE), - no_type(), /* sloppy */ - direction(FALSE,FALSE,FALSE),no_default()); - Declaration downattr = - attribute_decl(def(make_up_down_name(phylum_to_string(s->phyla.array[j]), - i,FALSE), - FALSE,FALSE), - no_type(), /* sloppy */ - direction(FALSE,FALSE,FALSE),no_default()); - Declaration_info(upattr)->decl_flags = - 
ATTR_DECL_SYN_FLAG|DECL_LOCAL_FLAG|SHARED_DECL_FLAG|UP_DOWN_FLAG; - Declaration_info(downattr)->decl_flags = - ATTR_DECL_INH_FLAG|DECL_LOCAL_FLAG|SHARED_DECL_FLAG|UP_DOWN_FLAG; - for (k=0; k < n; ++k) { - if (parent_index[k+phylum_index] == cyc->internal_info) { - switch (++found) { - case 1: - array[k].fibered_attr.attr = upattr; - Declaration_info(upattr)->instance_index = upindex = k; - break; - case 2: - array[k].fibered_attr.attr = downattr; - Declaration_info(downattr)->instance_index = downindex = k; - break; - default: - array[k].fibered_attr.attr = NULL; - break; - } - array[k].fibered_attr.fiber = NULL; - } - } - if (found > 0) { - if (found < 2) fatal_error("Not enough attributes to make cycle"); - /* redo dependencies */ - for (k=0; k < n; ++k) { - BOOL kcycle = parent_index[k+phylum_index] == cyc->internal_info; - if (k != downindex) { - for (l=0; l < n; ++l) { - BOOL lcycle = parent_index[l+phylum_index] == cyc->internal_info; - if (l != upindex && - phy->mingraph[k*n+l] != no_dependency) { - if (kcycle || lcycle) { - phy->mingraph[k*n+l] = no_dependency; - if (!lcycle) { - phy->mingraph[downindex*n+l] = fiber_dependency; - } else if (!kcycle) { - phy->mingraph[k*n+upindex] = fiber_dependency; - } - } - } - } - } - } - phy->mingraph[upindex*n+upindex] = no_dependency; - phy->mingraph[upindex*n+downindex] = fiber_dependency; /* set! 
*/ - phy->mingraph[downindex*n+upindex] = no_dependency; - phy->mingraph[downindex*n+downindex] = no_dependency; + + for (k = 0; k < n; k++) + { + INSTANCE *instance = &array[k]; + + // If instance is not in the cycle + if (parent_index[k + phylum_index] != cyc->internal_info) + { + acc_dependency = no_dependency; + + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // If dependency is not to self and is in cycle + if (parent_index[l + phylum_index] == cyc->internal_info) + { + acc_dependency |= phy->mingraph[k * n + l]; + } + } + + // If any dependency + if (acc_dependency) + { + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // If edge is not to self and it is in the cycle + if (parent_index[l + phylum_index] == cyc->internal_info) + { + phy->mingraph[k * n + l] = acc_dependency; + } + } + } + + acc_dependency = no_dependency; + + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // If dependency is not to self and is in cycle + if (parent_index[l + phylum_index] == cyc->internal_info) + { + acc_dependency |= phy->mingraph[l * n + k]; + } + } + + // If any dependency + if (acc_dependency) + { + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // If edge is not to self and it is in the cycle + if (parent_index[l + phylum_index] == cyc->internal_info) + { + phy->mingraph[l * n + k] = acc_dependency; + } + } + } + } + // Instance is in the cycle + else + { + // UP attribute + if (direction && UP_DOWN_DIRECTION(instance_is_up(instance), direction)) + { + acc_dependency = no_dependency; + + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // If dependency is not to self and is in cycle + if (parent_index[l + phylum_index] == cyc->internal_info) + { + acc_dependency |= phy->mingraph[k * n + l]; + } + } + + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // Make sure it is in the cycle + if (parent_index[l + phylum_index] == cyc->internal_info) + { + // Make sure it is a 
DOWN attribute + if (acc_dependency && UP_DOWN_DIRECTION(!instance_is_up(&array[l]), direction)) + { + phy->mingraph[k * n + l] = acc_dependency; + } + else + { + phy->mingraph[k * n + l] = no_dependency; + } + } + } + } + // DOWN attribute + else + { + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // Make sure it is in the cycle + if (parent_index[l + phylum_index] == cyc->internal_info) + { + // Remove edges between instance and all others in the same cycle + phy->mingraph[k * n + l] = no_dependency; + } + } + } + } } - } /* for phyla */ - for (j = 0; j <= s->match_rules.length; ++j) { + } + + // Forall edges in the augmented dependency graph + for (j = 0; j <= s->match_rules.length; j++) + { AUG_GRAPH *aug_graph = - (j == s->match_rules.length) ? - &s->global_dependencies : - &s->aug_graphs[j]; + (j == s->match_rules.length) ? &s->global_dependencies : &s->aug_graphs[j]; int n = aug_graph->instances.length; int constructor_index = constructor_instance_start[j]; INSTANCE *array = aug_graph->instances.array; - int cycle_type = 0; - int upindex = -1, downindex = -1; - int start = 0; - int found = 0; - Declaration lastnode = NULL; - Declaration updecl, downdecl; - for (k=0; k < n; ++k) { - if (parent_index[k+constructor_index] == cyc->internal_info) { - ++found; - if (array[k].node == NULL) - cycle_type |= CYC_LOCAL; - else if (DECL_IS_LHS(array[k].node)) - cycle_type |= CYC_ABOVE; - else if (DECL_IS_RHS(array[k].node)) - cycle_type |= CYC_BELOW; - else - fatal_error("Cannot classify node: %s",phylum_to_string(array[k].node)); + for (k = 0; k < n; k++) + { + INSTANCE *instance = &array[k]; + + if (cycle_debug & DEBUG_UP_DOWN) { + printf("> aug node #%d (",k); + print_instance(&array[k],stdout); + printf(") %s\n", + parent_index[k + constructor_index] == cyc->internal_info ? + (instance_is_up(instance) ? 
"UP" : "DOWN") : + "not in cycle"); } + + // If instance is not in the cycle + if (parent_index[k + constructor_index] != cyc->internal_info) + { + acc_dependency = no_dependency; + acc_cond.positive = 0; + acc_cond.negative = 0; + + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // If dependency is not to self and is in cycle + if (parent_index[l + constructor_index] == cyc->internal_info && aug_graph->graph[k * n + l] != NULL) + { + edgeset_combine_dependencies(aug_graph->graph[k * n + l], &acc_dependency, &acc_cond); + } + } + + // If any dependency + if (acc_dependency) + { + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // If edge is not to self and it is in the cycle + if (parent_index[l + constructor_index] == cyc->internal_info) + { + // printf("k -> l Adding Not In cycle -> In Cycle: "); + add_up_down_edge(k, l, n, array, acc_dependency, &acc_cond, aug_graph); + } + } + } + + acc_dependency = no_dependency; + acc_cond.positive = 0; + acc_cond.negative = 0; + + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // If dependency is not to self and is in cycle + if (parent_index[l + constructor_index] == cyc->internal_info && aug_graph->graph[l * n + k] != NULL) + { + edgeset_combine_dependencies(aug_graph->graph[l * n + k], &acc_dependency, &acc_cond); + } + } + + // If any dependency + if (acc_dependency) + { + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // If edge is not to self and it is in the cycle + if (parent_index[l + constructor_index] == cyc->internal_info) + { + // printf("l -> k Adding In Cycle -> Not In Cycle: "); + add_up_down_edge(l, k, n, array, acc_dependency, &acc_cond, aug_graph); + } + } + } + } + // Instance is in the cycle + else + { + // UP attribute + if (UP_DOWN_DIRECTION(instance_is_up(instance), direction)) + { + acc_dependency = no_dependency; + acc_cond.positive = 0; + acc_cond.negative = 0; + + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + 
// If dependency is not to self and is in cycle + if (parent_index[l + constructor_index] == cyc->internal_info && aug_graph->graph[k * n + l] != NULL) + { + edgeset_combine_dependencies(aug_graph->graph[k * n + l], &acc_dependency, &acc_cond); + } + } + + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // Make sure it is in the cycle + if (parent_index[l + constructor_index] == cyc->internal_info) + { + // Make sure it is a DOWN attribute + if (acc_dependency && UP_DOWN_DIRECTION(!instance_is_up(&array[l]), direction)) + { + // printf("k -> l Adding In cycle -> In Cycle: "); + add_up_down_edge(k, l, n, array, acc_dependency, &acc_cond, aug_graph); + } + else + { + // printf("k -> l Removing In cycle -> In Cycle: "); + remove_edgeset(k, l, n, array, aug_graph); + } + } + } + } + // DOWN attribute + else + { + // Forall instances in the cycle + for (l = 0; l < n; l++) + { + // Make sure it is in the cycle + if (parent_index[l + constructor_index] == cyc->internal_info) + { + // Remove edges between instance and all others in the same cycle + // printf("k -> l Removing Down In cycle -> In Cycle: "); + remove_edgeset(k, l, n, array, aug_graph); + } + } + } + } } - if (cycle_type == CYC_BELOW) { - /* special case if all cycles are below: - * a different cycle for every node. 
- */ - if (found != 2) - fatal_error("Cannot handle cycle below case in general yet!"); - found = 0; - for (k=0; k < n; ++k) { - if (array[k].node != lastnode) { - start = k; - lastnode = array[k].node; - } - if (parent_index[k+constructor_index] == cyc->internal_info) { - PHY_GRAPH *phy_graph = - Declaration_info(array[k].node)->node_phy_graph; - array[k].fibered_attr.attr = - phy_graph->instances.array[k-start].fibered_attr.attr; - switch (++found) { - case 1: upindex = k; break; - case 2: downindex = k; break; - default: break; - } - array[k].fibered_attr.fiber = NULL; - } - } - if (found != 2) - fatal_error("Counted twice gets different numbers!"); - if (downindex != upindex) { - free_edgeset(aug_graph->graph[upindex*n+upindex],aug_graph); - add_edge_to_graph(&array[upindex],&array[downindex], - &cond,fiber_dependency,aug_graph); - free_edgeset(aug_graph->graph[downindex*n+upindex],aug_graph); - free_edgeset(aug_graph->graph[downindex*n+downindex],aug_graph); - aug_graph->graph[upindex*n+upindex] = - aug_graph->graph[downindex*n+upindex] = - aug_graph->graph[downindex*n+downindex] = - NULL; - } - } else if (cycle_type != 0) { - found = 0; - if (cycle_type == CYC_BELOW) - fatal_error("Below only: no locals to attach to!"); - if (!(cycle_type & CYC_ABOVE)) { - updecl = - value_decl(def(make_up_down_name(aug_graph_name(aug_graph), - i,TRUE), - FALSE,FALSE), - no_type(), /* sloppy */ - direction(FALSE,FALSE,FALSE),no_default()); - downdecl = - value_decl(def(make_up_down_name(aug_graph_name(aug_graph), - i,FALSE), - FALSE,FALSE), - no_type(), /* sloppy */ - direction(FALSE,FALSE,FALSE),no_default()); - Declaration_info(updecl)->decl_flags = DECL_LOCAL_FLAG; - Declaration_info(downdecl)->decl_flags = DECL_LOCAL_FLAG; - } - for (k=0; k < n; ++k) { - if (array[k].node != lastnode) { - start = k; - lastnode = array[k].node; - } - if (parent_index[k+constructor_index] == cyc->internal_info) { - /* printf("in cycle: "); - * print_instance(&array[k],stdout); - * 
printf("\n"); - */ - if (array[k].node == NULL) { /* a local */ - switch (++found) { - case 1: - array[k].fibered_attr.attr = updecl; - Declaration_info(updecl)->instance_index = upindex = k; - break; - case 2: - array[k].fibered_attr.attr = downdecl; - Declaration_info(downdecl)->instance_index = downindex = k; - break; - default: - array[k].fibered_attr.attr = NULL; - break; - } - } else { - PHY_GRAPH *phy_graph = - Declaration_info(array[k].node)->node_phy_graph; - array[k].fibered_attr.attr = - phy_graph->instances.array[k-start].fibered_attr.attr; - if (found < 2 && ! DECL_IS_RHS(array[k].node)) - switch (++found) { - case 1: upindex = k; break; - case 2: downindex = k; break; - } - } - array[k].fibered_attr.fiber = NULL; - } - } - if (found > 0) { - if (found == 1) downindex = upindex; - /* - printf("up = "); - print_instance(&array[upindex],stdout); - printf(" down = "); - print_instance(&array[downindex],stdout); - printf("\n"); - */ - /* redo dependencies */ - for (k = 0; k < n; ++k) - if (parent_index[k+constructor_index] == cyc->internal_info) - for (l = 0; l < n; ++l) - if (parent_index[l+constructor_index] == cyc->internal_info) { - free_edgeset(aug_graph->graph[k*n+l],aug_graph); - aug_graph->graph[k*n+l] = NULL; - } - for (k=0; k < n; ++k) { - BOOL kcycle = - parent_index[k+constructor_index] == cyc->internal_info; - if (k != downindex) { - for (l=0; l < n; ++l) { - BOOL lcycle = - parent_index[l+constructor_index] == cyc->internal_info; - if (l != upindex && - edgeset_kind(aug_graph->graph[k*n+l]) != no_dependency) { - if (kcycle || lcycle) { - free_edgeset(aug_graph->graph[k*n+l],aug_graph); - aug_graph->graph[k*n+l] = NULL; - if (!lcycle) { - add_edge_to_graph(&array[downindex],&array[l], - &cond,fiber_dependency,aug_graph); - } else if (!kcycle) { - add_edge_to_graph(&array[k],&array[upindex], - &cond,fiber_dependency,aug_graph); - } - } - } - } - } - } - /* fix up dependencies for cycle attributes */ - for (k=0; k < n; ++k) { - if 
(parent_index[k+constructor_index] == cyc->internal_info && - k != downindex && k != downindex && - array[k].node != NULL && - DECL_IS_RHS(array[k].node)) { - Declaration attr = array[k].fibered_attr.attr; - if (attr != NULL) { - if (ATTR_DECL_IS_SYN(attr)) { - add_edge_to_graph(&array[k],&array[upindex],&cond, - fiber_dependency,aug_graph); - } else { - add_edge_to_graph(&array[downindex],&array[k],&cond, - fiber_dependency,aug_graph); - } - } - } - } - if (downindex != upindex) { - free_edgeset(aug_graph->graph[upindex*n+upindex],aug_graph); - add_edge_to_graph(&array[upindex],&array[downindex], - &cond,fiber_dependency,aug_graph); - free_edgeset(aug_graph->graph[downindex*n+upindex],aug_graph); - free_edgeset(aug_graph->graph[downindex*n+downindex],aug_graph); - aug_graph->graph[upindex*n+upindex] = - aug_graph->graph[downindex*n+upindex] = - aug_graph->graph[downindex*n+downindex] = - NULL; - } - } + } + } +} + +/** + * Utility function that returns boolean indicating if decl is inside some function + * @param decl Declaration to test + * @param func Declaration function + * @return true if decl is inside function + * @return false otherwise + */ +static bool is_inside_some_function(Declaration decl, Declaration *func) +{ + void *current = decl; + Declaration current_decl; + while (current != NULL && (current = tnode_parent(current)) != NULL) + { + switch (ABSTRACT_APS_tnode_phylum(current)) + { + case KEYDeclaration: + current_decl = (Declaration)current; + switch (Declaration_KEY(current_decl)) + { + case KEYsome_function_decl: + *func = current_decl; + return some_function_decl_result(current_decl) == decl; + default: + break; + } + default: + break; + } + } + + return false; +} + +/** + * This function determines whether attribute instance should be considered for circularity check + * @param instance attribute instance + * @return true if instance not is not: If, Match and some formal + * @return false everything else + */ +static bool 
applicable_for_circularity_check(INSTANCE *instance) +{ + // If and Match statements can show up in a cycle but are never declared circular or non-circular + if (if_rule_p(instance->fibered_attr.attr)) + return false; + + Declaration node = instance->fibered_attr.attr; + Declaration func = NULL; + + // The result in a function/procedure may show up in a cycle but are never declared circular or non-circular + if (is_inside_some_function(node, &func) && some_function_decl_result(func) == node) + return false; + + if (instance->fibered_attr.fiber != NULL) + return false; + + // Formals may show up in a cycle as well + switch (ABSTRACT_APS_tnode_phylum(node)) + { + case KEYDeclaration: + { + switch (Declaration_KEY(node)) + { + case KEYformal: + { + if (func != NULL) { + // function formals can be involved in a cycle (see first.aps) + return false; } + switch (ABSTRACT_APS_tnode_phylum(tnode_parent(node))) + { + case KEYPattern: + // formals used inside match patterns are allowed to be + // involved in a cycle. 
+ return false; + default: + break; + } + } + default: + break; } } + default: + break; + } + + return true; } +/** + * Ensure that attributes that participate in a cycle are defined circular + * @param s Analysis state + */ +static void assert_circular_declaration(STATE* s) { + int i, j, k; + + // Forall phylum in the phylum_graph + for (i = 0; i < s->phyla.length; i++) { + PHY_GRAPH* phy = &s->phy_graphs[i]; + int n = phy->instances.length; + int phylum_index = phylum_instance_start[i]; + INSTANCE* array = phy->instances.array; + + for (j = 0; j < n; j++) { + INSTANCE* instance = &array[j]; + Declaration node = instance->fibered_attr.attr; + + if (!applicable_for_circularity_check(instance)) + continue; + + bool any_cycle = false; + bool declared_circular = instance_circular(instance); + + char instance_to_str[BUFFER_SIZE]; + FILE* f = fmemopen(instance_to_str, sizeof(instance_to_str), "w"); + print_instance(instance, f); + fclose(f); + + for (k = 0; k < num_instances; k++) { + if (parent_index[k] == k) { + if (parent_index[phylum_index + j] == k) { + if (declared_circular) { + any_cycle = true; + } else { + aps_error(node, + "Instance (%s) involves in a cycle but it is not " + "declared circular.", + instance_to_str); + } + } + } + } + + if (declared_circular && !any_cycle) { + aps_warning(node, + "Instance (%s) is declared circular but does not involve " + "in any cycle.", + instance_to_str); + } + } + } + + // Forall edges in the augmented dependency graph + for (i = 0; i <= s->match_rules.length; i++) { + AUG_GRAPH* aug_graph = (i == s->match_rules.length) + ? 
&s->global_dependencies + : &s->aug_graphs[i]; + int n = aug_graph->instances.length; + int constructor_index = constructor_instance_start[i]; + INSTANCE* array = aug_graph->instances.array; + for (j = 0; j < n; j++) { + INSTANCE* instance = &array[j]; + Declaration node = instance->fibered_attr.attr; + + if (!applicable_for_circularity_check(instance)) + continue; + + bool any_cycle = false; + bool declared_circular = instance_circular(instance); + + char instance_to_str[BUFFER_SIZE]; + FILE* f = fmemopen(instance_to_str, sizeof(instance_to_str), "w"); + print_instance(instance, f); + fclose(f); + + // Forall cycles in the graph + for (k = 0; k < num_instances; k++) { + if (parent_index[k] == k) { + if (parent_index[constructor_index + j] == k) { + if (declared_circular) { + any_cycle = true; + } else { + aps_error(node, + "Instance (%s) involves in a cycle but it is not " + "declared circular.", + instance_to_str); + } + } + } + } + if (declared_circular && !any_cycle) { + aps_warning(node, + "Instance (%s) is declared circular but does not involve " + "in any cycle.", + instance_to_str); + } + } + } +} -void break_fiber_cycles(Declaration module,STATE *s) { +void break_fiber_cycles(Declaration module,STATE *s,DEPENDENCY dep) { void *mark = SALLOC(0); init_indices(s); make_cycles(s); get_fiber_cycles(s); - add_up_down_attributes(s); + assert_circular_declaration(s); + + // If SCC scheduling is in-progress + if (static_scc_schedule) + { + // If the accumulated dependency is NOT just fiber cycle, then + // there exist cycle(s) that carry value(s), so do not break fiber cycles. + if (dep & DEPENDENCY_NOT_JUST_FIBER) + { + printf("Skipped breaking fiber cycles because cycle has dependencies that carry value (not just fiber).\n"); + } + else + { + // Skip running UP/DOWN fiber cycle breaking altogether for SCC scheduling. 
+ printf("Skipped breaking fiber cycles even when dependencies are just fiber.\n"); + + // add_up_down_attributes(s,UP_DOWN); + } + } + else + { + // TODO: DOWN-UP is not a promising solution to handle CRAG. It naturally + // wants to remove edges that carry value (not just fiber dependency) and + // DOWN-UP ideally be should be removed in favor of UP-DOWN. + // + // The better as was demonstrated by SCC chunk scheduling is to not do fiber + // cycle breaking if there is DEPENDENCY_NOT_JUST_FIBER in accumulated dependency. + // + // However, we attempted to prevent direct edges from being affected by + // the fiber-cycle-breaking algorithm: https://github.com/boyland/aps/pull/87 + // + // Preserve UP-DOWN edges if the accumulated dependency is just fiber cycle, + // otherwise preserve DOWN-UP edges. + bool direction = !(dep & DEPENDENCY_NOT_JUST_FIBER) ? UP_DOWN : DOWN_UP; + add_up_down_attributes(s,direction); + } + release(mark); { int saved_analysis_debug = analysis_debug; @@ -643,12 +971,3 @@ void break_fiber_cycles(Declaration module,STATE *s) { print_analysis_state(s,stdout); } } - - -/**** PRINTING ****/ - -void print_fiber_cycles(STATE *s) { -} - - - diff --git a/analyze/aps-cycle.h b/analyze/aps-cycle.h index 4d7a4caa..6dda71e8 100644 --- a/analyze/aps-cycle.h +++ b/analyze/aps-cycle.h @@ -4,11 +4,15 @@ * Routines for breaking cycles involving only fibers. 
*/ -extern void break_fiber_cycles(Declaration,STATE *); +#ifndef APS_CYCLE_H +#define APS_CYCLE_H + +extern void break_fiber_cycles(Declaration,STATE *,DEPENDENCY); extern int cycle_debug; typedef struct cycle_description { int internal_info; + DEPENDENCY kind; VECTOR(INSTANCE) instances; } CYCLE; @@ -20,3 +24,5 @@ typedef struct cycle_description { #define CYC_ABOVE 1 #define CYC_LOCAL 2 #define CYC_BELOW 4 + +#endif diff --git a/analyze/aps-debug.c b/analyze/aps-debug.c index c3c18e7e..e85631cd 100644 --- a/analyze/aps-debug.c +++ b/analyze/aps-debug.c @@ -32,7 +32,7 @@ void aps_warning(const void *tnode, const char *fmt, ...) fflush(stdout); (void) fprintf(stderr, "%s.aps:%d:Warning: ", - aps_yyfilename,tnode_line_number(tnode)); + aps_yyfilename,tnode == NULL ? -1 : tnode_line_number(tnode)); (void) vfprintf(stderr, fmt, args); (void) fprintf(stderr, "\n"); (void) fflush(stderr); @@ -59,7 +59,8 @@ static void list_debug_flags() { fprintf(stderr,"\tP PRAGMA_ACTIVATION\n"); fprintf(stderr,"\tt TYPE CHECKING\n"); fprintf(stderr,"\t+ ADD_FIBER\n"); - fprintf(stderr,"\ta ALL_FIBERSETS\n"); + fprintf(stderr,"\ta ADD_FSA_EDGE"); + fprintf(stderr,"\tA ALL_FIBERSETS\n"); fprintf(stderr,"\tp PUSH_FIBER\n"); fprintf(stderr,"\tf FIBER_INTRO\n"); fprintf(stderr,"\tF FIBER_FINAL\n"); @@ -70,6 +71,7 @@ static void list_debug_flags() { fprintf(stderr,"\tw WORKLIST_CHANGES\n"); fprintf(stderr,"\tx SUMMARY_EDGE_EXTRA\n"); fprintf(stderr,"\t0 ASSERT_CLOSED\n"); + fprintf(stderr,"\t@ EDGESET_ASSERTIONS\n"); fprintf(stderr,"\tE SUMMARY_EDGE\n"); fprintf(stderr,"\t2 TWO_EDGE_CYCLE\n"); fprintf(stderr,"\tD DNC_FINAL\n"); @@ -95,7 +97,8 @@ void set_debug_flags(const char *options) case 'P': bind_debug |= PRAGMA_ACTIVATION; break; case 't': type_debug = -1; break; case '+': fiber_debug |= ADD_FIBER; break; - case 'a': fiber_debug |= ALL_FIBERSETS; break; + case 'a': fiber_debug |= ADD_FSA_EDGE; break; + case 'A': fiber_debug |= ALL_FIBERSETS; break; case 'p': fiber_debug |= 
PUSH_FIBER; break; case 'f': fiber_debug |= FIBER_INTRO; break; case 'F': fiber_debug |= FIBER_FINAL; break; @@ -110,11 +113,13 @@ void set_debug_flags(const char *options) case 'E': analysis_debug |= SUMMARY_EDGE; break; case 'D': analysis_debug |= DNC_FINAL; break; case 'I': analysis_debug |= DNC_ITERATE; break; + case '@': analysis_debug |= EDGESET_ASSERTIONS; break; case 'C': cycle_debug |= PRINT_CYCLE; break; case 'u': cycle_debug |= DEBUG_UP_DOWN; break; case 'U': cycle_debug |= PRINT_UP_DOWN; break; case 'o': oag_debug |= DEBUG_ORDER; break; case 'O': oag_debug |= TOTAL_ORDER; break; + case 'v': oag_debug |= DEBUG_ORDER_VERBOSE; break; case 'T': oag_debug |= PROD_ORDER; break; case '3': oag_debug |= TYPE_3_DEBUG; break; } diff --git a/analyze/aps-dnc.c b/analyze/aps-dnc.c index 1fa894d4..7ff91ca6 100644 --- a/analyze/aps-dnc.c +++ b/analyze/aps-dnc.c @@ -1,7 +1,7 @@ /* Testing DNC with fibering of conditional attribute grammars * written in APS syntax. First we initialize the graphs for - * every phylum and for every production. Then we interate - * computing summary graphs and then augmented depedency graphs + * every phylum and for every production. Then we iterate + * computing summary graphs and then augmented dependency graphs * for productions until we reach a fixed point. 
*/ #include @@ -14,6 +14,9 @@ int analysis_debug = 0; /*** FUNCTIONS FOR INSTANCES */ +BOOL instance_equal(INSTANCE *in1, INSTANCE *in2) { + return in1->node == in2->node && fibered_attr_equal(&in1->fibered_attr, &in2->fibered_attr); +} BOOL fibered_attr_equal(FIBERED_ATTRIBUTE *fa1, FIBERED_ATTRIBUTE *fa2) { return fa1->attr == fa2->attr && fa1->fiber == fa2->fiber; @@ -58,6 +61,7 @@ enum instance_direction instance_direction(INSTANCE *i) { return instance_local; } else { fatal_error("%d: unknown attributed node",tnode_line_number(i->node)); + return dir; /* keep CC happy */ } } @@ -79,6 +83,8 @@ DEPENDENCY dependency_trans(DEPENDENCY k1, DEPENDENCY k2) (k2 & DEPENDENCY_NOT_JUST_FIBER)) | ((k1 & DEPENDENCY_MAYBE_CARRYING)& (k2 & DEPENDENCY_MAYBE_CARRYING)) | + ((k1 & DEPENDENCY_MAYBE_SIMPLE)& + (k2 & DEPENDENCY_MAYBE_SIMPLE)) | SOME_DEPENDENCY); } @@ -164,11 +170,65 @@ void remove_from_worklist(EDGESET node, AUG_GRAPH *aug_graph) { } static EDGESET edgeset_freelist = NULL; + +static VECTOR(EDGESET) private_check_vector; +static int private_check_vector_used = 0; +static Boolean check_all_edgesets__add(EDGESET e) { + int n = private_check_vector_used; + int i; + if (private_check_vector.array == 0) { + VECTORALLOC(private_check_vector,EDGESET,16); + } + for (i=0; i < n; ++i) { + if (private_check_vector.array[i] == e) return false; + } + if (n >= private_check_vector.length) { + EDGESET *oldarray = private_check_vector.array; + VECTORALLOC(private_check_vector,EDGESET,n*2); + for (i=0; i < n; ++i) { + private_check_vector.array[i] = oldarray[i]; + } + } + private_check_vector.array[n] = e; + ++private_check_vector_used; + return true; +} + +void check_all_edgesets(AUG_GRAPH *aug_graph) { + int n = aug_graph->instances.length; + int m = n*n; + int i,j; + EDGESET f; + + private_check_vector_used = 0; + + for (f = edgeset_freelist; f != NULL; f = f->rest) { + if (!check_all_edgesets__add(f)) { + fatal_error("Found duplicate edge set in free list\n"); + } + } + for 
(i=0; i < n; ++i) { + for (j=0; j < n; ++j) { + EDGESET es = aug_graph->graph[i*n+j]; + while (es != NULL) { + if (!check_all_edgesets__add(es)) { + fatal_error("Found duplicate edge set %d -> %d\n",i,j); + } + es = es->rest; + } + } + } +} + + EDGESET new_edgeset(INSTANCE *source, INSTANCE *sink, CONDITION *cond, DEPENDENCY kind) { EDGESET new_edge; + if (analysis_debug & EDGESET_ASSERTIONS) { + // can't do anything right now + } if (edgeset_freelist == NULL) { new_edge = (EDGESET)HALLOC(sizeof(struct edgeset)); } else { @@ -197,6 +257,9 @@ void free_edge(EDGESET old, AUG_GRAPH *aug_graph) { old->kind = no_dependency; remove_from_worklist(old,aug_graph); edgeset_freelist = old; + if (analysis_debug & EDGESET_ASSERTIONS) { + check_all_edgesets(aug_graph); + } } void free_edgeset(EDGESET es, AUG_GRAPH *aug_graph) { @@ -255,6 +318,16 @@ EDGESET add_edge(INSTANCE *source, DEPENDENCY kind, EDGESET current, AUG_GRAPH *aug_graph) { + if (!kind) { + print_instance(source,stdout); + fputs("->",stdout); + print_instance(sink,stdout); + fputs(":",stdout); + print_edge_helper(kind,cond,stdout); + puts("\n"); + fatal_error("Trying to add dependency with kind=0 %d->%d", source->index, sink->index); + } + if (current == NULL) { EDGESET new_edge=new_edgeset(source,sink,cond,kind); add_to_worklist(new_edge,aug_graph); @@ -303,9 +376,11 @@ void add_edge_to_graph(INSTANCE *source, DEPENDENCY kind, AUG_GRAPH *aug_graph) { int index = source->index*(aug_graph->instances.length)+sink->index; + EDGESET current = aug_graph->graph[index]; + aug_graph->graph[index] = NULL; aug_graph->graph[index] = - add_edge(source,sink,cond,kind,aug_graph->graph[index],aug_graph); + add_edge(source,sink,cond,kind,current,aug_graph); } void add_transitive_edge_to_graph(INSTANCE *source, @@ -452,6 +527,7 @@ static Expression if_rule_test(void* if_rule) { return Match_info((Match)if_rule)->match_test; default: fatal_error("%d: unknown if_rule",tnode_line_number(if_rule)); + return 0; /* keep CC happy */ } } 
@@ -463,6 +539,7 @@ static CONDITION* if_rule_cond(void *if_rule) { return &Match_info((Match)if_rule)->match_cond; default: fatal_error("%d: unknown if_rule",tnode_line_number(if_rule)); + return 0; /* keep CC happy */ } } @@ -532,6 +609,25 @@ static void *init_decl_cond(void *vcond, void *node) { traverse_Block(init_decl_cond,&new_cond,case_stmt_default(decl)); } return NULL; + case KEYfor_stmt: /* almost same as case, but no negative conditions */ + { + Matches ms = for_stmt_matchers(decl); + Expression testvar = for_stmt_expr(decl); + CONDITION new_cond = *cond; + Match m; + + Expression_info(testvar)->next_expr = 0; + traverse_Matches(get_match_tests,&testvar,ms); + for (m = first_Match(ms); m; m=MATCH_NEXT(m)) { + int index = Match_info(m)->if_index; + Match_info(m)->match_cond = new_cond; + new_cond.positive |= (1 << index); /* first set it */ + traverse_Block(init_decl_cond,&new_cond,matcher_body(m)); + new_cond.positive &= ~(1 << index); /* now clear it */ + /* do not do a negative test */ + } + } + return NULL; default: break; } default: @@ -584,14 +680,6 @@ static int assign_instances(INSTANCE *array, int index, return index; } -static Type infer_some_value_decl_type(Declaration d) { - if (Declaration_KEY(d) == KEYnormal_formal) { - return infer_formal_type(d); - } else { - return some_value_decl_type(d); - } -} - /** Count and then assign instances. * Called in two cases:
    *
  • one to set instance indices and count instances @@ -735,7 +823,6 @@ static void *get_instances(void *vaug_graph, void *node) { STATE *s = aug_graph->global_state; int i; Declaration_info(decl)->instance_index = index; - Declaration_info(decl)->decl_flags |= DECL_RHS_FLAG; for (i=0; i < s->phyla.length; ++i) { if (s->phyla.array[i] == pdecl) { Declaration_info(decl)->node_phy_graph = &s->phy_graphs[i]; @@ -789,6 +876,7 @@ static void *get_instances(void *vaug_graph, void *node) { nil_Expressions()); Expression_info(e)->funcall_proxy = proxy; Declaration_info(proxy)->instance_index = index; + Declaration_info(proxy)->proxy_fdecl = fdecl; Declaration_info(proxy)->node_phy_graph = summary_graph_for(s,fdecl); Declaration_info(proxy)->decl_flags |= DECL_RHS_FLAG; @@ -956,6 +1044,19 @@ static BOOL decl_is_collection(Declaration d) { } } +BOOL decl_is_circular(Declaration d) +{ + if (!d) return FALSE; + switch (Declaration_KEY(d)) { + case KEYvalue_decl: + return direction_is_circular(value_decl_direction(d)); + case KEYattribute_decl: + return direction_is_circular(attribute_decl_direction(d)); + default: + return FALSE; + } +} + // return true if we are sure this vertex represents an input dependency static BOOL vertex_is_input(VERTEX* v) { @@ -1048,8 +1149,10 @@ Declaration attr_ref_node_decl(Expression e) { Expression node = attr_ref_object(e); switch (Expression_KEY(node)) { - default: fatal_error("%d: can't handle this attribute instance", - tnode_line_number(node)); + default: + fatal_error("%d: can't handle this attribute instance", + tnode_line_number(node)); + return NULL; case KEYvalue_use: return USE_DECL(value_use_use(node)); } @@ -1094,6 +1197,8 @@ static void record_expression_dependencies(VERTEX *sink, CONDITION *cond, case KEYvalue_use: { Declaration decl=Use_info(value_use_use(e))->use_decl; Declaration rdecl; + int new_kind = kind; + if (!decl_is_circular(decl)) new_kind |= DEPENDENCY_MAYBE_SIMPLE; if (decl == NULL) fatal_error("%d: unbound 
expression",tnode_line_number(e)); if (DECL_IS_LOCAL(decl) && @@ -1107,13 +1212,13 @@ static void record_expression_dependencies(VERTEX *sink, CONDITION *cond, phylum_shared_info_attribute(phy,aug_graph->global_state); source.modifier = NO_MODIFIER; if (vertex_is_output(&source)) aps_warning(e,"Dependence on output value"); - add_edges_to_graph(&source,sink,cond,kind&~DEPENDENCY_MAYBE_CARRYING, + add_edges_to_graph(&source,sink,cond,new_kind&~DEPENDENCY_MAYBE_CARRYING, aug_graph); new_mod.field = decl; new_mod.next = mod; source.modifier = &new_mod; - add_edges_to_graph(&source,sink,cond,kind,aug_graph); + add_edges_to_graph(&source,sink,cond,new_kind,aug_graph); } else if (Declaration_info(decl)->decl_flags & (ATTR_DECL_INH_FLAG|ATTR_DECL_SYN_FLAG)) { /* a use of parameter or result (we hope) */ @@ -1122,43 +1227,47 @@ static void record_expression_dependencies(VERTEX *sink, CONDITION *cond, source.attr = decl; source.modifier = mod; if (vertex_is_output(&source)) aps_warning(e,"Dependence on output value"); - add_edges_to_graph(&source,sink,cond,kind,aug_graph); + add_edges_to_graph(&source,sink,cond,new_kind,aug_graph); } else { source.node = NULL; source.attr = decl; source.modifier = mod; if (vertex_is_output(&source)) aps_warning(e,"Dependence on output value"); - add_edges_to_graph(&source,sink,cond,kind,aug_graph); + add_edges_to_graph(&source,sink,cond,new_kind,aug_graph); } } break; case KEYfuncall: { Declaration decl; + int new_kind = kind; if ((decl = attr_ref_p(e)) != NULL) { + if (!decl_is_circular(decl)) new_kind |= DEPENDENCY_MAYBE_SIMPLE; else new_kind = kind; source.node = attr_ref_node_decl(e); source.attr = decl; source.modifier = mod; if (vertex_is_output(&source)) aps_warning(e,"Dependence on output value"); - add_edges_to_graph(&source,sink,cond,kind,aug_graph); + add_edges_to_graph(&source,sink,cond,new_kind,aug_graph); } else if ((decl = field_ref_p(e)) != NULL) { + if (!decl_is_circular(decl)) new_kind |= DEPENDENCY_MAYBE_SIMPLE; else 
new_kind = kind; Expression object = field_ref_object(e); new_mod.field = decl; new_mod.next = mod; // first the dependency on the pointer itself (NOT carrying) record_expression_dependencies(sink,cond, - kind&~DEPENDENCY_MAYBE_CARRYING, + new_kind&~DEPENDENCY_MAYBE_CARRYING, NO_MODIFIER,object,aug_graph); // then the dependency on the field (possibly carrying) - record_expression_dependencies(sink,cond,kind,&new_mod,object, + record_expression_dependencies(sink,cond,new_kind,&new_mod,object, aug_graph); } else if ((decl = local_call_p(e)) != NULL) { + if (!decl_is_circular(decl)) new_kind |= DEPENDENCY_MAYBE_SIMPLE; else new_kind = kind; Declaration result = some_function_decl_result(decl); Expression actual = first_Actual(funcall_actuals(e)); /* first depend on the arguments (not carrying, no fibers) */ if (mod == NO_MODIFIER) { for (;actual!=NULL; actual=Expression_info(actual)->next_actual) { record_expression_dependencies(sink,cond, - kind&~DEPENDENCY_MAYBE_CARRYING, + new_kind&~DEPENDENCY_MAYBE_CARRYING, NO_MODIFIER,actual,aug_graph); } } @@ -1298,9 +1407,11 @@ static void record_lhs_dependencies(Expression lhs, CONDITION *cond, break; case KEYfuncall: { + int new_kind = kind; Declaration field, attr, fdecl, decl; VERTEX sink; if ((field = field_ref_p(lhs)) != NULL) { + if (!decl_is_circular(field)) new_kind |= DEPENDENCY_MAYBE_SIMPLE; else new_kind = kind; /* Assignment of a field, or a field of a field */ Expression object = field_ref_object(lhs); MODIFIER new_mod; @@ -1314,6 +1425,7 @@ static void record_lhs_dependencies(Expression lhs, CONDITION *cond, record_lhs_dependencies(object,cond,control_dependency, &new_mod,object,aug_graph); } else if ((attr = attr_ref_p(lhs)) != NULL) { + if (!decl_is_circular(attr)) new_kind |= DEPENDENCY_MAYBE_SIMPLE; else new_kind = kind; sink.node = attr_ref_node_decl(lhs); sink.attr = attr; sink.modifier = mod; @@ -1321,7 +1433,8 @@ static void record_lhs_dependencies(Expression lhs, CONDITION *cond, if 
(vertex_is_input(&sink)) aps_error(lhs,"Assignment of input value"); record_expression_dependencies(&sink,cond,kind,NULL,rhs,aug_graph); record_condition_dependencies(&sink,cond,aug_graph); - } else if ((fdecl = local_call_p(lhs)) != NULL) { + } else if ((fdecl = local_call_p(lhs)) != NULL) { + if (!decl_is_circular(fdecl)) new_kind |= DEPENDENCY_MAYBE_SIMPLE; else new_kind = kind; Declaration result = some_function_decl_result(decl); Declaration proxy = Expression_info(lhs)->funcall_proxy; if (mod == NO_MODIFIER) { @@ -1332,7 +1445,7 @@ static void record_lhs_dependencies(Expression lhs, CONDITION *cond, sink.modifier = mod; set_value_for(&sink,rhs,aug_graph); if (vertex_is_input(&sink)) aps_error(lhs,"Assignment of input value"); - record_expression_dependencies(&sink,cond,kind,NULL,rhs,aug_graph); + record_expression_dependencies(&sink,cond,new_kind,NULL,rhs,aug_graph); record_condition_dependencies(&sink,cond,aug_graph); } else { Expression actual = first_Actual(funcall_actuals(lhs)); @@ -1390,7 +1503,7 @@ static void *get_edges(void *vaug_graph, void *node) { case KEYformal: { Declaration case_stmt = formal_in_case_p(decl); if (case_stmt != NULL) { - Expression expr = case_stmt_expr(case_stmt); + Expression expr = some_case_stmt_expr(case_stmt); VERTEX f; f.node = 0; f.attr = decl; @@ -1449,7 +1562,13 @@ static void *get_edges(void *vaug_graph, void *node) { dot_mod.next = nodot_mod.next = NO_MODIFIER; sink.modifier = &nodot_mod; source.modifier = &dot_mod; - add_edges_to_graph(&source,&sink,cond,fiber_dependency, + DEPENDENCY new_kind = fiber_dependency; + if (decl_is_circular(fdecl)) { + new_kind &= ~DEPENDENCY_MAYBE_SIMPLE; + } else { + new_kind |= DEPENDENCY_MAYBE_SIMPLE; + } + add_edges_to_graph(&source,&sink,cond,new_kind, aug_graph); } } @@ -1509,13 +1628,13 @@ static void *get_edges(void *vaug_graph, void *node) { NO_MODIFIER, test, aug_graph); } break; - case KEYcase_stmt: + case KEYsome_case_stmt: { Match m; VERTEX sink; sink.node = 0; sink.modifier 
= NO_MODIFIER; - for (m=first_Match(case_stmt_matchers(decl)); m; m=MATCH_NEXT(m)) { + for (m=first_Match(some_case_stmt_matchers(decl)); m; m=MATCH_NEXT(m)) { Expression test = Match_info(m)->match_test; sink.attr = (Declaration)m; record_condition_dependencies(&sink,cond,aug_graph); @@ -1527,6 +1646,18 @@ static void *get_edges(void *vaug_graph, void *node) { } } break; + case KEYfor_in_stmt: + { Declaration formal = for_in_stmt_formal(decl); + Expression expr = for_in_stmt_seq(decl); + VERTEX f; + f.node = 0; + f.attr = formal; + f.modifier = NO_MODIFIER; + record_condition_dependencies(&f,cond,aug_graph); + record_expression_dependencies(&f,cond,dependency,NO_MODIFIER, + expr,aug_graph); + } + break; default: printf("%d: don't handle this kind yet\n",tnode_line_number(decl)); break; @@ -1635,6 +1766,16 @@ static void *mark_local(void *ignore, void *node) { return node; } +static void *mark_object_flag(void *ignore, void *node) { + if (ABSTRACT_APS_tnode_phylum(node) == KEYDeclaration) { + Declaration decl = (Declaration)node; + if (Declaration_KEY(decl) == KEYvalue_decl && type_is_phylum(value_decl_type(decl))) { + Declaration_info(decl)->decl_flags |= DECL_OBJECT_FLAG; + } + } + return node; +} + static void init_node_phy_graph2(Declaration node, Type ty, STATE *state) { switch (Type_KEY(ty)) { default: @@ -1699,7 +1840,6 @@ static void init_augmented_dependency_graph(AUG_GRAPH *aug_graph, fatal_error("%d: Cannot find start phylum summary graph", tnode_line_number(tlm)); Declaration_info(tlm)->node_phy_graph = &state->phy_graphs[i]; - Declaration_info(tlm)->decl_flags |= DECL_RHS_FLAG; } body = module_decl_contents(tlm); break; @@ -1717,16 +1857,9 @@ static void init_augmented_dependency_graph(AUG_GRAPH *aug_graph, Declaration_info(tlm)->node_phy_graph = &state->phy_graphs[i]; } body = some_function_decl_body(tlm); - Declaration_info(tlm)->decl_flags |= DECL_LHS_FLAG; { Type ftype=some_function_decl_type(tlm); Declaration 
formal=first_Declaration(function_type_formals(ftype)); Declaration result=first_Declaration(function_type_return_values(ftype)); - for (; formal != NULL; formal=Declaration_info(formal)->next_decl) { - Declaration_info(formal)->decl_flags |= ATTR_DECL_INH_FLAG; - } - for (; result != NULL; result=Declaration_info(result)->next_decl) { - Declaration_info(result)->decl_flags |= ATTR_DECL_SYN_FLAG; - } } break; case KEYtop_level_match: @@ -1741,7 +1874,6 @@ static void init_augmented_dependency_graph(AUG_GRAPH *aug_graph, tnode_line_number(tlm)); case KEYpattern_var: aug_graph->lhs_decl = pattern_var_formal(and_pattern_p1(pat)); - Declaration_info(aug_graph->lhs_decl)->decl_flags |= DECL_LHS_FLAG; init_node_phy_graph(aug_graph->lhs_decl,state); break; } @@ -1772,7 +1904,6 @@ static void init_augmented_dependency_graph(AUG_GRAPH *aug_graph, break; case KEYpattern_var: { Declaration next_rhs = pattern_var_formal(next_pat); - Declaration_info(next_rhs)->decl_flags |= DECL_RHS_FLAG; init_node_phy_graph(next_rhs,state); if (last_rhs == NULL) { aug_graph->first_rhs_decl = next_rhs; @@ -1880,6 +2011,146 @@ static void init_augmented_dependency_graph(AUG_GRAPH *aug_graph, aug_graph->schedule = (int *)HALLOC(aug_graph->instances.length*sizeof(int)); } +static void* set_rhs_decl_flag(void* vstate, void* node) { + if (ABSTRACT_APS_tnode_phylum(node) == KEYDeclaration) { + Declaration decl = (Declaration)node; + switch (Declaration_KEY(decl)) { + case KEYassign: { + Declaration pdecl = proc_call_p(assign_rhs(decl)); + if (pdecl != NULL) { + Declaration_info(decl)->decl_flags |= DECL_RHS_FLAG; + } + break; + } + default: + break; + } + } + return vstate; +} + +static void set_decl_flags_aug_graph(Declaration tlm, STATE* state) { + // Traverse the tlm and set RHS flags of assignments + traverse_Declaration(set_rhs_decl_flag, state, tlm); + + switch (Declaration_KEY(tlm)) { + case KEYmodule_decl: + { + Declaration_info(tlm)->decl_flags |= DECL_RHS_FLAG; + break; + } + case 
KEYsome_function_decl: + { + Declaration_info(tlm)->decl_flags |= DECL_LHS_FLAG; + + Type ftype = some_function_decl_type(tlm); + Declaration formal = first_Declaration(function_type_formals(ftype)); + Declaration result = + first_Declaration(function_type_return_values(ftype)); + for (; formal != NULL; formal = Declaration_info(formal)->next_decl) { + Declaration_info(formal)->decl_flags |= ATTR_DECL_INH_FLAG; + } + for (; result != NULL; result = Declaration_info(result)->next_decl) { + Declaration_info(result)->decl_flags |= ATTR_DECL_SYN_FLAG; + } + break; + } + case KEYtop_level_match: + { + Pattern pat = matcher_pat(top_level_match_m(tlm)); + switch (Pattern_KEY(pat)) { + case KEYand_pattern: + switch (Pattern_KEY(and_pattern_p1(pat))) { + case KEYpattern_var: + { + Declaration lhs_decl = pattern_var_formal(and_pattern_p1(pat)); + Declaration_info(lhs_decl)->decl_flags |= DECL_LHS_FLAG; + break; + } + default: + break; + } + pat = and_pattern_p2(pat); + break; + default: + break; + } + switch (Pattern_KEY(pat)) { + case KEYpattern_call: + { + Pattern next_pat; + for (next_pat = first_PatternActual(pattern_call_actuals(pat)); + next_pat != NULL; + next_pat = Pattern_info(next_pat)->next_pattern_actual) { + switch (Pattern_KEY(next_pat)) { + case KEYpattern_var: { + Declaration next_rhs = pattern_var_formal(next_pat); + Declaration_info(next_rhs)->decl_flags |= DECL_RHS_FLAG; + if (type_is_phylum(base_type(Pattern_info(next_pat)->pat_type))) { + Declaration_info(next_rhs)->decl_flags |= DECL_RHS_SYNTAX_FLAG; + } + break; + } + default: + break; + } + } + break; + } + default: + break; + } + break; + } + default: + break; + } +} + +static void set_decl_flags_module(STATE* s, Declaration module) { + int i; + for (i = 0; i < s->match_rules.length; ++i) { + set_decl_flags_aug_graph(s->match_rules.array[i], s); + } + set_decl_flags_aug_graph(module, s); + + Declarations decls = block_body(module_decl_contents(module)); + /* initialize attribute kind of decls */ + 
Declaration decl = first_Declaration(decls); + for (; decl != NULL; decl = Declaration_info(decl)->next_decl) { + switch (Declaration_KEY(decl)) { + case KEYattribute_decl: + /** DOn't do this error -- FIELD is not yet set. + if (!ATTR_DECL_IS_SYN(decl) && !ATTR_DECL_IS_INH(decl) && + !FIELD_DECL_P(decl)) { + aps_warning(decl, + "%s not declared either synthesized or inherited. " + "Marking it as synthesized for now.", + decl_name(decl)); + Declaration_info(decl)->decl_flags |= ATTR_DECL_SYN_FLAG; + } + */ + break; + case KEYsome_function_decl: + { + Type ftype = some_function_decl_type(decl); + Declaration d; + for (d = first_Declaration(function_type_formals(ftype)); d != NULL; + d = DECL_NEXT(d)) { + Declaration_info(d)->decl_flags |= ATTR_DECL_INH_FLAG; + } + for (d = first_Declaration(function_type_return_values(ftype)); + d != NULL; d = DECL_NEXT(d)) { + Declaration_info(d)->decl_flags |= ATTR_DECL_SYN_FLAG; + } + break; + } + default: + break; + } + } +} + static void init_summary_dependency_graph(PHY_GRAPH *phy_graph, Declaration phylum, STATE *state) @@ -1935,6 +2206,9 @@ static void init_analysis_state(STATE *s, Declaration module) { /* mark all local declarations such */ traverse_Declaration(mark_local,module,module); + /* mark obj decls */ + traverse_Declaration(mark_object_flag,module,module); + /* get phyla (imported only) */ { Declaration tf=first_Declaration(type_formals); Declarations edecls = NULL; @@ -1976,13 +2250,14 @@ static void init_analysis_state(STATE *s, Declaration module) { } { int phyla_count = 0; if (edecls == NULL) { - aps_error(module,"no extension to module %s", + fatal_error("no extension to module %s", symbol_name(def_name(declaration_def(module)))); } else { Declaration edecl = first_Declaration(edecls); /*DEBUG fprintf(stderr,"got an extension!\n"); */ for (; edecl != NULL; edecl = Declaration_info(edecl)->next_decl) { switch (Declaration_KEY(edecl)) { + default: break; case KEYphylum_decl: if 
(def_is_public(phylum_decl_def(edecl))) ++phyla_count; if (DECL_IS_START_PHYLUM(edecl)) s->start_phylum = edecl; @@ -1998,6 +2273,7 @@ static void init_analysis_state(STATE *s, Declaration module) { { Declaration decl = first_Declaration(decls); for (; decl != NULL; decl = DECL_NEXT(decl)) { switch (Declaration_KEY(decl)) { + default: break; case KEYsome_function_decl: ++phyla_count; break; @@ -2011,6 +2287,7 @@ static void init_analysis_state(STATE *s, Declaration module) { for (edecl = first_Declaration(edecls); edecl != NULL; edecl = Declaration_info(edecl)->next_decl) { switch (Declaration_KEY(edecl)) { + default: break; case KEYphylum_decl: if (def_is_public(phylum_decl_def(edecl))) { s->phyla.array[phyla_count++] = edecl; @@ -2022,6 +2299,7 @@ static void init_analysis_state(STATE *s, Declaration module) { { Declaration decl = first_Declaration(decls); for (; decl != NULL; decl = DECL_NEXT(decl)) { switch (Declaration_KEY(decl)) { + default: break; case KEYsome_function_decl: s->phyla.array[phyla_count++] = decl; break; @@ -2037,6 +2315,7 @@ static void init_analysis_state(STATE *s, Declaration module) { if (decl == NULL) aps_error(module,"empty module"); for (; decl != NULL; decl = Declaration_info(decl)->next_decl) { switch (Declaration_KEY(decl)) { + default: break; case KEYsome_function_decl: case KEYtop_level_match: ++match_rule_count; break; /*DEBUG @@ -2055,6 +2334,7 @@ static void init_analysis_state(STATE *s, Declaration module) { decl != NULL; decl = Declaration_info(decl)->next_decl) { switch (Declaration_KEY(decl)) { + default: break; case KEYsome_function_decl: case KEYtop_level_match: s->match_rules.array[match_rule_count++] = decl; @@ -2062,7 +2342,10 @@ static void init_analysis_state(STATE *s, Declaration module) { } } } - + + // Ensure declaration flags are all set before fibering begins + set_decl_flags_module(s, module); + /* perform fibering */ fiber_module(s->module,s); add_fibers_to_state(s); @@ -2071,6 +2354,7 @@ static void 
init_analysis_state(STATE *s, Declaration module) { { Declaration decl = first_Declaration(decls); for (; decl != NULL; decl = Declaration_info(decl)->next_decl) { switch (Declaration_KEY(decl)) { + default: break; case KEYattribute_decl: if (!ATTR_DECL_IS_SYN(decl) && !ATTR_DECL_IS_INH(decl) && !FIELD_DECL_P(decl)) { @@ -2082,6 +2366,7 @@ static void init_analysis_state(STATE *s, Declaration module) { Declaration formal = first_Declaration(function_type_formals(ftype)); Type ntype = formal_type(formal); switch (Type_KEY(ntype)) { + default: break; case KEYtype_use: { Declaration phylum=Use_info(type_use_use(ntype))->use_decl; if (phylum == NULL) @@ -2157,6 +2442,22 @@ static void synchronize_dependency_graphs(AUG_GRAPH *aug_graph, phy_n = phy_graph->instances.length; + int index_of_lhs = -1; + for (i = 0; i < n; i++) { + if (aug_graph->instances.array[i].node == aug_graph->lhs_decl) { + index_of_lhs = i; + break; + } + } + + if (index_of_lhs == -1) { + fatal_error("LHS %d not found in instances of %s", decl_name(aug_graph->lhs_decl), aug_graph_name(aug_graph)); + } else { + if (analysis_debug & SUMMARY_EDGE) { + printf("LHS found at index %d for %s\n", index_of_lhs, aug_graph_name(aug_graph)); + } + } + /* discover when the instances for this node end. 
*/ max = start + phy_n; @@ -2226,7 +2527,6 @@ static void augment_dependency_graph_for_node(AUG_GRAPH *aug_graph, Declaration node) { int start=Declaration_info(node)->instance_index; PHY_GRAPH *phy_graph = Declaration_info(node)->node_phy_graph; - synchronize_dependency_graphs(aug_graph,start,phy_graph); } @@ -2255,6 +2555,7 @@ void *augment_dependency_graph_func_calls(void *paug_graph, void *node) { case KEYDeclaration: { Declaration decl = (Declaration)node; switch (Declaration_KEY(decl)) { + default: break; case KEYsome_function_decl: case KEYtop_level_match: /* don't look inside (unless its what we're doing the analysis for) */ @@ -2301,7 +2602,72 @@ void augment_dependency_graph(AUG_GRAPH *aug_graph) { aug_graph,aug_graph->match_rule); } +static int edgeset_length(EDGESET es) { + int n = 0; + while (es != 0) { + ++n; + es = es->rest; + } + return n; +} + +/** + * Loop over all the elements in the set and add transitive edges to the graph. + * This cannot be done as a regular for-loop because adding edges can result + * in changes to the graph, including the edge set we are looping over. + * The edges a and b are the ones that will be sent to transitive addition + * routine: one of them should be null: it will be replaced with the + * each edge in the set. 
+ */ +void add_transitive_edges_to_graph(AUG_GRAPH *aug_graph, EDGESET set, EDGESET a, EDGESET b) +{ + int i, n; + EDGESET e; + BOOL seta = a == NULL; + CONDITION *cv; + DEPENDENCY *dv; + struct edgeset tmp; + + if (set == NULL) return; + if (set->rest == NULL) { /* special case the one element set */ + if (seta) a = set; else b = set; + add_transitive_edge_to_graph(a->source,b->sink, + &a->cond,&b->cond, + a->kind,b->kind, + aug_graph); + return; + } + + n = edgeset_length(set); + cv = (CONDITION *)SALLOC(n * sizeof(CONDITION)); + dv = (DEPENDENCY *)SALLOC(n * sizeof(DEPENDENCY)); + for (e = set, i = 0; e != NULL && i < n; e = e->rest, ++i) { + cv[i] = e->cond; + dv[i] = e->kind; + } + + /* This code dpdends on all teh elements of an edge set having the same + * sources and sinks. + */ + tmp.rest = NULL; + tmp.source = set->source; + tmp.sink = set->sink; + tmp.next_in_edge_worklist = NULL; + + for (i = 0; i < n; ++i) { + tmp.cond = cv[i]; + tmp.kind = dv[i]; + if (seta) a = &tmp; else b = &tmp; + add_transitive_edge_to_graph(a->source,b->sink, + &a->cond,&b->cond, + a->kind,b->kind, + aug_graph); + } + release(cv); +} + void close_using_edge(AUG_GRAPH *aug_graph, EDGESET edge) { + struct edgeset copy = *edge; int i,j; int source_index = edge->source->index; int sink_index = edge->sink->index; @@ -2313,28 +2679,14 @@ void close_using_edge(AUG_GRAPH *aug_graph, EDGESET edge) { } for (i=0; i < n; ++i) { - EDGESET e; /* first: instance[i]->source */ - for (e = aug_graph->graph[i*n+source_index]; - e != NULL; - e = e->rest) { - add_transitive_edge_to_graph(e->source,edge->sink, - &e->cond,&edge->cond, - e->kind,edge->kind, - aug_graph); - } + add_transitive_edges_to_graph(aug_graph, aug_graph->graph[i*n+source_index], + NULL, ©); /* then sink->instance[i] */ - for (e = aug_graph->graph[sink_index*n+i]; - e != NULL; - e = e->rest) { - add_transitive_edge_to_graph(edge->source,e->sink, - &edge->cond,&e->cond, - edge->kind,e->kind, - aug_graph); - } + 
add_transitive_edges_to_graph(aug_graph, aug_graph->graph[sink_index*n+i], + ©, NULL); } } - /* A very slow check, hence optional. * O(n^3*2^c) where 'n' is the number of instances. * (for reasonable non-toy examples, n can be > 100). @@ -2452,6 +2804,14 @@ DEPENDENCY analysis_state_cycle(STATE *s) { kind = dependency_join(kind,k1); } } + { + AUG_GRAPH *aug_graph = &s->global_dependencies; + int n = aug_graph->instances.length; + for (j=0; j < n; ++j) { + DEPENDENCY k1 = edgeset_kind(aug_graph->graph[j*n+j]); + kind = dependency_join(kind,k1); + } + } return kind; } @@ -2483,9 +2843,10 @@ void dnc_close(STATE*s) { } } -STATE *compute_dnc(Declaration module) { +STATE *compute_dnc(Declaration module, bool anc_analysis) { STATE *s=(STATE *)HALLOC(sizeof(STATE)); Declaration_info(module)->analysis_state = s; + s->anc_analysis = anc_analysis; init_analysis_state(s,module); dnc_close(s); if (analysis_debug & (DNC_ITERATE|DNC_FINAL)) { @@ -2527,6 +2888,7 @@ void print_dep_vertex(VERTEX *v, FILE *stream) void print_instance(INSTANCE *i, FILE *stream) { if (stream == 0) stream = stdout; + fprintf(stream,"[%d]", i->index); if (i->node != NULL) { if (ABSTRACT_APS_tnode_phylum(i->node) != KEYDeclaration) { fprintf(stream,"%d:?<%d>",tnode_line_number(i->node), @@ -2570,21 +2932,35 @@ void print_instance(INSTANCE *i, FILE *stream) { void print_edge_helper(DEPENDENCY kind, CONDITION *cond, FILE* stream) { if (stream == 0) stream = stdout; - switch (kind) { - default: fprintf(stream,"?%d",kind); break; - case no_dependency: fputc('!',stream); break; - case indirect_control_fiber_dependency: - case indirect_fiber_dependency: fputc('?',stream); /* fall through */ - case control_fiber_dependency: - case fiber_dependency: fputc('(',stream); break; - case indirect_control_dependency: - case indirect_dependency: fputc('?',stream); /* fall through */ - case control_dependency: - case dependency: break; + + fprintf(stream,"direct"); + + if (!(kind & SOME_DEPENDENCY)) + { + 
fputc('!',stream); // no-dependency + } + + if (!(kind & DEPENDENCY_NOT_JUST_FIBER)) + { + fputc('(',stream); // fiber dependency } + if (!(kind & DEPENDENCY_MAYBE_DIRECT)) + { + fputc('?',stream); // indirect dependency + } + if (kind & DEPENDENCY_MAYBE_SIMPLE) + { + fputc('s', stream); // simple dependency + } + else + { + fputc('o', stream); // circular dependency + } + if (cond != NULL) print_condition(cond,stream); - if ((kind & DEPENDENCY_NOT_JUST_FIBER) == 0) { - fputc(')',stream); + if ((kind & DEPENDENCY_NOT_JUST_FIBER) == 0) + { + fputc(')',stream); // fiber dependency } } @@ -2628,6 +3004,7 @@ const char *aug_graph_name(AUG_GRAPH *aug_graph) { case KEYtop_level_match: { Pattern pat=matcher_pat(top_level_match_m(aug_graph->match_rule)); switch (Pattern_KEY(pat)) { + default: break; case KEYand_pattern: pat = and_pattern_p2(pat); } @@ -2667,6 +3044,7 @@ void print_aug_graph(AUG_GRAPH *aug_graph, FILE *stream) { case KEYtop_level_match: { Pattern pat=matcher_pat(top_level_match_m(aug_graph->match_rule)); switch (Pattern_KEY(pat)) { + default: break; case KEYand_pattern: pat = and_pattern_p2(pat); } @@ -2739,11 +3117,11 @@ void print_phy_graph(PHY_GRAPH *phy_graph, FILE *stream) { fputs(" -> ",stream); for (j=0; j < n; ++j) { DEPENDENCY kind= phy_graph->mingraph[i*n+j]; + if (kind != no_dependency) fputs("\n\t", stream); if (kind == no_dependency) continue; if (kind == fiber_dependency) fputc('(',stream); print_instance(&phy_graph->instances.array[j],stream); if (kind == fiber_dependency) fputc(')',stream); - fputc(' ',stream); } fputc('\n',stream); } @@ -2765,6 +3143,7 @@ void print_analysis_state(STATE *s, FILE *stream) { void print_cycles(STATE *s, FILE *stream) { int i,j; + DEPENDENCY d; if (stream == 0) stream = stdout; /** test for cycles **/ for (i=0; i < s->phyla.length; ++i) { @@ -2772,15 +3151,10 @@ void print_cycles(STATE *s, FILE *stream) { PHY_GRAPH *phy_graph = &s->phy_graphs[i]; int n = phy_graph->instances.length; for (j=0; j < n; ++j) { - 
switch (phy_graph->mingraph[j*n+j]) { - case no_dependency: break; - case indirect_control_fiber_dependency: - case control_fiber_dependency: - case indirect_fiber_dependency: - case fiber_dependency: - fprintf(stream,"fiber "); - /* fall through */ - default: + d = phy_graph->mingraph[j*n+j]; + if (d) + { + if (!(d & DEPENDENCY_NOT_JUST_FIBER)) fprintf(stream,"fiber "); fprintf(stream,"summary cycle involving %s.", symbol_name(def_name(declaration_def(phy_graph->phylum)))); print_instance(&phy_graph->instances.array[j],stdout); @@ -2793,16 +3167,11 @@ void print_cycles(STATE *s, FILE *stream) { AUG_GRAPH *aug_graph = &s->aug_graphs[i]; int n = aug_graph->instances.length; for (j=0; j < n; ++j) { - switch (edgeset_kind(aug_graph->graph[j*n+j])) { - case no_dependency: break; - case indirect_control_fiber_dependency: - case control_fiber_dependency: - case indirect_fiber_dependency: - case fiber_dependency: - fprintf(stream,"fiber "); - /* fall through */ - default: - fprintf(stream,"local cycle for %s involving ", + d = edgeset_kind(aug_graph->graph[j*n+j]); + if (d) + { + if (!(d & DEPENDENCY_NOT_JUST_FIBER)) fprintf(stream,"fiber "); + fprintf(stream,"local cycle (%d) for %s involving ", (edgeset_kind(aug_graph->graph[j*n+j])), aug_graph_name(aug_graph)); print_instance(&aug_graph->instances.array[j],stdout); fprintf(stream,"\n"); @@ -2811,3 +3180,34 @@ void print_cycles(STATE *s, FILE *stream) { } } } + +/** + * Utility function that determines whether fibered attribute is circular or not. + * if a fiber_attr is circular, which is true if the fiber is empty and the attribute + * (local or node attribute) is declared circular, OR (if the fiber is non-empty) if + * the last step in the fiber is circular. + * Note that a.b.c. 
is repsented as short = a.b, field = c + * @param fiber_attr fibered attribute pointer + * @return boolean indicating the circularity + */ +BOOL fiber_attr_circular(FIBERED_ATTRIBUTE* fiber_attr) +{ + // If fiber is empty + if (fiber_attr->fiber == base_fiber || fiber_attr->fiber == NULL) + { + return decl_is_circular(fiber_attr->attr); + } + + FIBER fiber = fiber_attr->fiber; + return decl_is_circular(fiber->field); +} + +/** + * Utility function indicating whether INSTANCE (attribute instance) is circular or not + * @param in attribute instance + * @return boolean indicating the circularity + */ +BOOL instance_circular(INSTANCE* in) +{ + return fiber_attr_circular(&in->fibered_attr); +} diff --git a/analyze/aps-dnc.h b/analyze/aps-dnc.h index f59d3197..8552667b 100644 --- a/analyze/aps-dnc.h +++ b/analyze/aps-dnc.h @@ -1,4 +1,9 @@ +#ifndef APS_DNC_H +#define APS_DNC_H + #include "jbb-vector.h" +#include "scc.h" +#include typedef struct attrset { struct attrset *rest; @@ -18,26 +23,31 @@ typedef struct attribute_instance { int index; } INSTANCE; +extern BOOL instance_equal(INSTANCE*, INSTANCE*); + enum instance_direction {instance_local, instance_inward, instance_outward}; +enum instance_direction fibered_attr_direction(FIBERED_ATTRIBUTE *fa); enum instance_direction instance_direction(INSTANCE *); +extern BOOL fiber_attr_circular(FIBERED_ATTRIBUTE* fiber_attr); +extern BOOL instance_circular(INSTANCE* in); +extern BOOL decl_is_circular(Declaration d); + typedef unsigned DEPENDENCY; #define SOME_DEPENDENCY 1 #define DEPENDENCY_NOT_JUST_FIBER 2 #define DEPENDENCY_MAYBE_CARRYING 4 #define DEPENDENCY_MAYBE_DIRECT 8 +#define DEPENDENCY_MAYBE_SIMPLE 16 #define no_dependency 0 -#define fiber_dependency (SOME_DEPENDENCY|DEPENDENCY_MAYBE_CARRYING|DEPENDENCY_MAYBE_DIRECT) #define dependency (SOME_DEPENDENCY|DEPENDENCY_NOT_JUST_FIBER|DEPENDENCY_MAYBE_CARRYING|DEPENDENCY_MAYBE_DIRECT) -#define control_fiber_dependency (SOME_DEPENDENCY|DEPENDENCY_MAYBE_DIRECT) +#define 
max_dependency (SOME_DEPENDENCY|DEPENDENCY_NOT_JUST_FIBER|DEPENDENCY_MAYBE_CARRYING|DEPENDENCY_MAYBE_DIRECT|DEPENDENCY_MAYBE_SIMPLE) #define control_dependency (SOME_DEPENDENCY|DEPENDENCY_NOT_JUST_FIBER|DEPENDENCY_MAYBE_DIRECT) -#define indirect_fiber_dependency (SOME_DEPENDENCY|DEPENDENCY_MAYBE_CARRYING) -#define indirect_dependency (SOME_DEPENDENCY|DEPENDENCY_NOT_JUST_FIBER|DEPENDENCY_MAYBE_CARRYING) -#define indirect_control_fiber_dependency (SOME_DEPENDENCY) +#define fiber_dependency (SOME_DEPENDENCY|DEPENDENCY_MAYBE_CARRYING|DEPENDENCY_MAYBE_DIRECT) +#define control_fiber_dependency (SOME_DEPENDENCY|DEPENDENCY_MAYBE_DIRECT) #define indirect_control_dependency (SOME_DEPENDENCY|DEPENDENCY_NOT_JUST_FIBER) -#define max_dependency dependency #define AT_MOST(k1,k2) (((k1)&~(k2))==0) @@ -67,6 +77,8 @@ typedef struct augmented_dependency_graph { struct augmented_dependency_graph *next_in_aug_worklist; int *schedule; /* one-d array, indexed by instance number */ struct cto_node *total_order; + SCC_COMPONENTS* components; /* SCC components of instances in augmented dependency graph */ + bool* component_cycle; /* boolean indicating whether SCC component at index is circular */ } AUG_GRAPH; extern const char *aug_graph_name(AUG_GRAPH *); @@ -76,10 +88,15 @@ typedef struct summary_dependency_graph { VECTOR(INSTANCE) instances; DEPENDENCY *mingraph; /* two-d array, indexed by instance number */ struct summary_dependency_graph *next_in_phy_worklist; + SCC_COMPONENTS* components; /* SCC components of instances in phylum graph */ + bool* component_cycle; /* boolean indicating whether SCC component at index is circular */ int *summary_schedule; /* one-d array, indexed by instance number */ + bool* cyclic_flags; /* one-d array, indexed by phase number indicating whether phase is circular or not */ + int max_phase; /* integer denoting the maximum phase number for this phylum */ + bool* empty_phase; /* one-d array, indexed by phase number there is no attribute belonging to this 
phase */ } PHY_GRAPH; extern const char *phy_graph_name(PHY_GRAPH *); - + typedef VECTOR(struct cycle_description) CYCLES; typedef struct analysis_state { @@ -93,6 +110,11 @@ typedef struct analysis_state { AUG_GRAPH global_dependencies; VECTOR(FIBER) fibers; CYCLES cycles; + BOOL loop_required; + BOOL anc_analysis; + DEPENDENCY original_state_dependency; // This is value of analysis_state_cycle + // before removing fiber cycle or + // linearization of phases in summary graph } STATE; extern PHY_GRAPH* summary_graph_for(STATE *, Declaration); @@ -108,7 +130,16 @@ extern INSTANCE *get_instance(Declaration attr, FIBER fiber, extern void assert_closed(AUG_GRAPH*); extern void dnc_close(STATE *); -extern STATE *compute_dnc(Declaration module); +extern STATE *compute_dnc(Declaration module, bool anc_analysis); + +/* Low level routines: use with caution */ +extern void free_edge(EDGESET old, AUG_GRAPH *aug_graph); +extern void free_edgeset(EDGESET es, AUG_GRAPH *aug_graph); +extern void add_edge_to_graph(INSTANCE *source, + INSTANCE *sink, + CONDITION *cond, + DEPENDENCY kind, + AUG_GRAPH *aug_graph); /* The following routines return TRUE if a change occurs. 
*/ extern int close_augmented_dependency_graph(AUG_GRAPH *); @@ -133,3 +164,6 @@ extern int analysis_debug; #define DNC_ITERATE (1<<11) #define TWO_EDGE_CYCLE (1<<12) #define ASSERT_CLOSED (1<<13) +#define EDGESET_ASSERTIONS (1<<14) + +#endif diff --git a/analyze/aps-fiber-AI.c b/analyze/aps-fiber-AI.c index 67e6f866..adc25e65 100644 --- a/analyze/aps-fiber-AI.c +++ b/analyze/aps-fiber-AI.c @@ -65,7 +65,7 @@ int assign_sets(CALLSITE_SET site, void* node, CALLSITE_SET source) { return dirty; } -/* go through all assigments iteratively */ +/* go through all assignments iteratively */ void* traverser(void *changed, void *node) { int * dirty_sign = (int*)changed; switch (ABSTRACT_APS_tnode_phylum(node)) { @@ -74,7 +74,7 @@ void* traverser(void *changed, void *node) { switch (Declaration_KEY(decl)) { default: break; - /* only inspect the assigments */ + /* only inspect the assignments */ case KEYassign: nothing(); DEBUG_INFO("#%d", tnode_line_number(decl)); @@ -144,7 +144,7 @@ void* locater(void *node) { return NULL; } -/* main interpetation function */ +/* main interpretation function */ CALLSITE_SET interpret(void *node) { switch (ABSTRACT_APS_tnode_phylum(node)) { /* only inspect RHS expressions */ @@ -176,7 +176,7 @@ CALLSITE_SET interpret(void *node) { pSite = Declaration_info(udecl)->call_sites; if(pSite == NULL) { - DEBUG_INFO("\tValue use not decared locally: %s\n", + DEBUG_INFO("\tValue use not declared locally: %s\n", decl_name(udecl)); return NULL; } diff --git a/analyze/aps-fiber-AI.h b/analyze/aps-fiber-AI.h index 78287507..b2d12ecf 100644 --- a/analyze/aps-fiber-AI.h +++ b/analyze/aps-fiber-AI.h @@ -2,6 +2,9 @@ // Dec. 
1999 // Yu Wang +#ifndef APS_FIBER_AI_H +#define APS_FIBER_AI_H + #include #include "vector.h" @@ -26,3 +29,5 @@ void *check_all_decls(void *nouse, void * node) ; void expr_type(Expression e) ; void decl_type(Declaration d) ; void value_use_decl_type(Expression e) ; + +#endif diff --git a/analyze/aps-fiber-callsite.c b/analyze/aps-fiber-callsite.c index 24b56f54..9c0efa7c 100644 --- a/analyze/aps-fiber-callsite.c +++ b/analyze/aps-fiber-callsite.c @@ -69,7 +69,7 @@ int assign_sets(CALLSITE_SET site, void* node, CALLSITE_SET source) { return dirty; } -/* go through all assigments iteratively */ +/* go through all assignments iteratively */ void* traverser(void *changed, void *node) { int * dirty_sign = (int*)changed; switch (ABSTRACT_APS_tnode_phylum(node)) { @@ -78,7 +78,7 @@ void* traverser(void *changed, void *node) { switch (Declaration_KEY(decl)) { default: break; - /* only inspect the assigments */ + /* only inspect the assignments */ case KEYassign: DEBUG_INFO("#%d", tnode_line_number(decl)); { @@ -147,7 +147,7 @@ void* locater(void *node) { return NULL; } -/* main interpetation function */ +/* main interpretation function */ CALLSITE_SET interpret(void *node) { switch (ABSTRACT_APS_tnode_phylum(node)) { /* only inspect RHS expressions */ @@ -179,7 +179,7 @@ CALLSITE_SET interpret(void *node) { pSite = Declaration_info(udecl)->call_sites; if(pSite == NULL) { - DEBUG_INFO("\tValue use not decared locally: %s\n", + DEBUG_INFO("\tValue use not declared locally: %s\n", decl_name(udecl)); return NULL; } diff --git a/analyze/aps-fiber-callsite.h b/analyze/aps-fiber-callsite.h index e9f8b6c4..0f0e8e2b 100644 --- a/analyze/aps-fiber-callsite.h +++ b/analyze/aps-fiber-callsite.h @@ -2,6 +2,9 @@ // Dec. 
1999 // Yu Wang +#ifndef APS_FIBER_CALLSITE_H +#define APS_FIBER_CALLSITE_H + #include #include "vector.h" @@ -25,3 +28,5 @@ int callsite_set_empty_p(CALLSITE_SET) ; CALLSITE_SET empty_callsite_set() ; void INCLUDE(CALLSITE_SET*, CALLSITE_SET); int assign_sets(CALLSITE_SET , void* , CALLSITE_SET ) ; + +#endif diff --git a/analyze/aps-fiber.c b/analyze/aps-fiber.c index 9d8fcb72..f4542819 100644 --- a/analyze/aps-fiber.c +++ b/analyze/aps-fiber.c @@ -1,17 +1,17 @@ #include #include #include "jbb.h" +#include "aps-lex.h" #include "jbb-alloc.h" #include "aps-ag.h" - #define ADD_FIBER 1 #define ALL_FIBERSETS 2 #define PUSH_FIBER 4 int fiber_debug = 0; typedef struct edges { - struct edges *rest; // rest of esges + struct edges *rest; // rest of edges Declaration edge; // f or f. or empty(null). int from; // FSA from } *EDGES; // list of edges @@ -59,6 +59,10 @@ int BACKWARD_ = 1; int FORWARD_ = 2; int DFA_node_number = 1; // there is always 1 init node. (final states) +// forward declaration +void print_nodeset(NODESET ns); +int get_node_decl(Declaration decl); + ///////////// DATA definition end //////////////////////////////// @@ -266,9 +270,11 @@ FIBERSET intersect_fiberset(FIBERSET fs1, FIBERSET fs2) { */ Declaration field_ref_p(Expression expr) { switch (Expression_KEY(expr)) { + default: break; case KEYfuncall: { Expression func = funcall_f(expr); switch (Expression_KEY(func)) { + default: break; case KEYvalue_use: { Declaration attr = USE_DECL(value_use_use(func)); if (attr == NULL) aps_error(func,"unbound function"); @@ -282,14 +288,17 @@ Declaration field_ref_p(Expression expr) { Declaration attr_ref_p(Expression expr) { switch (Expression_KEY(expr)) { + default: break; case KEYfuncall: { Expression func = funcall_f(expr); switch (Expression_KEY(func)) { + default: break; case KEYvalue_use: { Declaration attr = USE_DECL(value_use_use(func)); if (attr == NULL) aps_error(func,"unbound function"); else if (DECL_IS_LOCAL(attr) && !FIELD_DECL_P(attr)) { switch 
(Declaration_KEY(attr)) { + default: break; case KEYattribute_decl: return attr; } @@ -303,14 +312,17 @@ Declaration attr_ref_p(Expression expr) { Declaration constructor_call_p(Expression expr) { switch (Expression_KEY(expr)) { + default: break; case KEYfuncall: { Expression func = funcall_f(expr); switch (Expression_KEY(func)) { + default: break; case KEYvalue_use: { Declaration decl = USE_DECL(value_use_use(func)); if (decl == NULL) aps_error(func,"unbound function"); else if (DECL_IS_LOCAL(decl) && !FIELD_DECL_P(decl)) { switch (Declaration_KEY(decl)) { + default: break; case KEYconstructor_decl: return decl; } @@ -348,13 +360,16 @@ BOOL local_type_p(Type ty) { /* could this type carry an object? */ Declaration local_call_p(Expression expr) { switch (Expression_KEY(expr)) { + default: break; case KEYfuncall: { Expression func = funcall_f(expr); switch (Expression_KEY(func)) { + default: break; case KEYvalue_use: { Declaration decl = USE_DECL(value_use_use(func)); if (decl != NULL && DECL_IS_LOCAL(decl)) { switch (Declaration_KEY(decl)) { + default: break; case KEYprocedure_decl: case KEYfunction_decl: return decl; @@ -404,8 +419,10 @@ Declaration result_decl_p(Declaration rdecl) { while (ABSTRACT_APS_tnode_phylum(node) == KEYDeclarations) node = tnode_parent(node); switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYType: switch (Type_KEY((Type)node)) { + default: break; case KEYfunction_type: return (Declaration)tnode_parent(node); } @@ -521,15 +538,19 @@ Declaration phylum_shared_info_attribute(Declaration phylum, STATE *s) { Declaration responsible_node_declaration(void *node) { while (node != NULL) { switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYDeclaration: { Declaration decl = (Declaration)node; switch (Declaration_KEY(decl)) { + default: break; case KEYtop_level_match: { Pattern pat=matcher_pat(top_level_match_m((Declaration)node)); switch (Pattern_KEY(pat)) { + default: break; case KEYand_pattern: pat = 
and_pattern_p1(pat); switch (Pattern_KEY(pat)) { + default: break; case KEYpattern_var: return pattern_var_formal(pat); break; @@ -558,10 +579,12 @@ Declaration shared_use_p(Expression expr) { */ if (responsible_node_declaration(expr) == NULL) return NULL; switch (Expression_KEY(expr)) { + default: break; case KEYvalue_use: { Declaration decl = USE_DECL(value_use_use(expr)); if (decl == NULL || !DECL_IS_SHARED(decl)) return NULL; switch (Declaration_KEY(decl)) { + default: break; case KEYvalue_decl: return decl; } @@ -617,8 +640,8 @@ Declaration responsible_node_shared_info(void *node, STATE *s) { return phylum_shared_info_attribute(phy,s); } -// return true if this pattern variable is controlled by a case -// statement and if so what the case statement is. +// return true if this pattern variable is controlled by a case/for +// statement and if so what the case/for statement is. Declaration formal_in_case_p(Declaration formal) { if (Declaration_KEY(formal) != KEYnormal_formal) return NULL; if (ABSTRACT_APS_tnode_phylum(tnode_parent(formal)) == KEYPattern) { @@ -641,8 +664,9 @@ Declaration formal_in_case_p(Declaration formal) { switch (Declaration_KEY((Declaration)parent)) { default: fatal_error("%d: not a case",tnode_line_number(parent)); + return NULL; /* Keep CC happy */ break; - case KEYcase_stmt: + case KEYsome_case_stmt: return (Declaration)parent; case KEYtop_level_match: return NULL; @@ -778,6 +802,7 @@ void init_field_decls(Declaration module, STATE *s) { decl = DECL_NEXT(decl)) { Declaration_info(decl)->decl_flags |= SHARED_DECL_FLAG; switch (Declaration_KEY(decl)) { + default: break; case KEYvalue_decl: { Declaration reversed; if (fiber_debug & FIBER_INTRO) { @@ -836,6 +861,7 @@ static void *lhs_key = &lhs_key; /* for now, lhs and rhs cover everything */ void *init_rhs_lhs(void *key, void *node) { switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYExpression: { Expression expr = (Expression)node; Expression_info(expr)->expr_flags |= 
EXPR_RHS_FLAG; @@ -844,6 +870,7 @@ void *init_rhs_lhs(void *key, void *node) { case KEYDeclaration: { Declaration decl = (Declaration)node; switch (Declaration_KEY(decl)) { + default: break; case KEYpragma_call: return NULL; case KEYassign: @@ -864,6 +891,7 @@ void *preinitialize_fibersets(void *statep, void *node) FIBERSETS *fss = 0; int i; switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYDeclaration: { Declaration decl = (Declaration)node; fss = &fibersets_for((Declaration)node); @@ -948,8 +976,9 @@ void print_shared_info_fibersets(STATE *state) { } } -void *finalize_fibersets_for_decl(Declaration decl) { +void finalize_fibersets_for_decl(Declaration decl) { switch (Declaration_KEY(decl)) { + default: break; case KEYdeclaration: { fiberset_for(decl,FIBERSET_REVERSE_FINAL) = @@ -990,10 +1019,12 @@ static void *add_fiberset_to_value_uses(void *fsp, void *node) { FIBERSET fs = (FIBERSET)fsp; int fstype = fs->fiberset_type; switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYExpression: { Expression expr = (Expression)node; if (!EXPR_IS_RHS(expr)) return fsp; /* don't look at LHS */ switch (Expression_KEY(expr)) { + default: break; case KEYvalue_use: /* if we have a use, and the use is unshared * (i.e. the decl is not a shared value decl or @@ -1017,6 +1048,7 @@ static void *add_fiberset_to_direct_defs(void *fsp, void *node) { FIBERSET fs = (FIBERSET)fsp; int fstype = fs->fiberset_type; switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYExpression: { Expression expr = (Expression)node; Declaration field; @@ -1027,6 +1059,7 @@ static void *add_fiberset_to_direct_defs(void *fsp, void *node) { return fsp; /* not a field ref */ object = field_ref_object(expr); switch (Expression_KEY(object)) { + default: break; case KEYvalue_use: /* if we have a direct field assignment, and the use is unshared * (i.e. 
the decl is not a shared value decl or @@ -1072,10 +1105,12 @@ static void *add_fiberset_to_value_defs(void *fsp, void *node) { FIBERSET fs = (FIBERSET)fsp; int fstype = fs->fiberset_type; switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYExpression: { Expression expr = (Expression)node; if (!EXPR_IS_LHS(expr)) return fsp; /* only look at LHS */ switch (Expression_KEY(expr)) { + default: break; case KEYvalue_use: /* if we have a use, and the use is unshared * (i.e. the decl is not a shared value decl or @@ -1103,9 +1138,11 @@ static void *add_fiberset_to_function_use_shared_info(void *fsp, void *node) { FIBERSET fs = (FIBERSET)fsp; int fstype = fs->fiberset_type; switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYExpression: { Expression expr = (Expression)node; switch (Expression_KEY(expr)) { + default: break; case KEYfuncall: if (attribute_decl_phylum(fs->tnode) == local_call_p(expr)) { Declaration attr = @@ -1127,9 +1164,11 @@ static void *add_fiberset_to_function_call_shared_info(void *fsp, void *node) { FIBERSET fs = (FIBERSET)fsp; int fstype = fs->fiberset_type; switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYExpression: { Expression expr = (Expression)node; switch (Expression_KEY(expr)) { + default: break; case KEYfuncall: { Declaration decl = local_call_p(expr); if (decl != NULL) { @@ -1151,6 +1190,7 @@ static void *add_fiberset_to_function_call_shared_info(void *fsp, void *node) { */ static void *add_fiberset_to_shared_info(void *fsp, void *node) { switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYDeclaration: { Declaration tdecl = (Declaration)node; switch (Declaration_KEY(tdecl)) { @@ -1209,15 +1249,17 @@ static void *add_fiberset_to_shared_use(void *fsp, void *node) { FIBERSET fs = (FIBERSET)fsp; int fstype = fs->fiberset_type; switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYExpression: { Expression expr = (Expression)node; switch (Expression_KEY(expr)) { + 
default: break; case KEYvalue_use: { Declaration sdecl = USE_DECL(value_use_use(expr)); /* if we have a shared use, that use is equivalent to * shared_info.sdecl * and the direction is correct, - * then we apply the rules for field acess. + * then we apply the rules for field access. */ if (DECL_IS_SHARED(sdecl) && Declaration_KEY(sdecl) == KEYvalue_decl && @@ -1269,9 +1311,11 @@ static void *add_fiberset_to_attr_use(void *fsp, void *node) { FIBERSET fs = (FIBERSET)fsp; int fstype = fs->fiberset_type; switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYExpression: { Expression expr = (Expression)node; switch (Expression_KEY(expr)) { + default: break; case KEYfuncall: if (fs->tnode == attr_ref_p(expr)) add_to_fiberset(fs->fiber,expr,fstype,&expr_fiberset_for(expr,fstype)); @@ -1288,9 +1332,11 @@ static void *add_fiberset_to_function_call(void *fsp, void *node) { int fstype = fs->fiberset_type; Declaration formal=(Declaration)fs->tnode; switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYExpression: { Expression expr = (Expression)node; switch (Expression_KEY(expr)) { + default: break; case KEYfuncall: { Declaration func = formal_function_decl(formal); if (local_call_p(expr) == func) { @@ -1317,6 +1363,7 @@ static void *add_fiberset_to_pattern_vars(void *fsp, void *node) { case KEYPattern: { Pattern pat = (Pattern)node; switch (Pattern_KEY(node)) { + default: break; case KEYpattern_var: { Declaration pvar = pattern_var_formal(pat); add_to_fiberset(fs->fiber,pvar,fstype,&fiberset_for(pvar,fstype)); @@ -1385,11 +1432,18 @@ USET add_to_uset(Declaration decl, USET uset) if (*head != new_uset) { done = FALSE; if (fiber_debug & ADD_FIBER) { - printf("Added (%d,%s%s) to uset of %s\n", + printf("Added (%d,%s%s) to uset of ", tnode_line_number(new_uset->u), EXPR_IS_LHS(new_uset->u) ? 
"dot " : "", - decl_name(uitem_field(new_uset->u)), - decl_name(decl)); + decl_name(uitem_field(new_uset->u))); + switch (Declaration_KEY(decl)) { + case KEYassign: + printf("assignment:%d\n",tnode_line_number(decl)); + break; + default: + printf("%s\n",decl_name(decl)); + break; + } } *head = new_uset; } @@ -1409,8 +1463,16 @@ OSET add_to_oset(Declaration decl, OSET oset) if (new_oset != *head) { done = FALSE; if (fiber_debug & ADD_FIBER) { - printf("Added %s to oset for %s\n", - decl_name(new_oset->o),decl_name(decl)); + printf("Added %s to oset for ", + decl_name(new_oset->o)); + switch (Declaration_KEY(decl)) { + case KEYassign: + printf("assignment:%d\n",tnode_line_number(decl)); + break; + default: + printf("%s\n",decl_name(decl)); + break; + } } *head = new_oset; } @@ -1488,58 +1550,60 @@ USET doUO(Expression e, OSET oset) { RETURN EMPTY_USET; break; - case KEYvalue_use: - { Declaration sdecl = USE_DECL(value_use_use(e)); - // Several cases - // * 1> a use of a local "shared" global variable - // * perhaps a global collection - // * 2> a use of a local (attribute) - // -// printf("DEBUG: in KEYvalue.\n"); - if (!DECL_IS_LOCAL(sdecl)) { -// printf("DEBUG: not local.\n"); - RETURN EMPTY_USET; - } else if (DECL_IS_SHARED(sdecl)) { -// printf("DEBUG: shared.\n"); - USET p = (USET)malloc(sizeof(struct uset)); - p->u = e; - p->rest = NULL; - add_to_uset(responsible_node_shared_info(e, mystate),p); - add_to_oset(sdecl,oset); - RETURN get_uset(sdecl); - } else if (!DECL_IS_SYNTAX(sdecl)) { -// printf("DEBUG: not syntax.\n"); - add_to_oset(sdecl,oset); - RETURN get_uset(sdecl); - } else { - aps_error(e,"assigning a syntax decl"); - } - break; - } // case KEYvalue_use - case KEYfuncall: - // * Several cases - // * 1> X.a (attr_ref) - // * 2> w.f (field_ref) - // - { Declaration fdecl; - - // attr ref: X.a - if ((fdecl = attr_ref_p(e)) != NULL) { - add_to_oset(fdecl,oset); - RETURN get_uset(fdecl); - } else if ((fdecl = field_ref_p(e))!= NULL) { - // field ref: w.f - 
Expression object = field_ref_object(e); - - USET newuset = (USET)malloc(sizeof(struct uset)); - OSET o_w; - newuset->rest = NULL; - newuset->u = e; - o_w = doOU(object, newuset); - RETURN EMPTY_USET; - } - } // case funcall - } // switch + case KEYvalue_use: + { Declaration sdecl = USE_DECL(value_use_use(e)); + // Several cases + // * 1> a use of a local "shared" global variable + // * perhaps a global collection + // * 2> a use of a local (attribute) + // + // printf("DEBUG: in KEYvalue.\n"); + if (!DECL_IS_LOCAL(sdecl)) { + // printf("DEBUG: not local.\n"); + RETURN EMPTY_USET; + } else if (DECL_IS_SHARED(sdecl)) { + // printf("DEBUG: shared.\n"); + USET p = (USET)malloc(sizeof(struct uset)); + p->u = e; + p->rest = NULL; + add_to_uset(responsible_node_shared_info(e, mystate),p); + add_to_oset(sdecl,oset); + RETURN get_uset(sdecl); + } else if (!DECL_IS_SYNTAX(sdecl)) { + // printf("DEBUG: not syntax.\n"); + add_to_oset(sdecl,oset); + RETURN get_uset(sdecl); + } else { + aps_error(e,"assigning a syntax decl"); + } + break; + } // case KEYvalue_use + case KEYfuncall: + // * Several cases + // * 1> X.a (attr_ref) + // * 2> w.f (field_ref) + // + { Declaration fdecl; + + // attr ref: X.a + if ((fdecl = attr_ref_p(e)) != NULL) { + add_to_oset(fdecl,oset); + RETURN get_uset(fdecl); + } else if ((fdecl = field_ref_p(e))!= NULL) { + // field ref: w.f + Expression object = field_ref_object(e); + + USET newuset = (USET)malloc(sizeof(struct uset)); + OSET o_w; + newuset->rest = NULL; + newuset->u = e; + o_w = doOU(object, newuset); + RETURN EMPTY_USET; + } + } // case funcall + } // switch + fatal_error("doUO was about to return undefined.\n"); + return 0; } // doUOp: calculate Uset of pattern given an OSet. 
@@ -1669,6 +1733,9 @@ OSET doOU(Expression e, USET uset) add_to_uset(sdecl,uset); RETURN get_oset(sdecl); } else { + //XXX: FIx this warning + // DECL_IS_RHS is set for pattern variables that are not syntax + // And it's OK to read "lineno" or other phylum/syntax things aps_warning(e,"using a syntax decl"); add_to_uset(sdecl,uset); RETURN get_oset(sdecl); @@ -1681,6 +1748,7 @@ OSET doOU(Expression e, USET uset) // * 2> w.f (field_ref) // * 3> local function // * 4> primitive function + // * special functions that never involve fibers: e.g. lineno // { Declaration fdecl; @@ -1717,11 +1785,12 @@ OSET doOU(Expression e, USET uset) USET q; for (q = u_o; q != NULL; q= q->rest) { - if ( (EXPR_IS_LHS(q->u)) && same_field(q->u ,newuset->u)) { - // f. belongs to u_o - oset = oset_union(oset, - doOU(assign_rhs( (Declaration)tnode_parent(q->u) ), uset)); - } // if + if ( (EXPR_IS_LHS(q->u)) && same_field(q->u ,newuset->u)) { + // f. belongs to u_o + Declaration assign = (Declaration)tnode_parent(q->u); + add_to_uset(assign,uset); + oset = oset_union(oset, get_oset(assign)); + } // if } // for q } // for p RETURN oset; @@ -1761,7 +1830,7 @@ OSET doOU(Expression e, USET uset) RETURN get_oset(result); } - } else { // primiive: how to get decl? don't need. + } else { // primitive: how to get decl? don't need. 
OSET oset = EMPTY_OSET; Expression arg = first_Actual(funcall_actuals(e)); for (; arg!= NULL; arg = Expression_info(arg)->next_expr){ @@ -1777,6 +1846,11 @@ void *print_all_ou(void *statep, void *node) { switch (ABSTRACT_APS_tnode_phylum(node)) { case KEYDeclaration: { Declaration decl = (Declaration)node; + switch (Declaration_KEY(decl)) { + default: break; + case KEYassign: + return statep; + } if (Declaration_info(decl)->oset != NULL) { printf("OSET of node: %s\n", decl_name(decl)); print_oset(Declaration_info(decl)->oset); @@ -1837,8 +1911,17 @@ int id_decl_node(Declaration decl) { FSA_next_node_index += 2; if (fiber_debug & ALL_FIBERSETS) { + switch (Declaration_KEY(decl)) + { + case KEYassign: + printf("%d: index for assign is %d\n",tnode_line_number(decl), + index); + break; + default: printf("%d: index for %s is %d\n",tnode_line_number(decl), decl_name(decl),index); + break; + } } Declaration_info(decl)->index = index; omega = add_to_nodeset(omega, index); @@ -1910,6 +1993,7 @@ void print_fields(EDGES fields_list){ void *count_node(void *u, void *node) { switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYDeclaration: { Declaration decl = (Declaration)node; @@ -1980,10 +2064,12 @@ void *count_node(void *u, void *node) void *compute_OU(void *u, void *node) { switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYDeclaration: { Declaration decl = (Declaration)node; switch (Declaration_KEY(decl)) { + default: break; case KEYtop_level_match: { // shared_info Declaration lhs = top_level_match_lhs_decl(decl); @@ -2004,7 +2090,7 @@ void *compute_OU(void *u, void *node) break; } // case top_level_match - // value_decl: includes initilization precess + // value_decl: includes initialization precess case KEYvalue_decl: { Default def = value_decl_default(decl) ; switch (Default_KEY(def)){ @@ -2032,19 +2118,28 @@ void *compute_OU(void *u, void *node) // printf("ASSIGN: %d \n", tnode_line_number(node)); Expression lhs = assign_lhs(decl); 
Expression rhs = assign_rhs(decl); - - USET u1 = doUO(lhs, EMPTY_OSET); - OSET o2 = doOU(rhs, u1); - doUO(lhs, o2); + + add_to_oset(decl, doOU(rhs, get_uset(decl))); + add_to_uset(decl, doUO(lhs, get_oset(decl))); return NULL; break; } - case KEYcase_stmt: { + case KEYfor_in_stmt: + { + Expression sequence = for_in_stmt_seq(decl); + Declaration formal = for_in_stmt_formal(decl); + OSET oset = doOU(sequence, EMPTY_USET); + add_to_oset(formal, oset); + USET uset = get_uset(formal); + doOU(sequence, uset); + break; + } + case KEYsome_case_stmt: { Match m; - Expression expr = case_stmt_expr(decl); + Expression expr = some_case_stmt_expr(decl); OSET oset = doOU(expr, EMPTY_USET); - for (m= first_Match(case_stmt_matchers(decl)); m; m = MATCH_NEXT(m)){ + for (m= first_Match(some_case_stmt_matchers(decl)); m; m = MATCH_NEXT(m)){ USET u = doUOp(matcher_pat(m), oset); doOU(expr, u); } @@ -2156,6 +2251,7 @@ void *build_FSA(void *vstate, void *node) omega = add_to_nodeset(omega,FSA_default_node+1); } switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYDeclaration: { Declaration decl = (Declaration)node; @@ -2176,6 +2272,7 @@ void *build_FSA(void *vstate, void *node) if (uset != NULL) add_edges_uset(decl,NULL); // Qx(-)-->Qu(-) switch (Declaration_KEY(decl)) { + default: break; case KEYmodule_decl: if (uset != NULL) { USET q; @@ -2275,17 +2372,41 @@ void *build_FSA(void *vstate, void *node) Expression lhs = assign_lhs(decl); Expression rhs = assign_rhs(decl); + if (fiber_debug & ADD_FSA_EDGE) { + printf("%d: in assign\n",tnode_line_number(node)); + } + NODESET M = link_expr_lhs(lhs, EMPTY_NODESET); NODESET N = link_expr_rhs(rhs, M); link_expr_lhs(lhs, N); + if (fiber_debug & ADD_FSA_EDGE) { + printf(" M="); + print_nodeset(M); + printf(", N="); + print_nodeset(N); + printf("\n"); + } + return NULL; } // KEYassign - case KEYcase_stmt: { + case KEYfor_in_stmt: + { + Expression sequence = for_in_stmt_seq(decl); + Declaration formal = for_in_stmt_formal(decl); + 
NODESET m = EMPTY_NODESET; + int index = get_node_decl(formal); + if (index > 0) { + m = set_of_node(index + 1); + } + link_expr_rhs(sequence, m); + break; + } + case KEYsome_case_stmt: { Match m; - Expression expr = case_stmt_expr(decl); + Expression expr = some_case_stmt_expr(decl); NODESET M = link_expr_rhs(expr, EMPTY_NODESET); - for (m= first_Match(case_stmt_matchers(decl)); m; m = MATCH_NEXT(m)){ + for (m= first_Match(some_case_stmt_matchers(decl)); m; m = MATCH_NEXT(m)){ NODESET N = link_expr_lhs_p(matcher_pat(m), M); link_expr_rhs(expr, N); } @@ -2512,6 +2633,7 @@ NODESET link_expr_rhs(Expression e, NODESET ns){ return set_of_node(get_node_decl(fdecl)); //{Qd} } else if ((fdecl = field_ref_p(e)) != NULL) { //field ref: w.f NODESET n; + if (fiber_debug & ADD_FSA_EDGE) printf("Starting to add edges for field ref of %s\n",decl_name(fdecl)); for (n=ns; n; n = n->rest) { add_FSA_edge(get_node_expr(e)+1, n->node, fdecl); add_FSA_edge(get_node_expr(e), n->node, reverse_field(fdecl)); @@ -2523,6 +2645,7 @@ NODESET link_expr_rhs(Expression e, NODESET ns){ link_expr_rhs(object, set_of_node(get_node_expr(e)+1)); // Qe(-) { OSET oset = doOU(e, EMPTY_USET); + if (fiber_debug & ADD_FSA_EDGE) printf("ending to add edges for field ref of %s\n",decl_name(fdecl)); return oset_to_nodeset(oset); } } @@ -2600,35 +2723,36 @@ NODESET link_expr_lhs(Expression e, NODESET ns) { } //if return set_of_node(get_node_decl(decl)+1); // Qd(-) - break; } // case value_use - case KEYfuncall: { - Declaration fdecl; - if ((fdecl = attr_ref_p(e)) != NULL) { // attr ref: X.a - // same as value_use - return set_of_node(get_node_decl(fdecl)+1); //{Qd(-)} - } else if ((fdecl = field_ref_p(e)) != NULL) { //field ref: w.f - NODESET n; - for (n=ns; n; n = n->rest) { - // Qe(-)----f.--->n - add_FSA_edge(get_node_expr(e)+1, n->node, reverse_field(fdecl)); - add_FSA_edge(get_node_expr(e), n->node, fdecl); - // Qe(-) to n: bar node is even number. 
- // the edge is reverse_field(fdecl); - } // for end - { - Expression object = field_ref_object(e); - link_expr_lhs(object, set_of_node(get_node_expr(e)+1) ); // Qe(-) - { - USET uset = doUO(e, EMPTY_OSET); - // printf("DEBUG: after doUO in link_expr.\n"); - return uset_to_nodeset(uset); - } - } - } // if end - break; - } // KEYfuncall - } // switch + case KEYfuncall: { + Declaration fdecl; + if ((fdecl = attr_ref_p(e)) != NULL) { // attr ref: X.a + // same as value_use + return set_of_node(get_node_decl(fdecl)+1); //{Qd(-)} + } else if ((fdecl = field_ref_p(e)) != NULL) { //field ref: w.f + NODESET n; + for (n=ns; n; n = n->rest) { + // Qe(-)----f.--->n + add_FSA_edge(get_node_expr(e)+1, n->node, reverse_field(fdecl)); + add_FSA_edge(get_node_expr(e), n->node, fdecl); + // Qe(-) to n: bar node is even number. + // the edge is reverse_field(fdecl); + } // for end + { + Expression object = field_ref_object(e); + link_expr_lhs(object, set_of_node(get_node_expr(e)+1) ); // Qe(-) + { + USET uset = doUO(e, EMPTY_OSET); + // printf("DEBUG: after doUO in link_expr.\n"); + return uset_to_nodeset(uset); + } + } + } else { + aps_error(e, "wrong: not a proper funcall to add edges."); + return EMPTY_NODESET; + } + } // KEYfuncall + } // switch } static int edge_num = 0; @@ -2929,6 +3053,7 @@ void *DFA_fiber_set(void *u, void *node) STATE *state = (STATE *)u; switch (ABSTRACT_APS_tnode_phylum(node)) { + default: break; case KEYDeclaration: { Declaration decl = (Declaration)node; @@ -2946,8 +3071,16 @@ void *DFA_fiber_set(void *u, void *node) decl_fsets->set[FIBERSET_REVERSE_FINAL] = NULL; if (fiber_debug & FIBER_FINAL) { - printf("fiber set for %s /%d is: ", decl_name(decl), - Declaration_info(decl)->index); + printf("fiber set for "); + switch (Declaration_KEY(decl)) { + case KEYassign: + printf("assignment on line %d",tnode_line_number(decl)); + break; + default: + puts(decl_name(decl)); + break; + } + printf(" /%d is ",Declaration_info(decl)->index); } for (i= 2; i<= 
DFA_node_number; i++) { // for any DFA tree node // fiberset doesn't include base fiber @@ -3030,6 +3163,8 @@ int circle_check_rec(NODESET old, int node){ } } if (terminal == 1) return 0; + fatal_error("circle_check_rec assertion failure"); + return 0; } int circle_check(){ @@ -3137,7 +3272,7 @@ void fiber_module(Declaration module, STATE *s) { print_fields(all_fields_list); } - // initionlize the graph + // initialize the graph // FSA_graph = (EDGES)malloc(sizeof(struct edges)*index); // printf("DEBUG: traverse declaration to build FSA.\n"); @@ -3257,12 +3392,24 @@ void print_fiberset_entry(FIBERSET fs, FILE *stream) { } print_fiber(fs->fiber,stdout); switch (ABSTRACT_APS_tnode_phylum(tnode)) { + default: break; case KEYDeclaration: - printf(" of %s",decl_name((Declaration)tnode)); + { + Declaration decl = (Declaration)tnode; + switch (Declaration_KEY(decl)) { + case KEYassign: + printf(" of assignment on line %d",tnode_line_number(decl)); + break; + default: + printf(" of %s",decl_name(decl)); + break; + } + } break; case KEYExpression: { Expression expr = (Expression)tnode; switch (Expression_KEY(expr)) { + default: break; case KEYvalue_use: printf(" of use(%s)", symbol_name(use_name(value_use_use(expr)))); @@ -3285,6 +3432,7 @@ void print_fiberset_entry(FIBERSET fs, FILE *stream) { } else { Expression func = funcall_f(expr); switch (Expression_KEY(func)) { + default: break; case KEYvalue_use: { Use u = value_use_use(func); diff --git a/analyze/aps-fiber.h b/analyze/aps-fiber.h index 0d30e177..2059dce1 100644 --- a/analyze/aps-fiber.h +++ b/analyze/aps-fiber.h @@ -1,3 +1,6 @@ +#ifndef APS_FIBER_H +#define APS_FIBER_H + typedef struct fiber { /* three possibilities: * 1> null, only for a special case base fiber (never stored in sets) @@ -57,6 +60,7 @@ extern int member_fiberset(FIBER,FIBERSET); struct analysis_state; +extern void add_fibers_to_state(struct analysis_state *s); extern void fiber_module(Declaration module, struct analysis_state *s); extern void 
print_fiber(FIBER,FILE *); @@ -70,6 +74,7 @@ extern int fiber_debug; #define FIBER_INTRO 8 #define FIBER_FINAL 16 #define CALLSITE_INFO 32 +#define ADD_FSA_EDGE 64 /* useful routines */ @@ -112,5 +117,4 @@ typedef struct uset { #define EMPTY_OSET (OSET)NULL #define EMPTY_USET (USET)NULL - - +#endif diff --git a/analyze/aps-info.h b/analyze/aps-info.h index c71fb8e7..80b7384b 100644 --- a/analyze/aps-info.h +++ b/analyze/aps-info.h @@ -4,6 +4,9 @@ * we need a new slot. */ +#ifndef APS_INFO_H +#define APS_INFO_H + struct Program_info { unsigned program_flags; #define PROGRAM_BOUND_FLAG 1 @@ -33,6 +36,7 @@ struct Declaration_info { Declaration dual_decl; /* a declaration created out of nowhere */ Declaration copied_decl; /* pointer to copied version (scratch) */ FIBERSETS decl_fibersets; + Declaration proxy_fdecl; /* actual fdecl being proxied */ int if_index; /* count of if_stmt within a top-level match */ #define instance_index if_index CONDITION decl_cond; /* condition that must be satisfied to take effect */ @@ -59,6 +63,7 @@ struct Declaration_info { #define UP_DOWN_FLAG (1<<16) #define MODULE_DECL_GENERATING (1<<17) #define MODULE_DECL_GENERATING_VALID (1<<18) +#define DECL_RHS_SYNTAX_FLAG (1<<19) OSET oset; /* oset of the declaration */ USET uset; /* uset of the declaration */ int index; /* represent the nodeis Qd and Qd(-) */ @@ -66,6 +71,7 @@ struct Declaration_info { }; extern struct Declaration_info *Declaration_info(Declaration); +#define DECL_PHY_GRAPH(decl) (Declaration_info(decl)->node_phy_graph) #define DECL_NEXT(decl) (Declaration_info(decl)->next_decl) #define NEXT_FIELD(decl) (Declaration_info(decl)->next_field_decl) #define DUAL_DECL(decl) (Declaration_info(decl)->dual_decl) @@ -78,7 +84,7 @@ extern struct Declaration_info *Declaration_info(Declaration); #define DECL_IS_OBJECT(decl) \ (Declaration_info(decl)->decl_flags&DECL_OBJECT_FLAG) #define DECL_IS_SYNTAX(decl) \ - (Declaration_info(decl)->decl_flags&(DECL_LHS_FLAG|DECL_RHS_FLAG)) + 
(Declaration_info(decl)->decl_flags&(DECL_LHS_FLAG|DECL_RHS_SYNTAX_FLAG)) #define TYPE_FORMAL_IS_EXTENSION(decl) \ (Declaration_info(decl)->decl_flags&TYPE_FORMAL_EXTENSION_FLAG) #define ATTR_DECL_IS_SYN(decl) \ @@ -178,3 +184,5 @@ extern void set_tnode_parent(Program p); extern void *tnode_parent(void *); #define decl_name(decl) (char*)symbol_name(def_name(declaration_def(decl))) + +#endif diff --git a/analyze/aps-oag.c b/analyze/aps-oag.c index 4a6e4930..c791a97a 100644 --- a/analyze/aps-oag.c +++ b/analyze/aps-oag.c @@ -303,6 +303,22 @@ CTO_NODE* schedule_rest(AUG_GRAPH *aug_graph, return cto_node; } +/** Return phase (synthesized) or -phase (inherited) + * for fibered attribute, given the phylum's summary dependence graph. + * TODO: make public and export and remove from static-impl.cc + */ +int attribute_schedule(PHY_GRAPH *phy_graph, FIBERED_ATTRIBUTE* key) +{ + int n = phy_graph->instances.length; + for (int i=0; i < n; ++i) { + FIBERED_ATTRIBUTE* fa = &(phy_graph->instances.array[i].fibered_attr); + if (fa->attr == key->attr && fa->fiber == key->fiber) + return phy_graph->summary_schedule[i]; + } + fatal_error("Could not find summary schedule for instance"); + return 0; +} + void schedule_augmented_dependency_graph(AUG_GRAPH *aug_graph) { int n = aug_graph->instances.length; int i; @@ -316,6 +332,28 @@ void schedule_augmented_dependency_graph(AUG_GRAPH *aug_graph) { printf("Scheduling conditional total order for %s\n", aug_graph_name(aug_graph)); } + if (oag_debug & DEBUG_ORDER) { + for (int i=0; i <= n; ++i) { + INSTANCE *in = &(aug_graph->instances.array[i]); + print_instance(in,stdout); + printf(": "); + Declaration ad = in->fibered_attr.attr; + Declaration chdecl; + + int j = 0, ch = -1; + for (chdecl = aug_graph->first_rhs_decl; chdecl != 0; chdecl=DECL_NEXT(chdecl)) { + if (in->node == chdecl) ch = j; + ++j; + } + if (in->node == aug_graph->lhs_decl || ch >= 0) { + PHY_GRAPH *npg = Declaration_info(in->node)->node_phy_graph; + int ph = 
attribute_schedule(npg,&(in->fibered_attr)); + printf("<%d,%d>\n",ph,ch); + } else { + printf("local\n"); + } + } + } /* we use the schedule array as temp storage */ for (i=0; i < n; ++i) { diff --git a/analyze/aps-oag.h b/analyze/aps-oag.h index 613f903d..0eea2dc2 100644 --- a/analyze/aps-oag.h +++ b/analyze/aps-oag.h @@ -1,5 +1,26 @@ +#ifndef APS_OAG_H +#define APS_OAG_H + extern void compute_oag(Declaration,STATE *); +/** Return phase (synthesized) or -phase (inherited) + * for fibered attribute, given the phylum's summary dependence graph. + */ +extern int attribute_schedule(PHY_GRAPH *phy_graph, FIBERED_ATTRIBUTE* key); + +typedef struct child_phase_type CHILD_PHASE; + +struct child_phase_type +{ + short ph; // Phase number: + // ph is negative for inherited attributes (> 0) + // ph is positive for synthesized attributes (< 0) + // ph is zero for locals (= 0) + short ch; // Child number: + // ch is -1 for parent attributes + // otherwise it is in range [0,nch) where nch is total count of children +}; + /** A conditional total order is a tree of cto nodes. * null means the CTO is done. 
* @@ -13,12 +34,25 @@ struct cto_node { INSTANCE* cto_instance; CTO_NODE* cto_next; CTO_NODE* cto_if_true; + CHILD_PHASE child_phase; // + Declaration child_decl; // child decl in case of visit marker + short visit; // parent phase number + int chunk_index; // SCC component index + BOOL chunk_circular; // flag to indicate chunk is circular (phylum phase is circular for visits or local attribute depending on itself) #define cto_if_false cto_next }; - + extern int oag_debug; #define TOTAL_ORDER 1 #define DEBUG_ORDER 2 #define PROD_ORDER 4 #define PROD_ORDER_DEBUG 8 #define TYPE_3_DEBUG 16 +#define DEBUG_ORDER_VERBOSE 32 + +#define CONDITION_IS_IMPOSSIBLE(cond) ((cond).positive & (cond).negative) +#define MERGED_CONDITION_IS_IMPOSSIBLE(cond1, cond2) (((cond1).positive|(cond2).positive) & ((cond1).negative|(cond2).negative)) + +CONDITION instance_condition(INSTANCE *in); + +#endif diff --git a/analyze/aps-read.c b/analyze/aps-read.c index 0e2d2ef2..e0f417d8 100644 --- a/analyze/aps-read.c +++ b/analyze/aps-read.c @@ -7,8 +7,10 @@ #include #include #include "jbb.h" +#include "aps-lex.h" #include "aps-tree.h" #include "aps-read.h" +#include "aps.tab.h" #define MAX_PROGRAMS 100 @@ -17,6 +19,7 @@ static Program programs[MAX_PROGRAMS]; extern FILE *aps_yyin; extern char *aps_yyfilename; +extern int aps_yyparse(); Program the_tree = NULL; int aps_parse_error = 0; @@ -76,6 +79,7 @@ static int initialize() { } initialized=TRUE; } + return initialized; } Program find_Program(String name) { diff --git a/analyze/aps-read.h b/analyze/aps-read.h index a9e5afd4..65f1ab0d 100644 --- a/analyze/aps-read.h +++ b/analyze/aps-read.h @@ -2,5 +2,10 @@ * Read APS programs and index by name. 
*/ +#ifndef APS_READ_H +#define APS_READ_H + extern void set_aps_path(char *); extern Program find_Program(String); + +#endif diff --git a/analyze/aps-scc.c b/analyze/aps-scc.c new file mode 100644 index 00000000..0b361032 --- /dev/null +++ b/analyze/aps-scc.c @@ -0,0 +1,125 @@ +#include +#include +#include +#include +#include "aps-ag.h" +#include "jbb-alloc.h" + +void set_phylum_graph_components(PHY_GRAPH* phy_graph) { + int i, j, k; + int n = phy_graph->instances.length; + SccGraph graph; + scc_graph_initialize(&graph, n); + + // Add vertices + for (i = 0; i < n; i++) { + INSTANCE* in = &phy_graph->instances.array[i]; + scc_graph_add_vertex(&graph, (void*)in); + } + + // Add edges + for (i = 0; i < n; i++) { + INSTANCE* source = &phy_graph->instances.array[i]; + for (j = 0; j < n; j++) { + if (phy_graph->mingraph[i * n + j]) { + INSTANCE* sink = &phy_graph->instances.array[j]; + + scc_graph_add_edge(&graph, (void*)source, (void*)sink); + } + } + } + + phy_graph->components = scc_graph_components(&graph); + phy_graph->component_cycle = + (bool*)calloc(sizeof(bool), phy_graph->components->length); + + for (i = 0; i < phy_graph->components->length; i++) { + SCC_COMPONENT* comp = phy_graph->components->array[i]; + + for (j = 0; j < comp->length; j++) { + INSTANCE* in = (INSTANCE*)comp->array[j]; + + if (phy_graph->mingraph[in->index * n + in->index]) { + phy_graph->component_cycle[i] = true; + } + } + } + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf("Components of Phylum Graph: %s\n", phy_graph_name(phy_graph)); + for (j = 0; j < phy_graph->components->length; j++) { + SCC_COMPONENT* comp = phy_graph->components->array[j]; + printf(" Component #%d [%s]\n", j, + phy_graph->component_cycle[j] ? 
"circular" : "non-circular"); + + for (k = 0; k < comp->length; k++) { + printf(" "); + print_instance(comp->array[k], stdout); + printf("\n"); + } + } + printf("\n"); + } +} + +void set_aug_graph_components(AUG_GRAPH* aug_graph) { + int i, j, k; + int n = aug_graph->instances.length; + SccGraph graph; + scc_graph_initialize(&graph, n); + + // Add vertices + for (i = 0; i < n; i++) { + INSTANCE* in = &aug_graph->instances.array[i]; + scc_graph_add_vertex(&graph, (void*)in); + } + + // Add edges + for (i = 0; i < n; i++) { + INSTANCE* source = &aug_graph->instances.array[i]; + for (j = 0; j < n; j++) { + INSTANCE* sink = &aug_graph->instances.array[j]; + if (edgeset_kind(aug_graph->graph[i * n + j])) { + if (!MERGED_CONDITION_IS_IMPOSSIBLE(instance_condition(source), + instance_condition(sink))) { + scc_graph_add_edge(&graph, (void*)source, (void*)sink); + } + } + } + } + + aug_graph->components = scc_graph_components(&graph); + aug_graph->component_cycle = + (bool*)calloc(sizeof(bool), aug_graph->components->length); + + for (i = 0; i < aug_graph->components->length; i++) { + SCC_COMPONENT* comp = aug_graph->components->array[i]; + + for (j = 0; j < comp->length; j++) { + INSTANCE* source = (INSTANCE*)comp->array[j]; + + for (k = 0; k < comp->length; k++) { + INSTANCE* sink = (INSTANCE*)comp->array[k]; + if (edgeset_kind(aug_graph->graph[source->index * n + sink->index])) { + aug_graph->component_cycle[i] = true; + } + } + } + } + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf("Components of Augmented Graph: %s\n", aug_graph_name(aug_graph)); + for (j = 0; j < aug_graph->components->length; j++) { + SCC_COMPONENT* comp = aug_graph->components->array[j]; + printf(" Component #%d [%s]\n", j, + aug_graph->component_cycle[j] ? 
"circular" : "non-circular"); + + for (k = 0; k < comp->length; k++) { + printf(" "); + print_instance((INSTANCE*)comp->array[k], stdout); + printf("\n"); + } + } + printf("\n"); + } +} diff --git a/analyze/aps-scc.h b/analyze/aps-scc.h new file mode 100644 index 00000000..f6dd6846 --- /dev/null +++ b/analyze/aps-scc.h @@ -0,0 +1,18 @@ +#ifndef APS_SCC_H +#define APS_SCC_H + +#include "aps-dnc.h" + +/** + * @brief sets phylum graph SCC components of instances + * @param phy_graph phylum graph + */ +void set_phylum_graph_components(PHY_GRAPH* phy_graph); + +/** + * @brief sets augmented dependency graph SCC components of instances + * @param aug_graph augmented dependency graph + */ +void set_aug_graph_components(AUG_GRAPH* aug_graph); + +#endif diff --git a/analyze/aps-schedule.c b/analyze/aps-schedule.c index e69de29b..b114b921 100644 --- a/analyze/aps-schedule.c +++ b/analyze/aps-schedule.c @@ -0,0 +1,2904 @@ +#include +#include +#include +#include +#include + +#include "aps-ag.h" +#include "aps-debug.h" +#include "aps-tree.h" +#include "jbb-alloc.h" + +#define MAX(x, y) (((x) > (y)) ? 
(x) : (y)) +#define IS_VISIT_MARKER(node) (node->cto_instance == NULL) +#define CHUNK_GUIDING_DEPENDENCY (indirect_control_dependency) + +static int BUFFER_SIZE = 1000; + +// Enum representing type of chunks +enum ChunkTypeEnum { + HalfLeft = 1, // Parent inherited attribute + HalfRight = 2, // Parent synthesized attribute + Local = 4, // Local + Visit = 8 // Child visit +}; + +struct chunk_type { + enum ChunkTypeEnum type; // Type of chunk + int ph; // Phase + int ch; // Child number + int index; // Index of the chunk in the array of chunks + bool circular; // Boolean indicating whether chunk is circular or not + VECTOR(INSTANCE) instances; // Array of instances in the chunk +}; + +typedef struct chunk_type Chunk; + +struct chunk_graph_type { + DEPENDENCY* graph; + VECTOR(Chunk) instances; + bool* schedule; + SCC_COMPONENTS* chunk_components; +}; + +typedef struct chunk_graph_type ChunkGraph; + +// Utility struct to keep of track of information needed to handle group +// scheduling +struct total_order_state { + // Tuple indexed by instance number + CHILD_PHASE* instance_groups; + VECTOR(Declaration) children; +}; + +typedef struct total_order_state TOTAL_ORDER_STATE; + +static CTO_NODE* group_schedule(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + SCC_COMPONENT* chunk_component, + const int chunk_component_index, + Chunk* chunk, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int remaining, + CHILD_PHASE* group, + const short parent_ph); + +static CTO_NODE* schedule_visit_start(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int remaining, + const short parent_ph); + +static CTO_NODE* chunk_schedule(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + SCC_COMPONENT* chunk_component, + const int chunk_component_index, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int remaining, + const short parent_ph); + +static CTO_NODE* 
chunk_component_schedule(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int prev_chunk_component_index, + const int remaining, + const short parent_ph); + +static bool chunk_ready_to_go(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + SCC_COMPONENT* sink_component, + const int sink_component_index, + Chunk* sink_chunk, + TOTAL_ORDER_STATE* state); + +static int chunk_indexof(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + INSTANCE* instance); + +static CTO_NODE* schedule_visit_end(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + Chunk* chunk, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int remaining, + CHILD_PHASE* prev_group, + const short parent_ph); + +static int chunk_indexof(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + INSTANCE* instance); + +static int chunk_component_indexof(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + Chunk* chunk); + +static int chunk_lookup(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + enum ChunkTypeEnum type, + short ph, + short ch); + +/** + * @brief Utility to checks whether SCC component contains target + * @param component SCC component pointer + * @param target pointer to search + * @return boolean indicating whether target exist or not + */ +static bool scc_component_contains(SCC_COMPONENT* component, void* target) { + int i; + for (i = 0; i < component->length; i++) { + if (component->array[i] == target) { + return true; + } + } + + return false; +} + +/** + * Utility function that checks whether instance belongs to any phylum cycle + * or not + * @param phy_graph phylum graph + * @param in attribute instance + * @return -1 if instance does not belong to any cycle or index of phylum + * cycle + */ +static bool instance_in_phylum_cycle(PHY_GRAPH* phy_graph, INSTANCE* in) { + int n = phy_graph->instances.length; + + return phy_graph->mingraph[in->index * n + in->index]; +} + +/** + * Utility function that 
checks whether instance belongs to any augmented + * dependency graph cycle or not + * @param aug_graph augmented dependency graph + * @param in attribute instance + * @return true if the instance belongs to an augmented dependency graph + * cycle, false otherwise + */ +static bool instance_in_aug_cycle(AUG_GRAPH* aug_graph, INSTANCE* in) { + int n = aug_graph->instances.length; + + return edgeset_kind(aug_graph->graph[in->index * n + in->index]); +} + +/** + * Utility function that returns boolean indicating whether attributes in SCC + * contain parent or not + * @param aug_graph augmented dependency graph + * @param comp SCC component + * @return boolean indicating whether attributes of parent are in SCC + */ +static bool scc_involves_parent(AUG_GRAPH* aug_graph, SCC_COMPONENT* comp) { + int i; + for (i = 0; i < comp->length; i++) { + INSTANCE* in = (INSTANCE*)comp->array[i]; + + if (aug_graph->lhs_decl == in->node) { + return true; + } + } + + return false; +} + +/** + * Utility function to determine if instance group is local or not + * @param group instance group + * @return boolean indicating if instance group is local + */ +static bool group_is_local(CHILD_PHASE* group) { + return group->ph == 0 && group->ch == 0; +} + +/** + * Utility function to look ahead in the total order + * @param current CTO_NODE node + * @param ph phase + * @param ch child number + * @param immediate when true, stop at the first node that does not match + * @param visit_marker_only look for visit marker only + * @param any boolean pointer to set to true if found any + */ +static void followed_by(CTO_NODE* current, + const short ph, + const short ch, + const bool immediate, + bool visit_marker_only, + bool* any) { + if (current == NULL) + return; + + if (current->child_phase.ph == ph && current->child_phase.ch == ch) { + if ((visit_marker_only && IS_VISIT_MARKER(current)) || !visit_marker_only) { + *any = true; + } + } else if (immediate) { + return; + } + + if (group_is_local(&current->child_phase) && + 
if_rule_p(current->cto_instance->fibered_attr.attr)) { + followed_by(current->cto_if_true, ph, ch, immediate, visit_marker_only, + any); + } + + followed_by(current->cto_next, ph, ch, immediate, visit_marker_only, + any); +} + +/** + * Utility function that returns boolean indicating whether there is any + * in augmented dependency graph + * @param current CTO_NODE node + * @param ph phase + * @param ch child number + * @return boolean indicating if there is any + */ +static bool aug_graph_contains_phase(AUG_GRAPH* aug_graph, + TOTAL_ORDER_STATE* state, + CONDITION cond, + const short ph, + const short ch) { + int i; + for (i = 0; i < aug_graph->instances.length; i++) { + INSTANCE* in = &aug_graph->instances.array[i]; + CHILD_PHASE* group = &state->instance_groups[in->index]; + + if (group->ph == ph && group->ch == ch && + !MERGED_CONDITION_IS_IMPOSSIBLE(instance_condition(in), cond)) { + return true; + } + } + + return false; +} + +/** + * @brief Utility function used by assert_total_order to check sanity of total order + * 1) After end of phase visit marker there should be no attribute + * belonging to or <-ph,-1> because parent phase has ended. 
 + * 2) Immediately before child visit marker there should be attribute + * belonging to <-ph,ch> (if any) + * 3) Immediately after visit marker should be attribute belonging + * to <ph,ch> (if any) + * @param current CTO_NODE node + * @param prev_group group + */ +static void total_order_sanity_check(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + CTO_NODE* current, + CONDITION cond, + CHILD_PHASE* prev_group, + CHILD_PHASE* prev_parent, + TOTAL_ORDER_STATE* state) { + if (current == NULL) + return; + + CHILD_PHASE* current_group = &current->child_phase; + + if (IS_VISIT_MARKER(current)) { + // If end of parent phase visit marker + if (current->child_phase.ch == -1) { + // Boolean indicating whether end of phase visit marker was preceded + // by parent synthesized attributes if any + bool preceded_by_parent_synthesized_current_phase = + prev_parent->ph > 0 && prev_parent->ch == -1; + + if (aug_graph_contains_phase(aug_graph, state, cond, current_group->ph, + -1) && + !preceded_by_parent_synthesized_current_phase) { + fatal_error( + "[%s] Expected parent synthesized attribute <%+d,%+d> precede the " + "end of phase visit marker <%+d,%+d>", + aug_graph_name(aug_graph), current_group->ph, -1, current_group->ph, + -1); + } + + // End of total order so no need to check if after + // it is followed by <-(ph+1),-1> because this is the end + if (current->cto_next == NULL) + return; + + // Boolean indicating whether followed by inherited attribute of + // parent for the next phase <-(ph+1),-1> if any + bool followed_by_parent_inherited_next_phase = false; + followed_by(current->cto_next, -(current->child_phase.ph + 1), -1, + false /* not immediate */, false /* not visit markers only*/, + &followed_by_parent_inherited_next_phase); + + if (aug_graph_contains_phase(aug_graph, state, cond, + -(current_group->ph + 1), -1) && + !followed_by_parent_inherited_next_phase) { + aps_warning(current, + "[%s] Expected after end of phase visit marker " + "<%+d,%+d> to be " + "followed by parent 
inherited attribute of next " + "phase <%+d,%+d>", + aug_graph_name(aug_graph), current_group->ph, -1, + -(current_group->ph + 1), -1); + } + } else { + // Boolean indicating whether child visit marker was followed by child + // synthesized attribute(s) if any + bool followed_by_child_synthesized = false; + followed_by(current->cto_next, current->child_phase.ph, + current->child_phase.ch, true /* immediate */, + false /* not visit markers only*/, + &followed_by_child_synthesized); + + // Boolean indicating whether visit marker was followed by child + // inherited attribute(s) <-ph,ch> + bool preceded_by_child_inherited = prev_group->ph == -current_group->ph && + prev_group->ch == current_group->ch; + + // If graph contains then after visit should be group of + // Similarly, if graph contains <-ph,ch>, before the visit should be group of <-ph,ch> + if (aug_graph_contains_phase(aug_graph, state, cond, + current->child_phase.ph, + current->child_phase.ch) && + !followed_by_child_synthesized) { + fatal_error( + "[%s] After visit marker <%+d,%+d> the phase should be " + "(i.e. " + "<%+d,%+d>).", + aug_graph_name(aug_graph), current->child_phase.ph, + current->child_phase.ch, current->child_phase.ph, + current->child_phase.ch); + } else if (aug_graph_contains_phase(aug_graph, state, cond, + -current->child_phase.ph, + current->child_phase.ch) && + !preceded_by_child_inherited) { + fatal_error( + "[%s] Before visit marker <%+d,%+d> the phase should be <-ph,ch> " + "(i.e. 
<%+d,%+d>).", + aug_graph_name(aug_graph), current->child_phase.ph, + current->child_phase.ch, -current->child_phase.ph, + current->child_phase.ch); + } + } + } + + if (current_group->ch == -1) { + prev_parent = current_group; + } + + if (group_is_local(&current->child_phase)) { + // Do not change the current group if instance is local + current_group = prev_group; + + // If instance is conditional then adjust the positive and negative + // conditions + if (if_rule_p(current->cto_instance->fibered_attr.attr)) { + int cmask = + 1 << (if_rule_index(current->cto_instance->fibered_attr.attr)); + + cond.positive |= cmask; + total_order_sanity_check(aug_graph, chunk_graph, current->cto_if_true, + cond, &current->child_phase, prev_parent, state); + cond.positive &= ~cmask; + + cond.negative |= cmask; + total_order_sanity_check(aug_graph, chunk_graph, current->cto_if_false, + cond, &current->child_phase, prev_parent, state); + cond.negative &= ~cmask; + return; + } + } + + // Continue the sanity check + total_order_sanity_check(aug_graph, chunk_graph, current->cto_next, cond, + current_group, prev_parent, state); +} + +/** + * Helper function to assert child visits are consecutive + * @param current head of total order linked list + * @param ph expected next visit phase for this child + * @param ch child number being checked + */ +static void child_visit_consecutive_check(CTO_NODE* current, + AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + short ph, + short ch) { + if (current == NULL) + return; + + if (IS_VISIT_MARKER(current) && current->child_phase.ch == ch) { + if (current->child_phase.ph != ph) { + int current_visit_component_index = chunk_component_indexof( + aug_graph, chunk_graph, + &chunk_graph->instances.array[chunk_lookup( + aug_graph, chunk_graph, Visit, current->child_phase.ph, ch)]); + + int target_visit_component_index = chunk_component_indexof( + aug_graph, chunk_graph, + &chunk_graph->instances + .array[chunk_lookup(aug_graph, chunk_graph, Visit, ph, ch)]); + + if 
(current_visit_component_index != target_visit_component_index) { + fatal_error( + "Out of order child visits, expected visit(%d,%d) but " + "received visit(%d,%d)", + ph, ch, current->child_phase.ph, current->child_phase.ch); + } else { + aps_warning(current, + "Out of order child visits, expected visit(%d,%d) but " + "received visit(%d,%d)", + ph, ch, current->child_phase.ph, current->child_phase.ch); + } + } + + // Done with this phase. Now check the next phase of this child + child_visit_consecutive_check(current->cto_next, aug_graph, chunk_graph, + ph + 1, ch); + return; + } + + if (current->cto_if_true != NULL) { + child_visit_consecutive_check(current->cto_if_true, aug_graph, chunk_graph, + ph, ch); + } + + child_visit_consecutive_check(current->cto_next, aug_graph, chunk_graph, ph, + ch); +} + +/** + * This function asserts that visits for a particular child are consecutive + * @param aug_graph Augmented dependency graph + * @param state state + * @param head head of total order linked list + */ +static void child_visit_consecutive(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + TOTAL_ORDER_STATE* state, + CTO_NODE* head) { + int i; + for (i = 0; i < state->children.length; i++) { + child_visit_consecutive_check(head, aug_graph, chunk_graph, 1, i); + } +} + +/** + * This function asserts that there is a visit call for each phase of child + * @param aug_graph Augmented dependency graph + * @param instance_groups array of indexed by INSTANCE index + * @param head head of total order linked list + */ +static void child_visit_completeness(AUG_GRAPH* aug_graph, + TOTAL_ORDER_STATE* state, + CTO_NODE* head) { + int i, j, k; + for (i = 0; i < state->children.length; i++) { + Declaration decl = state->children.array[i]; + PHY_GRAPH* phy_graph = DECL_PHY_GRAPH(decl); + if (phy_graph == NULL) + continue; + + int max_phase = phy_graph->max_phase; + + for (j = 1; j <= max_phase; j++) { + bool any = false; + followed_by(head, j, i, false /* not immediate */, + true 
/* visit markers only */, &any); + + if (!any) { + fatal_error( + "Missing child %d (%s) visit call for phase %d in %s aug_graph", i, + decl_name(decl), j, aug_graph_name(aug_graph)); + } + } + } +} + +/** + * This function asserts that no non-circular visit of a child is done in the + * circular visit of the parent. + * @param aug_graph Augmented dependency graph + * @param instance_groups array of indexed by INSTANCE index + * @param cto_node current linked list node + * @param parent_ph current parent visit phase + */ +static void check_circular_visit(AUG_GRAPH* aug_graph, + TOTAL_ORDER_STATE* state, + CTO_NODE* cto_node, + short parent_ph) { + if (cto_node == NULL) + return; + + CHILD_PHASE group = cto_node->child_phase; + + if (IS_VISIT_MARKER(cto_node)) { + if (group.ch == -1) { + parent_ph += 1; + } else { + if (!group_is_local(&group)) { + PHY_GRAPH* parent_phy = DECL_PHY_GRAPH(aug_graph->lhs_decl); + PHY_GRAPH* child_phy = DECL_PHY_GRAPH(state->children.array[group.ch]); + + if (child_phy != NULL && parent_phy->cyclic_flags[parent_ph] && + !child_phy->cyclic_flags[group.ph]) { + fatal_error( + "Non-circular child visit marker <%+d,%+d> " + "cannot be scheduled in circular parent visit %d", + group.ph, group.ch, parent_ph); + } + } + } + } else { + if (!group_is_local(&group) && group.ch > -1) { + PHY_GRAPH* parent_phy = DECL_PHY_GRAPH(aug_graph->lhs_decl); + PHY_GRAPH* child_phy = DECL_PHY_GRAPH(state->children.array[group.ch]); + + char instance_to_str[BUFFER_SIZE]; + FILE* f = fmemopen(instance_to_str, sizeof(instance_to_str), "w"); + print_instance(cto_node->cto_instance, f); + fclose(f); + + if (parent_phy->cyclic_flags[parent_ph] && + !instance_in_aug_cycle(aug_graph, cto_node->cto_instance)) { + fatal_error( + "While investigating %s <%+d,%+d>: non-circular child instance " + "cannot be scheduled in circular parent visit %d", + instance_to_str, group.ph, group.ch, parent_ph); + } + } + } + + if (cto_node->cto_if_true != NULL) { + 
check_circular_visit(aug_graph, state, cto_node->cto_if_true, parent_ph); + } + + check_circular_visit(aug_graph, state, cto_node->cto_next, parent_ph); +} + +static void ensure_instances_are_in_one_visit(AUG_GRAPH* aug_graph, + TOTAL_ORDER_STATE* state, + CTO_NODE* cto_node, + SCC_COMPONENT* comp, + int parent_ph) { + if (cto_node == NULL) + return; + + if (cto_node->cto_instance != NULL) { + int i; + for (i = 0; i < comp->length; i++) { + INSTANCE* in = (INSTANCE*)comp->array[i]; + CHILD_PHASE* group = &state->instance_groups[in->index]; + + // Instance is in this SCC + if (in == cto_node->cto_instance) { + // We are just initializing the parent_ph to make sure parent_ph is the + // same for all instances + if (parent_ph == -1) { + parent_ph = cto_node->visit; + } else if (parent_ph != cto_node->visit) { + char instance_to_str[BUFFER_SIZE]; + FILE* f = fmemopen(instance_to_str, sizeof(instance_to_str), "w"); + print_instance(cto_node->cto_instance, f); + fclose(f); + + aps_warning( + NULL, + "Instance %s <%+d,%+d> of circular SCC involving parent should " + "be contained in one parent visit. 
Expected them all to be in " + "%d parent phase but saw instance in %d parent phase.", + instance_to_str, group->ph, group->ch, parent_ph, + cto_node->visit); + } + } + } + } + + if (cto_node->cto_if_true != NULL) { + ensure_instances_are_in_one_visit(aug_graph, state, cto_node->cto_if_true, + comp, parent_ph); + } + + ensure_instances_are_in_one_visit(aug_graph, state, cto_node->cto_next, comp, + parent_ph); +} + +static void check_circular_scc_scheduled_in_one_visit(AUG_GRAPH* aug_graph, + TOTAL_ORDER_STATE* state, + CTO_NODE* cto_node) { + int i; + for (i = 0; i < aug_graph->components->length; i++) { + SCC_COMPONENT* comp = aug_graph->components->array[i]; + + if (scc_involves_parent(aug_graph, comp)) { + ensure_instances_are_in_one_visit(aug_graph, state, cto_node, comp, -1); + } + } +} + +static void validate_linked_list(AUG_GRAPH* aug_graph, + CTO_NODE* cto_node, + CTO_NODE* prev) { + if (cto_node == NULL) + return; + + if (cto_node->cto_prev != prev) { + fatal_error("Prev of CTO_NODE is not set correctly."); + return; + } + + if (cto_node->cto_next != NULL) { + if (cto_node->cto_next->cto_prev != cto_node) { + fatal_error("Next node's Prev is not set correctly."); + return; + } + } + + if (cto_node->cto_if_true != NULL) { + validate_linked_list(aug_graph, cto_node->cto_if_true, cto_node); + } + + validate_linked_list(aug_graph, cto_node->cto_next, cto_node); +} + +static void check_duplicates(AUG_GRAPH* aug_graph, + CTO_NODE* cto_node, + bool* schedule) { + if (cto_node == NULL) + return; + + if (cto_node->cto_instance != NULL) { + if (schedule[cto_node->cto_instance->index]) { + char instance_to_str[BUFFER_SIZE]; + FILE* f = fmemopen(instance_to_str, sizeof(instance_to_str), "w"); + print_instance(cto_node->cto_instance, f); + fclose(f); + + fatal_error("Instance %s (%d) is duplicated in linkedlist", + instance_to_str, cto_node->cto_instance->index); + return; + } else { + schedule[cto_node->cto_instance->index] = true; + } + } + + if 
(cto_node->cto_if_true != NULL) { + size_t schedule_size = aug_graph->instances.length * sizeof(bool); + bool* old_schedule = (bool*)alloca(schedule_size); + memcpy(old_schedule, schedule, schedule_size); + + check_duplicates(aug_graph, cto_node->cto_if_true, schedule); + + schedule = old_schedule; + } + + check_duplicates(aug_graph, cto_node->cto_next, schedule); +} + +/** + * Function that throws an error if phases are out of order + * @param aug_graph + * @param head head of linked list + * @param state current value of ph being investigated + */ +static void assert_total_order(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + TOTAL_ORDER_STATE* state, + CTO_NODE* head) { + // Condition #0: Ensure linkedlist prev, next are set correctly + validate_linked_list(aug_graph, head, NULL); + + // Condition #1: completeness of child visit calls + child_visit_completeness(aug_graph, state, head); + + CHILD_PHASE* parent_inherited_group = + (CHILD_PHASE*)alloca(sizeof(CHILD_PHASE)); + parent_inherited_group->ph = 1; + parent_inherited_group->ch = -1; + + CONDITION cond; + cond.positive = 0; + cond.negative = 0; + + // Condition #2: general sanity of total order using visit markers + total_order_sanity_check(aug_graph, chunk_graph, head, cond, + parent_inherited_group, parent_inherited_group, + state); + + // Condition #3: consecutive of child visit calls + child_visit_consecutive(aug_graph, chunk_graph, state, head); + + // Condition #4: ensure no non-circular visit of a child in the circular + // visit of the parent. 
+ check_circular_visit(aug_graph, state, head, 1); + + // Condition #5: Ensure circular visit involving parent are done as one + // visit + check_circular_scc_scheduled_in_one_visit(aug_graph, state, head); + + size_t schedule_size = aug_graph->instances.length * sizeof(bool); + bool* schedule = (bool*)alloca(schedule_size); + memset(schedule, false, schedule_size); + + // Condition #6: Ensure no duplicate attribute instance is happening + check_duplicates(aug_graph, head, schedule); +} + +/** + * Utility function that schedules a single phase of all circular attributes + * @param phy_graph phylum graph + * @param ph phase its currently scheduling for + * @return number of nodes scheduled successfully for this phase + */ +static int schedule_phase_circular(PHY_GRAPH* phy_graph, int phase) { + int n = phy_graph->instances.length; + int i, j, k; + + for (i = 0; i < phy_graph->components->length; i++) { + SCC_COMPONENT* comp = phy_graph->components->array[i]; + + // Not a circular SCC + if (!phy_graph->component_cycle[i]) + continue; + + // This cycle is already scheduled + if (phy_graph->summary_schedule[((INSTANCE*)comp->array[0])->index]) + continue; + + size_t temp_schedule_size = n * sizeof(int); + int* temp_schedule = (int*)alloca(temp_schedule_size); + memcpy(temp_schedule, phy_graph->summary_schedule, temp_schedule_size); + + // Temporarily mark all attributes in this cycle as scheduled + for (j = 0; j < comp->length; j++) { + INSTANCE* in = (INSTANCE*)comp->array[j]; + temp_schedule[in->index] = 1; + } + + bool cycle_ready = true; + + for (j = 0; j < comp->length; j++) { + INSTANCE* in = (INSTANCE*)comp->array[j]; + for (k = 0; k < n; k++) { + // If there is an attribute that is not scheduled but it should + // come first then this cycle is not ready + if (!temp_schedule[k] && + phy_graph->mingraph[k * n + in->index] != no_dependency) { + cycle_ready = false; + } + } + } + + // If all attributes in this cycle are ready + if (cycle_ready) { + for (j = 0; j < 
comp->length; j++) { + INSTANCE* in = (INSTANCE*)comp->array[j]; + // -phase for inherited attributes + // +phase for synthesized attributes + phy_graph->summary_schedule[in->index] = + instance_direction(in) == instance_inward ? -phase : phase; + + if (oag_debug & TOTAL_ORDER) { + printf("%+d ", phy_graph->summary_schedule[in->index]); + print_instance(in, stdout); + printf(" [%s]\n", instance_direction(in) == instance_inward + ? "inherited" + : "synthesized"); + } + } + + return comp->length; + } + } + + return 0; +} + +/** + * Utility function that schedules a single phase of all non-circular + * attributes + * @param phy_graph phylum graph + * @param ph phase its currently scheduling for + * @param ignore_cycles ignore cycles + * @return number of nodes scheduled successfully for this phase + */ +static int schedule_phase_non_circular(PHY_GRAPH* phy_graph, int phase) { + int done = 0; + int n = phy_graph->instances.length; + int i, j; + + /* find inherited instances for the phase. */ + for (i = 0; i < n; i++) { + INSTANCE* in = &phy_graph->instances.array[i]; + if (instance_direction(in) == instance_inward && + !instance_in_phylum_cycle(phy_graph, in) && + phy_graph->summary_schedule[i] == 0) { + for (j = 0; j < n; ++j) { + if (phy_graph->summary_schedule[j] == 0 && + phy_graph->mingraph[j * n + i] != no_dependency) + break; + } + if (j == n) { + phy_graph->summary_schedule[i] = -phase; + done++; + for (j = 0; j < n; ++j) { + /* force extra dependencies */ + int sch = phy_graph->summary_schedule[j]; + if (sch != 0 && sch != -phase) { + phy_graph->mingraph[j * n + i] = indirect_control_dependency; + } + } + if (oag_debug & TOTAL_ORDER) { + printf("%d ", -phase); + print_instance(in, stdout); + printf(" [inherited]\n"); + } + } + } + } + + /* now schedule synthesized attributes */ + for (i = 0; i < n; i++) { + INSTANCE* in = &phy_graph->instances.array[i]; + if (instance_direction(in) == instance_outward && + !instance_in_phylum_cycle(phy_graph, in) && + 
phy_graph->summary_schedule[i] == 0) { + for (j = 0; j < n; ++j) { + if (phy_graph->summary_schedule[j] == 0 && + phy_graph->mingraph[j * n + i] != no_dependency) + break; + } + if (j == n) { + phy_graph->summary_schedule[i] = phase; + done++; + for (j = 0; j < n; ++j) { + /* force extra dependencies */ + int sch = phy_graph->summary_schedule[j]; + if (sch != 0 && sch != phase) { + phy_graph->mingraph[j * n + i] = indirect_control_dependency; + } + } + if (oag_debug & TOTAL_ORDER) { + printf("+%d ", phase); + print_instance(in, stdout); + printf(" [synthesized]\n"); + } + } + } + } + + return done; +} + +/** + * Utility function that calculates ph (phase) for each attribute of a phylum + * - Note that phases always should start and end with non-circular and there should be + * empty non-circular between two circular phases. + * - By this point, fiber cycles have been broken by fiber cycle logic (up/down) + * @param phy_graph phylum graph to schedule + */ +static void schedule_summary_dependency_graph(PHY_GRAPH* phy_graph) { + int n = phy_graph->instances.length; + int phase = 1; + int done = 0; + bool cont = true; + + if (oag_debug & TOTAL_ORDER) { + printf("Scheduling order for %s\n", decl_name(phy_graph->phylum)); + } + + // Find SCC components of instances given a phylum graph + set_phylum_graph_components(phy_graph); + + // Nothing is scheduled + memset(phy_graph->summary_schedule, 0, n * sizeof(int)); + + int i, j; + int phase_count = n + 1; + size_t phase_size_bool = phase_count * sizeof(bool); + size_t phase_size_int = phase_count * sizeof(int); + + bool* circular_phase = (bool*)HALLOC(phase_size_bool); + memset(circular_phase, false, phase_size_bool); + + bool* empty_phase = (bool*)HALLOC(phase_size_bool); + memset(empty_phase, false, phase_size_bool); + + // Hold on to the flag indicating whether phase is circular or not + phy_graph->cyclic_flags = circular_phase; + phy_graph->empty_phase = empty_phase; + + int count_non_circular = 0, count_circular = 0; + 
bool cycle_happened = false; + + do { + // Schedule non-circular attributes in this phase + count_non_circular = schedule_phase_non_circular(phy_graph, phase); + if (count_non_circular) { + done += count_non_circular; + circular_phase[phase] = false; + phy_graph->max_phase = phase; + phase++; + cycle_happened = false; + + if (oag_debug & TOTAL_ORDER) { + printf("^^^ non-circular\n"); + } + continue; + } else if (cycle_happened || phase == 1) { + if (oag_debug & TOTAL_ORDER) { + printf("^^^ empty non-circular phase: %d\n", phase); + } + + // Add an empty phase between circular and non-circular + circular_phase[phase] = false; + // Mark this phase as empty + empty_phase[phase] = true; + phy_graph->max_phase = phase; + phase++; + cycle_happened = false; + } + + // Schedule circular attributes in this phase + count_circular = schedule_phase_circular(phy_graph, phase); + if (count_circular) { + done += count_circular; + circular_phase[phase] = true; + cycle_happened = true; + phy_graph->max_phase = phase; + phase++; + + if (oag_debug & TOTAL_ORDER) { + printf("^^^ circular\n"); + } + continue; + } else { + cycle_happened = false; + } + } while (count_non_circular || count_circular); + + // Ensure there is a non-circular at the end if its done scheduling + if (done == n && cycle_happened) + { + if (oag_debug & TOTAL_ORDER) { + printf("^^^ empty non-circular phase: %d\n", phase); + } + + // Add an empty phase between circular and non-circular + circular_phase[phase] = false; + // Mark this phase as empty + empty_phase[phase] = true; + phy_graph->max_phase = phase; + phase++; + cycle_happened = false; + } + + if (oag_debug & TOTAL_ORDER) { + printf("\n"); + } + + if (done < n) { + if (cycle_debug & PRINT_CYCLE) { + printf("Failed to schedule phylum graph for: %s\n", + phy_graph_name(phy_graph)); + for (i = 0; i < n; i++) { + INSTANCE* in = &phy_graph->instances.array[i]; + int s = phy_graph->summary_schedule[i]; + print_instance(in, stdout); + switch 
(instance_direction(in)) { + case instance_local: + printf(" (a local attribute!) "); + break; + case instance_inward: + printf(" inherited "); + break; + case instance_outward: + printf(" synthesized "); + break; + default: + printf(" (garbage direction!) "); + break; + } + if (s != 0) { + printf(": phase %+d\n", s); + } else { + printf(" depends on "); + for (j = 0; j < n; j++) { + if (phy_graph->summary_schedule[j] == 0 && + phy_graph->mingraph[j * n + i] != no_dependency) { + INSTANCE* in2 = &phy_graph->instances.array[j]; + print_instance(in2, stdout); + if (phy_graph->mingraph[j * n + i] == fiber_dependency) + printf("(?)"); + putc(' ', stdout); + } + } + putc('\n', stdout); + } + } + } + + fatal_error("Cycle detected when scheduling phase %d for %s", phase, + decl_name(phy_graph->phylum)); + } +} + +/** + * Utility function to print indent with single space character + * @param indent indent count + * @param stream output stream + */ +static void print_indent(int count, FILE* stream) { + while (count-- >= 0) { + fprintf(stream, " "); + } +} + +/** + * Utility function to print the static schedule + * @param cto CTO node + * @param indent current indent count + * @param stream output stream + */ +static void print_total_order(AUG_GRAPH* aug_graph, + CTO_NODE* cto, + int chunk_index, + bool chunk_circular, + TOTAL_ORDER_STATE* state, + int indent, + FILE* stream) { + if (cto == NULL) { + if (oag_debug & DEBUG_ORDER_VERBOSE) { + fprintf(stream, " Finished scheduling [%s] Chunk #%d\n\n", + chunk_circular ? "circular" : "non-circular", chunk_index); + } + return; + } + bool extra_newline = false; + bool print_group = true; + + if (cto->chunk_index != chunk_index) { + if (chunk_index != -1) { + if (oag_debug & DEBUG_ORDER_VERBOSE) { + fprintf(stream, " Finished scheduling [%s] Chunk #%d\n\n", + cto->chunk_circular ? 
"circular" : "non-circular", chunk_index); + } + } + + chunk_index = cto->chunk_index; + chunk_circular = cto->chunk_circular; + if (oag_debug & DEBUG_ORDER_VERBOSE) { + fprintf(stream, " Started scheduling [%s] Chunk #%d\n", + cto->chunk_circular ? "circular" : "non-circular", chunk_index); + } + } + + if (cto->cto_instance == NULL) { + print_indent(indent, stream); + if (cto->child_phase.ch != -1) { + PHY_GRAPH* child_phy_graph = + DECL_PHY_GRAPH(state->children.array[cto->child_phase.ch]); + fprintf(stream, "-> [%s] ", + child_phy_graph->empty_phase[cto->child_phase.ph] + ? "empty visit" + : "none empty visit"); + } else { + fprintf(stream, "<- "); + } + fprintf(stream, "visit marker (%d) ", cto->visit); + if (cto->child_decl == NULL && cto->child_phase.ch != -1) { + fatal_error("Missing child_decl for visit marker <%+d,%+d>.", + cto->child_phase.ph, cto->child_phase.ch); + } + if (cto->child_decl != NULL) { + fprintf(stream, " (%s) ", decl_name(cto->child_decl)); + } else { + extra_newline = true; + } + } else { + print_indent(indent, stream); + print_instance(cto->cto_instance, stream); + CONDITION cond = instance_condition(cto->cto_instance); + fprintf(stream, " (visit: %d, component: %d, circular: %s, lineno: %d)", cto->visit, + cto->chunk_index, + cto->chunk_circular ? 
"circular" : "non-circular", + tnode_line_number(cto->cto_instance->fibered_attr.attr)); + + if (if_rule_p(cto->cto_instance->fibered_attr.attr)) { + print_group = false; + } + } + + if (print_group) { + fprintf(stream, " <%+d,%+d>", cto->child_phase.ph, cto->child_phase.ch); + } + + fprintf(stream, "\n"); + if (extra_newline) { + fprintf(stream, "\n"); + } + + if (cto->cto_if_true != NULL) { + print_indent(indent + 1, stream); + fprintf(stream, "(true)\n"); + print_total_order(aug_graph, cto->cto_if_true, chunk_index, chunk_circular, + state, indent + 2, stdout); + print_indent(indent + 1, stream); + fprintf(stream, "(false)\n"); + indent += 2; + } + + print_total_order(aug_graph, cto->cto_next, chunk_index, chunk_circular, + state, indent, stdout); +} + +/** + * Utility function to determine if instance is local or not + * @param aug_graph Augmented dependency graph + * @param instance_groups array of + * @param i instance index to test + * @return boolean indicating if instance is local + */ +static bool instance_is_local(AUG_GRAPH* aug_graph, + TOTAL_ORDER_STATE* state, + const int i) { + CHILD_PHASE* group = &state->instance_groups[i]; + return group_is_local(group); +} + +/** + * Utility function to check if two child phase are equal + * @param group_key1 first child phase struct + * @param group_key2 second child phase struct + * @return boolean indicating if two child phase structs are equal + */ +static bool child_phases_are_equal(CHILD_PHASE* group_key1, + CHILD_PHASE* group_key2) { + return group_key1->ph == group_key2->ph && group_key1->ch == group_key2->ch; +} + +/** + * Returns the rank of a chunk (higher the rank equals the higher the priority). + * The rank will be used to schedule a chunk from list of ready-to-go chunks. 
+ * @param chunk chunk instance + * @return int rank + */ +static int get_chunk_rank(Chunk* chunk) { + switch (chunk->type) { + case HalfLeft: + return 8; + case HalfRight: + return 2; + case Visit: + case Local: + return 4; + default: { + fatal_error("Unknown chunk type %d.", chunk->type); + return 0; + } + } +} + +/** + * Utility function to get children of augmented dependency graph as array + * of declarations + * @param aug_graph Augmented dependency graph + * @param state State + */ +static void set_aug_graph_children(AUG_GRAPH* aug_graph, + TOTAL_ORDER_STATE* state) { + Declaration* children_arr = NULL; + int children_count = 0; + + Declaration current; + for (current = aug_graph->first_rhs_decl; current != NULL; + current = DECL_NEXT(current)) { + children_count++; + } + + int i = 0; + children_arr = (Declaration*)HALLOC(sizeof(Declaration) * children_count); + for (current = aug_graph->first_rhs_decl; current != NULL; + current = DECL_NEXT(current)) { + children_arr[i++] = current; + } + + // Assign children vector array + state->children.array = children_arr; + state->children.length = children_count; +} + +static void print_chunk_info(Chunk* chunk) { + switch (chunk->type) { + case HalfLeft: { + printf("Parent inherited for phase: %d", chunk->ph); + break; + } + case HalfRight: { + printf("Parent synthesized for phase: %d", chunk->ph); + break; + } + case Visit: { + printf("Child %d %s%s visit for phase: %d", chunk->ch, + chunk->instances.length ? "" : "empty ", + chunk->circular ? "circular" : "non-circular", chunk->ph); + break; + } + case Local: { + printf("Local %sattribute", + if_rule_p(chunk->instances.array[0].fibered_attr.attr) + ? "conditional " + : ""); + break; + } + } +} + +static void print_chunk(AUG_GRAPH* aug_graph, + TOTAL_ORDER_STATE* state, + Chunk* chunk, + int indent) { + int i; + print_indent(indent, stdout); + printf("index: %d [%s] ", chunk->index, + chunk->circular ? 
"circular" : "non-circular"); + + print_chunk_info(chunk); + printf("\n"); + + for (i = 0; i < chunk->instances.length; i++) { + INSTANCE* in = &chunk->instances.array[i]; + CHILD_PHASE* group = &state->instance_groups[in->index]; + + print_indent(indent + 1, stdout); + print_instance(in, stdout); + printf(" <%+d,%+d>\n", group->ph, group->ch); + } + printf("\n"); +} + +/** + * Utility function to handle transitions between groups while scheduling + * cycles + * @param aug_graph Augmented dependency graph + * @param comp SCC component + * @param comp_index component index + * @param prev previous CTO node + * @param cond current CONDITION + * @param instance_groups array of indexed by INSTANCE index + * @param remaining count of remaining instances to schedule + * @param group parent group key + * @return head of linked list + */ +static CTO_NODE* schedule_transition_end_of_group( + AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + SCC_COMPONENT* chunk_component, + const int chunk_component_index, + Chunk* chunk, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int remaining, + CHILD_PHASE* group, + const short parent_ph) { + CTO_NODE* cto_node; + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf( + "Starting schedule_transition_end_of_group (%s) with " + "(remaining: %d, group: " + "<%+d,%+d>, parent_ph: %d)\n", + aug_graph_name(aug_graph), remaining, group->ph, group->ch, parent_ph); + } + + // Child visit marker + if (group->ph < 0 && group->ch != -1) { + cto_node = (CTO_NODE*)HALLOC(sizeof(CTO_NODE)); + cto_node->cto_prev = prev; + cto_node->cto_instance = NULL; + cto_node->child_phase.ph = -group->ph; + cto_node->child_phase.ch = group->ch; + cto_node->child_decl = state->children.array[group->ch]; + cto_node->visit = parent_ph; + cto_node->chunk_index = chunk->index; + cto_node->chunk_circular = chunk->circular; + cto_node->cto_next = group_schedule( + aug_graph, chunk_graph, chunk_component, 
chunk_component_index, chunk, + cto_node, cond, state, remaining, &cto_node->child_phase, parent_ph); + return cto_node; + } + + // End of parent visit marker + if (group->ph > 0 && group->ch == -1) { + cto_node = (CTO_NODE*)HALLOC(sizeof(CTO_NODE)); + cto_node->cto_prev = prev; + cto_node->cto_instance = NULL; + cto_node->child_phase.ph = group->ph; + cto_node->child_phase.ch = -1; + cto_node->child_decl = NULL; + cto_node->visit = parent_ph; + cto_node->chunk_index = chunk->index; + cto_node->chunk_circular = chunk->circular; + cto_node->cto_next = + schedule_visit_start(aug_graph, chunk_graph, cto_node, cond, state, + remaining, parent_ph + 1); + return cto_node; + } + + // Fallback to normal scheduler to find the next chunk within the chunk + // component to schedule + return chunk_schedule(aug_graph, chunk_graph, chunk_component, + chunk_component_index, prev, cond, state, + remaining /* no change */, parent_ph); +} + +/** + * Greedy group scheduler + * @param aug_graph Augmented dependency graph + * @param comp SCC component + * @param comp_index component index + * @param prev previous CTO node + * @param cond current CONDITION + * @param state state + * @param remaining count of remaining instances to schedule + * @param group group currently getting scheduled + * @return head of linked list + */ +static CTO_NODE* group_schedule(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + SCC_COMPONENT* chunk_component, + const int chunk_component_index, + Chunk* chunk, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int remaining, + CHILD_PHASE* group, + const short parent_ph) { + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf( + "Starting group_schedule (%s) with (remaining: %d, group: " + "<%+d,%+d>, parent_ph: %d)\n", + aug_graph_name(aug_graph), remaining, group->ph, group->ch, parent_ph); + } + + int i; + CTO_NODE* cto_node = prev; + + /* Outer condition is impossible, it's a dead-end branch */ + if 
(CONDITION_IS_IMPOSSIBLE(cond)) {
+    return schedule_visit_end(aug_graph, chunk_graph, chunk, prev, cond, state,
+                              remaining, group, parent_ph);
+  }
+
+  for (i = 0; i < aug_graph->instances.length; i++) {
+    INSTANCE* instance = &aug_graph->instances.array[i];
+    CHILD_PHASE* instance_group = &state->instance_groups[instance->index];
+
+    // Already scheduled
+    if (aug_graph->schedule[instance->index])
+      continue;
+
+    // Check if everything is in the same group, do not check for dependencies
+    // Locals will never end up in this function
+    if (child_phases_are_equal(instance_group, group)) {
+      cto_node = (CTO_NODE*)HALLOC(sizeof(CTO_NODE));
+      cto_node->cto_prev = prev;
+      cto_node->cto_instance = instance;
+      cto_node->child_phase.ph = group->ph;
+      cto_node->child_phase.ch = group->ch;
+      cto_node->visit = parent_ph;
+      cto_node->chunk_index = chunk->index;
+      cto_node->chunk_circular = chunk->circular;
+
+      // instance has been scheduled (and will not be
+      // considered for scheduling in the recursive call)
+      aug_graph->schedule[instance->index] = true;
+
+      chunk_graph->schedule[chunk_indexof(aug_graph, chunk_graph, instance)] =
+          true;
+
+      // If it is local then something is wrong
+      if (instance_is_local(aug_graph, state, instance->index)) {
+        fatal_error(
+            "Group scheduler cannot handle scheduling local attributes");
+        return NULL;
+      }
+
+      if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) {
+        printf("-> Scheduled via group scheduler (instance: ");
+        print_instance(instance, stdout);
+        printf(
+            ", group: <%+d,%+d>, cond: (%+d,%+d), inst_cond: (%+d,%+d), "
+            "remaining: %d)\n\n",
+            group->ph, group->ch, cond.positive, cond.negative,
+            instance_condition(instance).positive,
+            instance_condition(instance).negative, remaining - 1);
+      }
+
+      if (MERGED_CONDITION_IS_IMPOSSIBLE(cond, instance_condition(instance))) {
+        cto_node = group_schedule(aug_graph, chunk_graph, chunk_component,
+                                  chunk_component_index, chunk, prev, cond,
+                                  state, remaining - 1, group, 
parent_ph); + } else { + cto_node->cto_next = group_schedule( + aug_graph, chunk_graph, chunk_component, chunk_component_index, + chunk, cto_node, cond, state, remaining - 1, group, parent_ph); + } + + chunk_graph->schedule[chunk_indexof(aug_graph, chunk_graph, instance)] = + false; // Release it + + aug_graph->schedule[instance->index] = false; + + return cto_node; + } + } + + chunk_graph->schedule[chunk->index] = true; + + // Group scheduling is finished + cto_node = schedule_transition_end_of_group( + aug_graph, chunk_graph, chunk_component, chunk_component_index, chunk, + cto_node, cond, state, remaining, group, parent_ph); + + chunk_graph->schedule[chunk->index] = false; + + return cto_node; +} + +/** + * @brief local chunk scheduler. + * This function is similar to group_schedule except it can handle IF + * conditionals which are locals too. + */ +static CTO_NODE* local_chunk_schedule(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + SCC_COMPONENT* chunk_component, + const int chunk_component_index, + Chunk* chunk, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int remaining, + CHILD_PHASE* group, + const short parent_ph) { + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf( + "Starting local_chunk_schedule (%s) with (remaining: %d, group: " + "<%+d,%+d>, parent_ph: %d)\n", + aug_graph_name(aug_graph), remaining, group->ph, group->ch, parent_ph); + } + + int i; + CTO_NODE* cto_node = prev; + + /* Outer condition is impossible, it's a dead-end branch */ + if (CONDITION_IS_IMPOSSIBLE(cond)) { + return schedule_visit_end(aug_graph, chunk_graph, chunk, prev, cond, state, + remaining, group, parent_ph); + } + + for (i = 0; i < chunk->instances.length; i++) { + INSTANCE* instance = &chunk->instances.array[i]; + CHILD_PHASE* instance_group = &state->instance_groups[instance->index]; + + // Already scheduled + if (aug_graph->schedule[instance->index]) + continue; + + // Check if everything is in the same group, 
do not check for dependencies
+    // Unlike group_schedule, local attributes ARE scheduled by this function
+    if (child_phases_are_equal(instance_group, group)) {
+      cto_node = (CTO_NODE*)HALLOC(sizeof(CTO_NODE));
+      cto_node->cto_prev = prev;
+      cto_node->cto_instance = instance;
+      cto_node->child_phase.ph = group->ph;
+      cto_node->child_phase.ch = group->ch;
+      cto_node->visit = parent_ph;
+      cto_node->chunk_index = chunk->index;
+      cto_node->chunk_circular = chunk->circular;
+
+      // instance has been scheduled (and will not be
+      // considered for scheduling in the recursive call)
+      aug_graph->schedule[instance->index] = true;
+
+      chunk_graph->schedule[chunk_indexof(aug_graph, chunk_graph, instance)] =
+          true;
+
+      // If it is not local then something is wrong
+      if (!instance_is_local(aug_graph, state, instance->index)) {
+        fatal_error("Local chunk scheduler can only handle local attributes");
+        return NULL;
+      }
+
+      if (MERGED_CONDITION_IS_IMPOSSIBLE(cond, instance_condition(instance))) {
+        cto_node = local_chunk_schedule(
+            aug_graph, chunk_graph, chunk_component, chunk_component_index,
+            chunk, prev, cond, state, remaining - 1, group, parent_ph);
+      } else {
+        if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) {
+          printf("-> Scheduled via local scheduler (instance: ");
+          print_instance(instance, stdout);
+          printf(
+              ", group: <%+d,%+d>, cond: (%+d,%+d), inst_cond: (%+d,%+d), "
+              "remaining: %d)\n\n",
+              group->ph, group->ch, cond.positive, cond.negative,
+              instance_condition(instance).positive,
+              instance_condition(instance).negative, remaining - 1);
+        }
+
+        if (if_rule_p(instance->fibered_attr.attr)) {
+          int cmask = 1 << (if_rule_index(instance->fibered_attr.attr));
+
+          cond.positive |= cmask;
+          cto_node->cto_if_true = local_chunk_schedule(
+              aug_graph, chunk_graph, chunk_component, chunk_component_index,
+              chunk, cto_node, cond, state, remaining - 1, group, parent_ph);
+          cond.positive &= ~cmask;
+
+          cond.negative |= cmask;
+          cto_node->cto_if_false = local_chunk_schedule(
+              aug_graph, 
chunk_graph, chunk_component, chunk_component_index, + chunk, cto_node, cond, state, remaining - 1, group, parent_ph); + cond.negative &= ~cmask; + + } else { + cto_node->cto_next = local_chunk_schedule( + aug_graph, chunk_graph, chunk_component, chunk_component_index, + chunk, cto_node, cond, state, remaining - 1, group, parent_ph); + } + } + + chunk_graph->schedule[chunk_indexof(aug_graph, chunk_graph, instance)] = + false; // Release it + + aug_graph->schedule[instance->index] = false; + + return cto_node; + } + } + + // Group scheduling is finished + return schedule_transition_end_of_group( + aug_graph, chunk_graph, chunk_component, chunk_component_index, chunk, + cto_node, cond, state, remaining, group, parent_ph); +} + +static CTO_NODE* group_schedule_chunk(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + SCC_COMPONENT* chunk_component, + const int chunk_component_index, + Chunk* chunk, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int remaining, + const short parent_ph) { + switch (chunk->type) { + case HalfLeft: { + CHILD_PHASE* group = (CHILD_PHASE*)alloca(sizeof(CHILD_PHASE)); + group->ch = -1; + group->ph = -chunk->ph; + + return group_schedule(aug_graph, chunk_graph, chunk_component, + chunk_component_index, chunk, prev, cond, state, + remaining, group, parent_ph); + } + case HalfRight: { + CHILD_PHASE* group = (CHILD_PHASE*)alloca(sizeof(CHILD_PHASE)); + group->ch = -1; + group->ph = chunk->ph; + + return group_schedule(aug_graph, chunk_graph, chunk_component, + chunk_component_index, chunk, prev, cond, state, + remaining, group, parent_ph); + } + case Visit: { + CHILD_PHASE* group = (CHILD_PHASE*)alloca(sizeof(CHILD_PHASE)); + group->ch = chunk->ch; + group->ph = -chunk->ph; + + return group_schedule(aug_graph, chunk_graph, chunk_component, + chunk_component_index, chunk, prev, cond, state, + remaining, group, parent_ph); + } + case Local: { + CHILD_PHASE* group = (CHILD_PHASE*)alloca(sizeof(CHILD_PHASE)); + group->ch = 
chunk->ch; + group->ph = chunk->ph; + + return local_chunk_schedule(aug_graph, chunk_graph, chunk_component, + chunk_component_index, chunk, prev, cond, + state, remaining, group, parent_ph); + } + default: { + fatal_error("Unknown chunk type."); + return NULL; + } + } + + return NULL; +} + +/** + * @brief Scheduler that finds the next chunk in a given SCC to schedule + */ +static CTO_NODE* chunk_schedule(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + SCC_COMPONENT* chunk_component, + const int chunk_component_index, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int remaining, + const short parent_ph) { + size_t rank_array_size = chunk_component->length * sizeof(int); + int* rank_array = (int*)alloca(rank_array_size); + memset(rank_array, (int)0, rank_array_size); + + int i, j; + + // Just an assertion to make sure all instances in the chunks marked as + // scheduled have actually been scheduled + for (i = 0; i < chunk_graph->instances.length; i++) { + Chunk* chunk = &chunk_graph->instances.array[i]; + + if (chunk_graph->schedule[chunk->index]) + continue; + + for (j = 0; j < chunk->instances.length; j++) { + INSTANCE* in = &chunk->instances.array[j]; + + if (!aug_graph->schedule[in->index]) + break; + } + + if (chunk->instances.length > 0 && j == chunk->instances.length) { + aps_warning( + chunk, + "Chunk #%d was marked as not scheduled but all instances in this " + "non-empty chunk are already scheduled.", + chunk->index); + } + } + + // Find out which chunk(s) are available to schedule and determine their rank + PHY_GRAPH* parent_phy = DECL_PHY_GRAPH(aug_graph->lhs_decl); + for (i = 0; i < chunk_component->length; i++) { + Chunk* chunk = (Chunk*)chunk_component->array[i]; + + if (chunk->type == Visit && !chunk->circular && + parent_phy->cyclic_flags[parent_ph]) { + aps_warning( + chunk, + "Manually prevented scheduling non-circular visit <%+d,%+d> in " + "circular parent phase %d in %s augmented dependency graph", + chunk->ch, 
chunk->ph, parent_ph, aug_graph_name(aug_graph)); + continue; + } + + if (chunk_graph->schedule[chunk->index] || + !chunk_ready_to_go(aug_graph, chunk_graph, chunk_component, + chunk_component_index, chunk, state)) { + continue; + } + + int rank = get_chunk_rank(chunk); + rank_array[i] = rank; + } + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf( + "Finding next chunk to schedule from list of available chunks in " + "component #%d of %s augmented dependency graph:\n", + chunk_component_index, aug_graph_name(aug_graph)); + } + + int current_rank = 0; + int max_rank_index = 0; + for (i = 0; i < chunk_component->length; i++) { + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + if (rank_array[i] > 0) { + Chunk* chunk = (Chunk*)chunk_component->array[i]; + + print_indent(2, stdout); + printf("Rank=%d ", rank_array[i]); + print_chunk_info(chunk); + printf("\n"); + } + } + + if (rank_array[i] > current_rank) { + max_rank_index = i; + current_rank = rank_array[i]; + } + } + + // If no chunk ready to go, then trigger component scheduler + if (current_rank == 0) { + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf( + "-> Found no chunk to schedule, moving on to component scheduler.\n"); + } + + return chunk_component_schedule(aug_graph, chunk_graph, prev, cond, state, + chunk_component_index, remaining, + parent_ph); + } else { + // Continue the group scheduler from the chunk + Chunk* chunk_to_schedule_next = chunk_component->array[max_rank_index]; + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + print_indent(1, stdout); + printf("Next chunk to schedule rank=%d ", current_rank); + print_chunk_info(chunk_to_schedule_next); + printf("\n"); + } + + return group_schedule_chunk(aug_graph, chunk_graph, chunk_component, + chunk_component_index, chunk_to_schedule_next, + prev, cond, state, remaining, parent_ph); + } +} + +/** + * @brief Utility function that returns 
the chunk with the given type, ph and ch, or NULL if absent
+ */
+static Chunk* find_chunk(AUG_GRAPH* aug_graph,
+                         ChunkGraph* chunk_graph,
+                         enum ChunkTypeEnum type,
+                         const int ph,
+                         const int ch) {
+  int i;
+  for (i = 0; i < chunk_graph->instances.length; i++) {
+    Chunk* chunk = &chunk_graph->instances.array[i];
+    if (chunk->type == type && chunk->ph == ph && chunk->ch == ch) {
+      return chunk;
+    }
+  }
+  return NULL;
+}
+
+/**
+ * @brief Utility function that returns the index of the chunk that contains
+ * the given instance
+ */
+static int chunk_indexof(AUG_GRAPH* aug_graph,
+                         ChunkGraph* chunk_graph,
+                         INSTANCE* instance) {
+  int i, j;
+  for (i = 0; i < chunk_graph->instances.length; i++) {
+    Chunk* chunk = &chunk_graph->instances.array[i];
+
+    for (j = 0; j < chunk->instances.length; j++) {
+      INSTANCE* other_instance = &chunk->instances.array[j];
+
+      if (instance->index == other_instance->index) {
+        return chunk->index;
+      }
+    }
+  }
+
+  fatal_error(
+      "Failed to find chunk with instance index %d in augmented "
+      "dependency graph %s.",
+      instance->index, aug_graph_name(aug_graph));
+
+  return -1;
+}
+
+/**
+ * @brief Utility function that returns index of belonging SCC of chunk
+ */
+static int chunk_component_indexof(AUG_GRAPH* aug_graph,
+                                   ChunkGraph* chunk_graph,
+                                   Chunk* chunk) {
+  int i, j;
+  for (i = 0; i < chunk_graph->chunk_components->length; i++) {
+    SCC_COMPONENT* component = chunk_graph->chunk_components->array[i];
+
+    for (j = 0; j < component->length; j++) {
+      Chunk* other_chunk = component->array[j];
+
+      if (chunk->index == other_chunk->index) {
+        return i;
+      }
+    }
+  }
+
+  fatal_error(
+      "Failed to find the SCC component of chunk with index %d in augmented "
+      "dependency graph %s.",
+      chunk->index, aug_graph_name(aug_graph));
+
+  return -1;
+}
+
+/**
+ * @brief Returns the index of a chunk with given properties; aborts via
+ * fatal_error (the trailing -1 is unreachable) when no such chunk exists
+ */
+static int chunk_lookup(AUG_GRAPH* aug_graph,
+                        ChunkGraph* chunk_graph,
+                        enum ChunkTypeEnum type,
+                        short ph,
+                        short ch) {
+  int i;
+  for (i = 0; i < chunk_graph->instances.length; i++) 
{ + Chunk* chunk = &chunk_graph->instances.array[i]; + + if (chunk->type == type && chunk->ch == ch && chunk->ph == ph) { + return i; + } + } + + fatal_error( + "Failed to find chunk in augmented " + "dependency graph %s containing chunk with ph=%d, ch=%d and type=%d.", + aug_graph_name(aug_graph), ph, ch, type); + + return -1; +} + +static void debug_chunk_dependencies(ChunkGraph* chunk_graph, + SCC_COMPONENT* sink_component, + Chunk* sink_chunk) { + int i, j; + + int* chunk_dependencies = + (int*)alloca(sink_component->length * sizeof(int) * 2); + int count = 0; + chunk_dependencies[count++] = sink_chunk->index; + + Chunk* current_sink_chunk = sink_chunk; + + j = 0; + while (true) { + for (i = 0; i < sink_component->length; i++) { + Chunk* source_chunk = sink_component->array[i]; + + if (source_chunk->index == current_sink_chunk->index) + continue; + + if (chunk_graph->schedule[source_chunk->index]) + continue; + + DEPENDENCY forward_dep = + chunk_graph + ->graph[source_chunk->index * chunk_graph->instances.length + + current_sink_chunk->index]; + + if (!forward_dep || !(forward_dep & DEPENDENCY_MAYBE_DIRECT)) + continue; + + chunk_dependencies[count++] = source_chunk->index; + current_sink_chunk = source_chunk; + break; + } + + // If we are back to the chunk we started, then stop + if (current_sink_chunk == sink_chunk) + break; + + // If we have visited all chunks inside the component then we have found all + // the sink_chunk dependencies, then stop + if (i == sink_component->length) + break; + + // If number of dependencies of the sink_chunk is greater than count of + // components, then we are obviously in a cycle, then stop + if (j > sink_component->length) { + aps_warning(sink_chunk, + "Number of dependencies of the chunk #%d is more than number " + "of chunks in the component, there is a nested cycle.", + sink_chunk->index); + break; + } + + j++; + } + + for (i = 0; i < count; i++) { + print_indent(i, stdout); + printf("<-"); + printf("Chunk #%d (", 
chunk_dependencies[i]); + print_chunk_info(&chunk_graph->instances.array[chunk_dependencies[i]]); + printf(")\n"); + } + + printf("\n"); + + if (current_sink_chunk == sink_chunk) { + printf("\n"); + aps_warning(sink_chunk, + "Debugging chunk dependency sequence failed because chunk #%d " + "directly depends on itself", + sink_chunk->index); + } +} + +/** + * Utility function that checks whether two chunks of a SCC component + * are in direct dependency cycle + * @param aug_graph augmented dependency graph + * @param chunk_graph chunk graph + * @param component SCC component to investigate + * @param chunk1 first chunk to check if it is in a direct dependency cycle + * @param chunk2 second chunk to check if it is in a direct dependency cycle + * @param state scheduling state + * @return boolean indicating whether chunk1 and chunk2 are in a direct dependency cycle + */ +static bool chunks_are_in_direct_cycle(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + SCC_COMPONENT* component, + Chunk* chunk1, + Chunk* chunk2, + TOTAL_ORDER_STATE* state) { + int i, j; + int n = component->length; + + SccGraph scc_graph; + scc_graph_initialize(&scc_graph, n); + + // Add vertices to SCC graph + for (i = 0; i < n; i++) { + Chunk* chunk = (Chunk*)component->array[i]; + scc_graph_add_vertex(&scc_graph, (void*)chunk); + } + + // Add edges to SCC graph + for (i = 0; i < n; i++) { + Chunk* source_chunk = (Chunk*)component->array[i]; + for (j = 0; j < n; j++) { + Chunk* sink_chunk = (Chunk*)component->array[j]; + if (chunk_graph + ->graph[source_chunk->index * chunk_graph->instances.length + + sink_chunk->index] & + DEPENDENCY_MAYBE_DIRECT) { + scc_graph_add_edge(&scc_graph, source_chunk, sink_chunk); + } + } + } + + SCC_COMPONENTS* components = scc_graph_components(&scc_graph); + + scc_graph_destroy(&scc_graph); + + for (i = 0; i < components->length; i++) { + SCC_COMPONENT* comp = components->array[i]; + + if (comp->length > 1 && + scc_component_contains(comp, chunk1) && + 
scc_component_contains(comp, chunk2)) { + return true; + } + } + + return false; +} + +/** + * @brief Utility function that return boolean indicating whether single chunk + * is ready to go + */ +static bool chunk_ready_to_go(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + SCC_COMPONENT* sink_component, + const int sink_component_index, + Chunk* sink_chunk, + TOTAL_ORDER_STATE* state) { + int i, j, k; + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf("Checking readiness of chunk #%d (", sink_chunk->index); + print_chunk_info(sink_chunk); + printf(") of %s augmented dependency graph\n", aug_graph_name(aug_graph)); + print_indent(1, stdout); + print_chunk(aug_graph, state, sink_chunk, 1); + } + + for (i = 0; i < sink_chunk->instances.length; i++) { + INSTANCE* source_instance = &sink_chunk->instances.array[i]; + if (!aug_graph->schedule[source_instance->index]) + break; + } + + if (chunk_graph->graph[sink_chunk->index * chunk_graph->instances.length + + sink_chunk->index] & + DEPENDENCY_MAYBE_DIRECT) { + aps_warning(sink_chunk, + "Scheduling chunk #%d of component #%d may not be possible as " + "it directly depends on itself.", + sink_chunk->index, sink_component_index); + } + + // All instances in this chunk have already been scheduled + if (sink_chunk->instances.length > 0 && i == sink_chunk->instances.length) { + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf(" All instances in this non-empty chunk have been schedule."); + } + return false; + } + + for (i = 0; i < sink_component->length; i++) { + Chunk* source_chunk = (Chunk*)sink_component->array[i]; + + // Check dependencies against myself + if (source_chunk->index == sink_chunk->index) + continue; + + // Already scheduled + if (chunk_graph->schedule[source_chunk->index]) + continue; + + DEPENDENCY forward_dep = + chunk_graph->graph[source_chunk->index * chunk_graph->instances.length + + sink_chunk->index]; + + DEPENDENCY backward_dep = + 
chunk_graph->graph[sink_chunk->index * chunk_graph->instances.length + + source_chunk->index]; + + if (!forward_dep || !(forward_dep & DEPENDENCY_MAYBE_DIRECT)) + continue; + + // Cannot check dependency maybe direct against a chunk that we are in cycle + // with kind = direct, it would be a cycle + if ((forward_dep & DEPENDENCY_MAYBE_DIRECT) && + (backward_dep & DEPENDENCY_MAYBE_DIRECT)) { + aps_warning(sink_chunk, + "Bi-directional direct dependency between chunk %d -> %d " + "(forward_dep=%d, backward_dep=%d)", + source_chunk->index, sink_chunk->index, forward_dep, + backward_dep); + continue; + } + + // If chunks are in a direct dependency cycle then we cannot check readiness + // using DEPENDENCY_MAYBE_DIRECT because it would result in a cycle. + if (chunks_are_in_direct_cycle(aug_graph, chunk_graph, sink_component, + source_chunk, sink_chunk, state)) { + aps_warning(sink_chunk, + "Detected direct dependency cycle between chunk %d -> %d " + "(forward_dep=%d, backward_dep=%d)", + source_chunk->index, sink_chunk->index, forward_dep, + backward_dep); + continue; + } + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf( + "Chunk #%d within component %d not ready because of direct " + "dependency (kind=%d) from chunk #%d (", + sink_chunk->index, sink_component_index, forward_dep, + source_chunk->index); + print_chunk_info(source_chunk); + printf(")\n"); + + debug_chunk_dependencies(chunk_graph, sink_component, sink_chunk); + printf("\n"); + + for (j = 0; j < source_chunk->instances.length; j++) { + INSTANCE* source_instance = &source_chunk->instances.array[j]; + CHILD_PHASE* source_group = + &state->instance_groups[source_instance->index]; + + if (aug_graph->schedule[source_instance->index]) + continue; + + for (k = 0; k < sink_chunk->instances.length; k++) { + INSTANCE* sink_instance = &sink_chunk->instances.array[k]; + CHILD_PHASE* sink_group = + &state->instance_groups[sink_instance->index]; + + if (MERGED_CONDITION_IS_IMPOSSIBLE( + 
instance_condition(source_instance), + instance_condition(sink_instance))) + continue; + + // See if there is a direct dependency between instance in this + // component and everyone else within the SCC of this component + DEPENDENCY dep = edgeset_kind( + aug_graph + ->graph[source_instance->index * aug_graph->instances.length + + sink_instance->index]); + if (dep & DEPENDENCY_MAYBE_DIRECT) { + print_indent(1, stdout); + printf("// "); + print_instance(source_instance, stdout); + printf(" <%+d,%+d> -> ", source_group->ph, source_group->ch); + print_instance(sink_instance, stdout); + printf(" <%+d,%+d> (kind=%d)\n", sink_group->ph, sink_group->ch, + dep); + } + } + } + } + return false; + } + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf(" Chunk #%d is ready to go.\n", sink_chunk->index); + } + + return true; +} + +/** + * @brief Utility function that return boolean indicating whether component of + * chunks is ready to go + */ +static bool chunk_component_ready_to_go(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + SCC_COMPONENT* sink_component, + const int sink_component_index) { + int i, j, k; + + // See if there is any instance in this chunk that has not been scheduled + for (i = 0; i < sink_component->length; i++) { + Chunk* chunk = (Chunk*)sink_component->array[i]; + + if (!chunk_graph->schedule[chunk->index]) + break; + } + + // All chunks in this component have already been scheduled, nothing else to + // be done with this component + if (i == sink_component->length) + return false; + + for (i = 0; i < chunk_graph->chunk_components->length; i++) { + SCC_COMPONENT* source_component = chunk_graph->chunk_components->array[i]; + + // Check readiness against other chunk components, not itself + if (i == sink_component_index) + continue; + + for (j = 0; j < source_component->length; j++) { + Chunk* source_chunk = (Chunk*)source_component->array[j]; + + // This chunk is already scheduled + if 
(chunk_graph->schedule[source_chunk->index]) + continue; + + for (k = 0; k < sink_component->length; k++) { + Chunk* sink_chunk = (Chunk*)sink_component->array[k]; + + if (chunk_graph + ->graph[source_chunk->index * chunk_graph->instances.length + + sink_chunk->index]) { + return false; + } + } + } + } + + return true; +} + +/** + * @brief Utility function that greedy schedules SCC of chunks + */ +static CTO_NODE* chunk_component_schedule(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int prev_chunk_component_index, + const int remaining, + const short parent_ph) { + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf( + "Starting chunk_component_schedule (%s) with (remaining: %d, " + "parent_ph: %d)\n", + aug_graph_name(aug_graph), remaining, parent_ph); + } + + int i, j; + + size_t rank_array_size = chunk_graph->chunk_components->length * sizeof(int); + int* rank_array = (int*)alloca(rank_array_size); + memset(rank_array, 0, rank_array_size); + + // Find SCC of chunk ready to be scheduled + for (i = 0; i < chunk_graph->chunk_components->length; i++) { + SCC_COMPONENT* chunk_component = chunk_graph->chunk_components->array[i]; + + // This is needed to avoid requesting to schedule the same SCC, not doing + // this can cause infinite loop because it will keep retrying to schedule + // the same component + if (prev_chunk_component_index == i) + continue; + + // Found SCC of chunk that is ready + if (chunk_component_ready_to_go(aug_graph, chunk_graph, chunk_component, + i)) { + // Schedule a chunk within this SCC of chunk + for (j = 0; j < chunk_component->length; j++) { + Chunk* chunk = (Chunk*)chunk_component->array[j]; + + rank_array[i] |= get_chunk_rank(chunk); + } + } + } + + int current_rank = 0; + int max_rank_index = 0; + for (i = 0; i < chunk_graph->chunk_components->length; i++) { + if (rank_array[i] > current_rank) { + max_rank_index = i; + current_rank = 
rank_array[i]; + } + } + + if (current_rank == 0) { + fatal_error( + "Not sure what to do at this point of scheduling for %s augmented " + "dependency graph because there is no component to schedule and " + "already tried scheduling component #%d and none of its chunks were " + "scheduled.", + aug_graph_name(aug_graph), prev_chunk_component_index); + return NULL; + } + + return chunk_schedule(aug_graph, chunk_graph, + chunk_graph->chunk_components->array[max_rank_index], + max_rank_index, prev, cond, state, remaining, + parent_ph); +} + +/** + * @brief Utility function that handles the necessary routine to end the + * parent phase + */ +static CTO_NODE* schedule_visit_end(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + Chunk* chunk, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int remaining, + CHILD_PHASE* prev_group, + const short parent_ph) { + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf( + "Starting schedule_visit_end (%s) with " + "(remaining: %d parent_ph: %d)\n", + aug_graph_name(aug_graph), remaining, parent_ph); + } + + CTO_NODE* cto_node = (CTO_NODE*)HALLOC(sizeof(CTO_NODE)); + cto_node->cto_prev = prev; + cto_node->cto_instance = NULL; + cto_node->child_phase.ph = parent_ph; + cto_node->child_phase.ch = -1; + cto_node->visit = parent_ph; + cto_node->chunk_index = chunk->index; + cto_node->chunk_circular = chunk->circular; + cto_node->cto_next = schedule_visit_start( + aug_graph, chunk_graph, cto_node, cond, state, remaining, parent_ph + 1); + return cto_node; +} + +/** + * @brief Utility function that handles the necessary routine to start the + * parent phase + */ +static CTO_NODE* schedule_visit_start(AUG_GRAPH* aug_graph, + ChunkGraph* chunk_graph, + CTO_NODE* prev, + CONDITION cond, + TOTAL_ORDER_STATE* state, + const int remaining, + const short parent_ph) { + // Need to schedule parent inherited attributes of this phase + CHILD_PHASE* parent_inh = 
(CHILD_PHASE*)alloca(sizeof(CHILD_PHASE)); + parent_inh->ph = -parent_ph; + parent_inh->ch = -1; + + PHY_GRAPH* parent_phy = DECL_PHY_GRAPH(aug_graph->lhs_decl); + + // No more visit to schedule. + if (parent_ph > parent_phy->max_phase) + return NULL; + + // It is safe to assume inherited attribute of parents have no + // dependencies and should be scheduled right away + Chunk* chunk = find_chunk(aug_graph, chunk_graph, HalfLeft, parent_ph, -1); + if (chunk == NULL) { + fatal_error( + "Chunks are not properly created because chunk of parent inherited " + "attribute of phase %d is missing", + parent_inh); + return NULL; + } + + int chunk_component_index = + chunk_component_indexof(aug_graph, chunk_graph, chunk); + + if (chunk_component_index == -1) { + fatal_error( + "Chunk components are not properly created because component " + "containing chunk of parent inherited " + "attribute of phase %d is missing", + parent_inh); + } + + SCC_COMPONENT* chunk_component = + chunk_graph->chunk_components->array[chunk_component_index]; + + return group_schedule(aug_graph, chunk_graph, chunk_component, + chunk_component_index, chunk, prev, cond, state, + remaining, parent_inh, parent_ph); +} + +/** + * 1) collects chunks using already assigned to the instances + * 2) brings over dependencies from instances as chunk dependency + * 3) adds guiding dependencies between chunks + * 4) finds the SCC components of chunks + * @param aug_graph augmented dependency graph + * @param state scheduling state + * @return chunk graph + */ +static ChunkGraph* collect_aug_graph_chunks(AUG_GRAPH* aug_graph, + TOTAL_ORDER_STATE* state) { + PHY_GRAPH* parent_phy = DECL_PHY_GRAPH(aug_graph->lhs_decl); + + Chunk** chunks_array = + (Chunk**)alloca(aug_graph->instances.length * 10 * sizeof(Chunk*)); + int chunks_count = 0; + + int parent_ph, ph, ch; + int i, j, k, l; + INSTANCE* array = + (INSTANCE*)alloca(aug_graph->instances.length * sizeof(INSTANCE)); + int count; + + // Collect half chunks (halfleft 
and halfright) + for (parent_ph = 1; parent_ph <= parent_phy->max_phase; parent_ph++) { + // Collect parent inherited of this phase + array = (INSTANCE*)alloca(aug_graph->instances.length * sizeof(INSTANCE)); + count = 0; + + for (i = 0; i < aug_graph->instances.length; i++) { + INSTANCE* in = &aug_graph->instances.array[i]; + CHILD_PHASE* group = &state->instance_groups[in->index]; + + if (group->ph == -parent_ph && group->ch == -1) { + array[count++] = *in; + } + } + + Chunk* left_half_chunk = (Chunk*)malloc(sizeof(Chunk)); + left_half_chunk->type = HalfLeft; + left_half_chunk->ph = parent_ph; + left_half_chunk->ch = -1; // Irrelevant + left_half_chunk->circular = parent_phy->cyclic_flags[parent_ph]; + left_half_chunk->index = chunks_count; + + VECTORALLOC(left_half_chunk->instances, INSTANCE, count); + + for (i = 0; i < count; i++) { + left_half_chunk->instances.array[i] = array[i]; + } + + chunks_array[chunks_count++] = left_half_chunk; + + // Collect parent synthesized of this phase + count = 0; + + for (i = 0; i < aug_graph->instances.length; i++) { + INSTANCE* in = &aug_graph->instances.array[i]; + CHILD_PHASE* group = &state->instance_groups[in->index]; + + if (group->ph == parent_ph && group->ch == -1) { + array[count++] = *in; + } + } + + Chunk* right_half_chunk = (Chunk*)malloc(sizeof(Chunk)); + right_half_chunk->type = HalfRight; + right_half_chunk->ph = parent_ph; + right_half_chunk->ch = -1; // Irrelevant + right_half_chunk->circular = parent_phy->cyclic_flags[parent_ph]; + right_half_chunk->index = chunks_count; + + VECTORALLOC(right_half_chunk->instances, INSTANCE, count); + + for (i = 0; i < count; i++) { + right_half_chunk->instances.array[i] = array[i]; + } + + chunks_array[chunks_count++] = right_half_chunk; + } + + // Collect visits chunks + for (ch = 0; ch < state->children.length; ch++) { + Declaration child_decl = state->children.array[ch]; + PHY_GRAPH* child_phy = DECL_PHY_GRAPH(child_decl); + + if (child_phy == NULL) + continue; + + for 
/* NOTE(review): continuation of collect_aug_graph_chunks (collapsed diff).
 * Builds one Visit chunk per (child, phase), one Local chunk per <0,0>
 * instance, heap-allocates the ChunkGraph, then adds edges: (1) regular
 * dependencies lifted from the instance-level aug_graph->graph (skipping pairs
 * whose merged conditions are impossible), (2) guiding edges between earlier
 * and later visits of the same child, (3) HalfLeft->HalfRight of the same
 * parent phase, (4) HalfRight(ph)->HalfLeft(ph+1) so parent visits stay
 * sequential; finally runs a fixpoint transitive closure over the chunk
 * dependency matrix and feeds the result into the SCC graph.
 * NOTE(review): the per-chunk Chunk structs malloc'd above are copied by value
 * into chunk_graph->instances and the originals are never freed — presumably a
 * small one-shot leak; confirm intent. */
(ph = 1; ph <= child_phy->max_phase; ph++) { + // Collect visit for this child phase + count = 0; + + for (i = 0; i < aug_graph->instances.length; i++) { + INSTANCE* in = &aug_graph->instances.array[i]; + CHILD_PHASE* group = &state->instance_groups[in->index]; + + if (abs(group->ph) == ph && group->ch == ch) { + array[count++] = *in; + } + } + + size_t size_of_chunk = sizeof(Chunk); + Chunk* visit_chunk = (Chunk*)malloc(size_of_chunk); + visit_chunk->type = Visit; + visit_chunk->ph = ph; + visit_chunk->ch = ch; + visit_chunk->circular = child_phy->cyclic_flags[ph]; + visit_chunk->index = chunks_count; + + VECTORALLOC(visit_chunk->instances, INSTANCE, count); + + for (i = 0; i < count; i++) { + visit_chunk->instances.array[i] = array[i]; + } + + chunks_array[chunks_count++] = visit_chunk; + } + } + + // Collect locals chunks + for (i = 0; i < aug_graph->instances.length; i++) { + INSTANCE* in = &aug_graph->instances.array[i]; + CHILD_PHASE* group = &state->instance_groups[in->index]; + + if (group->ph == 0 && group->ch == 0) { + Chunk* local_chunk = (Chunk*)malloc(sizeof(Chunk)); + local_chunk->type = Local; + local_chunk->ph = 0; // Irrelevant + local_chunk->ch = 0; // Irrelevant + local_chunk->index = chunks_count; + local_chunk->circular = instance_in_aug_cycle(aug_graph, in); + + VECTORALLOC(local_chunk->instances, INSTANCE, 1); + local_chunk->instances.array[0] = *in; + + chunks_array[chunks_count++] = local_chunk; + } + } + + ChunkGraph* chunk_graph = (ChunkGraph*)malloc(sizeof(ChunkGraph)); + chunk_graph->schedule = (bool*)calloc(sizeof(bool), chunks_count); + chunk_graph->graph = + (DEPENDENCY*)calloc(sizeof(DEPENDENCY), chunks_count * chunks_count); + + VECTORALLOC(chunk_graph->instances, Chunk, chunks_count); + + // Copy over from stack allocated array to heap array + for (i = 0; i < chunks_count; i++) { + chunk_graph->instances.array[i] = *chunks_array[i]; + } + + if ((oag_debug & DEBUG_ORDER)) { + printf("\nlist of chunks for %s\n", 
aug_graph_name(aug_graph)); + for (i = 0; i < chunk_graph->instances.length; i++) { + Chunk* chunk = &chunk_graph->instances.array[i]; + + print_indent(1, stdout); + if (oag_debug & DEBUG_ORDER_VERBOSE) { + print_chunk(aug_graph, state, chunk, 1); + } else { + print_chunk_info(chunk); + printf("\n"); + } + } + printf("\n"); + } + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + printf("Chunk dependency graph for %s augmented dependency graph:\n", + aug_graph_name(aug_graph)); + } + + // Add edges between chunks + for (i = 0; i < chunk_graph->instances.length; i++) { + Chunk* chunk_source = &chunk_graph->instances.array[i]; + + for (j = 0; j < chunk_graph->instances.length; j++) { + Chunk* chunk_sink = &chunk_graph->instances.array[j]; + + // Assign dependencies between chunks via their instances by going + // through the dependency of instances inside the chunk and carry them + // over to chunk dependency graph + for (k = 0; k < chunk_source->instances.length; k++) { + INSTANCE* instance_source = &chunk_source->instances.array[k]; + + for (l = 0; l < chunk_sink->instances.length; l++) { + INSTANCE* instance_sink = &chunk_sink->instances.array[l]; + + if (MERGED_CONDITION_IS_IMPOSSIBLE( + instance_condition(instance_source), + instance_condition(instance_sink))) + continue; + + DEPENDENCY old_dep = + chunk_graph->graph[i * chunk_graph->instances.length + j]; + + DEPENDENCY new_dep = edgeset_kind( + aug_graph + ->graph[instance_source->index * aug_graph->instances.length + + instance_sink->index]); + + if (new_dep) { + if ((oag_debug & DEBUG_ORDER) && + (oag_debug & DEBUG_ORDER_VERBOSE)) { + print_indent(2, stdout); + printf("Regular dependency from chunk %d (", i); + print_chunk_info(chunk_source); + printf(") to chunk %d (", j); + print_chunk_info(chunk_sink); + printf(") because of:\n"); + print_indent(4, stdout); + + print_instance(instance_source, stdout); + printf(" <%+d,%+d> -> ", + state->instance_groups[instance_source->index].ph, + 
state->instance_groups[instance_source->index].ch); + print_instance(instance_sink, stdout); + printf(" <%+d,%+d> (kind=%d)\n", + state->instance_groups[instance_sink->index].ph, + state->instance_groups[instance_sink->index].ch, new_dep); + } + + chunk_graph->graph[i * chunk_graph->instances.length + j] = + dependency_join(new_dep, old_dep); + } + } + } + + // Assign dependency between visits of the same children by adding edge + // from chunk visit (ph,ch) and (ph+1,ch) + if (chunk_source->type == Visit && chunk_sink->type == Visit && + chunk_source->ch == chunk_sink->ch && + chunk_source->ph < chunk_sink->ph) { + DEPENDENCY old_dep = + chunk_graph->graph[i * chunk_graph->instances.length + j]; + + chunk_graph->graph[i * chunk_graph->instances.length + j] = + dependency_join(old_dep, CHUNK_GUIDING_DEPENDENCY); + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + // Write debugging if its a new edge + print_indent(2, stdout); + printf("Guiding dependency from chunk %d (", i); + print_chunk_info(chunk_source); + printf(") to chunk %d (", j); + print_chunk_info(chunk_sink); + printf(")\n"); + } + } + + // Assign dependency between parent inherited and parent synthesized + // attributes of the parent for the same phase + if (chunk_source->type == HalfLeft && chunk_sink->type == HalfRight && + chunk_source->ph == chunk_sink->ph) { + DEPENDENCY old_dep = + chunk_graph->graph[i * chunk_graph->instances.length + j]; + + chunk_graph->graph[i * chunk_graph->instances.length + j] = + dependency_join(old_dep, CHUNK_GUIDING_DEPENDENCY); + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + // Write debugging if its a new edge + print_indent(2, stdout); + printf("Guiding dependency from chunk %d (", i); + print_chunk_info(chunk_source); + printf(") to chunk %d (", j); + print_chunk_info(chunk_sink); + printf(")\n"); + } + } + + // Assign dependency between synthesized/inherited attributes of the + // parent across different phases 
to make sure parent visits are + // sequential + if (chunk_source->type == HalfRight && chunk_sink->type == HalfLeft && + chunk_source->ph + 1 == chunk_sink->ph) { + DEPENDENCY old_dep = + chunk_graph->graph[i * chunk_graph->instances.length + j]; + + chunk_graph->graph[i * chunk_graph->instances.length + j] = + dependency_join(old_dep, CHUNK_GUIDING_DEPENDENCY); + + if ((oag_debug & DEBUG_ORDER) && (oag_debug & DEBUG_ORDER_VERBOSE)) { + print_indent(2, stdout); + printf("Guiding dependency from chunk %d (", i); + print_chunk_info(chunk_source); + printf(") to chunk %d (", j); + print_chunk_info(chunk_sink); + printf(")\n"); + } + } + } + } + + // Transitive closure + bool changed = false; + do { + changed = false; + for (i = 0; i < chunk_graph->instances.length; i++) { + for (j = 0; j < chunk_graph->instances.length; j++) { + for (k = 0; k < chunk_graph->instances.length; k++) { + // i->j && j->k then i->k + DEPENDENCY dep_ij = + chunk_graph->graph[i * chunk_graph->instances.length + j]; + DEPENDENCY dep_jk = + chunk_graph->graph[j * chunk_graph->instances.length + k]; + DEPENDENCY old_dep_ik = + chunk_graph->graph[i * chunk_graph->instances.length + k]; + DEPENDENCY new_dep_ik = dependency_trans(dep_ij, dep_jk); + + if (dep_ij && dep_jk && (old_dep_ik | new_dep_ik) != old_dep_ik) { + changed = true; + chunk_graph->graph[i * chunk_graph->instances.length + k] = + dependency_join(new_dep_ik, old_dep_ik); + } + } + } + } + } while (changed); + + SccGraph scc_graph; + scc_graph_initialize(&scc_graph, chunk_graph->instances.length); + + // Add vertices to SCC graph + for (i = 0; i < chunk_graph->instances.length; i++) { + Chunk* chunk = &chunk_graph->instances.array[i]; + scc_graph_add_vertex(&scc_graph, (void*)chunk); + } + + // Add edges to SCC graph + for (i = 0; i < chunk_graph->instances.length; i++) { + Chunk* chunk_source = &chunk_graph->instances.array[i]; + for (j = 0; j < chunk_graph->instances.length; j++) { + Chunk* chunk_sink = 
&chunk_graph->instances.array[j]; + if (chunk_graph->graph[i * chunk_graph->instances.length + j]) { + scc_graph_add_edge(&scc_graph, (void*)chunk_source, (void*)chunk_sink); + } + } + } + + SCC_COMPONENTS* components = scc_graph_components(&scc_graph); + scc_graph_destroy(&scc_graph); + + if (oag_debug & DEBUG_ORDER) { + printf("Components of chunks of aug_graph: %s\n", + aug_graph_name(aug_graph)); + + for (i = 0; i < components->length; i++) { + SCC_COMPONENT* comp = components->array[i]; + + printf("=> component #%d (%s)\n", i, aug_graph->component_cycle[i] ? "circular" : "non-circular"); + + for (j = 0; j < comp->length; j++) { + Chunk* chunk = (Chunk*)comp->array[j]; + + print_indent(4, stdout); + printf("(%d) ", chunk->index); + + if (oag_debug & DEBUG_ORDER_VERBOSE) { + print_chunk(aug_graph, state, chunk, 0); + } else { + print_chunk_info(chunk); + printf("\n"); + } + } + } + } + + chunk_graph->chunk_components = components; + + return chunk_graph; +} + +/** + * @brief utility function to free the memory allocated for chunk graph + * @param chunk_graph chunk graph struct + */ +static void free_chunk_graph(ChunkGraph* chunk_graph) { + free(chunk_graph->graph); + free(chunk_graph->schedule); + free(chunk_graph); +} + +/** + * Utility function to schedule augmented dependency graph + * @param aug_graph Augmented dependency graph + * @param original_state_dependency Original state dependency + */ +static void schedule_augmented_dependency_graph( + AUG_GRAPH* aug_graph, + DEPENDENCY original_state_dependency) { + int n = aug_graph->instances.length; + CONDITION cond; + int i, j, ch; + + (void)close_augmented_dependency_graph(aug_graph); + + // Find SCC components of instances given a augmented dependency graph + set_aug_graph_components(aug_graph); + + for (i = 0; i < aug_graph->components->length; i++) { + if (original_state_dependency == no_dependency && + aug_graph->component_cycle[i]) { + fatal_error( + "The scheduler cannot handle the AG (%s) since it has 
/* NOTE(review): body of schedule_augmented_dependency_graph — assigns each
 * instance its (phase, child) group from the phylum schedule, builds the chunk
 * graph, then drives schedule_visit_start from parent phase 1 and validates the
 * result with assert_total_order.  Then compute_static_schedule (the public
 * entry, per aps-schedule.h) and the start of the analyze/aps-type.c diff.
 * NOTE(review): aug_graph->schedule is pointed at alloca'd stack memory here,
 * yet aug_graph outlives this call — presumably the field is only read during
 * scheduling; confirm nothing dereferences it after this function returns. */
a " + "conditional cycle.", + aug_graph_name(aug_graph)); + } + } + + // Now schedule graph: we need to generate a conditional total order. + if (oag_debug & PROD_ORDER) { + printf("Scheduling conditional total order for %s\n", + aug_graph_name(aug_graph)); + } + + TOTAL_ORDER_STATE* state = + (TOTAL_ORDER_STATE*)alloca(sizeof(TOTAL_ORDER_STATE)); + + size_t instance_groups_size = n * sizeof(CHILD_PHASE); + CHILD_PHASE* instance_groups = (CHILD_PHASE*)alloca(instance_groups_size); + memset(instance_groups, (int)0, instance_groups_size); + + // Assign to each attribute instance + for (i = 0; i < n; i++) { + INSTANCE* in = &(aug_graph->instances.array[i]); + Declaration ad = in->fibered_attr.attr; + Declaration chdecl; + + int j = 0, ch = -1; + for (chdecl = aug_graph->first_rhs_decl; chdecl != 0; + chdecl = DECL_NEXT(chdecl)) { + if (in->node == chdecl) { + ch = j; + } + j++; + } + + if (in->node == aug_graph->lhs_decl || ch >= 0) { + PHY_GRAPH* npg = DECL_PHY_GRAPH(in->node); + int ph = attribute_schedule(npg, &(in->fibered_attr)); + instance_groups[i].ph = (short)ph; + instance_groups[i].ch = (short)ch; + } + } + + state->instance_groups = instance_groups; + + // Find children of augmented graph: this will be used as argument to + // visit calls + set_aug_graph_children(aug_graph, state); + + size_t schedule_size = n * sizeof(int); + aug_graph->schedule = (int*)alloca(schedule_size); + + // False here means nothing is scheduled yet + memset(aug_graph->schedule, 0, schedule_size); + + if (oag_debug & DEBUG_ORDER) { + printf("\nInstances %s:\n", aug_graph_name(aug_graph)); + for (i = 0; i < n; i++) { + INSTANCE* in = &(aug_graph->instances.array[i]); + CHILD_PHASE group = state->instance_groups[i]; + print_instance(in, stdout); + printf(":index: %d lineno: %d ", in->index, + tnode_line_number(in->fibered_attr.attr)); + + int cycle = instance_in_aug_cycle(aug_graph, in); + if (cycle > -1) { + printf(" [in cycle:%d] ", cycle); + } else { + printf(" [non-circular] "); 
+ } + + if (!group.ph && !group.ch) { + printf("local\n"); + } else { + printf("<%+d,%+d>\n", group.ph, group.ch); + } + } + } + + ChunkGraph* chunk_graph = collect_aug_graph_chunks(aug_graph, state); + + cond.negative = 0; + cond.positive = 0; + + aug_graph->total_order = + schedule_visit_start(aug_graph, chunk_graph, NULL, cond, state, n, + 1 /* parent visit number */); + + if (aug_graph->total_order == NULL) { + fatal_error("Failed to create total order."); + } + + if (oag_debug & DEBUG_ORDER) { + printf("\nSchedule for %s (%d children):\n", aug_graph_name(aug_graph), + state->children.length); + print_total_order(aug_graph, aug_graph->total_order, -1, false, state, 0, + stdout); + } + + // Ensure generated total order is valid + assert_total_order(aug_graph, chunk_graph, state, aug_graph->total_order); + + free_chunk_graph(chunk_graph); +} + +/** + * @brief Computes total-preorder of set of attributes + * @param s state + */ +void compute_static_schedule(STATE* s) { + int i, j; + for (i = 0; i < s->phyla.length; i++) { + schedule_summary_dependency_graph(&s->phy_graphs[i]); + + dnc_close(s); + + /* now perform closure */ + int saved_analysis_debug = analysis_debug; + + if (oag_debug & TYPE_3_DEBUG) { + analysis_debug |= TWO_EDGE_CYCLE; + } + + if (analysis_debug & DNC_ITERATE) { + printf("\n**** After OAG schedule for phylum %d:\n\n", i); + } + + if (analysis_debug & ASSERT_CLOSED) { + for (j = 0; j < s->match_rules.length; j++) { + printf("Checking rule %d\n", j); + assert_closed(&s->aug_graphs[j]); + } + } + + analysis_debug = saved_analysis_debug; + + if (analysis_debug & DNC_ITERATE) { + printf("\n*** After closure after schedule OAG phylum %d\n\n", i); + print_analysis_state(s, stdout); + print_cycles(s, stdout); + } + } + + DEPENDENCY dep = analysis_state_cycle(s); + + for (i = 0; i < s->match_rules.length; i++) { + schedule_augmented_dependency_graph(&s->aug_graphs[i], + s->original_state_dependency); + } + 
schedule_augmented_dependency_graph(&s->global_dependencies, + s->original_state_dependency); + + if (analysis_debug & (DNC_ITERATE | DNC_FINAL)) { + printf("*** FINAL OAG ANALYSIS STATE ***\n"); + print_analysis_state(s, stdout); + print_cycles(s, stdout); + } +} diff --git a/analyze/aps-schedule.h b/analyze/aps-schedule.h index e69de29b..d0147ef1 100644 --- a/analyze/aps-schedule.h +++ b/analyze/aps-schedule.h @@ -0,0 +1,12 @@ +#ifndef APS_SCHEDULE_H +#define APS_SCHEDULE_H + +#include "aps-dnc.h" + +/** + * @brief Computes total-preorder of set of attributes + * @param s state + */ +void compute_static_schedule(STATE* s); + +#endif diff --git a/analyze/aps-type.c b/analyze/aps-type.c index 915d08c7..432b869f 100644 --- a/analyze/aps-type.c +++ b/analyze/aps-type.c @@ -5,6 +5,7 @@ #include "aps-ag.h" int type_debug = FALSE; +static int BUFFER_SIZE = 1000; static Type Boolean_Type; static Type Integer_Type; @@ -15,11 +16,28 @@ static Type error_type; int remote_type_p(Type ty); +Type infer_some_value_decl_type(Declaration d) { + if (Declaration_KEY(d) == KEYnormal_formal) { + return infer_formal_type(d); + } else { + return some_value_decl_type(d); + } +} + Type function_type_return_type(Type ft) { return value_decl_type(first_Declaration(function_type_return_values(ft))); } +Type constructor_return_type(Declaration decl) { + Type function_type = constructor_decl_type(decl); + Declaration rd = first_Declaration(function_type_return_values(function_type)); + Type rt = value_decl_type(rd); + return rt; +} + +Declaration current_module = NULL; + static void* do_typechecking(void* ignore, void*node) { // find places where Expression, Pattern or Default is used switch (ABSTRACT_APS_tnode_phylum(node)) { @@ -44,6 +62,60 @@ static void* do_typechecking(void* ignore, void*node) { { Declaration decl = (Declaration)node; switch (Declaration_KEY(decl)) { + case KEYmodule_decl: + current_module = (Declaration) node; + break; + case KEYconstructor_decl: + { + Declarations 
/* NOTE(review): aps-type.c diff continuation — new checks in do_typechecking:
 * (1) constructor_decl: O(n^2) pairwise scan rejecting duplicate formal names;
 * rejects constructors returning a remote type; walks the return type's
 * TypeEnvironment chain and rejects constructors added in an extending module
 * (source module != current_module).  (2) assign/collect_assign: the LHS (an
 * attribute funcall or a global value_use) must be declared VAR, and for the
 * `:>` operator its direction must be a collection. */
formals = function_type_formals(constructor_decl_type(decl)); + Declaration formals_ptr1, formals_ptr2; + int i, j; + + for (formals_ptr1 = first_Declaration(formals), i = 0; + formals_ptr1 != NULL; + formals_ptr1 = DECL_NEXT(formals_ptr1), i++) { + + char* formal_name = decl_name(formals_ptr1); + + for (formals_ptr2 = DECL_NEXT(formals_ptr1), j = i + 1; + formals_ptr2 != NULL; + formals_ptr2 = DECL_NEXT(formals_ptr2), j++) { + if (strcmp(formal_name, decl_name(formals_ptr2)) == 0) { + aps_error(decl, "Duplicate constructor formal name: \"%s\" at indices: %i, %i in \"%s\" constructor", formal_name, i, j, decl_name(decl)); + } + } + } + + Type rt = constructor_return_type(decl); + switch (Type_KEY(rt)) + { + case KEYremote_type: + aps_error(decl, "Constructor for remote type is forbidden (constructor %s(...): remote %s)", decl_name(decl), symbol_name(use_name(type_use_use(remote_type_nodetype(rt))))); + break; + case KEYtype_use: + { + TypeEnvironment type_env = Use_info(type_use_use(rt))->use_type_env; + while (type_env != NULL) { + switch (Declaration_KEY(type_env->source)) + { + case KEYmodule_decl: + if (type_env->source != current_module) { + aps_error(decl, "Adding a constructor \"%s\" in extending module is forbidden", decl_name(decl)); + } + break; + default: + break; + } + + type_env = type_env->outer; + } + break; + } + default: + break; + } + break; + } case KEYvalue_decl: check_default_type(value_decl_default(decl),value_decl_type(decl)); break; @@ -124,10 +196,51 @@ static void* do_typechecking(void* ignore, void*node) { } } break; + default: + break; } } /* FALL THROUGH */ case KEYcollect_assign: + { + Expression lhs = assign_lhs(decl); + Declaration lhs_use_decl = NULL; + switch (Expression_KEY(lhs)) + { + case KEYfuncall: + lhs_use_decl = Use_info(value_use_use(funcall_f(lhs)))->use_decl; + if (def_is_constant(declaration_def(lhs_use_decl))) { + aps_error(lhs_use_decl, "Attribute collection \"%s\" has to be declared as VAR to be assigned", 
decl_name(lhs_use_decl)); + } + break; + case KEYvalue_use: + lhs_use_decl = Use_info(value_use_use(lhs))->use_decl; + if (def_is_constant(declaration_def(lhs_use_decl))) { + aps_error(lhs_use_decl, "Global collection \"%s\" has to be declared as VAR to be assigned", decl_name(lhs_use_decl)); + } + break; + default: + break; + } + + // Check if variable or attribute is declared as a collection to use collect_assign operator + if (lhs_use_decl != NULL && Declaration_KEY(decl) == KEYcollect_assign) { + switch (Declaration_KEY(lhs_use_decl)) { + case KEYvalue_decl: + if (!direction_is_collection(value_decl_direction(lhs_use_decl))) { + aps_error(lhs_use_decl, "Global variable \"%s\" must be declared as a collection to use :> operator", decl_name(lhs_use_decl)); + } + break; + case KEYattribute_decl: + if (!direction_is_collection(attribute_decl_direction(lhs_use_decl))) { + aps_error(lhs_use_decl, "Attribute \"%s\" must be declared as a collection to use :> operator", decl_name(lhs_use_decl)); + } + break; + default: + break; + } + } + } check_expr_type(assign_rhs(decl),infer_expr_type(assign_lhs(decl))); return 0; break; @@ -190,6 +303,22 @@ static void* do_typechecking(void* ignore, void*node) { while (ABSTRACT_APS_tnode_phylum(tdecl) != KEYDeclaration) tdecl = (Declaration)tnode_parent(tdecl); + + switch (Declaration_KEY(tdecl)) + { + case KEYsome_value_decl: + { + char value_decl_type_str[BUFFER_SIZE]; + FILE* f = fmemopen(value_decl_type_str, sizeof(value_decl_type_str), "w"); + print_Type(some_value_decl_type(tdecl), f); + fclose(f); + aps_error(tdecl, "Expected use of a type declaration, not a type construction (%s).", value_decl_type_str); + return 0; + } + default: + break; + } + tu = use(def_name(some_type_decl_def(tdecl))); te->outer = USE_TYPE_ENV(mu); te->source = mdecl; @@ -215,6 +344,8 @@ ... (void)check_actuals(type_inst_actuals(ty),fty,u); } return 0; + default: + break; } } break; @@ -281,6 +412,132 
@@ static void init_types() { error_type = Boolean_Type; } +static char* trim_string_const_token(char* p) { + p++; + p[strlen(p)-1] = 0; + return p; +} + +static void* validate_canonicals(void* ignore, void*node) { + int BUFFER_SIZE = 1000; + Symbol symb_test_canonical_type = intern_symbol("test_canonical_type"); + Symbol symb_test_canonical_base_type = intern_symbol("test_canonical_base_type"); + Symbol symb_test_canonical_signature = intern_symbol("test_canonical_signature"); + + switch (ABSTRACT_APS_tnode_phylum(node)) + { + case KEYDeclaration: + { + Declaration decl = (Declaration) node; + switch (Declaration_KEY(decl)) + { + case KEYpragma_call: + { + Symbol pragma_value = pragma_call_name(decl); + if (symb_test_canonical_signature == pragma_value || + symb_test_canonical_type == pragma_value || + symb_test_canonical_base_type == pragma_value) { + Expressions exprs = pragma_call_parameters(decl); + Expression type_expr = first_Expression(exprs); + Expression result_expr = Expression_info(type_expr)->next_expr; + Type type; + + switch (Expression_KEY(type_expr)) { + case KEYtype_value: + type = type_value_T(type_expr); + break; + case KEYvalue_use: { + Use use = value_use_use(type_expr); + switch (Use_KEY(use)) { + case KEYqual_use: + type = qual_use_from(use); + break; + default: + aps_error(node, "only qual uses have a type that can be validated"); + return NULL; + } + break; + } + default: + aps_error(node, "unknown AST type passed into canonical type validation"); + return NULL; + } + + String expected_string = string_const_token(result_expr); + + char actual_to_string[BUFFER_SIZE]; + FILE* f = fmemopen(actual_to_string, sizeof(actual_to_string), "w"); + if (symb_test_canonical_signature == pragma_value) { + print_canonical_signature_set(infer_canonical_signatures(canonical_type(type)), f); + } else if (symb_test_canonical_type == pragma_value) { + print_canonical_type(canonical_type(type), f); + } else if (symb_test_canonical_base_type == pragma_value) { 
+ print_canonical_type(canonical_type_base_type(canonical_type(type)), f); + } + fclose(f); + + char expected[BUFFER_SIZE]; + sprintf(expected, "%s", (char *)expected_string); + + // Remove double quotes from the beginning and the end of string + // This is needed because APS parser does not trim double quotes from KEYstring_const + char* expected_cleaned = trim_string_const_token(expected); + + if (strcmp(actual_to_string, expected_cleaned) != 0) { + char type_to_str[BUFFER_SIZE]; + f = fmemopen(type_to_str, sizeof(type_to_str), "w"); + print_Type(type, f); + fclose(f); + + aps_error(type,"Failed: `%s`:%d expected `%s` but got `%s`", type_to_str, tnode_line_number(type), expected_cleaned, actual_to_string); + } + } + } + default: + break; + } + } + default: + break; + } + return node; +} + +static Declaration module_TYPE; +static Declaration module_PHYLUM; + +static void* set_root_phylum(void *ignore, void *node) +{ + switch (ABSTRACT_APS_tnode_phylum(node)) + { + case KEYDeclaration: + { + Declaration d = (Declaration)node; + switch (Declaration_KEY(d)) + { + case KEYmodule_decl: + { + if (module_TYPE == 0 && streq(decl_name(d), "TYPE")) + { + module_TYPE = d; + } + else if (module_PHYLUM == 0 && streq(decl_name(d), "PHYLUM")) + { + module_PHYLUM = d; + } + + return NULL; + } + default: + break; + } + } + default: + break; + } + + return node; +} void type_Program(Program p) { @@ -304,6 +561,11 @@ void type_Program(Program p) if (type_debug) printf("Type checking code in \"%s.aps\"\n",aps_yyfilename); traverse_Program(do_typechecking,p,p); aps_yyfilename = saved_filename; + + traverse_Program(set_root_phylum, p, p); + initialize_canonical_signature(module_TYPE, module_PHYLUM); + + traverse_Program(validate_canonicals,p,p); } Type infer_expr_type(Expression e) @@ -335,7 +597,7 @@ Type infer_expr_type(Expression e) ty = infer_formal_type(decl); break; default: - aps_error(decl,"unknown expression decl"); + aps_error(decl,"unknown expression decl for %s", 
/* NOTE(review): remaining aps-type.c hunks — is_inside_function walks tnode
 * parents to the enclosing function_decl; ensure_non_var_use recursively checks
 * that a constant function only uses constant values/attributes/functions
 * (with an exemption for functions of a VAR type formal); check_expr_type
 * invokes it for expressions inside constant functions; a missing `break` after
 * `case KEYformal:` in check_function_return_type is fixed; type_is_scalar is
 * added.  Then: aps-type.h diff (typo fix "informatio" -> "information", new
 * externs), apsc.c diff (main gains `int` return and new -C/-F flags), and the
 * head of new file canonical-signature.c.
 * NOTE(review): the first three `#include` lines of canonical-signature.c have
 * lost their target names, and two fprintf(f, "") calls print an empty string —
 * this looks like angle-bracket text (e.g. `<stdio.h>`, `<null>`) stripped
 * during extraction; restore from the upstream patch rather than guessing.
 * print_canonical_signature continues past the end of this fragment. */
decl_name(decl)); break; } ty = type_subst(u,ty); @@ -400,6 +662,102 @@ Type infer_expr_type(Expression e) return ty; } +static Declaration is_inside_function(void* node) { + while (node != NULL) { + if (ABSTRACT_APS_tnode_phylum((Declaration)node) == KEYDeclaration && + Declaration_KEY((Declaration)node) == KEYfunction_decl) { + return (Declaration)node; + } + + node = tnode_parent(node); + } + return NULL; +} + +static bool sig_is_var(Signature sig) { + switch (Signature_KEY(sig)) { + case KEYsig_inst: + return sig_inst_is_var(sig); + default: + return false; + } +} + +static void ensure_non_var_use(Declaration fdecl, void* source, void* current) { + switch (ABSTRACT_APS_tnode_phylum(current)) { + case KEYExpression: { + Expression expr = (Expression) current; + switch (Expression_KEY(expr)) { + case KEYfuncall: { + ensure_non_var_use(fdecl, source, funcall_f(expr)); + break; + } + case KEYvalue_use: { + Use use = value_use_use(expr); + Declaration udecl = USE_DECL(use); + + switch (Use_KEY(use)) { + case KEYqual_use: { + // Convert type-use to Declaration + Declaration decl = canonical_type_decl(canonical_type(qual_use_from(use))); + switch (Declaration_KEY(decl)) { + case KEYsome_type_formal: + // Special case when type formal is VAR, its functions are considered constant + if (sig_is_var(some_type_formal_sig(decl))) { + return; + } + break; + default: + break; + } + break; + } + default: + break; + } + + ensure_non_var_use(fdecl, source, udecl); // investigate the use + break; + } + default: + break; + } + break; + } + case KEYDeclaration: { + Declaration decl = (Declaration) current; + switch (Declaration_KEY(decl)) { + case KEYvalue_decl: { + if (is_inside_function(decl) != fdecl && !def_is_constant(value_decl_def(decl))) { + aps_error(source, "non-constant value '%s' used inside constant function '%s' but declared outside of function", + decl_name(decl), decl_name(fdecl)); + } + break; + } + case KEYattribute_decl: { + if 
(!def_is_constant(attribute_decl_def(decl))) { + aps_error(source, "attribute '%s' used inside constant function '%s'", + decl_name(decl), decl_name(fdecl)); + } + break; + } + case KEYfunction_decl: { + if (!def_is_constant(function_decl_def(decl))) { + aps_error(source, "function call to non-constant function '%s' inside constant function '%s'", + decl_name(decl), decl_name(fdecl)); + } + break; + } + default: + break; + } + break; + } + default: + break; + } +} + void check_expr_type(Expression e, Type type) { if (type == 0) { @@ -410,6 +768,12 @@ void check_expr_type(Expression e, Type type) check_type_equal(e,type,Expression_info(e)->expr_type); return; } + + Declaration fdecl; + if ((fdecl = is_inside_function(e)) != NULL && def_is_constant(function_decl_def(fdecl))) { + ensure_non_var_use(fdecl, e, e); + } + switch (Expression_KEY(e)) { case KEYvalue_use: { @@ -437,7 +801,7 @@ void check_expr_type(Expression e, Type type) ty = infer_formal_type(decl); break; default: - aps_error(decl,"unknown expression decl"); + aps_error(decl,"unknown expression decl for %s", decl_name(decl)); break; } check_type_subst(e,type,u,ty); @@ -740,7 +1104,7 @@ Type infer_function_return_type(Expression f, Actuals args) { ty = infer_formal_type(decl); break; default: - aps_error(decl,"unknown expression decl"); + aps_error(decl,"unknown expression decl for %s", decl_name(decl)); return error_type; break; } @@ -789,8 +1153,9 @@ void check_function_return_type(Expression f, Actuals args, Type type) { break; case KEYformal: ty = infer_formal_type(decl); + break; default: - aps_error(decl,"unknown expression decl"); + aps_error(decl,"unknown expression decl for %s", decl_name(decl)); return; } ty = base_type(ty); @@ -1928,6 +2293,13 @@ int base_type_equal(Type b1, Type b2) { } } +BOOL type_is_scalar(Type t) { + return base_type_equal(t, Integer_Type) || + base_type_equal(t, Boolean_Type) || + base_type_equal(t, Real_Type) || + base_type_equal(t, Char_Type); +} + void 
check_type_equal(void *node, Type t1, Type t2) { if (t1 == t2) return; /* easy case */ if (t1 == 0 || t2 == 0) return; diff --git a/analyze/aps-type.h b/analyze/aps-type.h index 1a0a92b0..faf0ebdc 100644 --- a/analyze/aps-type.h +++ b/analyze/aps-type.h @@ -14,7 +14,7 @@ extern Type function_type_return_type(Type); * to the environment in the type. Hence, one cannot just use * the type of the use_decl + the TYPE_ENV stored in the Use: * you need to also check if the use is a qual use. Implicit qualification - * and polymorphic uses *will* have complete USE_TYPE_ENV informatio. + * and polymorphic uses *will* have complete USE_TYPE_ENV information. * * Thus instead of passing around TypeEnvironments, we will pass Use's * around which hold a chain of TypeEnvironment's, as in @@ -38,6 +38,7 @@ extern void check_default_type(Default,Type); extern void check_matchers_type(Matches,Type); extern Type infer_formal_type(Declaration formal); +extern Type infer_some_value_decl_type(Declaration d); /* for the following functions, we don't assume the actuals * have been checked yet. @@ -54,6 +55,8 @@ extern void check_type_actuals(TypeActuals,Declarations,Use); extern BOOL type_is_phylum(Type); extern Type type_element_type(Type); +extern BOOL type_is_scalar(Type); // true for Integer, Boolean, Real, Char + /* return true is environment is ready to use * (no remaining inference required) * Since polymorphic scopes may not export type declarations diff --git a/analyze/apsc.c b/analyze/apsc.c index 80b0a60f..b775deec 100644 --- a/analyze/apsc.c +++ b/analyze/apsc.c @@ -8,23 +8,28 @@ static char* argv0 = "apssched"; void usage() { fprintf(stderr,"apsc: usage: %s [-DH] [-D...] 
[-p apspath] file...\n",argv0); fprintf(stderr," schedule APS files (omit '.aps' extension)\n"); + fprintf(stderr," -C SCC chunk static scheduling\n"); + fprintf(stderr," -F ANC analysis\n"); fprintf(stderr," -DH print debug options\n"); exit(1); } -main(int argc,char **argv) { +int main(int argc,char **argv) { int i; argv0 = argv[0]; for (i=1; i < argc; ++i) { /* printf("argv[%d] = %s\n",i,argv[i]); */ if (argv[i][0] == '-') { char *options = argv[i]+1; - if (*options == '\0') usage(); if (*options == 'D') { set_debug_flags(options+1); } else if (*options == 'p') { set_aps_path(argv[++i]); - } + } else if (*options == 'C') { + static_scc_schedule = true; + } else if (*options == 'F') { + anc_analysis = true; + } else usage(); } else { Program p = find_Program(make_string(argv[i])); bind_Program(p); diff --git a/analyze/canonical-signature.c b/analyze/canonical-signature.c new file mode 100644 index 00000000..fea01ac9 --- /dev/null +++ b/analyze/canonical-signature.c @@ -0,0 +1,695 @@ +#include +#include +#include +#include "aps-ag.h" +#include "jbb-alloc.h" + +static CanonicalSignatureSet from_sig(Signature sig); +static CanonicalSignatureSet from_type(Type t); +static CanonicalSignatureSet from_declaration(Declaration decl); +static CanonicalSignatureSet substitute_canonical_signature_set_actuals(CanonicalType* source, CanonicalSignatureSet sig_set); +static int canonical_signature_compare(CanonicalSignature *sig1, CanonicalSignature *sig2); + +static Declaration module_TYPE; +static Declaration module_PHYLUM; +static bool initialized = false; + +/** + * Prints canonical signature + * @param untyped CanonicalSignature + * @param f output stream + */ +void print_canonical_signature(void *untyped, FILE *f) +{ + if (f == NULL) + { + f = stdout; + } + if (untyped == NULL) + { + fprintf(f, ""); + return; + } + + CanonicalSignature *canonical_signature = (CanonicalSignature *)untyped; + + fprintf(f, "%s", decl_name(canonical_signature->source_class)); + fputc('[', f); 
+ + bool started = false; + int i; + for (i = 0; i < canonical_signature->num_actuals; i++) + { + if (started) + { + fputc(',', f); + } + else + { + started = true; + } + print_canonical_type(canonical_signature->actuals[i], f); + } + + fputc(']', f); +} + +/** + * Prints canonical signature set + * @param untyped CanonicalSignatureSet + * @param f output stream + */ +void print_canonical_signature_set(void *untyped, FILE *f) +{ + if (f == NULL) + { + f = stdout; + } + if (untyped == NULL) + { + fprintf(f, "{}"); + return; + } + + CanonicalSignatureSet set = (CanonicalSignatureSet)untyped; + + fprintf(f, "{"); + + if (set->num_elements > 0) + { + size_t struct_size = sizeof(struct hash_cons_set) + set->num_elements * sizeof(void *); + HASH_CONS_SET sorted_set = (HASH_CONS_SET)alloca(struct_size); + sorted_set->num_elements = 0; + + int i, j; + for (i = 0; i < set->num_elements; i++) + { + CanonicalSignature * item = (CanonicalSignature *)set->elements[i]; + int j = i - 1; + + while (j >= 0 && canonical_signature_compare((CanonicalSignature *)sorted_set->elements[j], item) > 0) + { + sorted_set->elements[j + 1] = sorted_set->elements[j]; + j--; + } + + sorted_set->elements[j + 1] = item; + sorted_set->num_elements++; + } + + int started = false; + for (i = 0; i < sorted_set->num_elements; i++) + { + if (started) + { + fprintf(f, ","); + } + else + { + started = true; + } + print_canonical_signature(sorted_set->elements[i], f); + } + } + + fprintf(f, "}"); +} + +/** + * Hashes list of CanonicalType + * @param count + * @param types + * @return combined hash + */ +long hash_canonical_types(int count, CanonicalType **types) +{ + long h = 17; + int i; + for (i = 0; i < count; i++) + { + h = hash_mix(h, canonical_type_hash(types[i])); + } + + return h; +} + +/** + * Hashes Class declaration + * @param cl Declaration + * @return hash integer value + */ +long hash_source_class(Declaration cl) +{ + return (long)cl; +} + +/** + * Hashes CanonicalSignature + * @param untyped 
InferredSignature + * @return hash integer value + */ +long canonical_signature_hash(void *untyped) +{ + CanonicalSignature *inferred_sig = (CanonicalSignature *)untyped; + + return hash_mix((int)inferred_sig->is_input, hash_mix((int)inferred_sig->is_var, hash_mix(hash_source_class(inferred_sig->source_class), hash_canonical_types(inferred_sig->num_actuals, inferred_sig->actuals)))); +} + +/** + * Equality test for CanonicalSignature + * @param untyped1 untyped CanonicalType + * @param untyped2 untyped CanonicalType + * @return boolean indicating the result of equality + */ +bool canonical_signature_equal(void *untyped1, void *untyped2) +{ + CanonicalSignature *inferred_sig1 = (CanonicalSignature *)untyped1; + CanonicalSignature *inferred_sig2 = (CanonicalSignature *)untyped2; + + if (inferred_sig1->num_actuals != inferred_sig2->num_actuals) + { + return false; + } + + bool actuals_equal = true; + int i; + for (i = 0; i < inferred_sig1->num_actuals && actuals_equal; i++) + { + actuals_equal &= (inferred_sig1->actuals[i] == inferred_sig2->actuals[i]); + } + + return actuals_equal && inferred_sig1->is_input == inferred_sig2->is_input && inferred_sig1->is_var == inferred_sig2->is_var && inferred_sig1->source_class == inferred_sig2->source_class; +} + +static struct hash_cons_table canonical_signature_table = {canonical_signature_hash, canonical_signature_equal}; + +/** + * Constructor to create a new canonical signature + * @param is_input + * @param is_var + * @param source_class + * @param num_actuals + * @param actuals + * @return canonical signature + */ +CanonicalSignature* new_canonical_signature(bool is_input, bool is_var, Declaration source_class, int num_actuals, CanonicalType **actuals) +{ + size_t struct_size = sizeof(CanonicalSignature) + num_actuals * (sizeof(CanonicalType *)); + + CanonicalSignature *result = (CanonicalSignature *)alloca(struct_size); + result->is_input = is_input; + result->is_var = is_var; + result->source_class = source_class; + 
result->num_actuals = num_actuals; + + int i; + for (i = 0; i < num_actuals; i++) + { + result->actuals[i] = actuals[i]; + } + + void *memory = hash_cons_get(result, struct_size, &canonical_signature_table); + return (CanonicalSignature *)memory; +} + +/** + * Counts number of Declarations + * @param type_actuals + * @return count of actuals in TypeActuals + */ +static int count_actuals(TypeActuals type_actuals) +{ + switch (TypeActuals_KEY(type_actuals)) + { + default: + fatal_error("count_declarations crashed"); + case KEYnil_TypeActuals: + return 0; + case KEYlist_TypeActuals: + return 1; + case KEYappend_TypeActuals: + return count_actuals(append_TypeActuals_l1(type_actuals)) + count_actuals(append_TypeActuals_l2(type_actuals)); + } +} + +// Collects parents and result canonical signatures +static CanonicalSignatureSet from_ctype(CanonicalSignature* csig) +{ + Declaration mdecl = csig->source_class; + Signature parent_sig = some_class_decl_parent(mdecl); + Declaration rdecl = some_class_decl_result_type(mdecl); + + return from_declaration(rdecl); +} + +/** + * Tries to resolve canonical signature set from a Signature inst + * @param sig Signature AST + * @return Canonical signature set + */ +static CanonicalSignatureSet from_sig_inst(Signature sig) +{ + Declaration mdecl = USE_DECL(class_use_use(sig_inst_class(sig))); + + TypeActuals actuals = sig_inst_actuals(sig); + int num_actuals = count_actuals(actuals); + size_t my_size = num_actuals * sizeof(CanonicalType *); + CanonicalType **cactuals = (CanonicalType **)alloca(my_size); + + int i = 0; + Type atype = first_TypeActual(actuals); + + while (atype != NULL) + { + cactuals[i] = canonical_type_base_type(canonical_type(atype)); + atype = TYPE_NEXT(atype); + i++; + } + + CanonicalSignature* csig = new_canonical_signature(sig_inst_is_input(sig), sig_inst_is_var(sig), mdecl, num_actuals, cactuals); + + return add_hash_cons_set(csig, from_ctype(csig)); +} + +/** + * Tries to resolve canonical signature set from a 
Signature mult + * @param sig Signature AST + * @return Canonical signature set + */ +static CanonicalSignatureSet from_mult_sig(Signature sig) +{ + return union_hash_const_set(from_sig(mult_sig_sig1(sig)), from_sig(mult_sig_sig2(sig))); +} + +/** + * Tries to resolve canonical signature set from a Signature use + * @param sig Signature AST + * @return Canonical signature set + */ +static CanonicalSignatureSet from_sig_use(Signature sig) +{ + Use use = sig_use_use(sig); + switch (Use_KEY(use)) + { + case KEYuse: + return from_declaration(USE_DECL(use)); + case KEYqual_use: + return from_type(qual_use_from(use)); + } +} + +/** + * Create canonical signature set from a Signature + * @param sig Signature AST node + * @return canonical signature set + */ +static CanonicalSignatureSet from_sig(Signature sig) +{ + switch (Signature_KEY(sig)) + { + case KEYsig_inst: + return from_sig_inst(sig); + case KEYmult_sig: + return from_mult_sig(sig); + case KEYsig_use: + return from_sig_use(sig); + case KEYno_sig: + case KEYfixed_sig: + return get_hash_cons_empty_set(); + } +} + +/** + * Returns a single canonical signature set + * @param csig canonical signature + * @return canonical signature set + */ +static CanonicalSignatureSet single_canonical_signature_set(CanonicalSignature* csig) +{ + size_t sig_set_size = sizeof(struct hash_cons_set) + 1 * sizeof(CanonicalSignature *); + CanonicalSignatureSet sig_set = (CanonicalSignatureSet)alloca(sig_set_size); + sig_set->num_elements = 1; + sig_set->elements[0] = csig; + + return new_hash_cons_set(sig_set); +} + +/** + * Comparator for two canonical signature + * @param sig1 first canonical signature + * @param sig2 second canonical signature + * @return integer value representing the comparison + */ +static int canonical_signature_compare(CanonicalSignature *sig1, CanonicalSignature *sig2) +{ + if (sig1->is_input != sig2->is_input) + { + return sig1->is_input - sig2->is_input; + } + + if (sig1->is_var != sig2->is_var) + { + return 
sig1->is_var - sig2->is_var; + } + + if (tnode_line_number(sig1->source_class) != tnode_line_number(sig2->source_class)) + { + return tnode_line_number(sig1->source_class) - tnode_line_number(sig2->source_class); + } + + if (sig1->num_actuals != sig2->num_actuals) + { + return sig1->num_actuals - sig2->num_actuals; + } + + int i; + for (i = 0; i < sig1->num_actuals; i++) + { + int actual_comp = canonical_type_compare(sig1->actuals[i], sig2->actuals[i]); + if (actual_comp != 0) + { + return actual_comp; + } + } + + return 0; +} + +/** + * Canonical signature set from Type AST + * @param t Type AST + * @return canonical signature set + */ +static CanonicalSignatureSet from_type(Type t) +{ + switch (Type_KEY(t)) + { + case KEYtype_use: + return infer_canonical_signatures(canonical_type(t)); + case KEYtype_inst: + { + Module m = type_inst_module(t); + Declaration mdecl = USE_DECL(module_use_use(m)); + int num_actuals = count_actuals(type_inst_type_actuals(t)); + size_t my_size = num_actuals * (sizeof(CanonicalSignature *)); + CanonicalType **cactuals = (CanonicalType **)alloca(my_size); + + int i = 0; + Type type = first_TypeActual(type_inst_type_actuals(t)); + while (type != NULL) + { + cactuals[i++] = canonical_type_base_type(canonical_type(type)); + type = TYPE_NEXT(type); + } + + CanonicalSignature* csig = new_canonical_signature(true, true, mdecl, num_actuals, cactuals); + return add_hash_cons_set(csig, from_ctype(csig)); + } + case KEYprivate_type: + case KEYno_type: + return single_canonical_signature_set(new_canonical_signature(true, true, type_is_phylum(t) ? 
module_PHYLUM : module_TYPE, 0, NULL)); + default: + aps_error(t, "Not sure how to find the canonical signature set given Type with Type_KEY of %d", (int)Type_KEY(t)); + return NULL; + } +} + +/** + * Resolve a canonical signature from a Declaration + * @param decl Declaration + * @return Canonical signature set + */ +static CanonicalSignatureSet from_declaration(Declaration decl) +{ + CanonicalSignatureSet re; + switch (Declaration_KEY(decl)) + { + case KEYsome_type_decl: + { + Signature sig = some_type_decl_sig(decl); + if (Signature_KEY(sig) == KEYno_sig) + { + Type t = some_type_decl_type(decl); + re = from_type(t); + } + else + { + re = union_hash_const_set(from_sig(sig), from_type(some_type_decl_type(decl))); + } + + switch (Type_KEY(some_type_decl_type(decl))) + { + case KEYtype_inst: + { + re = substitute_canonical_signature_set_actuals(new_canonical_type_use(decl), re); + break; + } + default: + break; + } + break; + } + case KEYsome_type_formal: + { + Signature sig = some_type_formal_sig(decl); + re = from_sig(sig); + break; + } + default: + aps_error(decl, "Not sure how to find the canonical signature set given Declaration with Declaration_KEY of %d", (int)Declaration_KEY(decl)); + return NULL; + } + + return re; +} + +static CanonicalSignature* join_canonical_signature_actuals(CanonicalType *source_ctype, CanonicalSignature *canonical_sig) +{ + switch (source_ctype->key) + { + case KEY_CANONICAL_FUNC: + { + aps_warning(source_ctype, "Not sure how to run substitution of actuals given function types"); + return canonical_sig; + } + case KEY_CANONICAL_QUAL: + { + struct Canonical_qual_type *ctype_qual = (struct Canonical_qual_type *)source_ctype; + return join_canonical_signature_actuals(ctype_qual->source, join_canonical_signature_actuals(new_canonical_type_use(ctype_qual->decl), canonical_sig)); + } + case KEY_CANONICAL_USE: + { + struct Canonical_use_type *ctype_use = (struct Canonical_use_type *)source_ctype; + + Declaration tdecl = ctype_use->decl; + 
Declaration mdecl = USE_DECL(module_use_use(type_inst_module(some_type_decl_type(tdecl)))); + + CanonicalType **substituted_actuals = (CanonicalType **)alloca(canonical_sig->num_actuals); + + int i, j, k; + for (i = 0; i < canonical_sig->num_actuals; i++) + { + substituted_actuals[i] = canonical_type_join(source_ctype, canonical_sig->actuals[i], false); + + Declaration f1 = canonical_type_decl(canonical_sig->actuals[i]); + + j = 0; + Declaration f2 = first_Declaration(some_class_decl_type_formals(mdecl)); + while (f2 != NULL) + { + if (f1 == f2) + { + k = 0; + Type ta = first_TypeActual(type_inst_type_actuals(some_type_decl_type(tdecl))); + while (ta != NULL) + { + if (j == k) + { + substituted_actuals[i] = canonical_type_base_type(canonical_type(ta)); + } + + k++; + ta = TYPE_NEXT(ta); + } + } + + j++; + f2 = DECL_NEXT(f2); + } + } + + return new_canonical_signature(canonical_sig->is_input, canonical_sig->is_var, canonical_sig->source_class, canonical_sig->num_actuals, substituted_actuals); + } + default: + fatal_error("Unexpected source canonical type with key of %d", source_ctype->key); + return NULL; + } +} + +static CanonicalSignatureSet join_canonical_signature_set_actuals(CanonicalType* source_ctype, CanonicalSignatureSet sig_set) +{ + CanonicalSignatureSet result = get_hash_cons_empty_set(); + + int i; + for (i = 0; i < sig_set->num_elements; i++) + { + result = add_hash_cons_set(join_canonical_signature_actuals(source_ctype, sig_set->elements[i]), result); + } + + return result; +} + +static CanonicalSignature *substitute_canonical_signature_actuals(CanonicalType *source_ctype, CanonicalSignature *canonical_sig) +{ + switch (source_ctype->key) + { + case KEY_CANONICAL_FUNC: + { + aps_warning(source_ctype, "Not sure how to run substitution of actuals given a function type"); + return canonical_sig; + } + case KEY_CANONICAL_QUAL: + { + struct Canonical_qual_type *ctype_qual = (struct Canonical_qual_type *)source_ctype; + return 
join_canonical_signature_actuals(ctype_qual->source, substitute_canonical_signature_actuals(new_canonical_type_use(ctype_qual->decl), canonical_sig)); + } + case KEY_CANONICAL_USE: + { + struct Canonical_use_type *ctype_use = (struct Canonical_use_type *)source_ctype; + + Declaration tdecl = ctype_use->decl; + Declaration mdecl = USE_DECL(module_use_use(type_inst_module(some_type_decl_type(tdecl)))); + + CanonicalType **substituted_actuals = (CanonicalType **)alloca(canonical_sig->num_actuals); + + int i, j, k; + for (i = 0; i < canonical_sig->num_actuals; i++) + { + substituted_actuals[i] = canonical_sig->actuals[i]; + + Declaration f1 = canonical_type_decl(canonical_sig->actuals[i]); + + j = 0; + Declaration f2 = first_Declaration(some_class_decl_type_formals(mdecl)); + while (f2 != NULL) + { + if (f1 == f2) + { + k = 0; + Type ta = first_TypeActual(type_inst_type_actuals(some_type_decl_type(tdecl))); + while (ta != NULL) + { + if (j == k) + { + substituted_actuals[i] = canonical_type_base_type(canonical_type(ta)); + } + + k++; + ta = TYPE_NEXT(ta); + } + } + + j++; + f2 = DECL_NEXT(f2); + } + } + + return new_canonical_signature(canonical_sig->is_input, canonical_sig->is_var, canonical_sig->source_class, canonical_sig->num_actuals, substituted_actuals); + } + default: + fatal_error("Unexpected source canonical type with key of %d", source_ctype->key); + return NULL; + } +} + +/** + * Given a canonical signature set and source canonical type, it would go through each and substitute + * @param source source canonical type + * @param sig_set canonical signature set + * @return substituted canonical signature set + */ +static CanonicalSignatureSet substitute_canonical_signature_set_actuals(CanonicalType *source_ctype, CanonicalSignatureSet sig_set) +{ + CanonicalSignatureSet result = get_hash_cons_empty_set(); + + int i; + for (i = 0; i < sig_set->num_elements; i++) + { + result = add_hash_cons_set(substitute_canonical_signature_actuals(source_ctype, 
sig_set->elements[i]), result); + } + + return result; +} + +/** + * Should accumulate the signatures in a restrictive way not additive manner + * @param ctype canonical type + * @return canonical signature set + */ +CanonicalSignatureSet infer_canonical_signatures(CanonicalType *ctype) +{ + if (!initialized) + { + fatal_error("canonical signature set is not initialized"); + } + + CanonicalSignatureSet result = get_hash_cons_empty_set(); + bool flag = true; + + if (ctype == NULL) + { + return result; + } + + do + { + switch (ctype->key) + { + case KEY_CANONICAL_USE: + { + struct Canonical_use_type *canonical_use_type = (struct Canonical_use_type *)ctype; + + Declaration decl = canonical_use_type->decl; + result = union_hash_const_set(result, from_declaration(decl)); + break; + } + case KEY_CANONICAL_QUAL: + { + struct Canonical_qual_type *canonical_qual_type = (struct Canonical_qual_type *)ctype; + + Declaration decl = canonical_qual_type->decl; + result = union_hash_const_set(result, join_canonical_signature_set_actuals(canonical_qual_type->source, from_declaration(decl))); + break; + } + case KEY_CANONICAL_FUNC: + { + struct Canonical_function_type *canonical_function_type = (struct Canonical_function_type *)ctype; + flag = false; + break; + } + default: + break; + } + + // TODO: lets revisit this 12/14/2020 + CanonicalType *base_type = canonical_type_base_type(ctype); + flag &= !(base_type == NULL || base_type == ctype); + ctype = base_type; + + } while (flag); + + return result; +} + +/** + * Initializes the necessary stuff needed to find the canonical signatures + * @param module_TYPE_decl module decl for TYPE[] + * @param module_PHYLUM_decl module decl for PHYLUM[] + */ +void initialize_canonical_signature(Declaration module_TYPE_decl, Declaration module_PHYLUM_decl) +{ + module_TYPE = module_TYPE_decl; + module_PHYLUM = module_PHYLUM_decl; + + initialized = true; +} diff --git a/analyze/canonical-signature.h b/analyze/canonical-signature.h new file mode 100644
index 00000000..fdf7742c --- /dev/null +++ b/analyze/canonical-signature.h @@ -0,0 +1,45 @@ +#ifndef CANONICAL_SIGNATURE_H +#define CANONICAL_SIGNATURE_H + +struct CanonicalSignature_type +{ + bool is_input; + bool is_var; + Declaration source_class; + int num_actuals; + CanonicalType *actuals[]; +}; + +typedef struct CanonicalSignature_type CanonicalSignature; + +typedef HASH_CONS_SET CanonicalSignatureSet; + +/** + * Should accumulate the signatures in a restrictive way not additive manner + * @param ctype canonical type + * @return canonical signature set + */ +CanonicalSignatureSet infer_canonical_signatures(CanonicalType *ctype); + +/** + * Initializes the necessary stuff needed to find the canonical signatures + * @param module_PHYLUM + * @param type_PHYLUM + */ +void initialize_canonical_signature(Declaration module_PHYLUM, Declaration type_PHYLUM); + +/** + * Given a canonical signature, prints it to the file output + * @param untyped untyped canonical signature + * @param f FILE output + */ +void print_canonical_signature(void *untyped, FILE *f); + +/** + * Given a canonical signature set, prints it to the file output + * @param untyped untyped canonical signature set + * @param f FILE output + */ +void print_canonical_signature_set(void *untyped, FILE *f); + +#endif diff --git a/analyze/canonical-type.c b/analyze/canonical-type.c new file mode 100755 index 00000000..f8eef3fc --- /dev/null +++ b/analyze/canonical-type.c @@ -0,0 +1,1091 @@ +#include +#include +#include +#include "aps-ag.h" +#include "aps-debug.h" +#include "jbb-alloc.h" + +static int BUFFER_SIZE = 1000; + +/** + * Joins two canonical types + * @param ctype_outer outer canonical type + * @param ctype_inner inner canonical type + * @param is_base_type flag indicating if we are resolving base type + * @return resulting canonical base type + */ +CanonicalType *canonical_type_join(CanonicalType *ctype_outer, CanonicalType *ctype_inner, bool is_base_type); + +/** + * Hash CanonicalType + * @param arg
CanonicalType + * @return hash value + */ +long canonical_type_hash(void *arg) +{ + if (arg == NULL) + { + return 0; + } + + CanonicalType *canonical_type = (CanonicalType *)arg; + + switch (canonical_type->key) + { + case KEY_CANONICAL_USE: + { + struct Canonical_use_type *canonical_use_type = (struct Canonical_use_type *)arg; + return hash_mix(canonical_use_type->key, (long)canonical_use_type->decl); + } + case KEY_CANONICAL_QUAL: + { + struct Canonical_qual_type *canonical_qual_type = (struct Canonical_qual_type *)arg; + return hash_mix(canonical_qual_type->key, hash_mix((long)canonical_qual_type->decl, canonical_type_hash(canonical_qual_type->source))); + } + case KEY_CANONICAL_FUNC: + { + struct Canonical_function_type *canonical_function_type = (struct Canonical_function_type *)arg; + + int index; + long param_types_hash = 0; + for (index = 0; index < canonical_function_type->num_formals; index++) + { + param_types_hash = hash_mix(param_types_hash, canonical_type_hash(canonical_function_type->param_types[index])); + } + + return hash_mix(canonical_function_type->key, hash_mix(canonical_function_type->num_formals, hash_mix(param_types_hash, canonical_type_hash(canonical_function_type->return_type)))); + } + default: + return 0; + } +} + +/** + * Equality test for CanonicalType + * @param a untyped CanonicalType + * @param b untyped CanonicalType + * @return boolean indicating the result of equality + */ +bool canonical_type_equal(void *a, void *b) +{ + if (a == NULL || b == NULL) + { + return false; + } + + CanonicalType *canonical_type_a = (CanonicalType *)a; + CanonicalType *canonical_type_b = (CanonicalType *)b; + + if (canonical_type_a->key != canonical_type_b->key) + { + return false; + } + + switch (canonical_type_a->key) + { + case KEY_CANONICAL_USE: + { + struct Canonical_use_type *canonical_use_type_a = (struct Canonical_use_type *)a; + struct Canonical_use_type *canonical_use_type_b = (struct Canonical_use_type *)b; + + return 
canonical_use_type_a->decl == canonical_use_type_b->decl; + } + case KEY_CANONICAL_QUAL: + { + struct Canonical_qual_type *canonical_qual_type_a = (struct Canonical_qual_type *)a; + struct Canonical_qual_type *canonical_qual_type_b = (struct Canonical_qual_type *)b; + + return (canonical_qual_type_a->decl == canonical_qual_type_b->decl) && + (canonical_qual_type_a->source == canonical_qual_type_b->source); + } + case KEY_CANONICAL_FUNC: + { + struct Canonical_function_type *canonical_function_type_a = (struct Canonical_function_type *)a; + struct Canonical_function_type *canonical_function_type_b = (struct Canonical_function_type *)b; + + if (canonical_function_type_a->num_formals != canonical_function_type_b->num_formals) + { + return false; + } + + bool params_equal = true; + int index; + + for (index = 0; index < canonical_function_type_a->num_formals && params_equal; index++) + { + params_equal &= canonical_type_equal(canonical_function_type_a->param_types[index], canonical_function_type_b->param_types[index]); + } + + return params_equal && (canonical_function_type_a->return_type == canonical_function_type_b->return_type); + } + default: + return false; + } +} + +/** + * Prints canonical type + * @param untyped CanonicalType + * @param f output stream + */ +void print_canonical_type(void *untyped, FILE *f) +{ + if (f == NULL) + { + f = stdout; + } + if (untyped == NULL) + { + fprintf(f, ""); + return; + } + + CanonicalType *canonical_type = (CanonicalType *)untyped; + + switch (canonical_type->key) + { + case KEY_CANONICAL_USE: + { + struct Canonical_use_type *canonical_use_type = (struct Canonical_use_type *)canonical_type; + + fprintf(f, "%s", decl_name(canonical_use_type->decl)); + break; + } + case KEY_CANONICAL_QUAL: + { + struct Canonical_qual_type *canonical_qual_type = (struct Canonical_qual_type *)canonical_type; + + print_canonical_type(canonical_qual_type->source, f); + + fprintf(f, "$%s", decl_name(canonical_qual_type->decl)); + break; + } + case 
KEY_CANONICAL_FUNC: + { + bool started = false; + struct Canonical_function_type *canonical_func_type = (struct Canonical_function_type *)canonical_type; + + fputc('(', f); + int i; + for (i = 0; i < canonical_func_type->num_formals; i++) + { + if (started) + { + fputc(',', f); + } + else + { + started = true; + } + print_canonical_type(canonical_func_type->param_types[i], f); + } + fputc(')', f); + fprintf(f, "=>"); + print_canonical_type(canonical_func_type->return_type, f); + break; + } + default: + break; + } +} + +// Hashcons table for CanonicalTypes +static struct hash_cons_table canonical_type_table = {canonical_type_hash, canonical_type_equal}; + +/** + * Counts number of Declarations + */ +static int count_declarations(Declarations declarations) +{ + switch (Declarations_KEY(declarations)) + { + default: + fatal_error("count_declarations crashed"); + case KEYnil_Declarations: + return 0; + case KEYlist_Declarations: + return 1; + case KEYappend_Declarations: + return count_declarations(append_Declarations_l1(declarations)) + count_declarations(append_Declarations_l2(declarations)); + } +} + +/** + * Creates an instance of Canonical_use + * @param decl + * @return Canonical_use + */ +CanonicalType *new_canonical_type_use(Declaration decl) +{ + struct Canonical_use_type ctype_use = {KEY_CANONICAL_USE, decl}; + void *memory = hash_cons_get(&ctype_use, sizeof(ctype_use), &canonical_type_table); + return (CanonicalType *)memory; +} + +/** + * Creates an instance of Canonical_qual_type + * @param from + * @param decl + * @return Canonical_qual_type + */ +CanonicalType *new_canonical_type_qual(CanonicalType *from, Declaration decl) +{ + struct Canonical_qual_type ctype_qual = {KEY_CANONICAL_QUAL, decl, from}; + void *memory = hash_cons_get(&ctype_qual, sizeof(ctype_qual), &canonical_type_table); + return (CanonicalType *)memory; +} + +/** + * Check if node is inside a module + * @param mdecl module declaration + * @param node node to test if it is in the module + 
* @return boolean indicating whether node is inside module or not + */ +static bool is_inside_module(Declaration mdecl, void *node) +{ + void *thing = node; + while ((thing = tnode_parent(thing)) != NULL) + { + if (ABSTRACT_APS_tnode_phylum(thing) == KEYDeclaration && Declaration_KEY((Declaration)thing) == KEYmodule_decl && (Declaration)thing == mdecl) + { + return true; + } + } + + return false; +} + +/** + * Clone canonical function type + * @param canonical function type + * @return cloned canonical function type + */ +struct Canonical_function_type *shallow_clone_canonical_function_types(struct Canonical_function_type *canonical_function_type) +{ + size_t my_size = sizeof(struct Canonical_function_type) + canonical_function_type->num_formals * (sizeof(CanonicalType *)); + + struct Canonical_function_type *result = (struct Canonical_function_type *)malloc(my_size); + + result->key = KEY_CANONICAL_FUNC; + result->num_formals = canonical_function_type->num_formals; + result->return_type = canonical_function_type->return_type; + + int i; + for (i = 0; i < canonical_function_type->num_formals; i++) + { + result->param_types[i] = canonical_function_type->param_types[i]; + } + + return result; +} + +/** + * Returns the Declaration member of a CanonicalType + * @param canonical_type source canonical type + * @return declaration part of canonical type + */ +Declaration canonical_type_decl(CanonicalType *canonical_type) +{ + if (canonical_type == NULL) + { + return NULL; + } + + switch (canonical_type->key) + { + case KEY_CANONICAL_USE: + { + struct Canonical_use_type *canonical_use_type = (struct Canonical_use_type *)canonical_type; + return canonical_use_type->decl; + } + case KEY_CANONICAL_QUAL: + { + struct Canonical_qual_type *canonical_qual_use_type = (struct Canonical_qual_type *)canonical_type; + return canonical_qual_use_type->decl; + } + default: + aps_error(canonical_type, "Failed to find the decl for CanonicalType key:%d", canonical_type->key); + return NULL; 
+ } +} + +/** + * Check if declaration is a Result of some module without using declaration name (e.g. decl_name() == "Result") + * @param decl declaration + * @return boolean indicating if declaration is a result of some outer module + */ +static bool is_some_result_decl(Declaration decl) +{ + void *current = decl; + Declaration current_decl; + while (current != NULL && (current = tnode_parent(current)) != NULL) + { + switch (ABSTRACT_APS_tnode_phylum(current)) + { + case KEYDeclaration: + current_decl = (Declaration)current; + switch (Declaration_KEY(current_decl)) + { + case KEYmodule_decl: + return some_class_decl_result_type(current_decl) == decl; + default: + break; + } + default: + break; + } + } + + return false; +} + +/** + * Canonical type given a use + * @param use any use + * @return canonical type use + */ +static CanonicalType *canonical_type_use(Use use) +{ + Declaration td = Use_info(use)->use_decl; + + switch (Declaration_KEY(td)) + { + case KEYsome_type_decl: + { + switch (Type_KEY(some_type_decl_type(td))) + { + case KEYno_type: + case KEYtype_inst: + case KEYprivate_type: + return new_canonical_type_use(td); + case KEYtype_use: + { + // This will catch if we are going from D -> Result -> T + // In that case we just want to result Result + Declaration nested_use_decl = USE_DECL(type_use_use(some_type_decl_type(td))); + + if (is_some_result_decl(nested_use_decl)) + { + return new_canonical_type_use(nested_use_decl); + } + + return canonical_type(some_type_decl_type(td)); + } + + case KEYfunction_type: + return canonical_type(some_type_decl_type(td)); + case KEYremote_type: + return canonical_type(remote_type_nodetype(some_type_decl_type(td))); + default: + fatal_error("Unknown type use_decl type key %d", (int)Type_KEY(some_type_decl_type(td))); + return NULL; + } + } + case KEYtype_replacement: // XXX need to rethink this + return canonical_type(type_replacement_as(td)); + case KEYtype_renaming: + return canonical_type(type_renaming_old(td)); + 
case KEYtype_formal: + return new_canonical_type_use(td); + default: + aps_error(td, "Not sure how handle this decl type while finding canonical type use"); + } + + return NULL; +} + +/** + * Returns actual Type given formal declaration, type declaration and module declaration + * @param tdecl type declaration + * @param mdecl module declaration + * @param formal formal declaration + * @return actual Type (or NULL if it does not find a match) + */ +static Type get_actual_given_formal(Declaration tdecl, Declaration mdecl, Declaration formal) +{ + Declaration f; + Type actual; + + for (f = first_Declaration(some_class_decl_type_formals(mdecl)), + actual = first_TypeActual(type_inst_type_actuals(some_type_decl_type(tdecl))); + f != NULL; f = DECL_NEXT(f), actual = TYPE_NEXT(actual)) + { + if (formal == f) + { + return actual; + } + } + + fatal_error("Not sure how to find the actual given formal"); + + return NULL; +} + +/** + * Canonical type given a qual use + * @param use any use + * @return canonical type qual use + */ +static CanonicalType *canonical_type_qual_use(Use use) +{ + CanonicalType *ctype_outer = canonical_type(qual_use_from(use)); + CanonicalType *ctype_inner = canonical_type_use(use); + + return canonical_type_join(ctype_outer, ctype_inner, false); +} + +/** + * Canonical type given a function type + * @param use any use + * @return canonical type qual use + */ +static CanonicalType *canonical_type_function(Type t) +{ + int num_formals = count_declarations(function_type_formals(t)); + CanonicalType *return_type = canonical_type(function_type_return_type(t)); + + size_t my_size = sizeof(struct Canonical_function_type) + num_formals * (sizeof(CanonicalType *)); + + struct Canonical_function_type *ctype_function = (struct Canonical_function_type *)alloca(my_size); + + ctype_function->key = KEY_CANONICAL_FUNC; + ctype_function->num_formals = num_formals; + ctype_function->return_type = return_type; + + int index = 0; + Declaration f = 
first_Declaration(function_type_formals(t)); + + while (f != NULL) + { + switch (Declaration_KEY(f)) + { + case KEYseq_formal: + { + fatal_error("Not sure how to handle KEYseq_formal"); + ctype_function->param_types[index++] = canonical_type(seq_formal_type(f)); + break; + } + case KEYnormal_formal: + { + ctype_function->param_types[index++] = canonical_type(normal_formal_type(f)); + break; + } + default: + fatal_error("Not sure to handle the formal while finding canonical function type"); + return NULL; + } + + f = DECL_NEXT(f); + } + + void *memory = hash_cons_get(ctype_function, my_size, &canonical_type_table); + + return (CanonicalType *)memory; +} + +/** + * Converts a type into a canonical type + * @param t Type + * @return CanonicalType + */ +CanonicalType *canonical_type(Type t) +{ + if (t == NULL) + { + aps_warning(t, "NULL type was passed into canonical_type"); + return NULL; + } + + switch (Type_KEY(t)) + { + case KEYremote_type: + return canonical_type(remote_type_nodetype(t)); + case KEYtype_inst: + fatal_error("CanonicalType requested for type instance"); + return NULL; + case KEYtype_use: + { + Use use = type_use_use(t); + switch (Use_KEY(use)) + { + case KEYuse: + return canonical_type_use(use); + case KEYqual_use: + return canonical_type_qual_use(use); + default: + aps_error(t, "Case of type use %d is not implemented in canonical_type() for use", (int)Use_KEY(use)); + return NULL; + } + } + case KEYfunction_type: + return canonical_type_function(t); + default: + aps_error(t, "Case of type %d is not implemented in canonical_type()", (int)Type_KEY(t)); + return NULL; + } +} + +/** + * Returns the base type of a canonical type + * @param canonicalType + * @return base type of a canonicalType + */ +CanonicalType *canonical_type_base_type(CanonicalType *ctype) +{ + switch (ctype->key) + { + case KEY_CANONICAL_USE: + { + struct Canonical_use_type *ctype_use = (struct Canonical_use_type *)ctype; + Declaration decl = ctype_use->decl; + + switch 
(Declaration_KEY(decl)) + { + case KEYsome_type_formal: + return ctype; + case KEYsome_type_decl: + { + Type t = some_type_decl_type(decl); + + switch (Type_KEY(t)) + { + case KEYno_type: + case KEYprivate_type: + return ctype; // No change + case KEYtype_use: + return canonical_type_base_type(canonical_type(t)); + case KEYtype_inst: + { + Declaration tdecl = decl; + Declaration mdecl = USE_DECL(module_use_use(type_inst_module(some_type_decl_type(tdecl)))); + if (module_decl_generating(mdecl)) + { + return ctype; // No change + } + else + { + return canonical_type_join(new_canonical_type_use(decl), canonical_type_base_type(new_canonical_type_use(some_class_decl_result_type(mdecl))), true); + } + } + default: + break; + } + } + default: + fatal_error("canonical_type_base_type failed"); + } + } + case KEY_CANONICAL_QUAL: + { + struct Canonical_qual_type *ctype_qual = (struct Canonical_qual_type *)ctype; + Declaration decl = ctype_qual->decl; + + switch (Declaration_KEY(decl)) + { + case KEYsome_type_formal: + return ctype; + case KEYsome_type_decl: + { + Type t = some_type_decl_type(decl); + + switch (Type_KEY(t)) + { + case KEYno_type: + case KEYprivate_type: + return ctype; // No change + case KEYtype_use: + return canonical_type_base_type(canonical_type(t)); + case KEYtype_inst: + { + Declaration tdecl = decl; + Declaration mdecl = USE_DECL(module_use_use(type_inst_module(some_type_decl_type(tdecl)))); + if (module_decl_generating(mdecl)) + { + return ctype; // No change + } + else + { + return canonical_type_join(ctype_qual->source, canonical_type_join(new_canonical_type_use(decl), canonical_type_base_type(new_canonical_type_use(some_class_decl_result_type(mdecl))), true), true); + } + } + default: + break; + } + } + default: + break; + } + } + case KEY_CANONICAL_FUNC: + { + struct Canonical_function_type *canonical_type_function = (struct Canonical_function_type *)ctype; + struct Canonical_function_type *result = 
shallow_clone_canonical_function_types(canonical_type_function); + + size_t my_size = sizeof(struct Canonical_function_type) + canonical_type_function->num_formals * (sizeof(CanonicalType *)); + + result->return_type = canonical_type_base_type(result->return_type); + int i; + for (i = 0; i < result->num_formals; i++) + { + result->param_types[i] = canonical_type_base_type(result->param_types[i]); + } + + void *memory = hash_cons_get(result, my_size, &canonical_type_table); + return (CanonicalType *)memory; + } + default: + fatal_error("canonical_type_base_type failed"); + return NULL; + } +} + +/** + * Combine canonical type into qual types safely and recursively + * Note that structure of two canonical types dictates this + * @param ctype_left + * @param ctype_right + * @return combined canonical types + */ +static CanonicalType *canonical_type_left_refactor(CanonicalType *ctype_left, CanonicalType *ctype_right) +{ + switch (ctype_right->key) + { + case KEY_CANONICAL_USE: + { + struct Canonical_use_type *ctype_right_use = (struct Canonical_use_type *)ctype_right; + return new_canonical_type_qual(ctype_left, ctype_right_use->decl); + } + case KEY_CANONICAL_QUAL: + { + struct Canonical_qual_type *ctype_right_qual = (struct Canonical_qual_type *)ctype_right; + return new_canonical_type_qual(canonical_type_left_refactor(ctype_left, ctype_right_qual->source), ctype_right_qual->decl); + } + default: + { + aps_error(ctype_right, "Not sure how to do a canonical_type_left_refactor between %d and %d", ctype_left->key, ctype_right->key); + return NULL; + } + } +} + +/** + * Join of any type of canonical type with a function canonical type + * @param ctype_outer outer canonical type any + * @param ctype_inner inner canonical type function + * @param is_base_type + * @return resulting canonical base type + */ +static CanonicalType *canonical_type_any_function_join(CanonicalType *ctype_outer, struct Canonical_function_type *ctype_inner, bool is_base_type) +{ + struct 
Canonical_function_type *canonical_type_function = (struct Canonical_function_type *)shallow_clone_canonical_function_types(ctype_inner); + + size_t my_size = sizeof(struct Canonical_function_type) + canonical_type_function->num_formals * (sizeof(CanonicalType *)); + + canonical_type_function->return_type = canonical_type_join(ctype_outer, canonical_type_function->return_type, is_base_type); + int i; + for (i = 0; i < canonical_type_function->num_formals; i++) + { + canonical_type_function->param_types[i] = canonical_type_join(ctype_outer, canonical_type_function->param_types[i], is_base_type); + } + + void *memory = hash_cons_get(canonical_type_function, my_size, &canonical_type_table); + + return (CanonicalType *)memory; +} + +static Declaration get_module(Declaration decl) +{ + void *thing = decl; + while ((thing = tnode_parent(thing)) != NULL) + { + if (ABSTRACT_APS_tnode_phylum(thing) == KEYDeclaration && Declaration_KEY((Declaration)thing) == KEYmodule_decl) + { + return thing; + } + } + return NULL; +} + +/** + * Join of two canonical type use + * @param outer canonical type use + * @param inner canonical type use + * @param is_base_type true means base type requested, false is the opposite + * @return resulting canonical type + */ +static CanonicalType *canonical_type_use_use_join(struct Canonical_use_type *ctype_outer, struct Canonical_use_type *ctype_inner, bool is_base_type) +{ + Declaration mdecl = NULL; + Declaration tdecl = NULL; + + // Tries to resolve mdecl and tdecl from ctype_outer + Declaration some_decl = canonical_type_decl((CanonicalType *)ctype_outer); + switch (Declaration_KEY(some_decl)) + { + case KEYsome_type_decl: + { + tdecl = some_decl; + mdecl = USE_DECL(module_use_use(type_inst_module(some_type_decl_type(tdecl)))); + break; + } + default: + { + char outer_type_to_str[BUFFER_SIZE]; + char inner_type_to_str[BUFFER_SIZE]; + memset(outer_type_to_str, 0, sizeof(outer_type_to_str)); // Ensure null-termination + memset(inner_type_to_str, 0, 
sizeof(inner_type_to_str)); // Ensure null-termination + FILE *f; + + f = fmemopen(outer_type_to_str, sizeof(outer_type_to_str), "w"); + print_canonical_type(ctype_outer, f); + fclose(f); + + f = fmemopen(inner_type_to_str, sizeof(inner_type_to_str), "w"); + print_canonical_type(ctype_inner, f); + fclose(f); + + aps_warning(NULL, "Not sure how to handle this type of canonical type while joining use with use %s and %s", outer_type_to_str, inner_type_to_str); + return (CanonicalType *)ctype_inner; + } + } + + Declaration decl = ctype_inner->decl; + + // If decl is not inside the module then short-circuit + if (!is_inside_module(mdecl, decl)) + { + return (CanonicalType *)ctype_inner; + } + + if (!is_base_type) + { + // If decl is the Result of module then return type decl + if (some_class_decl_result_type(mdecl) == decl) + { + return new_canonical_type_use(tdecl); + } + } + + // Going deeper will be a dead-end or G0$Result + if (module_decl_generating(mdecl) && decl == some_class_decl_result_type(mdecl)) + { + return (CanonicalType *)ctype_outer; + } + + switch (Declaration_KEY(decl)) + { + case KEYsome_type_formal: + return canonical_type(get_actual_given_formal(tdecl, mdecl, decl)); + case KEYsome_type_decl: + { + Type t = some_type_decl_type(decl); + switch (Type_KEY(t)) + { + case KEYno_type: + case KEYprivate_type: + case KEYtype_inst: + return new_canonical_type_qual((CanonicalType *)ctype_outer, decl); + case KEYtype_use: + return canonical_type_join((CanonicalType *)ctype_outer, canonical_type(t), is_base_type); + default: + aps_error(t, "Case of type use %d is not implemented in canonical_type_base_type() for type", (int)Type_KEY(t)); + return NULL; + } + } + default: + aps_error(tdecl, "Case of type use %d is not implemented in canonical_type_base_type() for type", (int)Declaration_KEY(tdecl)); + return NULL; + } +} + +/** + * Join of two canonical type use + * @param outer canonical type qual + * @param inner canonical type use + * @return resulting 
canonical base type + */ +static CanonicalType *canonical_type_qual_use_join(struct Canonical_qual_type *ctype_outer, struct Canonical_use_type *ctype_inner, bool is_base_type) +{ + Declaration mdecl = NULL; + Declaration tdecl = NULL; + + // Tries to resolve mdecl and tdecl from ctype_outer + Declaration some_decl = canonical_type_decl((CanonicalType *)ctype_outer); + switch (Declaration_KEY(some_decl)) + { + case KEYsome_type_decl: + { + tdecl = some_decl; + mdecl = USE_DECL(module_use_use(type_inst_module(some_type_decl_type(tdecl)))); + break; + } + default: + fatal_error("Not sure what type of canonical type it is"); + } + + Declaration decl = ctype_inner->decl; + + // If decl is not inside the module then short-circuit + if (!is_inside_module(mdecl, decl)) + { + return new_canonical_type_use(decl); + } + + if (!is_base_type) + { + // If decl is the Result of module then return type decl + if (some_class_decl_result_type(mdecl) == decl) + { + return new_canonical_type_use(tdecl); + } + } + + if (module_decl_generating(mdecl) && decl == some_class_decl_result_type(mdecl)) + { + return (CanonicalType *)ctype_outer; + } + + switch (Declaration_KEY(decl)) + { + case KEYsome_type_decl: + { + Type tdecl_type = some_type_decl_type(decl); + switch (Type_KEY(tdecl_type)) + { + case KEYtype_inst: + { + Declaration nested_mdecl = USE_DECL(module_use_use(type_inst_module(tdecl_type))); + + if (module_decl_generating(nested_mdecl)) + { + CanonicalType *first = canonical_type_join(new_canonical_type_use(ctype_outer->decl), new_canonical_type_use(decl), is_base_type); + CanonicalType *second = canonical_type_join(ctype_outer->source, first, is_base_type); + return second; + } + else + { + CanonicalType *first = canonical_type_join(new_canonical_type_use(decl), new_canonical_type_use(some_class_decl_result_type(nested_mdecl)), is_base_type); + CanonicalType *second = canonical_type_join(new_canonical_type_use(ctype_outer->decl), first, is_base_type); + CanonicalType *third = 
canonical_type_join(ctype_outer->source, second, is_base_type); + + return third; + } + } + case KEYtype_use: + return canonical_type_join((CanonicalType *)ctype_outer, canonical_type(tdecl_type), is_base_type); + case KEYno_type: + case KEYprivate_type: + return new_canonical_type_qual((CanonicalType *)ctype_outer, decl); + default: + aps_error(tdecl_type, "Unexpected type %d in resolve_canonical_base_type()", (int)Type_KEY(tdecl_type)); + return NULL; + } + } + case KEYsome_type_formal: + return canonical_type_join(ctype_outer->source, canonical_type(get_actual_given_formal(tdecl, mdecl, decl)), is_base_type); + default: + aps_error(decl, "Unexpected decl %d in resolve_canonical_base_type()", (int)Declaration_KEY(decl)); + return NULL; + } +} + +/** + * Join of two canonical type use + * @param outer canonical type qual + * @param inner canonical type qual + * @return resulting canonical base type + */ +static CanonicalType *canonical_type_qual_qual_join(struct Canonical_qual_type *ctype_outer, struct Canonical_qual_type *ctype_inner, bool is_base_type) +{ + Declaration mdecl = NULL; + Declaration tdecl = NULL; + + // Tries to resolve mdecl and tdecl from ctype_outer + Declaration some_decl = canonical_type_decl((CanonicalType *)ctype_outer); + switch (Declaration_KEY(some_decl)) + { + case KEYsome_type_decl: + { + tdecl = some_decl; + mdecl = USE_DECL(module_use_use(type_inst_module(some_type_decl_type(tdecl)))); + break; + } + default: + fatal_error("Not sure what type of canonical type it is"); + } + + Declaration decl = ctype_inner->decl; + + // If decl is not inside the module then short-circuit + if (!is_inside_module(mdecl, decl)) + { + return new_canonical_type_use(decl); + } + + if (!is_base_type) + { + // If decl is the Result of module then return type decl + if (some_class_decl_result_type(mdecl) == decl) + { + return new_canonical_type_use(tdecl); + } + } + + // Going deeper will be a dead-end or G0$Result + if (module_decl_generating(mdecl) && decl 
== some_class_decl_result_type(mdecl)) + { + return (CanonicalType *)ctype_outer; + } + + switch (Declaration_KEY(decl)) + { + case KEYsome_type_decl: + { + Type tdecl_type = some_type_decl_type(decl); + switch (Type_KEY(tdecl_type)) + { + case KEYtype_inst: + { + Declaration nested_mdecl = USE_DECL(module_use_use(type_inst_module(tdecl_type))); + + if (module_decl_generating(nested_mdecl)) + { + CanonicalType *first = canonical_type_join(new_canonical_type_use(ctype_outer->decl), new_canonical_type_use(decl), is_base_type); + CanonicalType *second = canonical_type_join(ctype_outer->source, first, is_base_type); + + return second; + } + else + { + CanonicalType *first = canonical_type_join(new_canonical_type_use(decl), new_canonical_type_use(some_class_decl_result_type(nested_mdecl)), is_base_type); + CanonicalType *second = canonical_type_join(ctype_inner->source, first, is_base_type); + CanonicalType *third = canonical_type_join(new_canonical_type_use(ctype_outer->decl), second, is_base_type); + CanonicalType *fourth = canonical_type_join(ctype_outer->source, third, is_base_type); + + return fourth; + } + } + case KEYtype_use: + return canonical_type_join((CanonicalType *)ctype_outer, canonical_type(tdecl_type), is_base_type); + case KEYno_type: + case KEYprivate_type: + return canonical_type_left_refactor((CanonicalType *)ctype_outer, (CanonicalType *)ctype_inner); + default: + aps_error(tdecl_type, "Unexpected type %d in resolve_canonical_base_type()", (int)Type_KEY(tdecl_type)); + return NULL; + } + } + case KEYsome_type_formal: + return canonical_type_join(ctype_outer->source, canonical_type(get_actual_given_formal(tdecl, mdecl, decl)), is_base_type); + default: + aps_error(decl, "Unexpected decl %d in resolve_canonical_base_type()", (int)Declaration_KEY(decl)); + return NULL; + } +} + +/** + * Joining canonical types in case-by-case analysis fashion + * @param ctype_outer outer canonical type + * @param ctype_inner inner canonical type + * @param 
is_base_type true means base type requested, false is the opposite + * @return resulting canonical base type + */ +CanonicalType *canonical_type_join(CanonicalType *ctype_outer, CanonicalType *ctype_inner, bool is_base_type) +{ + switch (ctype_outer->key) + { + case KEY_CANONICAL_USE: + switch (ctype_inner->key) + { + case KEY_CANONICAL_USE: + return canonical_type_use_use_join((struct Canonical_use_type *)ctype_outer, (struct Canonical_use_type *)ctype_inner, is_base_type); + case KEY_CANONICAL_FUNC: + return canonical_type_any_function_join(ctype_outer, (struct Canonical_function_type *)ctype_inner, is_base_type); + case KEY_CANONICAL_QUAL: + return canonical_type_left_refactor(ctype_outer, ctype_inner); + default: + fatal_error("canonical_type_join failed"); + } + case KEY_CANONICAL_QUAL: + switch (ctype_inner->key) + { + case KEY_CANONICAL_USE: + return canonical_type_qual_use_join((struct Canonical_qual_type *)ctype_outer, (struct Canonical_use_type *)ctype_inner, is_base_type); + case KEY_CANONICAL_QUAL: + return canonical_type_qual_qual_join((struct Canonical_qual_type *)ctype_outer, (struct Canonical_qual_type *)ctype_inner, is_base_type); + case KEY_CANONICAL_FUNC: + return canonical_type_any_function_join(ctype_outer, (struct Canonical_function_type *)ctype_inner, is_base_type); + } + default: + fatal_error("canonical_type_join failed"); + return NULL; + } +} + +/** + * Comparator for two canonical types + * @param ctype1 first canonical type + * @param ctype2 second canonical type + * @return integer value representing the comparison + */ +int canonical_type_compare(CanonicalType *ctype1, CanonicalType *ctype2) +{ + if (ctype1->key != ctype2->key) + { + return ctype1->key - ctype2->key; + } + + switch (ctype1->key) + { + case KEY_CANONICAL_USE: + { + struct Canonical_use_type *canonical_use_type1 = (struct Canonical_use_type *)ctype1; + struct Canonical_use_type *canonical_use_type2 = (struct Canonical_use_type *)ctype2; + + return 
tnode_line_number(canonical_use_type1->decl) - tnode_line_number(canonical_use_type2->decl); + } + case KEY_CANONICAL_QUAL: + { + struct Canonical_qual_type *canonical_qual_type1 = (struct Canonical_qual_type *)ctype1; + struct Canonical_qual_type *canonical_qual_type2 = (struct Canonical_qual_type *)ctype2; + + if (tnode_line_number(canonical_qual_type1->decl) != tnode_line_number(canonical_qual_type2->decl)) + { + return tnode_line_number(canonical_qual_type1->decl) - tnode_line_number(canonical_qual_type2->decl); + } + + return canonical_type_compare(canonical_qual_type1->source, canonical_qual_type2->source); + } + case KEY_CANONICAL_FUNC: + { + struct Canonical_function_type *canonical_function_type1 = (struct Canonical_function_type *)ctype1; + struct Canonical_function_type *canonical_function_type2 = (struct Canonical_function_type *)ctype2; + + if (canonical_function_type1->num_formals != canonical_function_type2->num_formals) + { + return canonical_function_type1->num_formals - canonical_function_type2->num_formals > 0 ? 
1 : -1; + } + + int return_type_comp = canonical_type_compare(canonical_function_type1->return_type, canonical_function_type2->return_type); + if (return_type_comp != 0) + { + return return_type_comp; + } + + int i; + for (i = 0; i < canonical_function_type1->num_formals; i++) + { + int formal_type_comp = canonical_type_compare(canonical_function_type1->param_types[i], canonical_function_type2->param_types[i]); + if (formal_type_comp != 0) + { + return formal_type_comp; + } + } + + return 0; + } + default: + fatal_error("canonical_type_compare failed"); + return 0; + } +} diff --git a/analyze/canonical-type.h b/analyze/canonical-type.h new file mode 100755 index 00000000..68338162 --- /dev/null +++ b/analyze/canonical-type.h @@ -0,0 +1,97 @@ +#ifndef CANONICAL_TYPE_H +#define CANONICAL_TYPE_H + +#define KEY_CANONICAL_FUNC 0 +#define KEY_CANONICAL_USE 1 +#define KEY_CANONICAL_QUAL 2 + +struct canonicalTypeBase +{ + int key; +}; +typedef struct canonicalTypeBase CanonicalType; + +// Type function_type(Declarations formals,Declarations return_values) +struct Canonical_function_type +{ + int key; /* KEY_CANONICAL_FUNC */ + int num_formals; + CanonicalType *return_type; + CanonicalType *param_types[]; +}; + +struct Canonical_qual_type +{ + int key; /* KEY_CANONICAL_QUAL */ + Declaration decl; + CanonicalType *source; +}; + +struct Canonical_use_type +{ + int key; /* KEY_CANONICAL_USE */ + Declaration decl; +}; + +typedef struct CanonicalTypeSet_type CanonicalTypeSet; + +/** + * Given an AST Type, it calculates a hashconsed canonical type + * @param type Type AST + * @return Hashconsed canonical type + */ +CanonicalType *canonical_type(Type type); + +/** + * Given a hashconsed canonical type, it returns a canonical base type + * @param canonical_type Hashconsed canonical type + * @return Hashconsed canonical base type + */ +CanonicalType *canonical_type_base_type(CanonicalType *canonical_type); + +/** + * Joins two canonical types and returns a hashconsed canonical type 
+ * @param ctype_outer outer canonical type + * @param ctype_inner inner canonical type + * @param is_base_type flag indicating whether result should be base_type or not + * @return Hashconsed canonical type + */ +CanonicalType *canonical_type_join(CanonicalType *ctype_outer, CanonicalType *ctype_inner, bool is_base_type); + +/** + * Creates a new canonical type use + * @param decl Declaration + * @return Hashconsed canonical type use + */ +CanonicalType *new_canonical_type_use(Declaration decl); + +/** + * Compares two canonical types + * @param ctype1 Canonical type A + * @param ctype2 Canonical type B + * @return Integer value representing comparison of two canonical types + */ +int canonical_type_compare(CanonicalType *ctype1, CanonicalType *ctype2); + +/** + * Given a canonical type, it returns a Declaration + * @param canonical_type Canonical type + * @return Declaration + */ +Declaration canonical_type_decl(CanonicalType *canonical_type); + +/** + * Given an untyped CanonicalType, it returns a its hash value + * @arg untyped CanonicalType + * @return hash value + */ +long canonical_type_hash(void *arg); + +/** + * Given a canonical type, prints it to the file output + * @param untyped untyped canonical type + * @return f FILE output + */ +void print_canonical_type(void *untyped, FILE *f); + +#endif diff --git a/analyze/jbb-alist.h b/analyze/jbb-alist.h index 158be8e1..e320f2e0 100644 --- a/analyze/jbb-alist.h +++ b/analyze/jbb-alist.h @@ -1,3 +1,6 @@ +#ifndef JBB_ALIST_H +#define JBB_ALIST_H + typedef struct alist *ALIST; extern ALIST acons(void *key, void *value, ALIST rest); extern void *assoc(void *key, ALIST alist); @@ -11,3 +14,4 @@ extern void* alist_key(ALIST); extern void* alist_value(ALIST); extern ALIST alist_next(ALIST); +#endif diff --git a/analyze/jbb-table.h b/analyze/jbb-table.h index 9c7621b2..c023b5d7 100644 --- a/analyze/jbb-table.h +++ b/analyze/jbb-table.h @@ -1,3 +1,6 @@ +#ifndef JBB_TABLE_H +#define JBB_TABLE_H + typedef struct table 
*TABLE; extern TABLE new_table(); @@ -7,3 +10,5 @@ extern void set(TABLE,void *key, void *value); #endif extern void *table_get(TABLE,void *key); extern void table_set(TABLE,void *key, void *value); + +#endif diff --git a/analyze/jbb-vector.h b/analyze/jbb-vector.h index 34ede06b..39612ad2 100644 --- a/analyze/jbb-vector.h +++ b/analyze/jbb-vector.h @@ -1,2 +1,7 @@ +#ifndef JBB_VECTOR_H +#define JBB_VECTOR_H + #define VECTOR(type) struct { type *array; int length; } #define VECTORALLOC(v,type,n) (v).array=(type *)HALLOC(n*sizeof(type)); (v).length=n + +#endif diff --git a/aps2scala/Makefile b/aps2scala/Makefile index d182fa53..f2b08a08 100644 --- a/aps2scala/Makefile +++ b/aps2scala/Makefile @@ -1,22 +1,28 @@ CPP=g++ -CPPFLAGS=-Wall -g -DUSING_CXX -I../parse -I../analyze +CPPFLAGS=-Wall -g -Wno-unused-variable -DUSING_CXX -DAPS2SCALA -I../parse -I../analyze -I../codegen -I../utilities -APS2SCALAOBJS = aps2scala.o dump-scala.o implement.o dyn-impl.o static-impl.o -APS2SCALALIBS = ../lib/aps-lib.o ../lib/aps-ag.a +APS2SCALAOBJS = aps2scala.o dump-scala.o implement.o dyn-impl.o static-impl.o static-scc-impl.o synth-impl.o +APS2SCALALIBS = ../lib/aps-lib.o ../lib/aps-ag.a ../utilities/utilities.o aps2scala : ${APS2SCALAOBJS} ${APS2SCALALIBS} ${CPP} ${CPPFLAGS} ${APS2SCALAOBJS} ${APS2SCALALIBS} -o aps2scala -${APS2SCALAOBJS} : implement.h dump-scala.h - -%.cc : RCS/%.cc,v - co $< - -%.h : RCS/%.h,v - co $< +${APS2SCALAOBJS} : dump-scala.h install: aps2scala mv aps2scala ../bin/. 
+synth-impl.o : ../codegen/synth-impl.cc + ${CPP} -c ${CPPFLAGS} $< -o $@ + +static-impl.o : ../codegen/static-impl.cc + ${CPP} -c ${CPPFLAGS} $< -o $@ + +static-scc-impl.o : ../codegen/static-scc-impl.cc + ${CPP} -c ${CPPFLAGS} $< -o $@ + +implement.o : ../codegen/implement.cc + ${CPP} -c ${CPPFLAGS} $< -o $@ + clean: rm -f aps2scala *.o core diff --git a/aps2scala/aps2scala.cc b/aps2scala/aps2scala.cc index 1d7c104c..7e21fa8e 100644 --- a/aps2scala/aps2scala.cc +++ b/aps2scala/aps2scala.cc @@ -12,8 +12,6 @@ extern "C" { #include "implement.h" #include "version.h" -using namespace std; - extern "C" { int callset_AI(Declaration module, STATE *s) { return 0; } @@ -30,6 +28,7 @@ void usage() { fprintf(stderr," -DH list debugging flags\n"); fprintf(stderr," -V increase verbosity of generation code\n"); fprintf(stderr," -G add Debug calls for every function\n"); + fprintf(stderr," -C SCC chunk static scheduling\n"); fprintf(stderr," -p path set the APSPATH (overriding env. variable)\n"); exit(1); } @@ -39,7 +38,29 @@ extern int aps_yyparse(void); } Implementation* impl; -bool static_schedule = 0; +bool static_schedule = false; +bool is_tree_only_program = false; +bool synth_implementation = false; + +static void* program_is_tree_only(void *scope, void *node) { + if (ABSTRACT_APS_tnode_phylum(node) == KEYDeclaration) { + Declaration decl = (Declaration)node; + switch (Declaration_KEY(decl)) { + case KEYsome_class_decl: { + is_tree_only_program |= first_Declaration(some_class_decl_type_formals(decl)) == NULL; + return NULL; + } + default: + break; + } + } + + return scope; +} + +bool should_include_ast_for_objects() { + return !static_schedule && !is_tree_only_program; +} int main(int argc,char **argv) { argv0 = argv[0]; @@ -58,11 +79,20 @@ int main(int argc,char **argv) { } else if (streq(argv[i],"-S") || streq(argv[i],"--static")) { static_schedule = true; continue; + } else if (streq(argv[i],"-C") || streq(argv[i],"--static-scc")) { + static_schedule = true; + 
static_scc_schedule = true; + continue; + } else if (streq(argv[i],"-F") || streq(argv[i],"--synth")) { + synth_implementation = true; + anc_analysis = true; + continue; } else if (streq(argv[i],"-V") || streq(argv[i],"--verbose")) { ++verbose; continue; } else if (streq(argv[i],"-G") || streq(argv[i],"--debug")) { ++debug; + include_comments = true; continue; } else if (streq(argv[i],"-p") || streq(argv[i],"--apspath")) { set_aps_path(argv[++i]); @@ -83,13 +113,18 @@ int main(int argc,char **argv) { bind_Program(p); aps_check_error("binding"); type_Program(p); + traverse_Program(program_is_tree_only, p, p); aps_check_error("type"); - if (static_schedule) { - impl = static_impl; + if (static_schedule || synth_implementation) { + if (static_schedule) { + impl = static_scc_schedule ? static_scc_impl : static_impl; + } else { + impl = synth_impl; + } analyze_Program(p); aps_check_error("analysis"); if (!impl) { - cerr << "Warning: static scheduling not implemented: reverting to dynamic..." << endl; + std::cerr << "Warning: static scheduling not implemented: reverting to dynamic..." 
<< std::endl; impl = dynamic_impl; } } else { @@ -97,12 +132,16 @@ int main(int argc,char **argv) { } char* outfilename = str2cat(argv[i],".scala"); - ofstream out(outfilename); + std::ofstream out(outfilename); + if (out.fail()) + { + std::cerr << "Failed to open output file " << outfilename << std::endl; + dump_scala_Program(p,std::cout); + exit(1); + } + dump_scala_Program(p,out); + out.close(); } exit(0); } - - - - diff --git a/aps2scala/dump-scala.cc b/aps2scala/dump-scala.cc index c2ae012e..a709e6d8 100644 --- a/aps2scala/dump-scala.cc +++ b/aps2scala/dump-scala.cc @@ -1,3 +1,4 @@ +#include #include #include #include @@ -18,8 +19,6 @@ String get_code_name(Symbol); #include "implement.h" #include "version.h" -using namespace std; - using std::string; // extra decl_flags flags: @@ -27,6 +26,8 @@ using std::string; extern int aps_yylineno; int nesting_level = 0; +bool activate_static_circular = false; +bool include_comments = false; ostream& operator<<(ostream&o,Symbol s) { @@ -100,7 +101,7 @@ void dump_scala_Program(Program p,std::ostream&oss) { aps_yyfilename = (char *)program_name(p); string id = program_id(aps_yyfilename); - oss << "// Generated by aps2scala version " << VERSION << endl; + oss << "// Generated by aps2scala version " << VERSION << std::endl; // oss << "import edu.uwm.cs.aps._;" << endl; // oss << "import APS._;\n"; oss << "import basic_implicit._;\n"; @@ -116,7 +117,7 @@ void dump_scala_Program(Program p,std::ostream&oss) switch(Unit_KEY(u)) { case KEYno_unit: break; case KEYwith_unit: - oss << "import " << program_id((char*)(with_unit_name(u))) + oss << indent() << "import " << program_id((char*)(with_unit_name(u))) << "_implicit._;\n"; break; case KEYdecl_unit: @@ -139,7 +140,7 @@ void dump_scala_Program(Program p,std::ostream&oss) --nesting_level; oss << "}\n"; - oss << "import " << id << "_implicit._;\n" << endl; + oss << "import " << id << "_implicit._;\n" << std::endl; for (Unit u = first_Unit(program_units(p)); u; u = UNIT_NEXT(u)) { 
switch(Unit_KEY(u)) { @@ -181,11 +182,12 @@ void dump_formal(Declaration formal, ostream&os) if (KEYseq_formal == Declaration_KEY(formal)) os << "*"; } -void dump_function_prototype(string name, Type ft, ostream& oss) +void dump_function_prototype(string name, Type ft, bool dump_anchor_actual, ostream& oss) { oss << indent() << "val v_" << name << " = f_" << name << " _;\n"; oss << indent() << "def f_" << name << "("; + bool started = false; Declarations formals = function_type_formals(ft); for (Declaration formal = first_Declaration(formals); formal != NULL; @@ -193,6 +195,13 @@ void dump_function_prototype(string name, Type ft, ostream& oss) if (formal != first_Declaration(formals)) oss << ", "; dump_formal(formal,oss); + started = true; + } + if (dump_anchor_actual) { + if (started) { + oss << ", "; + } + oss << "anchor: Any"; } oss << ")"; @@ -240,7 +249,7 @@ void dump_pattern_prototype(string name, Type ft, ostream& oss) void dump_function_debug_start(const char *name, Type ft, ostream& os) { Declarations formals = function_type_formals(ft); - os << indent() << "try {" << endl; + os << indent() << "try {" << std::endl; ++nesting_level; os << indent() << "Debug.begin(\"" << name << "(\""; bool started = false; @@ -256,11 +265,12 @@ void dump_function_debug_start(const char *name, Type ft, ostream& os) void dump_debug_end(ostream& os) { --nesting_level; - os << indent() << "} finally { Debug.end(); }" << endl; + os << indent() << "} finally { Debug.end(); }" << std::endl; } // Output Scala pattern for APS pattern +int formal_count = 0; static void dump_pattern_call(Pattern p, Pattern result, const char* resultS, ostream& os) { Pattern pf = pattern_call_func(p); @@ -281,9 +291,11 @@ static void dump_pattern_call(Pattern p, Pattern result, const char* resultS, os } else { os << resultS; } + formal_count = 0; for (Pattern pa = first_PatternActual(pactuals); pa ; pa = PAT_NEXT(pa)) { os << ","; dump_Pattern(pa,os); + formal_count++; } os << ")"; } @@ -350,7 +362,7 
@@ void dump_Pattern(Pattern p, ostream& os) { Declaration f = pattern_var_formal(p); string n = symbol_name(def_name(formal_def(f))); - if (n == "_") os << "_"; + if (n == "_") os << "v_" << std::to_string(formal_count); else { os << "v_" << n; // Generating a type causes Scala to warn about erasure @@ -423,7 +435,8 @@ bool type_is_syntax(Type t) static Declaration find_basic_decl(string name) { - Program p = find_Program(make_string("basic")); + char* basic = (char*)"basic"; + Program p = find_Program(make_string(basic)); Units us = program_units(p); for (Unit u = first_Unit(us); u; u = UNIT_NEXT(u)) { switch (Unit_KEY(u)) { @@ -444,7 +457,7 @@ static Declaration find_basic_decl(string name) // Currently inheritances does the transfer of values, // but we need this to do the transfer of types: -class ServiceRecord : public map { +class ServiceRecord : public std::map { public: void add(Declaration d) { int namespaces = decl_namespaces(d); @@ -655,7 +668,7 @@ void dump_some_attribute(Declaration d, string i, bool is_cir = direction_is_circular(dir); bool is_attr = Declaration_KEY(d) == KEYattribute_decl; - ostringstream tmp; + std::ostringstream tmp; if (nt == 0) { tmp << "Null"; } else { @@ -664,16 +677,18 @@ void dump_some_attribute(Declaration d, string i, string ntt = tmp.str(); // tmp.str() = ""; //XXX: doesn't work. I can't see how to reset stream. - ostringstream tmps; + std::ostringstream tmps; tmps << "[" << ntt << "," << vt << "]"; string typeargs = tmps.str(); oss << indent() << "private class E" << i << "_" << name - << "(anchor : " << ntt << ") extends Evaluation" << typeargs + << "(anchor : " << ntt << ")\n" + << indent(nesting_level + 1) << "extends Evaluation" << typeargs << "(anchor," << (nt == 0 ? "" : "anchor.toString()+\".\"+") << "\"" << name << "\")" - << (is_cir ? " with CircularEvaluation" + tmps.str() : "") - << (is_col ? " with CollectionEvaluation" + tmps.str() : "") + << (is_cir ? 
"\n" + indent(nesting_level + 1) + "with CircularEvaluation" + tmps.str() : "") + << (is_col ? "\n" + indent(nesting_level + 1) + "with CollectionEvaluation" + tmps.str() : "") + << (activate_static_circular && is_cir ? "\n" + indent(nesting_level + 1) + "with StaticCircularEvaluation" + tmps.str() : "") << " {\n"; ++nesting_level; @@ -716,8 +731,8 @@ void dump_some_attribute(Declaration d, string i, } if (is_cir) { - oss << indent() << "def lattice() : C_LATTICE[" << vt << "] = " - << as_val(vt) << ";\n" << endl; + oss << indent() << "def lattice : C_LATTICE[" << vt << "] = " + << as_val(vt) << ";\n" << std::endl; } if (Declaration_KEY(d) == KEYvalue_decl) { @@ -739,6 +754,7 @@ void dump_some_attribute(Declaration d, string i, oss << indent() << "private object a" << i << "_" << name << " extends Attribute" << tmps.str() << "(" << as_val(nt) << "," << as_val(vt) << ",\"" << name << "\")" + << (activate_static_circular && is_cir ? "\n" + indent(nesting_level + 1) + "with ChangeTrackingAttribute" + tmps.str() : "") << " {\n"; ++nesting_level; @@ -760,11 +776,13 @@ void dump_local_attributes(Block b, Type at, Implementation::ModuleInfo* info, default: aps_error(d,"Cannot handle this kind of statement"); break; + case KEYfor_in_stmt: + break; case KEYvalue_decl: { static int unique = 0; LOCAL_UNIQUE_PREFIX(d) = ++unique; - ostringstream ns; + std::ostringstream ns; ns << unique; dump_some_attribute(d,ns.str(),at,value_decl_type(d), value_decl_direction(d), @@ -811,7 +829,7 @@ void dump_TypeFormals(TypeFormals tfs, ostream& os) } void dump_TypeFormal_value(Declaration tf, ostream& os) { - ostringstream ss; + std::ostringstream ss; ss << "T_" << decl_name(tf); string tname = ss.str(); @@ -826,6 +844,151 @@ void dump_TypeFormal_value(Declaration tf, ostream& os) { void dump_Type_Signature(Type,string,ostream&); +void dump_CanonicalType(CanonicalType* ctype, ostream& oss) +{ + switch (ctype->key) + { + case KEY_CANONICAL_USE: + { + struct Canonical_use_type 
*canonical_use_type = (struct Canonical_use_type *)ctype; + oss << decl_name(canonical_use_type->decl); + break; + } + case KEY_CANONICAL_QUAL: + { + struct Canonical_qual_type *canonical_qual_type = (struct Canonical_qual_type *)ctype; + std::ostringstream buffer; + dump_CanonicalType(canonical_qual_type->source, oss); + oss << buffer.str() << "." << decl_name(canonical_qual_type->decl); + break; + } + case KEY_CANONICAL_FUNC: + aps_error(ctype, "Not sure how to convert canonical type function type to string"); + break; + } +} + +void dump_TypeDecl_Traits(Declaration tdecl, Type ti, string n, ostream &oss) +{ + Declaration mdecl = USE_DECL(module_use_use(type_inst_module(ti))); + if (module_decl_generating(mdecl)) + { + return; + } + + int nesting_level = 2; + oss << indent(nesting_level) << "/* dumping traits */" << "\n"; + + vector sv; + ServiceRecord sr; + + Declarations body; + Declaration service_decl; + + string actual_str = "null"; + string prefix = "override"; + + CanonicalType* actual_canonical_type = canonical_type_base_type(new_canonical_type_use(tdecl)); + std::ostringstream buffer; + dump_CanonicalType(actual_canonical_type, buffer); + actual_str = buffer.str(); + string actual_type = "T_" + actual_str; + string actual_value = "t_" + actual_str; + + // Add all module services + body = block_body(some_class_decl_contents(mdecl)); + service_decl = first_Declaration(body); + while (service_decl != NULL) + { + sr.add(service_decl); + service_decl = DECL_NEXT(service_decl); + } + + CanonicalSignatureSet csig_set = infer_canonical_signatures(canonical_type_base_type(new_canonical_type_use(tdecl))); + + int i, j; + for (i = 0; i < csig_set->num_elements; i++) + { + CanonicalSignature *csig = (CanonicalSignature *)csig_set->elements[i]; + if (!def_is_public(some_class_decl_def(csig->source_class))) + { + continue; + } + + // If module is missing these inherited services then add them + body = block_body(some_class_decl_contents(csig->source_class)); + 
service_decl = first_Declaration(body); + while (service_decl != NULL) + { + if (sr.missing(service_decl)) + { + sv.push_back(service_decl); + sr.add(service_decl); + } + service_decl = DECL_NEXT(service_decl); + } + + oss << (i > 0 ? "\n" : "") << indent(nesting_level) << "with C_" << decl_name(csig->source_class) << "["; + + // dump result type + oss << actual_type; + + for (j = 0; j < csig->num_actuals; j++) + { + oss << ", "; + + CanonicalType *cactual = csig->actuals[j]; + buffer.clear(); + buffer.str(""); + dump_CanonicalType(cactual, buffer); + oss << "T_" << buffer.str(); + } + + oss << "]"; + } + + oss << " {\n"; + nesting_level++; + + vector::iterator it; + for (it = sv.begin(); it != sv.end(); it++) + { + Declaration d = *it; + + string n = decl_code_name(d); + switch (Declaration_KEY(d)) + { + default: + break; + case KEYpattern_decl: + case KEYconstructor_decl: + oss << indent(nesting_level) << prefix << " " << "val p_" << n + << " = " << actual_value << ".p_" << n << ";\n"; + if (Declaration_KEY(d) == KEYpattern_decl) + break; + /* fall through */ + case KEYvalue_decl: + case KEYvalue_renaming: + case KEYfunction_decl: + case KEYattribute_decl: + oss << indent(nesting_level) << prefix << " " << "val v_" << n + << " = " << actual_value << ".v_" << n << ";\n"; + break; + case KEYtype_decl: + case KEYphylum_decl: + case KEYtype_renaming: + oss << indent(nesting_level) << "type T_" << n + << " = " << actual_value << ".T_" << n << ";\n"; + oss << indent(nesting_level) << "val t_" << n + << " = " << actual_value << ".t_" << n << ";\n"; + break; + } + } + + nesting_level--; + oss << indent(nesting_level) << "}\n"; +} + void dump_some_class_decl(Declaration decl, ostream& oss) { // cout << "dump_some_class_decl(" << decl_name(decl) << ")" << endl; @@ -852,7 +1015,7 @@ void dump_some_class_decl(Declaration decl, ostream& oss) break; } dump_Signature(some_class_decl_parent(decl),"T_Result",oss); - oss << " {" << endl; + oss << " {" << std::endl; 
++nesting_level; Declarations body = block_body(some_class_decl_contents(decl)); for (Declaration d=first_Declaration(body); d; d=DECL_NEXT(d)) { @@ -903,7 +1066,7 @@ void dump_some_class_decl(Declaration decl, ostream& oss) break; case KEYpattern_decl: dump_pattern_prototype(n,pattern_decl_type(d),oss); - oss << ";" << endl; + oss << ";" << std::endl; break; case KEYpragma_call: case KEYtop_level_match: @@ -911,13 +1074,13 @@ void dump_some_class_decl(Declaration decl, ostream& oss) } } --nesting_level; - oss << indent() << "}\n" << endl; + oss << indent() << "}\n" << std::endl; } // THIS CODE IS COMPLETELY BROKEN: DO NOTUSE!!! static Type hack_type_subst(Use u, Type t) { - cout << "hack_type_subst(" << Type_KEY(t) << ")" << endl; + std::cout << "hack_type_subst(" << Type_KEY(t) << ")" << std::endl; switch (Type_KEY(t)) { case KEYtype_use: case KEYfunction_type: @@ -998,7 +1161,7 @@ static string type_inst_as_scala_type(Type ty) case KEYno_type: { #endif - ostringstream oss; + std::ostringstream oss; oss << "/*TI*/"; oss << "T_" << decl_name(m); bool started = false; @@ -1041,7 +1204,7 @@ static string type_inst_as_scala_type(Type ty) #endif } -static void dump_type_inst(string n, string nameArg, Type ti, ostream& oss) +static void dump_type_inst(string n, string nameArg, Declaration decl, Type ti, ostream& oss) { Module m = type_inst_module(ti); TypeActuals tas = type_inst_type_actuals(ti); @@ -1053,9 +1216,9 @@ static void dump_type_inst(string n, string nameArg, Type ti, ostream& oss) default: break; case KEYtype_inst: { - ostringstream ss; + std::ostringstream ss; ss << n << ++u; - dump_type_inst(ss.str(),nameArg+"+\"$\"+"+ss.str(),ta,oss); + dump_type_inst(ss.str(),nameArg+"+\"$\"+"+ss.str(),decl, ta,oss); break; } } @@ -1091,9 +1254,9 @@ static void dump_type_inst(string n, string nameArg, Type ti, ostream& oss) } if (started) oss << "]"; - oss << "(" << nameArg; + oss << "(\n" << indent(nesting_level + 1) << nameArg; for (Type ta = first_TypeActual(tas); ta 
; ta = TYPE_NEXT(ta)) { - oss << ","; + oss << ",\n" << indent(nesting_level + 1); switch (Type_KEY(ta)) { default: oss << as_val(ta); @@ -1106,9 +1269,12 @@ static void dump_type_inst(string n, string nameArg, Type ti, ostream& oss) } } for (Expression a = first_Actual(as); a; a = EXPR_NEXT(a)) { - oss << "," << a; + oss << ",\n" << indent(nesting_level + 1) << a; } - oss << ");\n"; + oss << "\n" << indent() << ")\n"; + indent(); + dump_TypeDecl_Traits(decl,ti, n, oss); + oss << "\n"; oss << indent() << "type T_" << n << " = " << type_inst_as_scala_type(ti) << ";\n"; } @@ -1118,7 +1284,7 @@ static void dump_new_type(string n, string nameArg, ostream& oss) oss << indent() << "class T_" << n << "(t : I_TYPE[T_" << n << "]) extends Value(t) { }\n"; oss << indent() << "val t_" << n << " = new I_TYPE[T_" << n << "](" - << nameArg << ");\n" << endl; + << nameArg << ");\n" << std::endl; } static void dump_new_phylum(string n, string nameArg, bool isStart, ostream& oss) @@ -1131,7 +1297,7 @@ static void dump_new_phylum(string n, string nameArg, bool isStart, ostream& oss } oss << "}\n"; oss << indent() << "val t_" << n << " = new I_PHYLUM[T_" << n << "](" - << nameArg << ");\n" << endl; + << nameArg << ");\n" << std::endl; } static void dump_scala_pattern_function( @@ -1146,20 +1312,51 @@ static void dump_scala_pattern_function( Declarations rdecls = function_type_return_values(ft); Type rt = value_decl_type(first_Declaration(rdecls)); + bool dump_anchor_actual = false; + if (should_include_ast_for_objects()) { + switch (Declaration_KEY(decl)) + { + case KEYconstructor_decl: { + Declaration cdecl = constructor_decl_base_type_decl(decl); + switch (Declaration_KEY(cdecl)) { + case KEYphylum_decl: + dump_anchor_actual = true; + break; + default: + break; + } + break; + } + default: + break; + } + } + // helper: "(v_a1,v_a2)" and "(x,v_a1,v_a2)" - ostringstream argss; + std::ostringstream argss; bool started = false; argss << "("; for (Declaration f = 
first_Declaration(formals); f; f = DECL_NEXT(f)) { if (started) argss << ","; else started = true; argss << "v_" << decl_name(f); } + + std::ostringstream cloned_oss; + cloned_oss << argss.str(); + if (dump_anchor_actual) { + if (started) { + argss << ", "; + } + argss << "anchor"; + } argss << ")"; + cloned_oss << ")"; string args = argss.str(); - string uargs = started ? ("(x," + args.substr(1)) : "x"; + string args_without_anchor = cloned_oss.str(); + string uargs = started ? ("(x," + cloned_oss.str().substr(1)) : "x"; // helper: "(T_Result,T_A1,T_A2)" - ostringstream typess; + std::ostringstream typess; typess << "(" << rt; for (Declaration f = first_Declaration(formals); f; f = DECL_NEXT(f)) { typess << ","; @@ -1197,7 +1394,7 @@ static void dump_scala_pattern_function( if (!body) { // the constructor function: - dump_function_prototype(name,ft,oss); + dump_function_prototype(name,ft,dump_anchor_actual,oss); oss << " = c_" << name << args; if (is_syntax) oss << ".register"; oss << ";\n"; @@ -1206,7 +1403,7 @@ static void dump_scala_pattern_function( // the pattern function oss << indent() << "val p_" << name << " = new PatternFunction[" << utypes << "]" - << "(u_" << name << ");\n" << endl; + << "(u_" << name << ");\n" << std::endl; } void dump_scala_Declaration_header(Declaration decl, ostream& oss) @@ -1237,7 +1434,7 @@ void dump_scala_Declaration_header(Declaration decl, ostream& oss) switch (Type_KEY(rut)) { case KEYno_type: break; default: - oss << "type T_" << name; + oss << indent() << "type T_" << name; bool started = false; for (Declaration tf = first_Declaration(tfs); tf; tf=DECL_NEXT(tf)) { if (started) oss << ","; @@ -1264,7 +1461,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) name = (const char*)get_code_name(def_name(declaration_def(decl))); if (!name) name = decl_name(decl); if (verbose) { - cout << "dump_scala_Declaration(" << name << ")" << endl; + std::cout << "dump_scala_Declaration(" << name << ")" << std::endl; } break; 
default: @@ -1312,10 +1509,10 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) default: break; case KEYtype_use: { - ostringstream results; + std::ostringstream results; results << rut; result_type = results.str(); - ostringstream resultvals; + std::ostringstream resultvals; resultvals << as_val(rut); result_typeval = resultvals.str(); @@ -1324,7 +1521,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) break; case KEYno_type: { - ostringstream results; + std::ostringstream results; results << "T_" << name; bool started = false; for (Declaration tf = first_Declaration(tfs); tf; tf=DECL_NEXT(tf)) { @@ -1342,7 +1539,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) oss << ",T_" << decl_name(tf); } oss << "]) extends " << (rdecl_is_phylum ? "Node" : "Value") - << "(t) { }\n" << endl; + << "(t) { }\n" << std::endl; } } @@ -1366,9 +1563,9 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) // define extends if (result_typeval != "") { - oss << indent() << " extends Module(name)\n"; + oss << indent(nesting_level + 1) << "extends Module(name)\n"; } else if (result_type != "") { - oss << indent() << " extends I_" + oss << indent(nesting_level + 1) << "extends I_" << (rdecl_is_phylum ? 
"PHYLUM" : "TYPE") << "[" << result_type << "](name)\n"; } else { @@ -1378,7 +1575,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) break; case KEYtype_inst: { - oss << indent() << " "; + oss << indent(nesting_level + 1); dump_Use(module_use_use(type_inst_module(rut)),"M_",oss); bool started = false; @@ -1405,7 +1602,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) } // define with - oss << indent() << " with C_" << name << "[" << result_type; + oss << indent(nesting_level + 1) << "with C_" << name << "[" << result_type; for (Declaration tf=first_Declaration(tfs); tf ; tf = DECL_NEXT(tf)) { oss << ",T_" << decl_name(tf); } @@ -1413,6 +1610,12 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) oss << indent() << "{\n"; ++nesting_level; + STATE *s = (STATE*)Declaration_info(decl)->analysis_state; + if ((static_scc_schedule || anc_analysis) && s != NULL) + { + activate_static_circular = s->loop_required; + } + if (result_typeval != "") { oss << indent() << "type T_" << rname << " = " << result_type << ";\n"; @@ -1427,7 +1630,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) if (rdecl_is_phylum) { oss << indent() << "val nodes = " << result_typeval << ".nodes;\n"; } - oss << endl; + oss << std::endl; } oss << indent() << "val t_" << rname << " : this.type = this;\n"; @@ -1478,7 +1681,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) << ") = " << "a_" << n << ".set(value);\n"; } - oss << endl; + oss << std::endl; } break; @@ -1497,7 +1700,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) oss << indent() << "val v_" << n << " : " << infer_formal_type(f) << " => " << value_decl_type(rdecl) << " = a_" << n << ".get _;\n"; - + if (direction_is_input(attribute_decl_direction(d))) { oss << indent() << "def s_" << n << "(node:" << infer_formal_type(f) @@ -1505,7 +1708,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) << ") = " << "a_" << n << ".assign(node,value);\n"; } - oss << 
endl; + oss << std::endl; } break; @@ -1518,7 +1721,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) info->implement(oss); --nesting_level; - oss << indent() << "}\n" << endl; + oss << indent() << "}\n" << std::endl; } break; @@ -1537,7 +1740,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) } break; case KEYtype_inst: - dump_type_inst(name,qname,type,oss); + dump_type_inst(name,qname,decl,type,oss); break; default: oss << indent() << "type T_" << name << " = " << type << ";\n"; @@ -1575,6 +1778,15 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) if (started) oss << ","; else started = true; dump_formal(f,oss); } + + if (should_include_ast_for_objects()) { + if (is_syntax) { + if (started) { + oss << ", "; + } + oss << "ast: Any"; + } + } oss << ") extends " << rt << "(" << as_val(rt) << ") {\n"; ++nesting_level; if (is_syntax) { @@ -1712,7 +1924,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) Type fty = function_decl_type(decl); Declaration rdecl = first_Declaration(function_type_return_values(fty)); Block b = function_decl_body(decl); - dump_function_prototype(name,fty,oss); + dump_function_prototype(name,fty,false /* dump_anchor_actual */,oss); // three kinds of definitions: // 1. the whole thing: a non-empty body: @@ -1725,7 +1937,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) if (debug) dump_debug_end(oss); --nesting_level; - oss << indent() << "}\n" << endl; + oss << indent() << "}\n" << std::endl; return; } else if (rdecl) { // 2. 
simple default @@ -1791,7 +2003,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) Type ty = infer_expr_type(old); oss << indent() << "val v_" << name << " : " << ty << " = "; dump_Expression(old,oss); - oss << ";" << endl; + oss << ";" << std::endl; } break; case KEYtype_renaming: @@ -1812,7 +2024,7 @@ void dump_scala_Declaration(Declaration decl,ostream& oss) } break; default: - cout << "Not handling declaration " << decl_name(decl) << endl; + std::cout << "Not handling declaration " << decl_name(decl) << std::endl; } } @@ -1852,7 +2064,7 @@ void dump_Signature(Signature s, string n, ostream& o) case KEYsig_inst: { Class c = sig_inst_class(s); - o << " with C_" << decl_name(USE_DECL(class_use_use(c))) << "[" << n; + o << "\n" << indent(nesting_level + 1) << "with C_" << decl_name(USE_DECL(class_use_use(c))) << "[" << n; TypeActuals tas = sig_inst_actuals(s); for (Type ta = first_TypeActual(tas); ta; ta=TYPE_NEXT(ta)) { o << "," << ta; @@ -1900,7 +2112,7 @@ void dump_Type_Signature(Type t, string name, ostream& o) Module m = type_inst_module(t); TypeActuals tas = type_inst_type_actuals(t); Declaration mdecl = USE_DECL(module_use_use(m)); - o << "with C_" << decl_name(mdecl) << "[" << name; + o << "\n" << indent(nesting_level + 1) << "with C_" << decl_name(mdecl) << "[" << name; for (Type ta = first_TypeActual(tas); ta; ta = TYPE_NEXT(ta)) { o << "," << ta; } @@ -1947,6 +2159,29 @@ void dump_Type(Type t, ostream& o) break; case KEYfunction_type: { + bool dump_anchor = false; + + if (should_include_ast_for_objects()) { + Type rtype = function_type_return_type(t); + switch (Type_KEY(rtype)) { + case KEYtype_use: { + Declaration udecl = USE_DECL(type_use_use(rtype)); + + switch (Declaration_KEY(udecl)) { + case KEYphylum_decl: { + dump_anchor = true; + break; + } + default: + break; + } + break; + } + default: + break; + } + } + o << "("; bool started = false; for (Declaration f=first_Declaration(function_type_formals(t)); @@ -1956,6 +2191,12 @@ void 
dump_Type(Type t, ostream& o) if (Declaration_KEY(f) == KEYseq_formal) o << "*"; } + if (dump_anchor) { + if (started) { + o << ", "; + } + o << "Any"; + } o << ") => "; Declaration rdecl = first_Declaration(function_type_return_values(t)); if (rdecl) { @@ -2088,6 +2329,24 @@ void dump_collect_Actuals(Type ctype, Actuals as, ostream& o) } } +STATE* current_state = NULL; +AUG_GRAPH* current_aug_graph = NULL; +std::vector synth_functions_states; +SYNTH_FUNCTION_STATE* current_synth_functions_state = NULL; + +static bool find_instance(AUG_GRAPH* aug_graph, Declaration node, Declaration attr, INSTANCE** instance_out) { + int i; + for (i = 0; i < aug_graph->instances.length; i++) { + INSTANCE* instance = &aug_graph->instances.array[i]; + if (instance->node == node && instance->fibered_attr.attr == attr) { + *instance_out = instance; + return true; + } + } + + return false; +} + void dump_Expression(Expression e, ostream& o) { switch (Expression_KEY(e)) { @@ -2101,18 +2360,67 @@ void dump_Expression(Expression e, ostream& o) o << string_const_token(e); break; case KEYfuncall: + { if (funcall_is_collection_construction(e)) { // inline code to call append, single and null constructors dump_collect_Actuals(infer_expr_type(e),funcall_actuals(e),o); return; } + + Declaration attr; + if (impl == synth_impl && (attr = attr_ref_p(e)) != nullptr) { + vector visited; + struct Expression_info * index = Expression_info(e); + Declaration node = USE_DECL(value_use_use(first_Actual(funcall_actuals(e)))); + INSTANCE* instance; + if (find_instance(current_aug_graph, node, attr, &instance)) { + impl->dump_synth_instance(instance, o); + return; + } else { + fatal_error("failed to find instance"); + return; + } + } + + bool dump_anchor_actual = false; + if (should_include_ast_for_objects()) { + Expression fexpr = funcall_f(e); + switch (Expression_KEY(fexpr)) { + case KEYvalue_use: { + Declaration udecl = USE_DECL(value_use_use(fexpr)); + switch (Declaration_KEY(udecl)) { + case 
KEYconstructor_decl: { + Declaration tdecl = constructor_decl_base_type_decl(udecl); + switch (Declaration_KEY(tdecl)) { + case KEYphylum_decl: + dump_anchor_actual = true; + break; + default: + break; + } + break; + } + default: + break; + } + break; + } + default: + break; + } + } + + Declaration tplm; + bool inside_top_level_match = check_surrounding_decl(e, KEYtop_level_match, &tplm); + dump_Expression(funcall_f(e),o); o << "("; { bool start = true; + nesting_level++; FOR_SEQUENCE(Expression,arg,Actuals,funcall_actuals(e), if (start) start = false; - else o << ","; + else { o << ",\n"; o << indent(); } dump_Expression(arg,o)); Declarations fs = function_type_formals(infer_expr_type(funcall_f(e))); for (Declaration f=first_Declaration(fs); f; f=DECL_NEXT(f)) @@ -2120,9 +2428,24 @@ void dump_Expression(Expression e, ostream& o) if (start) start = false; else o << ","; o << "0"; } + + if (dump_anchor_actual) { + if (first_Actual(funcall_actuals(e)) != NULL) { + o << ", "; + } + if (inside_top_level_match) { + o << "anchor"; + } else { + o << "null"; + } } + + nesting_level--; o << ")"; + + } break; + } case KEYvalue_use: { Use u = value_use_use(e); @@ -2287,10 +2610,38 @@ void debug_Instance(INSTANCE *i, ostream& os) { string operator+(string s, int i) { - ostringstream os; + std::ostringstream os; os << s << i; return os.str(); } string indent(int nl) { return string(indent_multiple*nl,' '); } +bool check_surrounding_decl(void* node, KEYTYPE_Declaration decl_key, Declaration* result_decl) { + while (node != NULL && ABSTRACT_APS_tnode_phylum(node) != KEY_ABSTRACT_APS_None) { + if (ABSTRACT_APS_tnode_phylum(node) == KEYDeclaration) { + Declaration decl = (Declaration)node; + if (Declaration_KEY(decl) == decl_key) { + *result_decl = decl; + return true; + } + } + node = tnode_parent(node); + } + + *result_decl = NULL; + return false; +} + +bool check_surrounding_node(void* node, KEYTYPE_ABSTRACT_APS_Phylum ast_key, void** result_node) { + while (node != NULL && 
ABSTRACT_APS_tnode_phylum(node) != KEY_ABSTRACT_APS_None) { + if (ABSTRACT_APS_tnode_phylum(node) == ast_key) { + *result_node = node; + return true; + } + node = tnode_parent(node); + } + + *result_node = NULL; + return false; +} diff --git a/aps2scala/dump-scala.h b/aps2scala/dump-scala.h index d1880188..7fdc8fd7 100644 --- a/aps2scala/dump-scala.h +++ b/aps2scala/dump-scala.h @@ -15,6 +15,9 @@ extern bool incremental; extern bool static_schedule; extern int verbose; extern int debug; +extern bool include_comments; +extern bool static_scc_schedule; +extern bool anc_analysis; class Implementation; @@ -45,7 +48,7 @@ void dump_Expression(Expression,ostream&); void dump_Use(Use,const char *prefix,ostream&); void dump_vd_Default(Declaration,ostream&); -void dump_function_prototype(string,Type ft, ostream& oss); +void dump_function_prototype(string name, Type ft, bool dump_anchor_actual, ostream& oss); void dump_debug_end(ostream& os); // these two must always be called in pairs: the first @@ -95,4 +98,8 @@ inline ostream& operator<<(ostream&os, INSTANCE*i) { extern string operator+(string, int); +extern bool check_surrounding_decl(void* node, KEYTYPE_Declaration decl_key, Declaration* result_decl); + +extern bool should_include_ast_for_objects(); + #endif diff --git a/aps2scala/dyn-impl.cc b/aps2scala/dyn-impl.cc index fa8074ec..22770c5e 100644 --- a/aps2scala/dyn-impl.cc +++ b/aps2scala/dyn-impl.cc @@ -23,8 +23,6 @@ extern "C" { // end; // It will say z is 4, when it should be zero. 
-using namespace std; - struct ContextRecordNode { Declaration context; void *extra; /* branch in context (if any) */ @@ -84,19 +82,10 @@ static void dump_context_open(void *c, ostream& os) { return; case KEYfor_in_stmt: { - aps_error(decl,"Still generating C++ here..."); - Declaration f = for_in_stmt_formal(decl); - Type ty = infer_formal_type(f); - os << indent() << "for (CollectionIterator<"; - dump_Type(ty,os); - os << "> ci = CollectionIterator<"; - dump_Type(ty,os); - os << ">("; - dump_Expression(for_in_stmt_seq(decl),os); - os << "); ci.has_item(); ci.advance()) {"; + os << indent() << "for (" << "v_" << decl_name(for_in_stmt_formal(decl)) << " <- "; + dump_Expression(for_in_stmt_seq(decl), os); + os << ") {\n"; ++nesting_level; - os << indent() << ty - << " v_" << decl_name(f) << " = ci.item();\n"; } return; case KEYtop_level_match: @@ -178,7 +167,11 @@ static void activate_attr_context(ostream& os) static void dump_context_close(void *c, ostream& os) { switch (ABSTRACT_APS_tnode_phylum(c)) { case KEYDeclaration: - switch (Declaration_KEY((Declaration)c)) { + switch (Declaration_KEY((Declaration)c)) { + case KEYfor_in_stmt: + --nesting_level; + os << indent() << "}\n"; + break; case KEYtop_level_match: os << indent() << "case _ => {}\n"; /*FALLTHROUGH*/ @@ -350,6 +343,11 @@ void dump_Block(Block b,ASSIGNFUNC f,void*arg,ostream&os) dump_local_decl(arg,d,os); } break; + case KEYfor_in_stmt: + push_attr_context(d); + dump_Block(for_in_stmt_body(d),f,arg,os); + pop_attr_context(os); + break; default: aps_error(d,"cannot handle this kind of statement"); os << "0"; @@ -464,6 +462,7 @@ void implement_local_attributes(vector& local_attributes, void implement_attributes(const vector& attrs, const vector& tlms, + const vector& constructors, ostream& oss) { int n = attrs.size(); @@ -480,6 +479,16 @@ void implement_attributes(const vector& attrs, bool inh = (ATTR_DECL_IS_INH(ad) != 0); bool is_col = direction_is_collection(attribute_decl_direction(ad)); + Declaration 
attr_decl_phylum = attribute_decl_phylum(ad); + + Declaration mdecl = NULL; + if (!check_surrounding_decl(attr_decl_phylum, KEYmodule_decl, &mdecl)) { + fatal_error("Cannot find surrounding module for phylum of attribute %s", decl_name(ad)); + } + + // If phylum is defined in extended class/module then its a object + bool is_syntax = first_Declaration(some_class_decl_type_formals(mdecl)) != NULL; + oss << indent() << "def c_" << name << "(anode : " << at << ") : " << rt << " = {\n"; string fn = decl_name(af); @@ -496,7 +505,39 @@ void implement_attributes(const vector& attrs, ++nesting_level; oss << indent() << "val anchorNodes = anchor.myType.nodes;\n"; } else { - oss << indent() << "val anchor = anode;\n"; + auto pgraph = Declaration_info(attr_decl_phylum)->decl_flags; + if (!is_syntax || !should_include_ast_for_objects()) { + oss << indent() << "val anchor = anode;\n"; + } else { + oss << indent() << "val anchor = anode match {\n"; + nesting_level++; + for (vector::const_iterator it = constructors.begin(); it != constructors.end(); ++it) { + Declaration decl = *it; + if (constructor_decl_phylum(decl) == attr_decl_phylum) { + oss << indent() << "case c_" << decl_name(decl) << "("; + + bool started = false; + Type ft = constructor_decl_type(decl); + Declarations formals = function_type_formals(ft); + for (Declaration formal = first_Declaration(formals); formal != NULL; formal = DECL_NEXT(formal)) { + if (started) { + oss << ", "; + } else { + started = true; + } + oss << "_"; + } + + if (started) { + oss << ", "; + } + oss << "ast) if ast != null => ast\n"; + } + } + oss << indent() << "case _ => anode\n"; + nesting_level--; + oss << indent() << "}\n"; + } } } // The scala compiler generates exponential code for @@ -620,7 +661,19 @@ class Dynamic : public Implementation void implement(ostream& oss) { implement_local_attributes(local_attributes,oss); - implement_attributes(attribute_decls,top_level_matches,oss); + + vector constructors; + for (Declaration d = 
first_Declaration(block_body(module_decl_contents(module_decl))); d != NULL; d = DECL_NEXT(d)) { + switch(Declaration_KEY(d)) { + default: + break; + case KEYconstructor_decl: + constructors.push_back(d); + break; + } + } + + implement_attributes(attribute_decls,top_level_matches,constructors,oss); implement_var_value_decls(var_value_decls,top_level_matches,oss); // const char *name = decl_name(module_decl); diff --git a/aps2scala/implement.h b/aps2scala/implement.h deleted file mode 100644 index a4dfc428..00000000 --- a/aps2scala/implement.h +++ /dev/null @@ -1,82 +0,0 @@ -#ifndef IMPLEMENT_H -#define IMPLEMENT_H -// Implementing APS in C++ -// Different scheduling algorithms - -// This file contains an abstract class that -// is used to schedule attribute grammars expressed -// in APS using C++ templates. - -// The main code is in dump-cpp.cc, -// but some parts are different depending on whether -// we have static or dynamic scheduling. - -#include -#include - -using std::ostream; -using std::vector; - -#define LOCAL_UNIQUE_PREFIX(ld) Def_info(value_decl_def(ld))->unique_prefix - -// Abstract class: -class Implementation { - public: - virtual ~Implementation() {} - - class ModuleInfo { - protected: - Declaration module_decl; - vector top_level_matches; - vector attribute_decls; - vector var_value_decls; - vector local_attributes; - - public: - ModuleInfo(Declaration module); - virtual ~ModuleInfo() {}; - - virtual void note_top_level_match(Declaration tlm, - ostream& oss); - - virtual void note_local_attribute(Declaration la, - ostream& oss); - - virtual void note_attribute_decl(Declaration ad, - ostream& oss); - - // Declaration will be declared by caller, but not initialized - // unless this function desires to: - virtual void note_var_value_decl(Declaration vd, - ostream& oss); - - // implement tlm's and var value decls, and generate finish() routine. 
- virtual void implement(ostream& oss) = 0; - }; - - virtual ModuleInfo* get_module_info(Declaration module) = 0; - - // header is done, and indentation is set. - virtual void implement_function_body(Declaration f, ostream&) = 0; - - // not sure what to do here - // virtual void implement_procedure(Declaration p, ostream&) = 0; - - // if a Declaration has an implementation mark on it, - // this function is called to implement its use: - virtual void implement_value_use(Declaration vd, ostream&) = 0; -}; - -extern Implementation *dynamic_impl; -extern Implementation *static_impl; - -#define IMPLEMENTATION_MARKS (127<<24) - -// an implementation may wish to use these flags: -#define LOCAL_ATTRIBUTE_FLAG (1<<24) -#define VAR_VALUE_DECL_FLAG (1<<25) -#define ATTRIBUTE_DECL_FLAG (1<<26) - -void clear_implementation_marks(Declaration d); - -#endif diff --git a/apscpp/Makefile b/apscpp/Makefile index 0f5067ac..06f68562 100644 --- a/apscpp/Makefile +++ b/apscpp/Makefile @@ -1,12 +1,21 @@ CPP=g++ -CPPFLAGS=-Wall -g -DUSING_CXX -I../parse -I../analyze +CPPFLAGS=-Wall -g -Wno-write-strings -Wno-unused-variable -DUSING_CXX -I../parse -I../analyze -I../codegen -I../utilities -APSCPPOBJS = apscpp.o dump-cpp.o implement.o dyn-impl.o static-impl.o -APSCPPLIBS = ../lib/aps-lib.o ../lib/aps-ag.a +APSCPPOBJS = apscpp.o dump-cpp.o implement.o dyn-impl.o static-impl.o static-scc-impl.o +APSCPPLIBS = ../lib/aps-lib.o ../lib/aps-ag.a ../utilities/utilities.o apscpp : ${APSCPPOBJS} ${APSCPPLIBS} ${CPP} ${CPPFLAGS} ${APSCPPOBJS} ${APSCPPLIBS} -o apscpp -${APSCPPOBJS} : implement.h dump-cpp.h +${APSCPPOBJS} : dump-cpp.h + +static-impl.o : ../codegen/static-impl.cc + ${CPP} -c ${CPPFLAGS} $< -o $@ + +static-scc-impl.o : ../codegen/static-scc-impl.cc + ${CPP} -c ${CPPFLAGS} $< -o $@ + +implement.o : ../codegen/implement.cc + ${CPP} -c ${CPPFLAGS} $< -o $@ install: apscpp mv apscpp ../bin/. 
diff --git a/apscpp/apscpp.cc b/apscpp/apscpp.cc index 478f3c2c..0bfb1c45 100644 --- a/apscpp/apscpp.cc +++ b/apscpp/apscpp.cc @@ -2,7 +2,7 @@ extern "C" { #include #include #include -#include "/usr/include/string.h" +#include #include #include "aps-ag.h" } @@ -17,7 +17,7 @@ extern "C" { int callset_AI(Declaration module, STATE *s) { return 0; } -static char* argv0 = "apscpp"; +static const char* argv0 = "apscpp"; void usage() { fprintf(stderr,"%s: usage: %s [-SVG] [-D...] \n",argv0,argv0); fprintf(stderr," compile APS to C++\n"); @@ -27,6 +27,7 @@ void usage() { fprintf(stderr," -DH list debugging flags\n"); fprintf(stderr," -V increase verbosity of generation code\n"); fprintf(stderr," -G add Debug calls for every function\n"); + fprintf(stderr," -C SCC chunk static scheduling\n"); fprintf(stderr," -p path set the APSPATH (overriding env. variable)\n"); exit(1); } @@ -55,6 +56,10 @@ int main(int argc,char **argv) { } else if (streq(argv[i],"-S") || streq(argv[i],"--static")) { static_schedule = true; continue; + } else if (streq(argv[i],"-C") || streq(argv[i],"--static-scc")) { + static_schedule = true; + static_scc_schedule = true; + continue; } else if (streq(argv[i],"-V") || streq(argv[i],"--verbose")) { ++verbose; continue; @@ -82,7 +87,7 @@ int main(int argc,char **argv) { type_Program(p); aps_check_error("type"); if (static_schedule) { - impl = static_impl; + impl = static_scc_schedule ? static_scc_impl : static_impl; analyze_Program(p); aps_check_error("analysis"); if (!impl) { diff --git a/apscpp/dump-cpp.cc b/apscpp/dump-cpp.cc index c3deb4ad..65d7dcc5 100644 --- a/apscpp/dump-cpp.cc +++ b/apscpp/dump-cpp.cc @@ -16,8 +16,6 @@ String get_code_name(Symbol); #include "dump-cpp.h" #include "implement.h" -using namespace std; - using std::string; // extra decl_flags flags: @@ -78,8 +76,10 @@ void impl_module(char *mname, char*type) } bool incremental = false; //! 
unused +bool in_constructor = false; // kludge int verbose = 0; int debug = 0; +bool include_comments = false; int inline_definitions = 0; @@ -88,14 +88,14 @@ void dump_cpp_Program(Program p,std::ostream&hs,std::ostream&cpps) String name=program_name(p); inline_definitions = 0; aps_yyfilename = (char *)program_name(p); - hs << "#ifndef "; print_uppercase(name,hs); hs << "_H " << endl; - hs << "#define "; print_uppercase(name,hs); hs << "_H " << endl; - cpps << "#include \"aps-impl.h\"" << endl; + hs << "#ifndef "; print_uppercase(name,hs); hs << "_H " << std::endl; + hs << "#define "; print_uppercase(name,hs); hs << "_H " << std::endl; + cpps << "#include \"aps-impl.h\"" << std::endl; if (!streq(aps_yyfilename,"basic")) - cpps << "#include \"basic.h\"" << endl; + cpps << "#include \"basic.h\"" << std::endl; cpps << "#include \"" << name << ".h\"\n\n"; dump_cpp_Units(program_units(p),hs,cpps); - hs << "#endif" << endl; + hs << "#endif" << std::endl; } void dump_cpp_Units(Units us,std::ostream&hs,std::ostream&cpps) @@ -113,7 +113,7 @@ void dump_cpp_Units(Units us,std::ostream&hs,std::ostream&cpps) void dump_cpp_Unit(Unit u,std::ostream&hs,std::ostream&cpps) { - ostringstream is; + std::ostringstream is; switch(Unit_KEY(u)) { case KEYno_unit: break; case KEYwith_unit: @@ -123,7 +123,7 @@ void dump_cpp_Unit(Unit u,std::ostream&hs,std::ostream&cpps) char buf[n+1]; realize_string(buf,name); buf[n-1] = '\0'; // clear final quote - hs << "#include \"" << buf+1 << ".h\"" << endl; + hs << "#include \"" << buf+1 << ".h\"" << std::endl; } break; case KEYdecl_unit: @@ -142,7 +142,7 @@ Declaration constructor_decl_base_type_decl(Declaration decl) return tdecl; } -void dump_formal(Declaration formal,char *prefix,ostream&s) +void dump_formal(Declaration formal,const char *prefix,ostream&s) { dump_Typed_decl(infer_formal_type(formal),formal,prefix,s); if (KEYseq_formal == Declaration_KEY(formal)) s << ",..."; @@ -261,7 +261,7 @@ void dump_seq_Pattern_cond(Pattern pa, Type st, 
string node, ostream& os) if (PAT_NEXT(next_pa)) { aps_error(next_pa,"Sequence pattern too complicated for now"); } else { - ostringstream ns; + std::ostringstream ns; dump_seq_function(infer_pattern_type(pa),st,"last",ns); ns << "(" << node << ")"; dump_Pattern_cond(next_pa,ns.str(),os); @@ -276,12 +276,12 @@ void dump_seq_Pattern_cond(Pattern pa, Type st, string node, ostream& os) os << "!"; dump_seq_function(pat,st,"empty",os); os << "(" << node << ")&&"; - ostringstream ns; + std::ostringstream ns; dump_seq_function(pat,st,"first",ns); ns << "(" << node << ")"; dump_Pattern_cond(pa,ns.str(),os); os << "&&"; - ostringstream rs; + std::ostringstream rs; dump_seq_function(pat,st,"butfirst",rs); rs << "(" << node << ")"; dump_seq_Pattern_cond(next_pa,st,rs.str(),os); @@ -334,10 +334,10 @@ void dump_Pattern_cond(Pattern p, string node, ostream& os) aps_error(pfuse,"Cannot handle calls to pattern functions"); } } - os << node << "->cons=="; - dump_Use(pfuse,"this->c_",os); // added "this->" because of C++ compiler + os << node << "->cons==this->"; // added "this->" because of C++ compiler + dump_Use(pfuse,"c_",os); - ostringstream ts; + std::ostringstream ts; ts << "(("; dump_TypeEnvironment(USE_TYPE_ENV(pfuse),ts); ts << "V_" << decl_name(pfdecl) << "*)" << node << ")"; @@ -421,7 +421,7 @@ void dump_Pattern_bindings(Pattern p, ostream& os) string matcher_bindings(string node, Match m) { Pattern p = matcher_pat(m); - ostringstream os1, os2; + std::ostringstream os1, os2; dump_Pattern_cond(p,node,os1); dump_Pattern_bindings(p,os2); // ignore os1 contents @@ -676,12 +676,16 @@ void dump_Type_superinit(bool is_phylum, Type ty, ostream& os) break; case KEYtype_inst: { + bool saved_in_constructor = in_constructor; + in_constructor = true; dump_type_inst_construct(ty,os); + in_constructor = saved_in_constructor; } break; case KEYtype_use: { Declaration td = USE_DECL(type_use_use(ty)); + // 2024/07/08: JTB: I don't understand why we have "*" on the next line: os << "C_" << 
decl_name(td) << "(*_t_" << decl_name(td) << ")"; break; } @@ -694,7 +698,7 @@ void dump_Type_superinit(bool is_phylum, Type ty, ostream& os) // Currently inheritances does the transfer of values, // but we need this to do the transfer of types: -class ServiceRecord : public map { +class ServiceRecord : public std::map { public: void add(Declaration d) { int namespaces = decl_namespaces(d); @@ -985,7 +989,7 @@ void dump_local_attributes(Block b, Type at, Implementation::ModuleInfo* info, { static int unique = 0; LOCAL_UNIQUE_PREFIX(d) = ++unique; - ostringstream ns; + std::ostringstream ns; ns << unique; dump_some_attribute(d,ns.str(),at,value_decl_type(d), value_decl_direction(d), @@ -1204,7 +1208,7 @@ void dump_cpp_Declaration(Declaration decl,const output_streams& oss) } hs << " public:\n"; - ostringstream is; + std::ostringstream is; // The Result type as signature: hs << " typedef C_" << name << " C_Result;\n"; @@ -1415,7 +1419,7 @@ void dump_cpp_Declaration(Declaration decl,const output_streams& oss) cpps << "C_" << name << " *t_" << name << " = new C_" << name << "();\n"; } - hs << endl; + hs << std::endl; } break; default: @@ -1439,12 +1443,12 @@ void dump_cpp_Declaration(Declaration decl,const output_streams& oss) << " = " << as_val(type) << ";\n" << " return t_" << name << ";\n" << "}\n"; } - hs << endl; + hs << std::endl; } break; } if (!is_result && context) { - is << ",\n t_" << name << "("; + is << ",\n t_" << name << "("; // previously added "this->" for templates dump_Type_value(type,is); is << ")"; } @@ -1477,7 +1481,7 @@ void dump_cpp_Declaration(Declaration decl,const output_streams& oss) /* header file */ hs << " struct V_" << name << " : public " << (is_syntax ? 
"C_PHYLUM::Node" : "C_TYPE::Node") - << " {" << endl; + << " {" << std::endl; for (Declaration f = first_Declaration(formals); f; f = DECL_NEXT(f)) { hs << " "; dump_formal(f,"v_",hs); hs << ";\n"; } @@ -1591,14 +1595,14 @@ void dump_cpp_Declaration(Declaration decl,const output_streams& oss) else hs << "extern "; dump_Typed_decl(value_decl_type(decl),decl,"v_",hs); - hs << ";\n" << endl; + hs << ";\n" << std::endl; if (context == 0) { switch (Default_KEY(value_decl_default(decl))) { case KEYsimple: dump_Typed_decl(value_decl_type(decl),decl,"v_",cpps); cpps << " = "; dump_Expression(simple_value(value_decl_default(decl)),cpps); - cpps << ";\n" << endl; + cpps << ";\n" << std::endl; break; case KEYno_default: // native value @@ -1626,7 +1630,7 @@ void dump_cpp_Declaration(Declaration decl,const output_streams& oss) if (context) hs << " "; dump_function_prototype("",name,fty,hs); if (!inline_definitions) { - hs << ";\n" << endl; + hs << ";\n" << std::endl; } // three kinds of definitions: // 1. the whole thing: a non-empty body: @@ -1641,7 +1645,7 @@ void dump_cpp_Declaration(Declaration decl,const output_streams& oss) dump_function_debug(name,fty,cpps); impl->implement_function_body(decl,cpps); --nesting_level; - cpps << indent() << "}\n" << endl; + cpps << indent() << "}\n" << std::endl; return; } else if (rdecl) { // 2. 
simple default @@ -1694,7 +1698,7 @@ void dump_cpp_Declaration(Declaration decl,const output_streams& oss) hs << " C_" << decl_name(f); hs << " *t_" << decl_name(f) << ";\n"; } - ostringstream is; + std::ostringstream is; is << " "; for (Declaration d=first_Declaration(body); d; d=DECL_NEXT(d)) { @@ -1748,18 +1752,18 @@ void dump_cpp_Declaration(Declaration decl,const output_streams& oss) hs << " v_" << name; if (context) { // initialization done by module: - hs << ";\n" << endl; + hs << ";\n" << std::endl; is << ",\n v_" << name << "("; dump_Expression(old,is); is << ")"; } else { if (!inline_definitions) { - hs << ";\n" << endl; + hs << ";\n" << std::endl; dump_Type(ty,cpps); cpps << " v_" << name; } dump_Expression(old,cpps); - cpps << ";\n" << endl; + cpps << ";\n" << std::endl; } } } @@ -1782,7 +1786,7 @@ void dump_cpp_Declaration(Declaration decl,const output_streams& oss) dump_Type_value(old,cpps); } if (context) { - is << ",\n t_" << name << "("; + is << ",\n t_" << name << "(this->"; // added "this->" because of templates dump_Type_value(old,is); is << ")"; } @@ -1794,7 +1798,7 @@ void dump_cpp_Declaration(Declaration decl,const output_streams& oss) //! 
patterns not implemented break; default: - cout << "Not handling declaration " << decl_name(decl) << endl; + std::cout << "Not handling declaration " << decl_name(decl) << std::endl; } } @@ -1911,14 +1915,15 @@ void dump_Type(Type t, ostream& o) } } -void dump_Typed_decl(Type t, Declaration decl, char*prefix,ostream& o) +void dump_Typed_decl(Type t, Declaration decl, const char*prefix,ostream& o) { Symbol sym = def_name(declaration_def(decl)); switch (Type_KEY(t)) { case KEYfunction_type: { - static int n = 0; - ++n; + /* static int n = 0; + * ++n; + */ Declaration rdecl = first_Declaration(function_type_return_values(t)); dump_Type(value_decl_type(rdecl),o); o << "(*" << prefix << sym << ")("; @@ -1945,13 +1950,17 @@ void dump_Type_value(Type t, ostream& o) switch (Type_KEY(t)) { case KEYtype_use: { - void *p = tnode_parent(USE_DECL(type_use_use(t))); + Declaration decl = USE_DECL(type_use_use(t)); + void *p = tnode_parent(decl); if (p != 0 && ABSTRACT_APS_tnode_phylum(p) == KEYUnit) { // A top-level type! 
// To avoid problems, we call the "get" function dump_Use(type_use_use(t),"get_",o); o << "()"; + } else if (in_constructor) { + dump_Use(type_use_use(t),"_t_",o); } else { + o << "this->"; // added because of C++ templates dump_Use(type_use_use(t),"t_",o); } } @@ -2210,7 +2219,7 @@ void dump_TypeEnvironment(TypeEnvironment te, ostream&os) os << "::"; } -void dump_Use(Use u, char *prefix, ostream& os) +void dump_Use(Use u, const char *prefix, ostream& os) { Symbol sym; switch (Use_KEY(u)) { @@ -2294,7 +2303,7 @@ void debug_Instance(INSTANCE *i, ostream& os) { string operator+(string s, int i) { - ostringstream os; + std::ostringstream os; os << s << i; return os.str(); } diff --git a/apscpp/dump-cpp.h b/apscpp/dump-cpp.h index 55385e52..31a0a510 100644 --- a/apscpp/dump-cpp.h +++ b/apscpp/dump-cpp.h @@ -15,6 +15,7 @@ extern bool incremental; extern bool static_schedule; extern int verbose; extern int debug; +extern bool include_comments; class Implementation; @@ -55,13 +56,13 @@ void dump_Type_prefixed(Type,ostream&); void dump_Type(Type,ostream&); void dump_Type_value(Type,ostream&); void dump_Type_signature(Type,ostream&); -void dump_Typed_decl(Type,Declaration,char*prefix,ostream&); +void dump_Typed_decl(Type,Declaration,const char*prefix,ostream&); void dump_Expression(Expression,ostream&); -void dump_Use(Use,char *prefix,ostream&); +void dump_Use(Use,const char *prefix,ostream&); void dump_TypeEnvironment(TypeEnvironment,ostream&); void dump_vd_Default(Declaration,ostream&); -void dump_function_prototype(char*,Type ft, const output_streams& oss); +void dump_function_prototype(string,Type ft, output_streams& oss); // these two must always be called in pairs: the first // leaves information around for the second: diff --git a/apscpp/dyn-impl.cc b/apscpp/dyn-impl.cc index 4d28e6ab..89ac49d5 100644 --- a/apscpp/dyn-impl.cc +++ b/apscpp/dyn-impl.cc @@ -388,7 +388,7 @@ string local_attribute_context_bindings(Declaration d) } void implement_local_attributes(vector& 
local_attributes, - const output_streams& oss) + output_streams& oss) { ostream& hs = oss.hs; ostream& cpps = oss.cpps; @@ -433,7 +433,7 @@ void implement_local_attributes(vector& local_attributes, void implement_attributes(const vector& attrs, const vector& tlms, - const output_streams& oss) + output_streams& oss) { ostream& hs = oss.hs; ostream& cpps = oss.cpps; @@ -500,7 +500,7 @@ void implement_attributes(const vector& attrs, void implement_var_value_decls(const vector& vvds, const vector& tlms, - const output_streams& oss) + output_streams& oss) { ostream& hs = oss.hs; ostream& cpps = oss.cpps; @@ -569,11 +569,11 @@ class Dynamic : public Implementation public: ModuleInfo(Declaration mdecl) : Implementation::ModuleInfo(mdecl) {} - void note_top_level_match(Declaration tlm, const output_streams& oss) { + void note_top_level_match(Declaration tlm, output_streams& oss) { Super::note_top_level_match(tlm,oss); } - void dump_compute(string cfname, const output_streams& oss) { + void dump_compute(string cfname, output_streams& oss) { ostream& bs = inline_definitions ? 
oss.hs : oss.cpps; oss << header_return_type("value_type") << " " @@ -588,19 +588,19 @@ class Dynamic : public Implementation bs << indent() << "}\n"; } - void note_local_attribute(Declaration ld, const output_streams& oss) { + void note_local_attribute(Declaration ld, output_streams& oss) { Super::note_local_attribute(ld,oss); Declaration_info(ld)->decl_flags |= LOCAL_ATTRIBUTE_FLAG; int i = LOCAL_UNIQUE_PREFIX(ld); dump_compute(string("c")+i+"_"+decl_name(ld),oss); } - void note_attribute_decl(Declaration ad, const output_streams& oss) { + void note_attribute_decl(Declaration ad, output_streams& oss) { Super::note_attribute_decl(ad,oss); dump_compute(string("c_")+decl_name(ad),oss); } - void note_var_value_decl(Declaration vd, const output_streams& oss) { + void note_var_value_decl(Declaration vd, output_streams& oss) { Super::note_var_value_decl(vd,oss); Declaration_info(vd)->decl_flags |= VAR_VALUE_DECL_FLAG; char *name = decl_name(vd); @@ -608,7 +608,7 @@ class Dynamic : public Implementation oss.is << ",\n s_" << name << "(UNEVALUATED)"; } - void implement(const output_streams& oss) { + void implement(output_streams& oss) { implement_local_attributes(local_attributes,oss); implement_attributes(attribute_decls,top_level_matches,oss); implement_var_value_decls(var_value_decls,top_level_matches,oss); diff --git a/apscpp/implement.cc b/apscpp/implement.cc deleted file mode 100644 index ce4bc669..00000000 --- a/apscpp/implement.cc +++ /dev/null @@ -1,47 +0,0 @@ -#include -extern "C" { -#include -#include "aps-ag.h" -} -#include "dump-cpp.h" -#include "implement.h" - -Implementation::ModuleInfo::ModuleInfo(Declaration module) - : module_decl(module) -{} - -void Implementation::ModuleInfo::note_top_level_match(Declaration tlm, - const output_streams&) -{ - top_level_matches.push_back(tlm); -} - -void Implementation::ModuleInfo::note_var_value_decl(Declaration vd, - const output_streams&) -{ - var_value_decls.push_back(vd); -} - -void 
Implementation::ModuleInfo::note_local_attribute(Declaration ld, - const output_streams&) -{ - local_attributes.push_back(ld); -} - -void Implementation::ModuleInfo::note_attribute_decl(Declaration ad, - const output_streams&) -{ - attribute_decls.push_back(ad); -} - -static void *clear_impl_marks(void *ignore, void *node) { - if (ABSTRACT_APS_tnode_phylum(node) == KEYDeclaration) { - Declaration_info((Declaration)node)->decl_flags &= ~IMPLEMENTATION_MARKS; - } - return ignore; -} - -void clear_implementation_marks(Declaration d) { - int nothing; - traverse_Declaration(clear_impl_marks,¬hing,d); -} diff --git a/apscpp/static-impl.cc b/apscpp/static-impl.cc index e0915f1a..fafe0eed 100644 --- a/apscpp/static-impl.cc +++ b/apscpp/static-impl.cc @@ -8,8 +8,6 @@ extern "C" { #define LOCAL_VALUE_FLAG (1<<28) -using namespace std; - /** Return phase (synthesized) or -phase (inherited) * for fibered attribute, given the phylum's summary dependence graph. */ diff --git a/base/basic.aps b/base/basic.aps index e07c8b67..91a2690c 100644 --- a/base/basic.aps +++ b/base/basic.aps @@ -699,4 +699,16 @@ end; [T] function debug_output(x : T) : String; -- get the line number: -[Node :: PHYLUM[]] function lineno(x : Node) : Integer; +[Node :: PHYLUM[]] function lineno(x : remote Node) : Integer; + +module TUPLE_LATTICE[ElemType :: LATTICE[]; ST :: LIST[ElemType]] + :: LIST[ElemType], LATTICE[] extends ST +begin + bottom = ST${}; + function compare(t1, t2 : ST) : Boolean; + function compare_equal(t1, t2 : ST) : Boolean; + function join(t1, t2 : ST) : ST; + function meet(t1, t2 : ST) : ST; + + function nth(i : Integer; l : ST) : ElemType; +end; diff --git a/base/cpp/.gitignore b/base/cpp/.gitignore new file mode 100644 index 00000000..a4bf6ad4 --- /dev/null +++ b/base/cpp/.gitignore @@ -0,0 +1,11 @@ +basic.cpp +basic.h +table.cpp +table.h +flat.cpp +flat.h +test-list +test-set +test-table +test-flat +gen* diff --git a/base/cpp/Makefile b/base/cpp/Makefile index be1030e5..6b4f65ee 
100644 --- a/base/cpp/Makefile +++ b/base/cpp/Makefile @@ -39,22 +39,26 @@ basic.h basic.cpp : ../basic.aps table.cpp table.h : ../table.aps ${APSCPP} ${APSCPPFLAGS} --omit TABLE table - echo '#include "table.handcode.h"' >> table.h + mv table.h gen-table.h + echo '#include "table.handcode.h"' | cat - gen-table.h > table.h basic.o : basic.handcode.h basic.handcode.cpp table.o : table.handcode.h -test : test-list test-set test-table - -test-list - -test-set - -test-table +test : test-list test-set test-table test-flat + -./test-list + -./test-set + -./test-table + -./test-flat + +test-flat: flat.h % : %.cpp basecpp.a ${CPP} ${CPPFLAGS} $*.cpp basecpp.a -o $* clean : - rm -f *.o *.a core test-list test-set test-table - rm -f basic.cpp table.cpp + rm -f *.o *.a core test-list test-set test-table test-flat gen* + rm -f basic.cpp table.cpp flat.cpp realclean : clean - rm -f basic.h table.h + rm -f basic.h table.h flat.h diff --git a/base/cpp/aps-impl.cpp b/base/cpp/aps-impl.cpp index 597e3187..dfdb210b 100644 --- a/base/cpp/aps-impl.cpp +++ b/base/cpp/aps-impl.cpp @@ -89,6 +89,12 @@ class Depth { int get() { return print_depth; } }; +int Debug::print_depth(int d) { + int result = ::print_depth; + ::print_depth = d; + return result; +} + template <> std::string s_string(int n) { diff --git a/base/cpp/aps-impl.h b/base/cpp/aps-impl.h index 6eb60e7c..f7539738 100644 --- a/base/cpp/aps-impl.h +++ b/base/cpp/aps-impl.h @@ -12,12 +12,13 @@ class Debug { ~Debug(); // decrease indentation // there are no private instance data members so we don't need to overload = - Debug(const std::string&); // increase indendation and print entry string + Debug(const std::string&); // increase indentation and print entry string void returns(const std::string&); // return value std::ostream& out(); // print a debugging comment static std::ostream& out(std::ostream&); // change the debugging stream - private: + static int print_depth(int); // change print depth +private: static int depth; 
static std::ostream* output; }; @@ -380,7 +381,7 @@ class Collection : public A { : A(nt,vt,n), initial(init) {} virtual void set(node_type n, value_type v) { - Attribute::set(n,combine(get(n),v)); + Attribute::set(n,combine(this->get(n),v)); } protected: @@ -427,6 +428,7 @@ typedef class C_BOOLEAN C_Boolean; typedef bool T_Boolean; extern C_Boolean *t_Boolean; extern C_Boolean *get_Boolean(); +extern T_Boolean v_not(T_Boolean v__27); template std::string s_string(T_T n); diff --git a/base/cpp/basic.handcode.h b/base/cpp/basic.handcode.h index 23999eb6..f4da7127 100644 --- a/base/cpp/basic.handcode.h +++ b/base/cpp/basic.handcode.h @@ -419,7 +419,7 @@ inline bool C_SET::v_member(T_ElemType x, T_Result s) template inline typename C_SET::T_Result C_SET::v_union(T_Result s1, T_Result s2) { - // C++ can't handle this code without the followign repeated typedefs: + // C++ can't handle this code without the following repeated typedefs: typedef typename C_SET::V_append V_append; typedef typename C_SET::V_single V_single; typedef typename C_SET::V_none V_none; @@ -444,7 +444,7 @@ inline typename C_SET::T_Result C_SET::v_union(T_Result s1, T_Result s2) template inline typename C_SET::T_Result C_SET::v_intersect(T_Result s1, T_Result s2) { - // C++ can't handle this code without the followign repeated typedefs: + // C++ can't handle this code without the following repeated typedefs: typedef typename C_SET::V_append V_append; typedef typename C_SET::V_single V_single; typedef typename C_SET::V_none V_none; @@ -469,7 +469,7 @@ inline typename C_SET::T_Result C_SET::v_intersect(T_Result s1, T_Result s template inline typename C_SET::T_Result C_SET::v_difference(T_Result s1, T_Result s2) { - // C++ can't handle this code without the followign repeated typedefs: + // C++ can't handle this code without the following repeated typedefs: typedef typename C_SET::V_append V_append; typedef typename C_SET::V_single V_single; typedef typename C_SET::V_none V_none; diff --git 
a/base/cpp/table.handcode.h b/base/cpp/table.handcode.h index 19458689..6a3da403 100644 --- a/base/cpp/table.handcode.h +++ b/base/cpp/table.handcode.h @@ -9,7 +9,7 @@ struct APS_less { typedef typename C_T::T_Result T_T; C_T *t_T; APS_less(C_T *_t_T) : t_T(_t_T) {} - bool operator()(T_T x, T_T y) { return t_T->v_less(x,y); } + bool operator()(T_T x, T_T y) const { return t_T->v_less(x,y); } }; template @@ -45,7 +45,7 @@ class C_TABLE : public C_TYPE { V_Table(c), t_KeyType(_t_KeyType), t_ValueType(_vt), - entries(Key_Less(t_KeyType)) {} + entries(Key_Less(_t_KeyType)) {} V_full_table *as_full() { return this; } void add_to(V_full_table *table) { typename Entries::const_iterator diff --git a/base/cpp/test-flat.cpp b/base/cpp/test-flat.cpp new file mode 100644 index 00000000..50f0694c --- /dev/null +++ b/base/cpp/test-flat.cpp @@ -0,0 +1,102 @@ +#include +#include "aps-impl.h" +#include "basic.h" +#include "flat.h" + +using namespace std; + +typedef C_FLAT_LATTICE C_LiftedInteger; +typedef C_LiftedInteger::T_Result T_LiftedInteger; + +static bool verbose = false; +static int failed = 0; + +template +static void assert_equals(const string &test, T expected, T actual) +{ + bool passed = actual == expected; + if (verbose || !passed) cout << test << " = " << actual << endl; + if (passed) return; + ++failed; + cout << " failed! 
(Expected " << expected << ")" << endl; +} + +int main(int argc, char **argv) +{ + if (argc > 1) verbose = true; + T_LiftedInteger i1, i2, i1a, top, bot; + C_LiftedInteger* t_LiftedInteger = new C_LiftedInteger(get_Integer()); + C_LiftedInteger& t = *t_LiftedInteger; + i1 = t.v_lift(1); + i2 = t.v_lift(2); + i1a = t.v_lift(1); + top = t.v_top; + bot = t.v_bottom; + + assert_equals("bot",string("BOT"),t.v_string(bot)); + assert_equals("i1",string("LIFT(1)"),t.v_string(i1)); + assert_equals("i2",string("LIFT(2)"),t.v_string(i2)); + assert_equals("top",string("TOP"),t.v_string(top)); + + assert_equals("bot < bot",false,t.v_compare(bot,bot)); + assert_equals("bot < i1",true,t.v_compare(bot,i1)); + assert_equals("bot < i2",true,t.v_compare(bot,i2)); + assert_equals("bot < top",true,t.v_compare(bot,top)); + + assert_equals("i1 < bot",false,t.v_compare(i1,bot)); + assert_equals("i1 < i1",false,t.v_compare(i1,i1)); + assert_equals("i1 < i2",false,t.v_compare(i1,i2)); + assert_equals("i1 < top",true,t.v_compare(i1,top)); + + assert_equals("top < bot",false,t.v_compare(top,bot)); + assert_equals("top < i1",false,t.v_compare(top,i1)); + assert_equals("top < i2",false,t.v_compare(top,i2)); + assert_equals("top < top",false,t.v_compare(top,top)); + + assert_equals("bot <= bot",true,t.v_compare_equal(bot,bot)); + assert_equals("bot <= i1",true,t.v_compare_equal(bot,i1)); + assert_equals("bot <= i2",true,t.v_compare_equal(bot,i2)); + assert_equals("bot <= top",true,t.v_compare_equal(bot,top)); + + assert_equals("i1 <= bot",false,t.v_compare_equal(i1,bot)); + assert_equals("i1 <= i1",true,t.v_compare_equal(i1,i1)); + assert_equals("i1 <= i2",false,t.v_compare_equal(i1,i2)); + assert_equals("i1 <= top",true,t.v_compare_equal(i1,top)); + + assert_equals("top <= bot",false,t.v_compare_equal(top,bot)); + assert_equals("top <= i1",false,t.v_compare_equal(top,i1)); + assert_equals("top <= i2",false,t.v_compare_equal(top,i2)); + assert_equals("top <= 
top",true,t.v_compare_equal(top,top)); + + assert_equals("bot \\/ bot",string("BOT"),t.v_string(t.v_join(bot,bot))); + assert_equals("bot \\/ i1",string("LIFT(1)"),t.v_string(t.v_join(bot,i1))); + assert_equals("bot \\/ i2",string("LIFT(2)"),t.v_string(t.v_join(bot,i2))); + assert_equals("bot \\/ top",string("TOP"),t.v_string(t.v_join(bot,top))); + + assert_equals("i1 \\/ bot",string("LIFT(1)"),t.v_string(t.v_join(i1,bot))); + assert_equals("i1 \\/ i1",string("LIFT(1)"),t.v_string(t.v_join(i1,i1))); + assert_equals("i1 \\/ i2",string("TOP"),t.v_string(t.v_join(i1,i2))); + assert_equals("i1 \\/ top",string("TOP"),t.v_string(t.v_join(i1,top))); + + assert_equals("top \\/ bot",string("TOP"),t.v_string(t.v_join(top,bot))); + assert_equals("top \\/ i1",string("TOP"),t.v_string(t.v_join(top,i1))); + assert_equals("top \\/ i2",string("TOP"),t.v_string(t.v_join(top,i2))); + assert_equals("top \\/ top",string("TOP"),t.v_string(t.v_join(top,top))); + + assert_equals("bot /\\ bot",string("BOT"),t.v_string(t.v_meet(bot,bot))); + assert_equals("bot /\\ i1",string("BOT"),t.v_string(t.v_meet(bot,i1))); + assert_equals("bot /\\ i2",string("BOT"),t.v_string(t.v_meet(bot,i2))); + assert_equals("bot /\\ top",string("BOT"),t.v_string(t.v_meet(bot,top))); + + assert_equals("i1 /\\ bot",string("BOT"),t.v_string(t.v_meet(i1,bot))); + assert_equals("i1 /\\ i1",string("LIFT(1)"),t.v_string(t.v_meet(i1,i1))); + assert_equals("i1 /\\ i2",string("BOT"),t.v_string(t.v_meet(i1,i2))); + assert_equals("i1 /\\ top",string("LIFT(1)"),t.v_string(t.v_meet(i1,top))); + + assert_equals("top /\\ bot",string("BOT"),t.v_string(t.v_meet(top,bot))); + assert_equals("top /\\ i1",string("LIFT(1)"),t.v_string(t.v_meet(top,i1))); + assert_equals("top /\\ i2",string("LIFT(2)"),t.v_string(t.v_meet(top,i2))); + assert_equals("top /\\ top",string("TOP"),t.v_string(t.v_meet(top,top))); + + return failed; +} diff --git a/base/cpp/test-list.cpp b/base/cpp/test-list.cpp index 3cad0f6b..d859fd3e 100644 --- 
a/base/cpp/test-list.cpp +++ b/base/cpp/test-list.cpp @@ -9,18 +9,44 @@ typedef C_IntegerList::T_Result T_IntegerList; bool eq_int(int x, int y) { return x == y; } -main() +static bool verbose = false; +static int failed = 0; + +template +static void assert_equals(const string &test, T expected, T actual) +{ + bool passed = actual == expected; + if (verbose || !passed) cout << test << " = " << actual << endl; + if (passed) return; + ++failed; + cout << " failed! (Expected " << expected << ")" << endl; +} + +int main(int argc, char**argv) { + if (argc > 1) verbose = true; T_IntegerList is, is2; C_IntegerList* t_IntegerList = new C_IntegerList(get_Integer()); COLL iscoll(t_IntegerList,get_Integer()); C_IntegerList& t = *t_IntegerList; is = t.v_append(t.v_append(t.v_single(1),t.v_single(2)),t.v_single(3)); // is2 = t.v_append(t.v_append(t.v_single(3),t.v_single(0)),t.v_single(2)); - cout << iscoll.to_string(is) << endl; - for (int i=0; i < 5; ++i) { - cout << "Position(" << i << ") = " << t.v_position(i,is) << endl; - cout << "Member(" << i << ") = " << t.v_member(i,is) << endl; - cout << "NthFromEnd(" << i << ") = " << t.v_nth_from_end(i,is) << endl; - } + + assert_equals("to_string(l)",string("{1,2,3}"),iscoll.to_string(is)); + + assert_equals("position(1,l)",0,t.v_position(1,is)); + assert_equals("position(2,l)",1,t.v_position(2,is)); + assert_equals("position(3,l)",2,t.v_position(3,is)); + assert_equals("position(0,l)",-1,t.v_position(0,is)); + + assert_equals("member(0,l)",false,t.v_member(0,is)); + assert_equals("member(1,l)",true,t.v_member(1,is)); + assert_equals("member(2,l)",true,t.v_member(2,is)); + assert_equals("member(3,l)",true,t.v_member(3,is)); + + assert_equals("nth_from_end(0,l)",3,t.v_nth_from_end(0,is)); + assert_equals("nth_from_end(1,l)",2,t.v_nth_from_end(1,is)); + assert_equals("nth_from_end(2,l)",1,t.v_nth_from_end(2,is)); + + return failed; } diff --git a/base/cpp/test-set.cpp b/base/cpp/test-set.cpp index 22a9f3ea..7d56d892 100644 --- 
a/base/cpp/test-set.cpp +++ b/base/cpp/test-set.cpp @@ -7,17 +7,44 @@ using namespace std; typedef C_SET C_IntegerSet; typedef C_IntegerSet::T_Result T_IntegerSet; -main() +static bool verbose = false; +static int failed = 0; + +template +static void assert_equals(const string &test, T expected, T actual) +{ + bool passed = actual == expected; + if (verbose || !passed) cout << test << " = " << actual << endl; + if (passed) return; + ++failed; + cout << " failed! (Expected " << expected << ")" << endl; +} + +int main(int argc, char **argv) { - T_IntegerSet is, is2; + if (argc > 1) verbose = true; + T_IntegerSet is, is2, is3; C_IntegerSet* t_IntegerSet = new C_IntegerSet(get_Integer()); COLL iscoll(t_IntegerSet,get_Integer()); C_IntegerSet& t = *t_IntegerSet; is = t.v_append(t.v_append(t.v_single(1),t.v_single(2)),t.v_single(3)); is2 = t.v_append(t.v_append(t.v_single(3),t.v_single(0)),t.v_single(2)); - cout << iscoll.to_string(is) << " with " - << iscoll.to_string(is2) << endl; - cout << "Union = " << iscoll.to_string(t.v_union(is,is2)) << endl; - cout << "Intersection = " << iscoll.to_string(t.v_intersect(is,is2)) << endl; - cout << "Difference = " << iscoll.to_string(t.v_difference(is,is2)) << endl; + is3 = t.v_append(t.v_append(t.v_single(3),t.v_single(1)),t.v_single(2)); + + assert_equals("s1",string("{1,2,3}"),iscoll.to_string(is)); + assert_equals("s2",string("{3,0,2}"),iscoll.to_string(is2)); + assert_equals("s3",string("{3,1,2}"),iscoll.to_string(is3)); + + assert_equals("union(s1,s2)",string("{1,3,0,2}"), + iscoll.to_string(t.v_union(is,is2))); + assert_equals("intersect(s1,s2)",string("{2,3}"), + iscoll.to_string(t.v_intersect(is,is2))); + assert_equals("difference(s1,s2)",string("{1}"), + iscoll.to_string(t.v_difference(is,is2))); + + assert_equals("equal(s1,s2)",false,t.v_equal(is,is2)); + assert_equals("equal(s1,s3)",true,t.v_equal(is,is3)); + assert_equals("equal(s2,s3)",false,t.v_equal(is2,is3)); + + return failed; } diff --git 
a/base/cpp/test-table.cpp b/base/cpp/test-table.cpp index ca160392..03500839 100644 --- a/base/cpp/test-table.cpp +++ b/base/cpp/test-table.cpp @@ -6,11 +6,28 @@ using namespace std; -int main () +static bool verbose = false; +static int failed = 0; + +template +static void assert_equals(const string &test, T expected, T actual) +{ + bool passed = actual == expected; + if (verbose || !passed) cout << test << " = " << actual << endl; + if (passed) return; + ++failed; + cout << " failed! (Expected " << expected << ")" << endl; +} + +int main (int argc, char**argv) { - Debug::out(cout); + if (argc > 1) { + Debug::out(cout); + Debug::print_depth(3); + verbose = true; + } typedef C_BAG C_Strings; - C_Strings t_Strings(t_String); + C_Strings t_Strings(get_String()); typedef COLL Coll; C_Strings::T_Result b1 = t_Strings.v_single("s1"); @@ -20,23 +37,34 @@ int main () C_Strings::T_Result b3 = t_Strings.v_none(); typedef C_TABLE C_Table; - C_Table t_Table(t_Integer,&t_Strings); + C_Table t_Table(get_Integer(),&t_Strings); C_Table::T_Result t1 = t_Table.v_table_entry(3,b1); C_Table::T_Result t2 = t_Table.v_table_entry(3,b2); C_Table::T_Result t3 = t_Table.v_table_entry(1,b3); t1 = t_Table.v_combine(t1,t3); t1 = t_Table.v_combine(t1,t2); + for (int i=1; i < 4; ++i) { C_Table::T_Result t4 = t_Table.v_select(t1,i); - - cout << "t.select(" << i << ") = " << t4 << endl; + if (verbose) { + cout << "t.select(" << i << ") = " << t4 << endl; + } if (C_Table::V_table_entry* te = dynamic_cast(t4)) { - cout << "t[" << i << "] = " << - Coll(&t_Strings,t_String).to_string(te->v_val) << endl; + string result = Coll(&t_Strings,t_String).to_string(te->v_val); + if (i == 3) { + assert_equals("t[3]",string("{s1,s2a,s2b}"), result); + + } else { + assert_equals("t[" + to_string(i) + "]", string("{}"), result); + } } else { cout << "t[" << i << "] is undefined" << endl; + cout << " failed test, expected to be defined" << endl; + ++failed; } } + + return failed; } diff --git a/base/flat.aps 
b/base/flat.aps new file mode 100644 index 00000000..7daa0424 --- /dev/null +++ b/base/flat.aps @@ -0,0 +1,134 @@ +module FLAT_LATTICE[E :: BASIC[], PRINTABLE[]] + :: COMBINABLE[], LATTICE[], PRINTABLE[] +begin + constructor fbottom() : Result; + constructor ftop() : Result; + constructor normal(value : E) : Result; + + bottom : Result := fbottom(); + top : Result := ftop(); + + function string(x : Result) : String begin + case x begin + match fbottom() begin + result := "BOT"; + end; + match ftop() begin + result := "TOP"; + end; + match normal(?v) begin + result := "LIFT(" ++ E$string(v) ++ ")"; + end; + end; + end; + + function compare(v1, v2 : Result) : Boolean begin + case v1 begin + match fbottom() begin + result := v2 /= v1; + end; + match normal(?) begin + result := v2 = top; + end; + else + result := false; + end; + end; + + function compare_equal(v1, v2 : Result) : Boolean begin + case v1 begin + match ftop() begin + result := v1 = v2; + end; + match normal(?) begin + result := v1 = v2 or v2 = top; + end; + else + result := true; + end; + end; + + function join(v1, v2 : Result) : Result begin + case v1 begin + match fbottom() begin + result := v2; + end; + match normal(?) begin + if v2 = bottom or v2 = v1 then + result := v1; + else + result := top; + endif; + end; + else + result := top; + end; + end; + + function meet(v1, v2 : Result) : Result begin + case v1 begin + match ftop() begin + result := v2; + end; + match normal(?) 
begin + if v2 = top or v2 = v1 then + result := v1; + else + result := bottom; + endif; + end; + else + result := bottom; + end; + end; + + function lift(e : E) : Result := normal(e); + + -- Current APS compilers cannot handle functional returns + -- function liftf1(base : function (x:E) : E) f : function(x:Result) : Result begin + -- function resultf(x : Result) : Result begin + -- case x begin + -- match normal(?v) begin + -- result := normal(base(v)); + -- end; + -- else + -- result := x; + -- end; + -- end; + -- f := resultf; + --end; + + -- Since liftf1 doesn't work + function applyf1(base : function (v:E) : E; x:Result) : Result begin + case x begin + match normal(?v) begin + result := normal(base(v)); + end; + else + result := x; + end; + end; + + function applyf2(base : function (v1,v2 : E) : E; x1,x2 : Result) : Result begin + case x1 begin + match normal(?v1) begin + case x2 begin + match normal(?v2) begin + result := normal(base(v1,v2)); + end; + else + result := x2; + end; + end; + else + if x2 = bottom then + result := x2; + else + result := x1; + endif; + end; + end; + + initial = bottom; + function combine(x1,x2 : Result) : Result := join(x1,x2); +end; diff --git a/base/scala/Makefile b/base/scala/Makefile index 8dccb622..64b5ad65 100644 --- a/base/scala/Makefile +++ b/base/scala/Makefile @@ -1,14 +1,20 @@ .PHONY: all install clean -SCALA= aps-impl.scala basic.handcode.scala table.handcode.scala -SVERSION=2.11 +SCALA= aps-impl.scala basic.handcode.scala table.handcode.scala symbol.scala symbol.handcode.scala flat.scala +SVERSION=2.12 +LIB=../../lib # We can't make 2.9 anymore because it doesn't understand Java 8 -all : aps-library-2.10.jar aps-library-2.11.jar +all : aps-library-2.10.jar aps-library-2.11.jar aps-library-2.12.jar -install : aps-library-2.10.jar aps-library-2.11.jar - cp aps-library*.jar ../../lib +test: test-multiset.run test-set.run test-flat.run + +flat.scala: ../flat.aps ../../bin/aps2scala + ../../bin/aps2scala -p .. 
flat + +install : + mkdir -p ${LIB} && cp aps-library*.jar ${LIB} aps-library-2.9.jar : ${SCALA} @rm -f *.class @@ -27,17 +33,25 @@ aps-library-2.11.jar : ${SCALA} /afs/cs.uwm.edu/package/scala/scala-2.11.12/common/bin/scalac -deprecation ${SCALA} jar cvf $@ *.class -.PHONY: test-multiset.run -test-multiset.run : aps-library-${SVERSION}.jar - scalac -cp aps-library-${SVERSION}.jar test-multiset.scala - scala -cp aps-library-${SVERSION}.jar:. TestMultiSet +aps-library-2.12.jar : ${SCALA} + @rm -f *.class + /afs/cs.uwm.edu/package/scala/scala-2.12.8/common/bin/scalac -deprecation ${SCALA} + jar cvf $@ *.class -clean : - rm -f *.class *.jar *-2.*.scala +.PHONY: test-%.run +test-%.run : test-%.scala aps-library-${SVERSION}.jar + scala -cp aps-library-${SVERSION}.jar $< -%.scala : RCS/%.scala,v - co $*.scala +aps-library-2.13.jar : ${SCALA} + @rm -f *.class + /afs/cs.uwm.edu/package/scala/scala-2.13.7/common/bin/scalac -deprecation ${SCALA} + jar cvf $@ *.class + +aps-library.jar : ${SCALA} + @rm -f *.class + scalac -deprecation ${SCALA} + jar cvf $@ *.class -Makefile : RCS/Makefile,v - co Makefile +clean : + rm -f *.class *.jar *-2.*.scala diff --git a/base/scala/aps-impl.scala b/base/scala/aps-impl.scala index 7fc26c60..36971090 100644 --- a/base/scala/aps-impl.scala +++ b/base/scala/aps-impl.scala @@ -4,6 +4,7 @@ import scala.collection.mutable.Buffer; import scala.collection.mutable.ArrayBuffer; +import java.util.concurrent.atomic.AtomicBoolean; object Debug { private var depth : Int = 0; @@ -14,7 +15,7 @@ object Debug { def activate() : Unit = _active = true; - def indent() { + def indent(): Unit = { for (i <- 0 until depth) print(' '); } @@ -91,7 +92,7 @@ class I_TYPE[T](name : String) extends Module(name) with C_TYPE[T] { case t:Value => assert(t.myType == this) }; def f_equal(x : T_Result, y : T_Result) : Boolean = f_node_equivalent(x,y); - def f_node_equivalent(x : T_Result, y : T_Result) : Boolean = x.equals(y); + def f_node_equivalent(x : T_Result, y : 
T_Result) : Boolean = x != null && y != null && x == y; def f_string(x : T_Result) : String = x.toString(); } @@ -193,7 +194,24 @@ object Evaluation { case class CyclicAttributeException(w : String) extends APSException("cyclic attribute: " + w) {} object StubError extends APSException("stub error") {} - import scala.collection.mutable.Stack; + class Stack[A](private var elems: List[A] = List.empty[A]) extends Iterable[A] { + def push(v: A): Stack[A] = { + elems = v :: elems; + this; + } + + def pop(): A = { + if (elems.isEmpty) { + throw new NoSuchElementException("Empty Stack"); + } else { + val popped = elems.head; + elems = elems.tail; + popped; + } + } + + override def iterator: Iterator[A] = elems.iterator + } val pending : Stack[Evaluation[_,_]] = new Stack(); } @@ -208,6 +226,8 @@ class Evaluation[T_P, T_V](val anchor : T_P, val name : String) var status : EvalStatus = UNINITIALIZED; var value : ValueType = null.asInstanceOf[ValueType]; + // Flag that can be overridden to prevent testing for TooLateError + var checkForLateUpdate = true; def inCycle : CircularEvaluation[_,_] = null; def setInCycle(ce : CircularEvaluation[_,_]) : Unit = { @@ -251,7 +271,7 @@ class Evaluation[T_P, T_V](val anchor : T_P, val name : String) def set(v : ValueType) : Unit = { status match { - case EVALUATED => throw TooLateError; + case EVALUATED => if (checkForLateUpdate) throw TooLateError else (); case _ => (); } value = v; @@ -263,13 +283,13 @@ class Evaluation[T_P, T_V](val anchor : T_P, val name : String) } def detectedCycle : ValueType = { - throw new CyclicAttributeException(anchor+"."+name); + throw new CyclicAttributeException(s"$anchor.$name"); } def compute : ValueType = getDefault; def getDefault : ValueType = { - throw new UndefinedAttributeException(anchor+"."+name); + throw new UndefinedAttributeException(s"$anchor.$name"); }; } @@ -330,7 +350,7 @@ extends Module("Attribute " + name) } def createEvaluation(anchor : NodeType) : Evaluation[NodeType,ValueType] = { - 
return new Evaluation(anchor, anchor + "." + name); + return new Evaluation(anchor, s"$anchor.$name"); } } @@ -481,7 +501,7 @@ trait CircularEvaluation[V_P, V_T] extends Evaluation[V_P,V_T] { } } - private def check(newValue : ValueType) : Unit = { + def check(newValue : ValueType) : Unit = { if (!lattice.v_equal(value,newValue)) { inCycle.helper.modified = true; if (!lattice.v_compare(value,newValue)) { @@ -515,4 +535,45 @@ object P_AND { def unapply[T](x : T) : Option[(T,T)] = Some((x,x)); } +trait StaticCircularEvaluation[V_P, V_T] extends CircularEvaluation[V_P, V_T] { + def assign(v: ValueType, changed: AtomicBoolean): Unit = { + Debug.out(name + " := " + v); + this.set(v, changed) + } + + def set(newValue: ValueType, changed: AtomicBoolean): Unit = { + val prevValue = value; + super.set(newValue); + if (prevValue != value) { + changed.set(true); + } + } + + override def check(newValue: ValueType): Unit = { + if (value != null) { + if (!lattice.v_equal(value, newValue)) { + if (!lattice.v_compare(value, newValue)) { + throw new Evaluation.CyclicAttributeException("non-monotonic " + name); + } + } + } + } + + // Needed to prevent TooLateError + checkForLateUpdate = false; +} + +trait ChangeTrackingAttribute[T_P <: Node, T_V] { + this: Attribute[T_P, T_V] => + + def assign(n: T_P, v: T_V, changed: AtomicBoolean): Unit = { + Debug.begin(t_P.v_string(n) + "." 
+ name + ":=" + v); + this.set(n, v, changed); + Debug.end(); + } + + def set(n: T_P, v: T_V, changed: AtomicBoolean): Unit = checkNode(n) + .asInstanceOf[StaticCircularEvaluation[T_P, T_V]] + .set(v, changed); +} diff --git a/base/scala/basic.handcode.scala b/base/scala/basic.handcode.scala index 57ac7d1a..40b8597c 100644 --- a/base/scala/basic.handcode.scala +++ b/base/scala/basic.handcode.scala @@ -1,14 +1,22 @@ import Evaluation._; object basic_implicit { + type AtomicBoolean = java.util.concurrent.atomic.AtomicBoolean; + + private object Conversions { + implicit def lazyToEager[T](f: (=>T, =>T) => T): (T, T) => T = (a: T, b: T) => f(a, b) + } + + import Conversions._ + val t_Boolean = new M_BOOLEAN("Boolean"); type T_Boolean = Boolean; val v_true:T_Boolean = true; val v_false:T_Boolean = false; val v_and = f_and _; - def f_and(v__23 : T_Boolean, v__24 : T_Boolean):T_Boolean = v__23 && v__24; + def f_and(v__23: => T_Boolean, v__24: => T_Boolean):T_Boolean = v__23 && v__24; val v_or = f_or _; - def f_or(v__25 : T_Boolean, v__26 : T_Boolean):T_Boolean = v__25 || v__26; + def f_or(v__25: => T_Boolean, v__26: => T_Boolean):T_Boolean = v__25 || v__26; val v_not = f_not _; def f_not(v__27 : T_Boolean):T_Boolean = !v__27; @@ -51,12 +59,30 @@ object basic_implicit { type T_MAKE_LATTICE[L] = L; - val t_OrLattice = new M_MAKE_LATTICE[T_Boolean]("OrLattice",t_Boolean,v_false,v_cand,v_implies,v_or,v_and); + val t_OrLattice = new M_MAKE_LATTICE[T_Boolean]("OrLattice",t_Boolean,v_false,v_cand,v_implies,v_or,v_and) + with C_TYPE[Boolean] + with C_COMBINABLE[Boolean] + with C_LATTICE[Boolean] { + override val v_assert = t_Boolean.v_assert + override val v_node_equivalent = t_Boolean.v_node_equivalent + override val v_string = t_Boolean.v_string + } type T_OrLattice = T_Boolean; - val t_AndLattice = new M_MAKE_LATTICE[T_Boolean]("AndLattice",t_Boolean,v_true,v_andc,v_revimplies,v_and,v_or); + val t_AndLattice = new 
M_MAKE_LATTICE[T_Boolean]("AndLattice",t_Boolean,v_true,v_andc,v_revimplies,v_and,v_or) + with C_TYPE[Boolean] + with C_COMBINABLE[Boolean] + with C_LATTICE[Boolean] { + override val v_assert = t_Boolean.v_assert + override val v_node_equivalent = t_Boolean.v_node_equivalent + override val v_string = t_Boolean.v_string + } type T_AndLattice = T_Boolean; + type T_MAX_LATTICE[T] = T; + type T_MIN_LATTICE[T] = T; + + type T_BAG[T] = List[T]; type T_LIST[T] = List[T]; type T_SET[T] = scala.collection.immutable.Set[T]; @@ -64,7 +90,9 @@ object basic_implicit { type T_UNION_LATTICE[T_E,T_T] = T_T; type T_INTERSECTION_LATTICE[T_E,T_T] = T_T; - + + type T_TUPLE_LATTICE[T_ElemType, T_ST] = T_ST; + val t_String = new M_STRING("String"); type T_String = String; @@ -519,7 +547,7 @@ trait C_COLLECTION[T_Result, T_ElemType] extends C_READ_ONLY_COLLECTION[T_Result val v_append : (T_Result,T_Result) => T_Result; val v_single : (T_ElemType) => T_Result; val v_none : () => T_Result; - val v__op_AC : (T_ElemType*) => T_Result; + val v__op_AC : (Seq[T_ElemType]) => T_Result; } class M__basic_14[T_ElemType,T_T](t_ElemType:Any,t_T:C_READ_ONLY_COLLECTION[T_T,T_ElemType]) { @@ -529,7 +557,7 @@ class M__basic_14[T_ElemType,T_T](t_ElemType:Any,t_T:C_READ_ONLY_COLLECTION[T_T, }; class M__basic_15[T_ElemType,T_T](t_ElemType:Any,t_T:C_COLLECTION[T_T,T_ElemType]) { - val v__op_AC : (T_ElemType*) => T_T = t_T.v__op_AC; + val v__op_AC : (Seq[T_ElemType]) => T_T = t_T.v__op_AC; }; trait C_READ_ONLY_ORDERED_COLLECTION[T_Result, T_ElemType] extends C_READ_ONLY_COLLECTION[T_Result,T_ElemType] { @@ -623,7 +651,7 @@ with C_SEQUENCE[T_SEQUENCE[T_ElemType],T_ElemType] case c_none() => List() }; def u__op_AC(x:Any) : Option[(T_Result,Seq[T_ElemType])] = x match { - case x : T_Result => Some((x,toList(x))); + case x : T_Result @unchecked => Some((x,toList(x))); case _ => None }; val p__op_AC = new PatternSeqFunction[T_Result,T_ElemType](u__op_AC); @@ -716,11 +744,11 @@ with 
C_BAG[List[T_ElemType],T_ElemType] def f_assert(v__88 : T_Result) : Unit = {}; val v__op_AC = f__op_AC _; - def f__op_AC(v_l : T_ElemType*):T_Result = v_l.toList; + def f__op_AC(v_l : Seq[T_ElemType]):T_Result = v_l.toList; val p__op_AC = new PatternSeqFunction[T_Result,T_ElemType](u__op_AC); def u__op_AC(x:Any) : Option[(T_Result,Seq[T_ElemType])] = x match { - case x:T_Result => Some((x,x)) + case x:T_Result @unchecked => Some((x,x)) case _ => None }; @@ -731,7 +759,7 @@ with C_BAG[List[T_ElemType],T_ElemType] val v_append = f_append _; def f_append(v_l1 : T_Result, v_l2 : T_Result):T_Result = v_l1 ++ v_l2; def u_append(x:Any) : Option[(T_Result,T_Result,T_Result)] = x match { - case x:T_Result => x match { + case x:T_Result @unchecked => x match { case x1::x2::l => Some((x,List(x1),x2::l)) case _ => None }; @@ -742,7 +770,7 @@ with C_BAG[List[T_ElemType],T_ElemType] val v_single = f_single _; def f_single(v_x : T_ElemType):T_Result = List(v_x); def u_single(x:Any) : Option[(T_Result,T_ElemType)] = x match { - case x:T_Result => x match { + case x:T_Result @unchecked => x match { case v_x::Nil => Some((x,v_x)); case _ => None }; case _ => None }; @@ -751,7 +779,7 @@ with C_BAG[List[T_ElemType],T_ElemType] val v_none = f_none _; def f_none():T_Result = Nil; def u_none(x:Any) : Option[T_Result] = x match { - case x@Nil => Some((x)); + case x@Nil => Some((x).asInstanceOf[T_Result]); case _ => None }; val p_none = new PatternFunction[T_Result](u_none); @@ -841,11 +869,11 @@ with C_SET[Set[T_ElemType],T_ElemType] override def f_assert(v__88 : T_Result) : Unit = {}; val v__op_AC = f__op_AC _; - def f__op_AC(v_l : T_ElemType*):T_Result = ListSet(v_l:_*); + def f__op_AC(v_l : Seq[T_ElemType]):T_Result = ListSet(v_l:_*); val p__op_AC = new PatternSeqFunction[T_Result,T_ElemType](u__op_AC); def u__op_AC(x:Any) : Option[(T_Result,Seq[T_ElemType])] = x match { - case x:T_Result => Some((x,x.toSeq)); + case x:T_Result @unchecked => Some((x,x.toSeq)); case _ => None }; @@ 
-857,7 +885,7 @@ with C_SET[Set[T_ElemType],T_ElemType] def f_append(v_l1 : T_Result, v_l2 : T_Result):T_Result = v_l1 ++ v_l2; def u_append(x:Any) : Option[(T_Result,T_Result,T_Result)] = x match { - case x:T_Result => + case x:T_Result @unchecked => if (x.size > 1) { val y : T_ElemType = x.iterator.next(); Some((x,ListSet(y),x - y)) @@ -871,7 +899,7 @@ with C_SET[Set[T_ElemType],T_ElemType] val v_single = f_single _; def f_single(v_x : T_ElemType):T_Result = ListSet(v_x); def u_single(x:Any) : Option[(T_Result,T_ElemType)] = x match { - case x:T_Result => if (x.size == 1) Some(x,x.iterator.next()) else None; + case x:T_Result @unchecked => if (x.size == 1) Some(x,x.iterator.next()) else None; case _ => None }; val p_single = new PatternFunction[(T_Result,T_ElemType)](u_single); @@ -879,7 +907,7 @@ with C_SET[Set[T_ElemType],T_ElemType] val v_none = f_none _; def f_none():T_Result = ListSet(); def u_none(x:Any) : Option[T_Result] = x match { - case x:T_Result => + case x:T_Result @unchecked => if (x.size == 0) Some(x) else None; case _ => None }; @@ -911,7 +939,7 @@ trait C_MULTISET[T_Result, T_ElemType] extends C_TYPE[T_Result]with C_BAG[T_Resu val v_equal : (T_Result,T_Result) => T_Boolean; val v_less : (T_Result,T_Result) => T_Boolean; val v_less_equal : (T_Result,T_Result) => T_Boolean; - val v__op_AC : (T_ElemType*) => T_Result; + val v__op_AC : (Seq[T_ElemType]) => T_Result; val p__op_AC : PatternSeqFunction[T_Result,T_ElemType]; val v_member : (T_ElemType,T_Result) => T_Boolean; val v_count : (T_ElemType,T_Result) => T_Integer; @@ -1028,7 +1056,7 @@ class M_ORDERED_SET[T_ElemType](t_ElemType:C_ORDERED[T_ElemType] extends Module( val v_less_equal = f_less_equal _; def f_less_equal(v__130 : T_Result, v__131 : T_Result):T_Boolean; val v__op_AC = f__op_AC _; - def f__op_AC(v__132 : T_ElemType*):T_Result; + def f__op_AC(v__132 : Seq[T_ElemType]):T_Result; val v_union = f_union _; def f_union(v__133 : T_Result, v__134 : T_Result):T_Result; val v_intersect = 
f_intersect _; @@ -1068,7 +1096,7 @@ trait C_ORDERED_MULTISET[T_Result, T_ElemType] extends C_ORDERED_COLLECTION[T_Re val v_equal : (T_Result,T_Result) => T_Boolean; val v_less : (T_Result,T_Result) => T_Boolean; val v_less_equal : (T_Result,T_Result) => T_Boolean; - val v__op_AC : (T_ElemType*) => T_Result; + val v__op_AC : (Seq[T_ElemType]) => T_Result; val v_union : (T_Result,T_Result) => T_Result; val v_intersect : (T_Result,T_Result) => T_Result; val v_difference : (T_Result,T_Result) => T_Result; @@ -1101,7 +1129,7 @@ class M_ORDERED_MULTISET[T_ElemType](t_ElemType:C_ORDERED[T_ElemType] extends Mo val v_less_equal = f_less_equal _; def f_less_equal(v__143 : T_Result, v__144 : T_Result):T_Boolean; val v__op_AC = f__op_AC _; - def f__op_AC(v__145 : T_ElemType*):T_Result; + def f__op_AC(v__145 : Seq[T_ElemType]):T_Result; val v_union = f_union _; def f_union(v__146 : T_Result, v__147 : T_Result):T_Result; val v_intersect = f_intersect _; @@ -1277,7 +1305,7 @@ with C_STRING[String] override def f_assert(v__88 : T_Result) : Unit = {}; val v__op_AC = f__op_AC _; - def f__op_AC(v_l : Char*):T_Result = (v_l :\ "")((c,s) => c + s); + def f__op_AC(v_l : Char*):T_Result = (v_l foldRight "")((c,s) => s"$c$s"); val p__op_AC = new PatternSeqFunction[T_Result,Char](u__op_AC); def u__op_AC(x:Any) : Option[(T_Result,Seq[Char])] = x match { @@ -1319,7 +1347,7 @@ with C_STRING[String] val p_none = new PatternFunction[T_Result](u_none); val v_cons = f_cons _; - def f_cons(x : Char, s : String) : String = x + s; + def f_cons(x : Char, s : String) : String = s"$x$s"; val v_concatenate : (T_Result,T_Result) => T_Result = v_append; val v_nth = f_nth _; def f_nth(v_i : T_Integer, v_l : T_Result):Char = v_l.charAt(v_i); @@ -1378,3 +1406,81 @@ class M__basic_24[T_Node <: Node](t_Node:C_PHYLUM[T_Node]) { v_x.asInstanceOf[Node].lineNumber; }; +trait C_TUPLE_LATTICE[T_Result, T_ElemType, T_ST] + extends C_MAKE_LATTICE[T_Result, T_ST] + with C_LIST[T_Result, T_ElemType]; + +class 
M_TUPLE_LATTICE[T_ElemType, T_ST](val name: String, + val t_ElemType: C_TYPE[T_ElemType] with C_LATTICE[T_ElemType], + val t_ST: C_TYPE[T_ST] with C_LIST[T_ST, T_ElemType]) + extends Module(name) + with C_TUPLE_LATTICE[T_ST, T_ElemType, T_ST] { + + override val v_concatenate = t_ST.v_concatenate; + override val p__op_AC = t_ST.p__op_AC; + override val p_append = t_ST.p_append; + override val p_single = t_ST.p_single; + override val p_none = t_ST.p_none; + override val v_member = t_ST.v_member; + override val v_nth_from_end = t_ST.v_nth_from_end; + override val v_position = t_ST.v_position; + override val v_position_from_end = t_ST.v_position_from_end; + override val v_append = t_ST.v_append; + override val v_single = t_ST.v_single; + override val v_none = t_ST.v_none; + override val v_subseq = t_ST.v_subseq; + override val v_subseq_from_end = t_ST.v_subseq_from_end; + override val v_butsubseq = t_ST.v_butsubseq; + override val v_butsubseq_from_end = t_ST.v_butsubseq_from_end; + override val v_cons = t_ST.v_cons; + override val v_bottom = t_ST.v_none(); + override val v__op_AC = t_ST.v__op_AC; + override val v_nth = t_ST.v_nth; + override val v_equal = t_ST.v_equal; + override val v_assert = t_ST.v_assert; + override val v_node_equivalent = t_ST.v_node_equivalent; + override val v_string = t_ST.v_string; + + override def v_initial: T_ST = v_bottom; + override val v_join = f_combine; + override val v_meet = f_meet; + override val v_combine = f_combine; + + def f_combine(v_t1: T_ST, v_t2: T_ST): T_ST = { + v_t1 match { + case t_ST.p_none(_) => v_t2 + case t_ST.p_single(_, x) => v_t2 match { + case t_ST.p_none(_) => v_t1 + case t_ST.p_single(_, y) => t_ST.v__op_AC(Seq(t_ElemType.v_join(x, y))) + case t_ST.p_append(_, ly1, ly2) => v_append(f_combine(v_t1, ly1), ly2) + } + case t_ST.p_append(_, lx1, lx2) => v_t2 match { + case t_ST.p_none(_) => v_t1 + case t_ST.p_single(_, y) => v_append(f_combine(lx1, v_t2), lx2) + case t_ST.p_append(_, ly1, ly2) => 
v_append(f_combine(lx1, ly1), f_combine(lx2, ly2)) + } + } + }; + + def f_meet(v_t1: T_ST, v_t2: T_ST): T_ST = { + v_t1 match { + case t_ST.p_none(_) => v_t1 + case t_ST.p_single(_, x) => v_t2 match { + case t_ST.p_none(_) => v_t2 + case t_ST.p_single(_, y) => t_ST.v__op_AC(Seq(t_ElemType.v_meet(x, y))) + case t_ST.p_append(_, ly1, ly2) => f_meet(v_t1, ly1) + } + case t_ST.p_append(_, lx1, lx2) => v_t2 match { + case t_ST.p_none(_) => v_t2 + case t_ST.p_single(_, y) => f_meet(lx1, v_t2) + case t_ST.p_append(_, ly1, ly2) => v_append(f_meet(lx1, ly1), f_meet(lx2, ly2)) + } + } + }; + + // x < y iff x <= y and x != y + override val v_compare: (T_ST, T_ST) => T_Boolean = (x, y) => v_compare_equal(x, y) && !v_equal(x, y) + + // x <= y iff x \/ y == y + override val v_compare_equal: (T_ST, T_ST) => T_Boolean = (x, y) => v_equal(f_combine(x, y), y) +} diff --git a/base/scala/symbol.handcode.scala b/base/scala/symbol.handcode.scala new file mode 100644 index 00000000..5adbcc52 --- /dev/null +++ b/base/scala/symbol.handcode.scala @@ -0,0 +1,34 @@ +// partially handcoded SYMBOL module +import basic_implicit._; + +trait C_SYMBOL[T_Result] extends C_BASIC[T_Result] with C_PRINTABLE[T_Result] with C_ORDERED[T_Result] with C_TYPE[T_Result] { + val v_assert : (T_Result) => Unit; + val v_equal : (T_Result,T_Result) => T_Boolean; + val v_create : (T_String) => T_Result; + val v_name : (T_Result) => T_String; + val v_less : (T_Result,T_Result) => T_Boolean; + val v_less_equal : (T_Result,T_Result) => T_Boolean; + val v_string : (T_Result) => T_String; + def v_null : T_Result; +} + +class M_SYMBOL(name : String) + extends I_TYPE[Symbol](name) + with C_SYMBOL[Symbol] +{ + override def f_equal(x : Symbol, y : Symbol) : Boolean = x eq y; + override def f_string(x : Symbol) : String = x.name; + + val v_create = f_create _; + def f_create(v__4 : T_String):T_Result = Symbol(v__4); + val v_name = f_name _; + def f_name(v__5 : T_Result):T_String = v__5.name; + val v_less = f_less _; + def 
f_less(v__6 : T_Result, v__7 : T_Result):T_Boolean = + v__6.hashCode() < v__7.hashCode(); + val v_less_equal = f_less_equal _; + def f_less_equal(v__8 : T_Result, v__9 : T_Result):T_Boolean = + v__8.hashCode() <= v__9.hashCode(); + var v_null:T_Result = Symbol("nil"); +} + diff --git a/base/scala/symbol.scala b/base/scala/symbol.scala new file mode 100644 index 00000000..316fc843 --- /dev/null +++ b/base/scala/symbol.scala @@ -0,0 +1,12 @@ +import basic_implicit._; +object symbol_implicit { + val cool_symbol_loaded = true; + val t_Symbol = new M_SYMBOL("Symbol"); + type T_Symbol = /*TI*/t_Symbol.T_Result; + val v_make_symbol : (T_String) => T_Symbol = t_Symbol.v_create; + val v_symbol_name : (T_Symbol) => T_String = t_Symbol.v_name; + val v_symbol_equal : (T_Symbol,T_Symbol) => T_Boolean = t_Symbol.v_equal; + val v_null_symbol:T_Symbol = t_Symbol.v_null; +} +import symbol_implicit._; + diff --git a/base/scala/table.handcode.scala b/base/scala/table.handcode.scala index 81dd21ab..a9e2b742 100644 --- a/base/scala/table.handcode.scala +++ b/base/scala/table.handcode.scala @@ -6,6 +6,7 @@ import basic_implicit._; object table_implicit { val table_loaded = true; type T_TABLE[K,V] = scala.collection.immutable.TreeMap[K,V]; + type T_TABLE_LATTICE[K, V] = T_TABLE[K, V]; } import table_implicit._; @@ -26,10 +27,10 @@ class M_TABLE[T_KeyType, T_ValueType] extends I_TYPE[TreeMap[T_KeyType,T_ValueType]](name) with C_TABLE[TreeMap[T_KeyType,T_ValueType],T_KeyType,T_ValueType] { - implicit def key_order(x : T_KeyType) = new Ordered[T_KeyType] { + implicit def key_order(x : T_KeyType): Ordered[T_KeyType] = new Ordered[T_KeyType] { def compare(y : T_KeyType) : Int = key_ordering.compare(x,y); }; - implicit val key_ordering = new Ordering[T_KeyType] { + implicit val key_ordering: Ordering[T_KeyType] = new Ordering[T_KeyType] { def compare(x : T_KeyType, y : T_KeyType) : Int = { if (t_KeyType.v_equal(x,y)) return 0; if (t_KeyType.v_less(x,y)) return -1; @@ -43,10 +44,10 @@ with 
C_TABLE[TreeMap[T_KeyType,T_ValueType],T_KeyType,T_ValueType] val v_table_entry = f_table_entry _; def f_table_entry(v_key : T_KeyType, v_val : T_ValueType):T_Result = - v_empty_table.insert(v_key,v_val); + v_empty_table.updated(v_key,v_val); def u_table_entry(x:Any) : Option[(T_Result,T_KeyType,T_ValueType)] = x match { - case m:Table => { + case m:Table @unchecked => { if (m.size == 1) { val k = m.firstKey; Some((m,k,m(k))) @@ -65,7 +66,7 @@ with C_TABLE[TreeMap[T_KeyType,T_ValueType],T_KeyType,T_ValueType] if (result.isDefinedAt(k)) { result = result.updated(k,t_ValueType.v_combine(result(k),v)).asInstanceOf[Table] } else { - result = result.insert(k,v); + result = result.updated(k,v); } }; result @@ -75,8 +76,39 @@ with C_TABLE[TreeMap[T_KeyType,T_ValueType],T_KeyType,T_ValueType] def f_select(v_table : T_Result, v_key : T_KeyType):T_Result = { v_table.get(v_key) match { case Some(v) => f_table_entry(v_key,v) - case None => f_table_entry(v_key,t_ValueType.v_initial) + case None => v_initial } } } +trait C_TABLE_LATTICE[TResult, T_KeyType, T_ValueType] + extends C_TABLE[TResult, T_KeyType, T_ValueType] + with C_LATTICE[TResult] + +class M_TABLE_LATTICE[T_KeyType, T_ValueType] ( + val _name : String, + val _t_KeyType : C_TYPE[T_KeyType] with C_ORDERED[T_KeyType], + val _t_ValueType : C_TYPE[T_ValueType] with C_COMBINABLE[T_ValueType] with C_LATTICE[T_ValueType]) + extends M_TABLE[T_KeyType, T_ValueType](_name, _t_KeyType, _t_ValueType) + with C_TABLE_LATTICE[T_TABLE[T_KeyType,T_ValueType],T_KeyType,T_ValueType] { + + override val v_join: (T_TABLE[T_KeyType, T_ValueType], T_TABLE[T_KeyType, T_ValueType]) => T_TABLE[T_KeyType, T_ValueType] = v_combine + override val v_meet: (T_TABLE[T_KeyType, T_ValueType], T_TABLE[T_KeyType, T_ValueType]) => T_TABLE[T_KeyType, T_ValueType] = f_meet + + def f_meet(v_t1 :T_Result, v_t2 :T_Result) :T_Result = { + var result :Table = v_empty_table + for ((k,v) <- v_t2) { + if (v_t1.isDefinedAt(k)) { + result = result.updated(k, 
_t_ValueType.v_meet(v_t1(k), v)).asInstanceOf[Table] + } + }; + result + }; + + override def v_bottom: T_TABLE[T_KeyType, T_ValueType] = v_empty_table + + // v1 != v2 && v1 <= v2 --> v1 < v2 + override val v_compare: (T_TABLE[T_KeyType, T_ValueType], T_TABLE[T_KeyType, T_ValueType]) => T_Boolean = (x, y) => v_compare_equal(x, y) && !v_equal(x, y) + + override val v_compare_equal: (T_TABLE[T_KeyType, T_ValueType], T_TABLE[T_KeyType, T_ValueType]) => T_Boolean = (x, y) => (x.keySet subsetOf y.keySet) && (x.keySet & y.keySet).forall(key => _t_ValueType.v_compare_equal(x(key), y(key))) +} diff --git a/base/scala/test-flat.scala b/base/scala/test-flat.scala new file mode 100644 index 00000000..214df769 --- /dev/null +++ b/base/scala/test-flat.scala @@ -0,0 +1,83 @@ +import basic_implicit._; + +object TestFlat extends App { + + private def assert_equals[T](text: String, expected: T, actual: T) = { + if (expected != actual) { + throw new RuntimeException(s"Failed: $text") + } + } + + val t_FlatIntegerLattice = new M_FLAT_LATTICE[T_Integer]("Flat", t_Integer); + + val i1 = t_FlatIntegerLattice.v_lift(1); + val i2 = t_FlatIntegerLattice.v_lift(2); + val i1a = t_FlatIntegerLattice.v_lift(1); + val top = t_FlatIntegerLattice.v_top; + val bot = t_FlatIntegerLattice.v_bottom; + + assert_equals("bot", "BOT", t_FlatIntegerLattice.v_string(bot)); + assert_equals("i1", "LIFT(1)", t_FlatIntegerLattice.v_string(i1)); + assert_equals("i2", "LIFT(2)", t_FlatIntegerLattice.v_string(i2)); + assert_equals("top", "TOP", t_FlatIntegerLattice.v_string(top)); + + assert_equals("bot < bot", false, t_FlatIntegerLattice.v_compare(bot, bot)); + assert_equals("bot < i1", true, t_FlatIntegerLattice.v_compare(bot, i1)); + assert_equals("bot < i2", true, t_FlatIntegerLattice.v_compare(bot, i2)); + assert_equals("bot < top", true, t_FlatIntegerLattice.v_compare(bot, top)); + + assert_equals("i1 < bot", false, t_FlatIntegerLattice.v_compare(i1, bot)); + assert_equals("i1 < i1", false, 
t_FlatIntegerLattice.v_compare(i1, i1)); + assert_equals("i1 < i2", false, t_FlatIntegerLattice.v_compare(i1, i2)); + assert_equals("i1 < top", true, t_FlatIntegerLattice.v_compare(i1, top)); + + assert_equals("top < bot", false, t_FlatIntegerLattice.v_compare(top, bot)); + assert_equals("top < i1", false, t_FlatIntegerLattice.v_compare(top, i1)); + assert_equals("top < i2", false, t_FlatIntegerLattice.v_compare(top, i2)); + assert_equals("top < top", false, t_FlatIntegerLattice.v_compare(top, top)); + + assert_equals("bot <= bot", true, t_FlatIntegerLattice.v_compare_equal(bot, bot)); + assert_equals("bot <= i1", true, t_FlatIntegerLattice.v_compare_equal(bot, i1)); + assert_equals("bot <= i2", true, t_FlatIntegerLattice.v_compare_equal(bot, i2)); + assert_equals("bot <= top", true, t_FlatIntegerLattice.v_compare_equal(bot, top)); + + assert_equals("i1 <= bot", false, t_FlatIntegerLattice.v_compare_equal(i1, bot)); + assert_equals("i1 <= i1", true, t_FlatIntegerLattice.v_compare_equal(i1, i1)); + assert_equals("i1 <= i2", false, t_FlatIntegerLattice.v_compare_equal(i1, i2)); + assert_equals("i1 <= top", true, t_FlatIntegerLattice.v_compare_equal(i1, top)); + + assert_equals("top <= bot", false, t_FlatIntegerLattice.v_compare_equal(top, bot)); + assert_equals("top <= i1", false, t_FlatIntegerLattice.v_compare_equal(top, i1)); + assert_equals("top <= i2", false, t_FlatIntegerLattice.v_compare_equal(top, i2)); + assert_equals("top <= top", true, t_FlatIntegerLattice.v_compare_equal(top, top)); + + assert_equals("bot \\/ bot", "BOT", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(bot, bot))); + assert_equals("bot \\/ i1", "LIFT(1)", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(bot, i1))); + assert_equals("bot \\/ i2", "LIFT(2)", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(bot, i2))); + assert_equals("bot \\/ top", "TOP", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(bot, top))); + + assert_equals("i1 \\/ bot", 
"LIFT(1)", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(i1, bot))); + assert_equals("i1 \\/ i1", "LIFT(1)", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(i1, i1))); + assert_equals("i1 \\/ i2", "TOP", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(i1, i2))); + assert_equals("i1 \\/ top", "TOP", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(i1, top))); + + assert_equals("top \\/ bot", "TOP", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(top, bot))); + assert_equals("top \\/ i1", "TOP", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(top, i1))); + assert_equals("top \\/ i2", "TOP", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(top, i2))); + assert_equals("top \\/ top", "TOP", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_join(top, top))); + + assert_equals("bot /\\ bot", "BOT", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(bot, bot))); + assert_equals("bot /\\ i1", "BOT", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(bot, i1))); + assert_equals("bot /\\ i2", "BOT", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(bot, i2))); + assert_equals("bot /\\ top", "BOT", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(bot, top))); + + assert_equals("i1 /\\ bot", "BOT", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(i1, bot))); + assert_equals("i1 /\\ i1", "LIFT(1)", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(i1, i1))); + assert_equals("i1 /\\ i2", "BOT", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(i1, i2))); + assert_equals("i1 /\\ top", "LIFT(1)", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(i1, top))); + + assert_equals("top /\\ bot", "BOT", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(top, bot))); + assert_equals("top /\\ i1", "LIFT(1)", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(top, i1))); + assert_equals("top /\\ i2", "LIFT(2)", 
t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(top, i2))); + assert_equals("top /\\ top", "TOP", t_FlatIntegerLattice.v_string(t_FlatIntegerLattice.v_meet(top, top))); +} \ No newline at end of file diff --git a/base/scala/test-multiset.scala b/base/scala/test-multiset.scala index 8f20ffc3..2a29bee5 100644 --- a/base/scala/test-multiset.scala +++ b/base/scala/test-multiset.scala @@ -5,10 +5,14 @@ object TestMultiSet extends App { val l1 = List(1,2,3,2,1,4,5,4,3,2,2,2); val l2 = List(0,1,2,3,4,5,2); + val l1_clone = l1 ++ Set(); println("union = "+ t_MI.v_union(l1,l2)); println("intersect = " + t_MI.v_intersect(l1,l2)); println("difference = " + t_MI.v_difference(l1,l2)); println("combine = "+ t_MI.v_combine(l1,l2)); println("count(2) = " + t_MI.v_count(2,l1)); + println("equal(l1,l1) = " + t_MI.v_equal(l1,l1)); + println("equal(l1,l1_clone) = " + t_MI.v_equal(l1,l1_clone)); + println("equal(l1,l2) = " + t_MI.v_equal(l1,l2)); } diff --git a/base/scala/test-set.scala b/base/scala/test-set.scala new file mode 100644 index 00000000..4c279e58 --- /dev/null +++ b/base/scala/test-set.scala @@ -0,0 +1,17 @@ +import basic_implicit._; + +object TestSet extends App { + val t_Set = new M_SET[T_Integer]("SET", t_Integer); + + val l1 = Set(1,2,3,2,1,4,5,4,3,2,2,2); + val l2 = Set(0,1,2,3,4,5,2); + val l1_clone = l1 ++ Set(); + + println("union = "+ t_Set.v_union(l1,l2)); + println("intersect = " + t_Set.v_intersect(l1,l2)); + println("difference = " + t_Set.v_difference(l1,l2)); + println("combine = "+ t_Set.v_combine(l1,l2)); + println("equal(l1,l1) = " + t_Set.v_equal(l1,l1)); + println("equal(l1,l1_clone) = " + t_Set.v_equal(l1,l1_clone)); + println("equal(l1,l2) = " + t_Set.v_equal(l1,l2)); +} diff --git a/base/symbol.aps b/base/symbol.aps new file mode 100644 index 00000000..c42ac70f --- /dev/null +++ b/base/symbol.aps @@ -0,0 +1,25 @@ +-- SYMBOL.APS +-- Stubs for a symbol table package +-- John Boyland + +private module SYMBOL[] :: BASIC[], PRINTABLE[], 
ORDERED[] begin + function assert(_:Result); + function equal(_,_:Result) : Boolean; + function create(_:String) : Result; + function name(_:Result) : String; + function less(_,_:Result) : Boolean; + function less_equal(_,_:Result) : Boolean; + string = name; + null : Result; +end; + +type Symbol := SYMBOL[]; + +make_symbol = Symbol$create; +symbol_name = Symbol$name; +symbol_equal = Symbol$equal; + +null_symbol : Symbol := Symbol$null; + +-- classified as a function for convenience +function gensym() : Symbol; diff --git a/base/table.aps b/base/table.aps index a55126c5..674750a6 100644 --- a/base/table.aps +++ b/base/table.aps @@ -9,3 +9,18 @@ begin function select(table : Result; key : KeyType) : Result := table; -- function arbitrary(table : Result) : ValueType; end; + +module TABLE_LATTICE[KeyType :: ORDERED[]; ValueType :: COMBINABLE[], LATTICE[]] + :: TABLE[KeyType, ValueType], LATTICE[] extends TABLE[KeyType, ValueType] +begin + bottom = Result$initial; + function compare(t1, t2 : Result) : Boolean; + function compare_equal(t1, t2 : Result) : Boolean; + function join(t1, t2 : Result) : Result; + function meet(t1, t2 : Result) : Result; + + function select(table: Result; key: KeyType): Result := Result$select(table, key); + + pattern table_entry = Result$table_entry; + function table_entry(key: KeyType; value: ValueType): Result := Result$table_entry(key, value); +end; diff --git a/codegen/dump.h b/codegen/dump.h new file mode 100644 index 00000000..13c65c5e --- /dev/null +++ b/codegen/dump.h @@ -0,0 +1,212 @@ +#ifndef DUMP_H +#include +#include +#include + +using std::ostream; +using std::string; + +// don't generate any code for this declaration: +void omit_declaration(const char *name); + +// The result type is as given: +void impl_module(const char *name, const char *type); + +extern bool incremental; +extern bool static_schedule; +extern int verbose; +extern int debug; +extern bool include_comments; + +typedef struct synth_function_state 
SYNTH_FUNCTION_STATE; + +class Implementation; + +extern Implementation *impl; + +static const int indent_multiple = 2; +extern int nesting_level; +string indent(int level = nesting_level); +class InDefinition { + int saved_nesting; + public: + InDefinition(int nn=0) { saved_nesting = nesting_level; nesting_level = nn; } + ~InDefinition() { nesting_level = saved_nesting; } +}; + +#ifdef APS2SCALA +/* + * The Scala generation does everything in one file. + */ +#define GEN_OUTPUT ostream +#define INDEFINITION InDefinition xx(nesting_level) + +#else /* APS2SCALA */ +/* + * The C++ generation has separate header and implementation files. + */ +struct output_streams { + Declaration context; + ostream &hs, &cpps, &is; + string prefix; + output_streams(Declaration _c, ostream &_hs, ostream &_cpps, ostream &_is, + string _p) + : context(_c), hs(_hs), cpps(_cpps), is(_is), prefix(_p) {} +}; +#define GEN_OUTPUT output_streams + +extern int inline_definitions; +#define INDEFINITION InDefinition xx(inline_definitions ? 
nesting_level : 0) +#endif /* APS2SCALA */ + +void dump_Declaration(Declaration,GEN_OUTPUT&); + +void dump_Signature(Signature,string,ostream&); +void dump_Type_prefixed(Type,ostream&); +void dump_Type(Type,ostream&); +void dump_Type_value(Type,ostream&); +void dump_Type_signature(Type,ostream&); +void dump_Typed_decl(Type,Declaration,const char*prefix,ostream&); +void dump_Expression(Expression,ostream&); +void dump_Use(Use,const char *prefix,ostream&); +void dump_TypeEnvironment(TypeEnvironment,ostream&); +void dump_vd_Default(Declaration,ostream&); + +void dump_function_prototype(string,Type ft, GEN_OUTPUT& oss); +void dump_debug_end(ostream& os); + +#ifndef APC2SCALA +// these two must always be called in pairs: the first +// leaves information around for the second: +void dump_Pattern_cond(Pattern p, string node, ostream&); +void dump_Pattern_bindings(Pattern p, ostream&); +string matcher_bindings(string node, Match m); +#endif +void dump_Pattern(Pattern p, ostream&); // only defined for Scala + +// override << +ostream& operator<<(ostream&o,Symbol s); +ostream& operator<<(ostream&o,String s); + +inline ostream& operator<<(ostream&o,Expression e) +{ + dump_Expression(e,o); + return o; +} + +inline ostream& operator<<(ostream&o,Type t) +{ + dump_Type(t,o); + return o; +} + +inline ostream& operator<<(ostream&o, Pattern p) +{ + dump_Pattern(p,o); + return o; +} + +// wrappers +struct as_sig { + Type type; + as_sig(Type t) : type(t) {} +}; +struct as_val { + Type type; + as_val(Type t) : type(t) {} +}; + +inline ostream& operator<<(ostream& o, as_sig tas) +{ + dump_Type_signature(tas.type,o); + return o; +} + +inline ostream& operator<<(ostream& o, as_val tav) +{ + dump_Type_value(tav.type,o); + return o; +} + +// we have some debugging functions: +extern void debug_Instance(INSTANCE*,ostream&); +inline ostream& operator<<(ostream&os, INSTANCE*i) { + debug_Instance(i,os); + return os; +} + +extern string operator+(string, int); + +#ifndef APS2SCALA +// special 
C++ generation code +// sending to oss copies to cpps, ... +template +inline const output_streams& operator<<(const output_streams& oss, Any x) { + oss.hs << x; + if (!inline_definitions) oss.cpps << x; + return oss; +} + +// ... except for a header return type, ... +// (null for constructors) +template +struct header_return_type { + Type rt; + header_return_type(Type ty) : rt(ty) {} +}; +template<> +inline const output_streams& operator<< > +(const output_streams& oss, header_return_type hrt) { + oss.hs << indent(); + if (hrt.rt) oss.hs << hrt.rt; + if (!inline_definitions) { + oss.cpps << "\n"; + if (hrt.rt) dump_Type_prefixed(hrt.rt,oss.cpps); + } + return oss; +} +template<> +inline const output_streams& operator<< > +(const output_streams& oss, header_return_type hrt) { + oss.hs << indent(); + oss.hs << hrt.rt; + if (!inline_definitions) { + oss.cpps << "\n" << oss.prefix << hrt.rt; + } + return oss; +} + +// ... and header name ... +struct header_function_name { + std::string fname; + header_function_name(std::string fn) : fname(fn) {} +}; +template<> +inline const output_streams& operator<< +(const output_streams& oss, header_function_name hfn) { + oss.hs << hfn.fname; + if (!inline_definitions) oss.cpps << oss.prefix << hfn.fname; + return oss; +} +// ... 
and header end +struct header_end { +}; +template <> +inline const output_streams& operator<< +(const output_streams& oss, header_end) { + if (!inline_definitions) oss.hs << ";\n"; + return oss; +} +#endif /* APS2SCALA */ + +// The following are used for synth function implementation only +extern AUG_GRAPH* current_aug_graph; +extern STATE* current_state; +extern std::vector synth_functions_states; +extern SYNTH_FUNCTION_STATE* current_synth_functions_state; + +// Common code generation utility functions +extern bool check_surrounding_decl(void* node, KEYTYPE_Declaration decl_key, Declaration* result_decl); +extern bool check_surrounding_node(void* node, KEYTYPE_ABSTRACT_APS_Phylum ast_key, void** result_node); + +#endif diff --git a/aps2scala/implement.cc b/codegen/implement.cc similarity index 90% rename from aps2scala/implement.cc rename to codegen/implement.cc index b52197f0..1347cd86 100644 --- a/aps2scala/implement.cc +++ b/codegen/implement.cc @@ -3,7 +3,7 @@ extern "C" { #include #include "aps-ag.h" } -#include "dump-scala.h" +#include "dump.h" #include "implement.h" Implementation::ModuleInfo::ModuleInfo(Declaration module) @@ -11,25 +11,25 @@ Implementation::ModuleInfo::ModuleInfo(Declaration module) {} void Implementation::ModuleInfo::note_top_level_match(Declaration tlm, - ostream&) + GEN_OUTPUT&) { top_level_matches.push_back(tlm); } void Implementation::ModuleInfo::note_var_value_decl(Declaration vd, - ostream&) + GEN_OUTPUT&) { var_value_decls.push_back(vd); } void Implementation::ModuleInfo::note_local_attribute(Declaration ld, - ostream&) + GEN_OUTPUT&) { local_attributes.push_back(ld); } void Implementation::ModuleInfo::note_attribute_decl(Declaration ad, - ostream&) + GEN_OUTPUT&) { attribute_decls.push_back(ad); } diff --git a/apscpp/implement.h b/codegen/implement.h similarity index 60% rename from apscpp/implement.h rename to codegen/implement.h index f8e30309..d7f8d3c0 100644 --- a/apscpp/implement.h +++ b/codegen/implement.h @@ -1,13 +1,13 
@@ #ifndef IMPLEMENT_H #define IMPLEMENT_H -// Implementing APS in C++ +// Implementing APS in Imperative OO (C++/Scala) // Different scheduling algorithms // This file contains an abstract class that // is used to schedule attribute grammars expressed // in APS using C++ templates. -// The main code is in dump-cpp.cc, +// The main code is in dump-LANG.cc, // but some parts are different depending on whether // we have static or dynamic scheduling. @@ -17,10 +17,26 @@ using std::ostream; using std::vector; -class output_streams; +#ifdef APS2SCALA +#define GEN_OUTPUT ostream +#else /* APSCPP */ +struct output_streams; +#define GEN_OUTPUT output_streams +#endif #define LOCAL_UNIQUE_PREFIX(ld) Def_info(value_decl_def(ld))->unique_prefix +typedef struct synth_function_state { + std::string fdecl_name; + INSTANCE* source; + PHY_GRAPH* source_phy_graph; + std::vector regular_dependencies; + std::vector fiber_dependents; + std::vector aug_graphs; + bool is_phylum_instance; + bool is_fiber_evaluation; +} SYNTH_FUNCTION_STATE; + // Abstract class: class Implementation { public: @@ -38,22 +54,18 @@ class Implementation { ModuleInfo(Declaration module); virtual ~ModuleInfo() {}; - virtual void note_top_level_match(Declaration tlm, - const output_streams& oss); + virtual void note_top_level_match(Declaration tlm, GEN_OUTPUT&); - virtual void note_local_attribute(Declaration la, - const output_streams& oss); + virtual void note_local_attribute(Declaration la, GEN_OUTPUT&); - virtual void note_attribute_decl(Declaration ad, - const output_streams& oss); + virtual void note_attribute_decl(Declaration ad, GEN_OUTPUT&); // Declaration will be declared by caller, but not initialized // unless this function desires to: - virtual void note_var_value_decl(Declaration vd, - const output_streams& oss); + virtual void note_var_value_decl(Declaration vd, GEN_OUTPUT&); // implement tlm's and var value decls, and generate finish() routine. 
- virtual void implement(const output_streams& oss) = 0; + virtual void implement(GEN_OUTPUT&) = 0; }; virtual ModuleInfo* get_module_info(Declaration module) = 0; @@ -62,15 +74,21 @@ class Implementation { virtual void implement_function_body(Declaration f, ostream&) = 0; // not sure what to do here - // virtual void implement_procedure(Declaration p, output_streams&) = 0; + // virtual void implement_procedure(Declaration p, GEN_OUTPUT&) = 0; // if a Declaration has an implementation mark on it, // this function is called to implement its use: virtual void implement_value_use(Declaration vd, ostream&) = 0; + + virtual void dump_synth_instance(INSTANCE*, ostream&) { + fatal_error("Only implemented for synth function codegen"); + } }; extern Implementation *dynamic_impl; extern Implementation *static_impl; +extern Implementation *static_scc_impl; +extern Implementation *synth_impl; #define IMPLEMENTATION_MARKS (127<<24) diff --git a/aps2scala/static-impl.cc b/codegen/static-impl.cc similarity index 72% rename from aps2scala/static-impl.cc rename to codegen/static-impl.cc index 27e575ee..a541f4d8 100644 --- a/aps2scala/static-impl.cc +++ b/codegen/static-impl.cc @@ -3,27 +3,19 @@ extern "C" { #include #include "aps-ag.h" } -#include "dump-scala.h" +#include "dump.h" #include "implement.h" +#include #define LOCAL_VALUE_FLAG (1<<28) using namespace std; -/** Return phase (synthesized) or -phase (inherited) - * for fibered attribute, given the phylum's summary dependence graph. - */ -int attribute_schedule(PHY_GRAPH *phy_graph, const FIBERED_ATTRIBUTE& key) -{ - int n = phy_graph->instances.length; - for (int i=0; i < n; ++i) { - const FIBERED_ATTRIBUTE& fa = phy_graph->instances.array[i].fibered_attr; - if (fa.attr == key.attr && fa.fiber == key.fiber) - return phy_graph->summary_schedule[i]; - } - fatal_error("Could not find summary schedule for instance"); - return 0; -} +#ifdef APS2SCALA +#define DEREF "." 
+#else +#define DEREF "->" +#endif Expression default_init(Default def) { @@ -106,7 +98,7 @@ static bool implement_visit_function(AUG_GRAPH* aug_graph, Expression instance_assignment[], int nch, Declaration children[], - int child_phase[], + int child_phase[], // phase child is at ostream& os) { // STATE *s = aug_graph->global_state; @@ -123,7 +115,11 @@ static bool implement_visit_function(AUG_GRAPH* aug_graph, PHY_GRAPH* npg = node_is_syntax ? Declaration_info(in->node)->node_phy_graph : 0; - int ph = node_is_syntax ? attribute_schedule(npg,in->fibered_attr) : -1; + int ph = node_is_syntax ? attribute_schedule(npg,&(in->fibered_attr)) : 0; + print_instance(in,stdout); + if (ch == nch) printf(" ch = "); + else printf(" ch = %d",ch); + printf(", ph = %d\n",ph); // check for phase change of parent: if (node_is_lhs && ph != current && ph != -current) { @@ -144,18 +140,31 @@ static bool implement_visit_function(AUG_GRAPH* aug_graph, bool is_mod = Declaration_KEY(in->node) == KEYmodule_decl; if (is_mod) { +#ifdef APS2SCALA os << indent() << "for (root <- roots) {\n"; +#else /* APS2SCALA */ + os << indent() << "for (int i=0; i < n_roots; ++i) {\n"; +#endif /* APS2SCALA */ ++nesting_level; +#ifndef APS2SCALA + os << indent() << "C_PHYLUM::Node *root = phylum->node(i);\n"; +#endif /* APS2SCALA */ } if (ph < 0) { aps_warning(in->node,"used inherited attributes of children"); + fatal_error("stopping"); + ph = -ph; } os << indent() << "visit_" << PHY_GRAPH_NUM(npg) << "_" << ph << "("; if (is_mod) os << "root"; else +#ifdef APS2SCALA + os << "v_" << ("_" == string(decl_name(in->node)) ? 
"0": decl_name(in->node)); +#else /* APS2SCALA */ os << "v_" << decl_name(in->node); +#endif /* APS2SCALA */ os << ");\n"; if (is_mod) { --nesting_level; @@ -181,12 +190,26 @@ static bool implement_visit_function(AUG_GRAPH* aug_graph, // if first match in case, we evaluate variable: if (m == first_Match(case_stmt_matchers(header))) { Expression e = case_stmt_expr(header); +#ifdef APS2SCALA //Type ty = infer_expr_type(e); os << indent() << "val node = " << e << ";\n"; +#else /* APS2SCALA */ + Type ty = infer_expr_type(e); + os << indent() << ty << " node=" << e << ";\n"; +#endif /* APS2SCALA */ } +#ifdef APS2SCALA os << indent() << "node match {\n"; os << indent() << "case " << p << " => {\n"; +#else /* APS2SCALA */ + os << indent() << "if ("; + dump_Pattern_cond(p,"node",os); + os << ") {\n"; +#endif /* APS2SCALA */ nesting_level+=1; +#ifndef APS2SCALA + dump_Pattern_bindings(p,os); +#endif /* APS2SCALA */ if_true = matcher_body(m); if (MATCH_NEXT(m)) { if_false = 0; //? Why not the nxt match ? @@ -207,12 +230,16 @@ static bool implement_visit_function(AUG_GRAPH* aug_graph, nch,children,child_phase,os); delete[] true_assignment; --nesting_level; +#ifdef APS2SCALA if (is_match) { os << indent() << "}\n"; os << indent() << "case _ => {\n"; } else { os << indent() << "} else {\n"; } +#else /* APS2SCALA */ + os << indent() << "} else {\n"; +#endif /* APS2SCALA */ ++nesting_level; Expression* false_assignment = if_false ? 
make_instance_assignment(aug_graph,if_false,instance_assignment) @@ -223,11 +250,15 @@ static bool implement_visit_function(AUG_GRAPH* aug_graph, nch,children,child_phase,os); if (if_false) delete[] false_assignment; --nesting_level; +#ifdef APS2SCALA if (is_match) { os << indent() << "}}\n"; } else { os << indent() << "}\n"; } +#else /* APS2SCALA */ + os << indent() << "}\n"; +#endif /* APS2SCALA */ return cont; } @@ -271,15 +302,28 @@ static bool implement_visit_function(AUG_GRAPH* aug_graph, case KEYvalue_use: // shared global collection field = USE_DECL(value_use_use(lhs)); +#ifdef APS2SCALA os << "a_" << decl_name(field) << "."; if (debug) os << "assign"; else os << "set"; os << "(" << rhs << ");\n"; +#else /* APS2SCALA */ + os << "v_" << decl_name(field) << "="; + switch (Default_KEY(value_decl_default(field))) { + case KEYcomposite: + os << composite_combiner(value_decl_default(field)); + break; + default: + os << as_val(value_decl_type(field)) << "->v_combine"; + break; + } + os << "(v_" << decl_name(field) << "," << rhs << ");\n"; +#endif /* APS2SCALA */ break; case KEYfuncall: field = field_ref_p(lhs); if (field == 0) fatal_error("what sort of assignment lhs: %d", tnode_line_number(assign)); - os << "a_" << decl_name(field) << "."; + os << "a_" << decl_name(field) << DEREF; if (debug) os << "assign"; else os << "set"; os << "(" << field_ref_object(lhs) << "," << rhs << ");\n"; break; @@ -295,17 +339,21 @@ static bool implement_visit_function(AUG_GRAPH* aug_graph, if (in->node == 0 && ad != 0) { if (rhs) { if (Declaration_info(ad)->decl_flags & LOCAL_ATTRIBUTE_FLAG) { - os << "a" << LOCAL_UNIQUE_PREFIX(ad) << "_" << asym << "."; + os << "a" << LOCAL_UNIQUE_PREFIX(ad) << "_" << asym << DEREF; if (debug) os << "assign"; else os << "set"; os << "(anchor," << rhs << ");\n"; } else { int i = LOCAL_UNIQUE_PREFIX(ad); if (i == 0) { +#ifdef APS2SCALA if (!def_is_constant(value_decl_def(ad))) { os << "// v_" << asym << " is assigned/initialized by default.\n"; } 
else { os << "// v_" << asym << " is initialized in module.\n"; } +#else + os << "v_" << asym << " = " << rhs << ";\n"; +#endif } else { os << "v" << i << "_" << asym << " = " << rhs << "; // local\n"; } @@ -340,7 +388,7 @@ static bool implement_visit_function(AUG_GRAPH* aug_graph, os << "v" << i << "_" << asym << " = " << rhs << ";\n"; } } else { - os << "a_" << asym << "."; + os << "a_" << asym << DEREF; if (debug) os << "assign"; else os << "set"; os << "(v_" << decl_name(in->node) << "," << rhs << ");\n"; @@ -354,7 +402,7 @@ static bool implement_visit_function(AUG_GRAPH* aug_graph, } else if (Declaration_KEY(in->node) == KEYvalue_decl) { if (rhs) { // assigning field of object - os << "a_" << asym << "."; + os << "a_" << asym << DEREF; if (debug) os << "assign"; else os << "set"; os << "(v_" << decl_name(in->node) << "," << rhs << ");\n"; @@ -372,12 +420,23 @@ static bool implement_visit_function(AUG_GRAPH* aug_graph, // dump visit functions for constructors void dump_visit_functions(PHY_GRAPH *phy_graph, AUG_GRAPH *aug_graph, - ostream& oss) +#ifdef APS2SCALA + ostream& os) +#else /* APS2SCALA */ + output_streams& oss) +#endif /* APS2SCALA */ { Declaration tlm = aug_graph->match_rule; Match m = top_level_match_m(tlm); Block block = matcher_body(m); +#ifndef APS2SCALA + ostream& hs = oss.hs; + ostream& cpps = oss.cpps; + // ostream& is = oss.is; + ostream& os = inline_definitions ? 
hs : cpps; + +#endif /* APS2SCALA */ int pgn = PHY_GRAPH_NUM(phy_graph); int j = Declaration_info(aug_graph->syntax_decl)->instance_index; CTO_NODE *total_order = aug_graph->total_order; @@ -406,22 +465,38 @@ void dump_visit_functions(PHY_GRAPH *phy_graph, while (total_order) { ++phase; - oss << indent() << "def visit_" << pgn << "_" << phase << "_" << j - << "(anchor : T_" << decl_name(phy_graph->phylum) - << ") : Unit = anchor match {\n"; +#ifdef APS2SCALA + os << indent() << "def visit_" << pgn << "_" << phase << "_" << j + << "(anchor : T_" << decl_name(phy_graph->phylum) + << ") : Unit = anchor match {\n"; ++nesting_level; - oss << indent() << "case " << matcher_pat(m) << " => {\n"; + os << indent() << "case " << matcher_pat(m) << " => {\n"; +#else /* APS2SCALA */ + oss << header_return_type(0) << "void " + << header_function_name("visit_") << pgn << "_" << phase << "_" << j + << "(C_PHYLUM::Node* anchor)" << header_end(); + INDEFINITION; + os << " {\n"; +#endif /* APS2SCALA */ ++nesting_level; +#ifndef APS2SCALA + os << matcher_bindings("anchor",m); + os << "\n"; +#endif /* APS2SCALA */ + printf("Implementing visit function for %s, phase %d\n", + decl_name(aug_graph->syntax_decl), phase); bool cont = implement_visit_function(aug_graph,phase,0,total_order, instance_assignment, - nch,children,child_phase,oss); + nch,children,child_phase,os); --nesting_level; - oss << indent() << "}\n"; +#ifdef APS2SCALA + os << indent() << "}\n"; --nesting_level; - oss << indent() << "}\n" << endl; +#endif /* APS2SCALA */ + os << indent() << "}\n" << endl; if (!cont) break; } @@ -429,6 +504,7 @@ void dump_visit_functions(PHY_GRAPH *phy_graph, delete[] instance_assignment; } +// The following function is only for Scala code generation void dump_constructor_owner(Declaration pd, ostream& os) { switch (Declaration_KEY(pd)) { @@ -436,7 +512,7 @@ void dump_constructor_owner(Declaration pd, ostream& os) aps_error(pd,"cannot attribute this phylum"); break; case KEYphylum_formal: - os 
<< "t_" << decl_name(pd) << "."; + os << "t_" << decl_name(pd) << DEREF; break; case KEYphylum_decl: switch (Type_KEY(phylum_decl_type(pd))) { @@ -446,7 +522,7 @@ void dump_constructor_owner(Declaration pd, ostream& os) case KEYno_type: break; case KEYtype_inst: - os << "t_" << decl_name(pd) << "."; + os << "t_" << decl_name(pd) << DEREF; break; case KEYtype_use: dump_constructor_owner(USE_DECL(type_use_use(phylum_decl_type(pd))),os); @@ -466,11 +542,24 @@ void dump_constructor_owner(Declaration pd, ostream& os) } } -void dump_visit_functions(PHY_GRAPH *pg, ostream& oss) +#ifdef APS2SCALA +void dump_visit_functions(PHY_GRAPH *pg, ostream& os) +#else /* APS2SCALA */ +void dump_visit_functions(PHY_GRAPH *pg, output_streams& oss) +#endif /* APS2SCALA */ { STATE *s = pg->global_state; int pgn = PHY_GRAPH_NUM(pg); +#ifdef APS2SCALA + ostream &oss = os; +#else + ostream& hs = oss.hs; + ostream& cpps = oss.cpps; + // ostream& is = oss.is; + ostream& os = inline_definitions ? hs : cpps; + +#endif /* APS2SCALA */ int max_phase = 0; for (int i=0; i < pg->instances.length; ++i) { int ph = pg->summary_schedule[i]; @@ -493,7 +582,9 @@ void dump_visit_functions(PHY_GRAPH *pg, ostream& oss) if (num_cons == 0) { fatal_error("no top-level-match match for phylum %s", decl_name(pg->phylum)); } - + + // The match clauses may be in a different order + // that the constructor declarations in the tree module int cons_num = 0; Declaration cmodule = (Declaration)tnode_parent(aug_graphs.front()->syntax_decl); @@ -506,7 +597,7 @@ void dump_visit_functions(PHY_GRAPH *pg, ostream& oss) if (Declaration_KEY(d) == KEYconstructor_decl) { for (int j=0; j < num_cons; ++j) { if (aug_graphs[j]->syntax_decl == d) { - Declaration_info(d)->instance_index = cons_num; //? why not j ? 
+ Declaration_info(d)->instance_index = cons_num; ++cons_num; break; } @@ -519,24 +610,56 @@ void dump_visit_functions(PHY_GRAPH *pg, ostream& oss) } for (int ph = 1; ph <= max_phase; ++ph) { - oss << indent() << "def visit_" << pgn << "_" << ph - << "(node : T_" << decl_name(pg->phylum) - << ") : Unit = node match {\n"; +#ifdef APS2SCALA + os << indent() << "def visit_" << pgn << "_" << ph + << "(node : T_" << decl_name(pg->phylum) + << ") : Unit = node match {\n"; +#else /* APS2SCALA */ + oss << header_return_type(0) << "void " + << header_function_name("visit_") << pgn << "_" << ph + << "(C_PHYLUM::Node* node)" << header_end(); + INDEFINITION; + os << " {\n"; +#endif /* APS2SCALA */ ++nesting_level; +#ifndef APS2SCALA + os << indent() << "switch (node->cons->get_index()) {\n"; +#endif /* APS2SCALA */ for (int j=0; j < num_cons; ++j) { +#ifdef APS2SCALA Declaration cd = aug_graphs[j]->syntax_decl; - oss << indent() << "case "; - dump_constructor_owner(pg->phylum,oss); - oss << "p_" << decl_name(cd) << "(_"; + os << indent() << "case "; + dump_constructor_owner(pg->phylum,os); + os << "p_" << decl_name(cd) << "(_"; Declarations fs = function_type_formals(constructor_decl_type(cd)); for (Declaration f = first_Declaration(fs); f; f=DECL_NEXT(f)) { - oss << ",_"; + os << ",_"; } - oss << ") => " << "visit_" << pgn << "_" << ph << "_" - << Declaration_info(cd)->instance_index << "(node);\n"; + os << ") => " << "visit_" << pgn << "_" << ph << "_" + << Declaration_info(cd)->instance_index << "(node);\n"; +#else /* APS2SCALA */ + os << indent() << "case " << j << ":\n"; + ++nesting_level; + os << indent() << "visit_" << pgn << "_" << ph << "_" << j + << "(node);\n"; + os << indent() << "break;\n"; + --nesting_level; +#endif /* APS2SCALA */ } +#ifndef APS2SCALA + os << indent() << "default:\n"; + ++nesting_level; + os << indent() + << "throw std::runtime_error(\"bad constructor index\");\n"; +#endif /* APS2SCALA */ --nesting_level; - oss << indent() << "};\n"; +#ifdef 
APS2SCALA + os << indent() << "};\n"; +#else /* APS2SCALA */ + os << indent() << "}\n"; + --nesting_level; + os << indent() << "}\n"; +#endif /* APS2SCALA */ } // Now spit out visit procedures for each constructor @@ -547,8 +670,20 @@ void dump_visit_functions(PHY_GRAPH *pg, ostream& oss) oss << "\n"; // some blank lines } -void dump_visit_functions(STATE*s, ostream& oss) +#ifdef APS2SCALA +void dump_visit_functions(STATE*s, ostream& os) +#else /* APS2SCALA */ +void dump_visit_functions(STATE*s, output_streams& oss) +#endif /* APS2SCALA */ { +#ifdef APS2SCALA + ostream& oss = os; +#else /* !APS2SCALA */ + ostream& hs = oss.hs; + ostream& cpps = oss.cpps; + ostream& os = inline_definitions ? hs : cpps; + +#endif /* APS2SCALA */ // first dump all visit functions for each phylum: int nphy = s->phyla.length; for (int j=0; j < nphy; ++j) { @@ -559,10 +694,24 @@ void dump_visit_functions(STATE*s, ostream& oss) } Declaration sp = s->start_phylum; - oss << indent() << "def visit() : Unit = {\n"; +#ifdef APS2SCALA + os << indent() << "def visit() : Unit = {\n"; +#else /* APS2SCALA */ + oss << header_return_type(0) << "void " + << header_function_name("visit") << "()" << header_end(); + INDEFINITION; + os << " {\n"; +#endif /* APS2SCALA */ ++nesting_level; - oss << indent() << "val roots = t_" << decl_name(sp) - << ".nodes;\n"; +#ifdef APS2SCALA + os << indent() << "val roots = t_" << decl_name(sp) + << ".nodes;\n"; +#else /* APS2SCALA */ + os << indent() << "Phylum* phylum = this->t_" << decl_name(sp) //! 
bug sometimes + << "->get_phylum();\n"; + os << indent() << "int n_roots = phylum->size();\n"; + os << "\n"; // blank line +#endif /* APS2SCALA */ int phase = 1; Declaration root_decl[1] ; @@ -577,13 +726,13 @@ void dump_visit_functions(STATE*s, ostream& oss) while (implement_visit_function(&s->global_dependencies,phase,0, s->global_dependencies.total_order, instance_assignment, - 1,root_decl,root_phase,oss)) + 1,root_decl,root_phase,os)) ++phase; delete[] instance_assignment; --nesting_level; - oss << indent() << "}\n\n"; + os << indent() << "}\n\n"; } static void* dump_scheduled_local(void *pbs, void *node) { @@ -594,10 +743,15 @@ static void* dump_scheduled_local(void *pbs, void *node) { static int unique = 0; LOCAL_UNIQUE_PREFIX(d) = ++unique; Declaration_info(d)->decl_flags |= LOCAL_VALUE_FLAG; +#ifdef APS2SCALA bs << indent() << "var " << " v" << unique << "_" << decl_name(d) << " : " << value_decl_type(d) << " = null.asInstanceOf[" << value_decl_type(d) << "]" << ";\n"; +#else /* APS2SCALA */ + bs << indent() << value_decl_type(d) + << " v" << unique << "_" << decl_name(d) << ";\n"; +#endif /* APS2SCALA */ } } return pbs; @@ -665,36 +819,58 @@ class Static : public Implementation public: ModuleInfo(Declaration mdecl) : Implementation::ModuleInfo(mdecl) {} - void note_top_level_match(Declaration tlm, ostream& oss) { + void note_top_level_match(Declaration tlm, GEN_OUTPUT& oss) { Super::note_top_level_match(tlm,oss); } - void note_local_attribute(Declaration ld, ostream& oss) { + void note_local_attribute(Declaration ld, GEN_OUTPUT& oss) { Super::note_local_attribute(ld,oss); Declaration_info(ld)->decl_flags |= LOCAL_ATTRIBUTE_FLAG; } - void note_attribute_decl(Declaration ad, ostream& oss) { + void note_attribute_decl(Declaration ad, GEN_OUTPUT& oss) { Declaration_info(ad)->decl_flags |= ATTRIBUTE_DECL_FLAG; Super::note_attribute_decl(ad,oss); } - void note_var_value_decl(Declaration vd, ostream& oss) { + void note_var_value_decl(Declaration vd, GEN_OUTPUT& 
oss) { Super::note_var_value_decl(vd,oss); } - void implement(ostream& oss) { +#ifdef APS2SCALA + void implement(ostream& os) { +#else /* APS2SCALA */ + void implement(output_streams& oss) { +#endif /* APS2SCALA */ STATE *s = (STATE*)Declaration_info(module_decl)->analysis_state; +#ifdef APS2SCALA + ostream& oss = os; +#else + ostream& hs = oss.hs; + ostream& cpps = oss.cpps; + ostream& os = inline_definitions ? hs : cpps; + // char *name = decl_name(module_decl); +#endif /* APS2SCALA */ Declarations ds = block_body(module_decl_contents(module_decl)); dump_visit_functions(s,oss); // Implement finish routine: - oss << indent() << "override def finish() : Unit = {\n"; +#ifdef APS2SCALA + os << indent() << "override def finish() : Unit = {\n"; +#else /* APS2SCALA */ + hs << indent() << "void finish()"; + if (!inline_definitions) { + hs << ";\n"; + cpps << "void " << oss.prefix << "finish()"; + } + INDEFINITION; + os << " {\n"; +#endif /* APS2SCALA */ ++nesting_level; - oss << indent() << "visit();\n"; + os << indent() << "visit();\n"; // types actually should be scheduled... for (Declaration d = first_Declaration(ds); d; d = DECL_NEXT(d)) { const char* kind = NULL; @@ -714,12 +890,14 @@ class Static : public Implementation } if (kind != NULL) { const char *n = decl_name(d); - oss << indent() << kind << n << ".finish();\n"; + os << indent() << kind << n << DEREF << "finish();\n"; } } - oss << "super.finish();\n"; +#ifdef APS2SCALA + os << "super.finish();\n"; +#endif /* ! 
APS2SCALA */ --nesting_level; - oss << indent() << "}\n\n"; + os << indent() << "}\n\n"; clear_implementation_marks(module_decl); } @@ -746,9 +924,9 @@ class Static : public Implementation int flags = Declaration_info(vd)->decl_flags; if (flags & LOCAL_ATTRIBUTE_FLAG) { os << "a" << LOCAL_UNIQUE_PREFIX(vd) << "_" - << decl_name(vd) << ".get(anchor)"; + << decl_name(vd) << DEREF << "get(anchor)"; } else if (flags & ATTRIBUTE_DECL_FLAG) { - os << "a" << "_" << decl_name(vd) << ".get"; + os << "a" << "_" << decl_name(vd) << DEREF << "get"; } else if (flags & LOCAL_VALUE_FLAG) { os << "v" << LOCAL_UNIQUE_PREFIX(vd) << "_" << decl_name(vd); } else { diff --git a/codegen/static-scc-impl.cc b/codegen/static-scc-impl.cc new file mode 100644 index 00000000..d31227a5 --- /dev/null +++ b/codegen/static-scc-impl.cc @@ -0,0 +1,1444 @@ +#include +#include +#include +extern "C" { +#include +#include "aps-ag.h" +} +#include +#include +#include + +#include +#include "dump.h" +#include "implement.h" + +#define LOCAL_VALUE_FLAG (1 << 28) + +#ifdef APS2SCALA +#define DEREF "." 
+#else +#define DEREF "->" +#endif + +template +static std::string any_to_string(const T & value) { + std::ostringstream oss; + oss << value; + return oss.str(); +} + +typedef void (*OutputWriterT)(int, std::ostream&); + +class OutputWriter { + struct Item { + OutputWriterT function; + int marker; + }; + + private: + std::ostream& _os; + std::vector _items; + + void dump_queue() { + for (std::vector::iterator it = _items.begin(); it != _items.end(); + it++) { + ((OutputWriterT)it->function)(it->marker, _os); + } + + _items.clear(); + } + + bool contains_marker(int marker) { + for (std::vector::iterator it = _items.begin(); it != _items.end(); + it++) { + if (it->marker == marker) { + return true; + } + } + + return false; + } + + public: + OutputWriter(std::ostream& os) : _os(os), _items(std::vector()) {} + + std::ostream& get_outstream() { + dump_queue(); + return _os; + } + + void queue_write(int marker, + OutputWriterT lambda) { + if (!contains_marker(marker)) { + struct Item* item = new Item; + item->marker = marker; + item->function = lambda; + _items.push_back(*item); + } else { + fatal_error("Already enqueued to write marker: %d", VOIDP2INT(marker)); + } + } + + bool any_write_since(int marker) { return !contains_marker(marker); } + + void clear_since(int marker) { + bool found_marker = false; + for (std::vector::iterator it = _items.begin(); it != _items.end();) { + if (it->marker == marker || found_marker) { + it = _items.erase(it); + found_marker = true; + } else { + it++; + } + } + } + + virtual ~OutputWriter() { dump_queue(); } +}; + +static Expression default_init(Default def) { + switch (Default_KEY(def)) { + case KEYsimple: + return simple_value(def); + case KEYcomposite: + return composite_initial(def); + default: + return 0; + } +} + +/* Return new array with instance assignments for block. + * If "from" is not NULL, then initialize the new array + * with it. 
+ */ +static vector > make_instance_assignment( + AUG_GRAPH* aug_graph, + Block block, + vector > from, + bool include_initial_defaults) { + int n = aug_graph->instances.length; + vector > array(from); + + if (include_initial_defaults) { + for (int i = 0; i < n; ++i) { + INSTANCE* in = &aug_graph->instances.array[i]; + Declaration ad = in->fibered_attr.attr; + if (ad != 0 && in->fibered_attr.fiber == 0 && + ABSTRACT_APS_tnode_phylum(ad) == KEYDeclaration) { + // get default! + switch (Declaration_KEY(ad)) { + case KEYattribute_decl: + array[i].insert(default_init(attribute_decl_default(ad))); + break; + case KEYvalue_decl: + array[i].insert(default_init(value_decl_default(ad))); + break; + default: + break; + } + } + } + } + + // Step #1 clear any existing assignments and insert normal assignments + // Step #2 insert collection assignments + int step = 1; + while (step <= 2) { + Declarations ds = block_body(block); + for (Declaration d = first_Declaration(ds); d; d = DECL_NEXT(d)) { + switch (Declaration_KEY(d)) { + case KEYnormal_assign: { + if (INSTANCE* in = Expression_info(assign_rhs(d))->value_for) { + if (in->index >= n) + fatal_error("bad index [normal_assign] for instance"); + array[in->index].clear(); + array[in->index].insert(assign_rhs(d)); + } + break; + } + case KEYcollect_assign: { + if (INSTANCE* in = Expression_info(assign_rhs(d))->value_for) { + if (in->index >= n) + fatal_error("bad index [collection_assign] for instance"); + + if (step == 1) + array[in->index].clear(); + else + array[in->index].insert(assign_rhs(d)); + } + break; + } + default: + break; + } + } + + step++; + } + + return array; +} + +// visit procedures are called: +// visit_n_m +// where n is the number of the phy_graph and m is the phase. 
+// This does a dispatch to visit_n_m_p +// where p is the production number (0-based constructor index) +#define PHY_GRAPH_NUM(pg) (pg - pg->global_state->phy_graphs) + +static void dump_loop_end(AUG_GRAPH* aug_graph, + int parent_ph, + int loop_id, + OutputWriter* ow) { +#ifdef APS2SCALA + if (ow->any_write_since(loop_id)) { + std::ostream& os = ow->get_outstream(); + string suffix = any_to_string(loop_id); + std::replace(suffix.begin(), suffix.end(), '-', '_'); + --nesting_level; + os << "\n"; + os << indent() << "} while(newChanged.get);" << "\n"; + nesting_level--; + os << indent() << "}" << "\n"; + } else { + ow->clear_since(loop_id); + } +#endif /* APS2SCALA */ +} + +static void dump_loop_start_helper(int loop_id, std::ostream& os) +{ +#ifdef APS2SCALA + // ABS value of component_index because initially it is -1 + string suffix = any_to_string(loop_id); + std::replace(suffix.begin(), suffix.end(), '-', '_'); + + os << indent() << "{\n"; + ++nesting_level; + os << indent() << "val newChanged = new AtomicBoolean(false);\n"; + os << indent() << "do {\n"; + ++nesting_level; + os << indent() << "newChanged.set(false);\n"; +#endif /* APS2SCALA */ +} + +static void dump_loop_start(AUG_GRAPH* aug_graph, + int parent_ph, + int loop_id, + OutputWriter* ow) { +#ifdef APS2SCALA + + ow->queue_write( + loop_id, + dump_loop_start_helper); + +#endif /* APS2SCALA */ +} + +static void dump_changed(int loop_id, std::ostream& os) +{ +#ifdef APS2SCALA + os << (loop_id == -1 ? "changed" : "newChanged"); +#endif /* APS2SCALA */ +} + +// phase is what we are generating code for, +// current is the current value of ph +// return true if there are still more instances after this phase: +static bool implement_visit_function( + AUG_GRAPH* aug_graph, + int phase, /* phase to impl. 
*/ + CTO_NODE* cto, + vector > instance_assignment, + int nch, + CONDITION* cond, + int chunk_index, + bool loop_allowed, + int loop_id, + bool skip_previous_visit_code, + OutputWriter* ow) { + + STATE* s = aug_graph->global_state; + + for (; cto; cto = cto->cto_next) { + INSTANCE* in = cto->cto_instance; + bool is_conditional = in != NULL && if_rule_p(in->fibered_attr.attr); + bool is_circular = false; + + if (in == NULL || is_conditional) { + is_circular = false; + } else { + switch (Declaration_KEY(in->fibered_attr.attr)) { + case KEYvalue_decl: + is_circular = direction_is_circular(value_decl_direction(in->fibered_attr.attr)); + break; + case KEYattribute_decl: + is_circular = direction_is_circular(attribute_decl_direction(in->fibered_attr.attr)); + break; + default: + break; + } + } + + int ch = cto->child_phase.ch; + int ph = cto->child_phase.ph; + bool chunk_changed = chunk_index != -1 && cto->chunk_index != chunk_index; + PHY_GRAPH* pg_parent = + Declaration_info(aug_graph->lhs_decl)->node_phy_graph; + + Declaration ad = in != NULL ? in->fibered_attr.attr : NULL; + void* ad_parent = ad != NULL ? 
tnode_parent(ad) : NULL; + bool node_is_for_in_stmt = ad_parent != NULL && + ABSTRACT_APS_tnode_phylum(ad_parent) == KEYDeclaration && + Declaration_KEY((Declaration)ad_parent) == KEYfor_in_stmt; + + bool is_mod = false; + switch (Declaration_KEY(aug_graph->syntax_decl)) { + case KEYsome_class_decl: + is_mod = true; + break; + default: + break; + } + + if (!is_mod && chunk_changed) { + if (include_comments) { + ow->get_outstream() + << indent() << "// Finished with chunk #" << chunk_index << "\n"; + } + + // Need to close the loop if any + if (loop_allowed && loop_id != -1) { + dump_loop_end(aug_graph, phase, loop_id, ow); + loop_id = -1; + } + + if (include_comments) { + ow->get_outstream() << indent() << "// Started working on chunk #" + << cto->chunk_index << "\n"; + } + + if (loop_allowed && cto->chunk_circular) { + if (!pg_parent->cyclic_flags[phase]) { + loop_id = cto->chunk_index; + dump_loop_start(aug_graph, phase, loop_id, ow); + } else { + if (include_comments) { + ow->get_outstream() + << indent() << "// Parent phase " << phase + << " is circular, fixed-point loop cannot be added here\n"; + } + } + } + } + + // Code generate if: + // - CTO_NODE belongs to this visit + // - OR CTO_NODE is conditional + // - OR CTO_NODE is for-in-stmt + if (skip_previous_visit_code && !is_conditional && !node_is_for_in_stmt) { + // CTO_NODE belongs to this visit + if (cto->visit != phase) { + chunk_index = cto->chunk_index; + continue; + } + } + + // Update chunk index + chunk_index = cto->chunk_index; + + // Visit marker for when visit ends + if (in == NULL && ch == -1) { + // If we are the module (roots) level + if (is_mod) { + int n = PHY_GRAPH_NUM( + Declaration_info(aug_graph->syntax_decl)->node_phy_graph); + + // Dump loop start if this phase of global dependency is circular and + // loop is allowed + if (pg_parent->cyclic_flags[ph] && loop_allowed) { + loop_id = cto->chunk_index; + dump_loop_start(aug_graph, phase, loop_id, ow); + } +#ifdef APS2SCALA + 
ow->get_outstream() << indent() << "for (root <- roots) {\n"; +#else /* APS2SCALA */ + ow->get_outstream() + << indent() << "for (int i=0; i < n_roots; ++i) {\n"; +#endif /* APS2SCALA */ + ++nesting_level; +#ifndef APS2SCALA + ow->get_outstream() + << indent() << "C_PHYLUM::Node *root = phylum->node(i);\n"; +#endif /* APS2SCALA */ + + ow->get_outstream() << indent() << "visit_" << n << "_" << ph << "("; + ow->get_outstream() << "root"; + if (s->loop_required) { + ow->get_outstream() << ", "; + if (loop_id == -1) { + ow->get_outstream() << "new AtomicBoolean(false)"; + } else { + ow->get_outstream() << "newChanged"; + } + } + ow->get_outstream() << ");\n"; + + --nesting_level; + ow->get_outstream() << indent() << "}\n"; + + // Dump loop end if this phase of global dependency was circular, and we + // are inside the loop + if (pg_parent->cyclic_flags[ph] && loop_allowed && loop_id != -1) { + dump_loop_end(aug_graph, phase, loop_id, ow); + loop_id = -1; + } + } + + if (include_comments) { + ow->get_outstream() + << indent() << "// End of parent (" << aug_graph_name(aug_graph) + << ") phase visit marker for phase: " << ph << "\n"; + } + if (!is_mod) { + // Need to close the loop if any + if (loop_allowed && loop_id != -1) { + dump_loop_end(aug_graph, phase, loop_id, ow); + loop_id = -1; + } + } + + // If ph == phase to implement then stop, cannot continue any further for + // this visit + if (ph == phase) { + return false; + } + + // Otherwise, continue, there is more instances to implement + continue; + } + + // Visit marker for when child visit happens + if (in == NULL && ch > -1) { + ow->get_outstream() << "\n"; + + if (include_comments) { + ow->get_outstream() + << indent() << "// aug_graph: " << aug_graph_name(aug_graph) + << "\n"; + ow->get_outstream() + << indent() << "// visit marker(" << ph << "," << ch << ")\n"; + } + + PHY_GRAPH* pg = Declaration_info(cto->child_decl)->node_phy_graph; + int n = PHY_GRAPH_NUM(pg); + + if (include_comments) { + 
ow->get_outstream() + << indent() << "// parent visit of " << decl_name(pg_parent->phylum) + << " at phase " << phase << " is " + << (pg_parent->cyclic_flags[phase] ? "circular" : "non-circular") + << "\n"; + ow->get_outstream() + << indent() << "// child visit of " << decl_name(pg->phylum) + << " at phase " << ph << " is " + << (pg->cyclic_flags[ph] ? "circular" : "non-circular") << "\n"; + } + + if (!pg_parent->cyclic_flags[phase] && pg->cyclic_flags[ph] && + loop_allowed && chunk_index == -1) { + fatal_error( + "The child visit(%d,%d) of %s should have been wrapped in a " + "do-while loop.", + n, ph, aug_graph_name(aug_graph)); + } + + ow->get_outstream() << indent() << "visit_" << n << "_" << ph << "("; +#ifdef APS2SCALA + ow->get_outstream() << "v_" << decl_name(cto->child_decl); + + if (s->loop_required) { + ow->get_outstream() << ", "; + dump_changed(loop_id, ow->get_outstream()); + } +#else /* APS2SCALA */ + ow->get_outstream() << "v_" << decl_name(cto->child_decl); +#endif /* APS2SCALA */ + ow->get_outstream() << ");\n"; + + continue; + } + + // Instance should not be null for non-visit marker CTO nodes + // Visit markers have a form of either: or <-ph,-1> + if (in == NULL) { + fatal_error( + "total_order is malformed: Instance should not be null for non-visit " + "marker CTO nodes."); + } + + bool node_is_lhs = in->node == aug_graph->lhs_decl; + bool node_is_syntax = ch < nch || node_is_lhs; + + CONDITION icond = instance_condition(in); + if (MERGED_CONDITION_IS_IMPOSSIBLE(*cond, icond)) { + if (include_comments) { + ow->get_outstream() + << indent() << "// '" << in + << "' attribute instance is impossible because cond: (+" + << cond->positive << ",-" << cond->negative << ") icond: (+" + << icond.positive << ",-" << icond.negative << ")\n"; + } + continue; + } + + if (node_is_for_in_stmt) { + Declaration for_in_stmt_decl = (Declaration)ad_parent; + Block body = for_in_stmt_body(for_in_stmt_decl); + Declaration formal = 
for_in_stmt_formal(for_in_stmt_decl); + Expression sequence = for_in_stmt_seq(for_in_stmt_decl); + + bool prev_loop_allowed = loop_allowed; + if (loop_allowed) { + // If loop is allowed, and we are not in the loop already then allow + // loops inside the for-in-stmt. + loop_allowed = loop_id == -1; + } + +#ifdef APS2SCALA + ow->get_outstream() << indent() << "for (v_" << decl_name(formal) << " <- " << sequence << ") {\n"; + ++nesting_level; +#endif /* APS2SCALA */ + + vector > assignment = + make_instance_assignment(aug_graph, body, instance_assignment, false /* defaults are already included */); + + bool cont = implement_visit_function(aug_graph, phase, cto->cto_next, + assignment, nch, cond, cto->chunk_index, + loop_allowed, loop_id, skip_previous_visit_code, + ow); + +#ifdef APS2SCALA + --nesting_level; + ow->get_outstream() << indent() << "}\n"; + + + // Restore previous value of loop allowed. + loop_allowed = prev_loop_allowed; + + // Closing of the loop now that for-in-stmt is finished + if (loop_allowed && loop_id != -1) { + dump_loop_end(aug_graph, phase, loop_id, ow); + loop_id = -1; + } +#endif /* APS2SCALA */ + return cont; + } + + if (if_rule_p(ad)) { + bool prev_loop_allowed = loop_allowed; + if (loop_allowed) { + // If loop is allowed, and we are not in the loop already then allow + // loops inside the true and false branch of the conditional. 
+ loop_allowed = loop_id == -1; + }; + + bool is_match = ABSTRACT_APS_tnode_phylum(ad) == KEYMatch; + Block if_true; + Block if_false; + if (is_match) { + Match m = (Match)ad; + Pattern p = matcher_pat(m); + Declaration header = Match_info(m)->header; + // if first match in case, we evaluate variable: + if (m == first_Match(case_stmt_matchers(header))) { + Expression e = case_stmt_expr(header); +#ifdef APS2SCALA + // Type ty = infer_expr_type(e); + ow->get_outstream() << indent() << "val node = " << e << ";\n"; +#else /* APS2SCALA */ + Type ty = infer_expr_type(e); + ow->get_outstream() << indent() << ty << " node=" << e << ";\n"; +#endif /* APS2SCALA */ + } +#ifdef APS2SCALA + ow->get_outstream() << indent() << "node match {\n"; + ow->get_outstream() << indent() << "case " << p << " => {\n"; +#else /* APS2SCALA */ + ow->get_outstream() << indent() << "if ("; + dump_Pattern_cond(p, "node", ow->get_outstream()); + ow->get_outstream() << ") {\n"; +#endif /* APS2SCALA */ + nesting_level += 1; +#ifndef APS2SCALA + dump_Pattern_bindings(p, ow->get_outstream()); +#endif /* APS2SCALA */ + if_true = matcher_body(m); + if (MATCH_NEXT(m)) { + if_false = 0; //? Why not the nxt match ? 
+ } else { + if_false = case_stmt_default(header); + } + } else { + // Symbol boolean_symbol = intern_symbol("Boolean"); + ow->get_outstream() + << indent() << "if (" << if_stmt_cond(ad) << ") {\n"; + ++nesting_level; + if_true = if_stmt_if_true(ad); + if_false = if_stmt_if_false(ad); + } + + int cmask = 1 << (if_rule_index(ad)); + vector > true_assignment = + make_instance_assignment(aug_graph, if_true, instance_assignment, false /* defaults are already included */); + + cond->positive |= cmask; + implement_visit_function(aug_graph, phase, cto->cto_if_true, + true_assignment, nch, cond, cto->chunk_index, + loop_allowed, loop_id, skip_previous_visit_code, + ow); + cond->positive &= ~cmask; + + --nesting_level; +#ifdef APS2SCALA + if (is_match) { + ow->get_outstream() << indent() << "}\n"; + ow->get_outstream() << indent() << "case _ => {\n"; + } else { + ow->get_outstream() << indent() << "} else {\n"; + } +#else /* APS2SCALA */ + ow->get_outstream() << indent() << "} else {\n"; +#endif /* APS2SCALA */ + ++nesting_level; + vector > false_assignment = + if_false ? make_instance_assignment(aug_graph, if_false, + instance_assignment, false /* defaults are already included */) + : instance_assignment; + + cond->negative |= cmask; + bool cont = implement_visit_function( + aug_graph, phase, cto->cto_if_false, false_assignment, nch, cond, + cto->chunk_index, loop_allowed, loop_id, skip_previous_visit_code, + ow); + cond->negative &= ~cmask; + + --nesting_level; +#ifdef APS2SCALA + if (is_match) { + ow->get_outstream() << indent() << "}}\n"; + } else { + ow->get_outstream() << indent() << "}\n"; + } + + // Restore previous value of loop allowed. 
+ loop_allowed = prev_loop_allowed; + + // Delay closing of the loop until conditional is finished + if (loop_allowed && loop_id != -1) { + dump_loop_end(aug_graph, phase, loop_id, ow); + loop_id = -1; + } + +#else /* APS2SCALA */ + ow->get_outstream() << indent() << "}\n"; +#endif /* APS2SCALA */ + return cont; + } + + Symbol asym = ad ? def_name(declaration_def(ad)) : 0; + + if (instance_direction(in) == instance_inward) { + if (include_comments) { + ow->get_outstream() << indent() << "// " << in << " is ready now.\n"; + } + continue; + } + + for (std::set::iterator rhs_it = + instance_assignment[in->index].begin(); + rhs_it != instance_assignment[in->index].end(); rhs_it++) { + Expression rhs = *rhs_it; + + if (rhs == NULL) + continue; + + if (in->node && Declaration_KEY(in->node) == KEYnormal_assign) { + // parameter value will be filled in at call site + if (include_comments) { + ow->get_outstream() + << indent() << "// delaying " << in << " to call site.\n"; + } + continue; + } + + if (in->node && Declaration_KEY(in->node) == KEYpragma_call) { + if (include_comments) { + ow->get_outstream() + << indent() << "// place holder for " << in << "\n"; + } + continue; + } + + if (in->fibered_attr.fiber != NULL) { + if (rhs == NULL) { + if (include_comments) { + ow->get_outstream() << indent() << "// " << in << "\n"; + } + continue; + } + + Declaration assign = (Declaration)tnode_parent(rhs); + Expression lhs = assign_lhs(assign); + Declaration field = 0; + ow->get_outstream() << indent(); + // dump the object containing the field + switch (Expression_KEY(lhs)) { + case KEYvalue_use: + // shared global collection + field = USE_DECL(value_use_use(lhs)); +#ifdef APS2SCALA + ow->get_outstream() << "a_" << decl_name(field) << "."; + if (debug) + ow->get_outstream() << "assign"; + else + ow->get_outstream() << "set"; + ow->get_outstream() << "(" << rhs; + + if (s->loop_required && is_circular) { + ow->get_outstream() << ", "; + dump_changed(loop_id, ow->get_outstream()); + 
} + + ow->get_outstream() << ");\n"; +#else /* APS2SCALA */ + ow->get_outstream() << "v_" << decl_name(field) << "="; + switch (Default_KEY(value_decl_default(field))) { + case KEYcomposite: + ow->get_outstream() + << composite_combiner(value_decl_default(field)); + break; + default: + ow->get_outstream() + << as_val(value_decl_type(field)) << "->v_combine"; + break; + } + ow->get_outstream() + << "(v_" << decl_name(field) << "," << rhs << ");\n"; +#endif /* APS2SCALA */ + break; + case KEYfuncall: + field = field_ref_p(lhs); + if (field == 0) + fatal_error("what sort of assignment lhs: %d", + tnode_line_number(assign)); + ow->get_outstream() << "a_" << decl_name(field) << DEREF; + if (debug) + ow->get_outstream() << "assign"; + else + ow->get_outstream() << "set"; + ow->get_outstream() + << "(" << field_ref_object(lhs) << "," << rhs; + + if (s->loop_required && is_circular) { + ow->get_outstream() << ", "; + dump_changed(loop_id, ow->get_outstream()); + } + + ow->get_outstream() << ");\n"; + break; + default: + fatal_error("what sort of assignment lhs: %d", + tnode_line_number(assign)); + } + continue; + } + + if (in->node == 0 && ad != NULL) { + if (rhs) { + if (Declaration_info(ad)->decl_flags & LOCAL_ATTRIBUTE_FLAG) { + ow->get_outstream() << indent() << "a" << LOCAL_UNIQUE_PREFIX(ad) + << "_" << asym << DEREF; + if (debug) + ow->get_outstream() << "assign"; + else + ow->get_outstream() << "set"; + ow->get_outstream() << "(anchor," << rhs; + if (s->loop_required && is_circular) { + ow->get_outstream() << ", "; + dump_changed(loop_id, ow->get_outstream()); + } + ow->get_outstream() << ");\n"; + } else { + int i = LOCAL_UNIQUE_PREFIX(ad); + if (i == 0) { +#ifdef APS2SCALA + if (!def_is_constant(value_decl_def(ad))) { + if (include_comments) { + ow->get_outstream() + << indent() << "// v_" << asym + << " is assigned/initialized by default.\n"; + } + } else { + if (include_comments) { + ow->get_outstream() << indent() << "// v_" << asym + << " is initialized in 
module.\n"; + } + } +#else + ow->get_outstream() + << indent() << "v_" << asym << " = " << rhs << ";\n"; +#endif + } else { + ow->get_outstream() << indent() << "v" << i << "_" << asym + << " = " << rhs << "; // local\n"; + } + } + } else { + if (Declaration_KEY(ad) == KEYvalue_decl && + !direction_is_collection(value_decl_direction(ad))) { + aps_warning(ad, "Local attribute %s is apparently undefined", + decl_name(ad)); + } + if (include_comments) { + ow->get_outstream() << indent() << "// " << in << " is ready now\n"; + } + } + continue; + } else if (node_is_syntax) { + if (ATTR_DECL_IS_SHARED_INFO(ad) && ch < nch) { + if (include_comments) { + ow->get_outstream() << indent() << "// shared info for " + << decl_name(in->node) << " is ready.\n"; + } + } else if (ATTR_DECL_IS_UP_DOWN(ad)) { + if (include_comments) { + ow->get_outstream() << indent() << "// " << decl_name(in->node) + << "." << decl_name(ad) << " implicit.\n"; + } + } else if (rhs) { + if (Declaration_KEY(in->node) == KEYfunction_decl) { + if (direction_is_collection(value_decl_direction(ad))) { + std::cout << "Not expecting collection here!\n"; + ow->get_outstream() + << indent() << "v_" << asym << " = somehow_combine(v_" << asym + << "," << rhs << ");\n"; + } else { + int i = LOCAL_UNIQUE_PREFIX(ad); + if (i == 0) + ow->get_outstream() << indent() << "v_" << asym << " = " << rhs + << "; // function\n"; + else + ow->get_outstream() << indent() << "v" << i << "_" << asym + << " = " << rhs << ";\n"; + } + } else { + ow->get_outstream() << indent() << "a_" << asym << DEREF; + if (debug) + ow->get_outstream() << "assign"; + else + ow->get_outstream() << "set"; + ow->get_outstream() + << "(v_" << decl_name(in->node) << "," << rhs; + + if (s->loop_required && is_circular) { + ow->get_outstream() << ", "; + dump_changed(loop_id, ow->get_outstream()); + } + ow->get_outstream() << ");\n"; + } + } else { + aps_warning(in->node, "Attribute %s.%s is apparently undefined", + decl_name(in->node), 
symbol_name(asym)); + + if (include_comments) { + ow->get_outstream() << indent() << "// " << in << " is ready.\n"; + } + } + continue; + } else if (Declaration_KEY(in->node) == KEYvalue_decl) { + if (rhs) { + // assigning field of object + ow->get_outstream() << indent() << "a_" << asym << DEREF; + if (debug) + ow->get_outstream() << "assign"; + else + ow->get_outstream() << "set"; + ow->get_outstream() + << "(v_" << decl_name(in->node) << "," << rhs; + if (s->loop_required && is_circular) { + ow->get_outstream() << ", changed"; + } + ow->get_outstream() << ");\n"; + } else { + if (include_comments) { + ow->get_outstream() + << indent() << "// " << in << " is ready now.\n"; + } + } + continue; + } + std::cout << "Problem assigning " << in << "\n"; + ow->get_outstream() << indent() << "// Not sure what to do for " << in + << "\n"; + } + } + + // Close any dangling loop if any. This should never happen + // because after circular visit is followed by non-empty or empty + // non-circular visit so this code never happens. + if (loop_allowed && loop_id != -1) { + dump_loop_end(aug_graph, phase, loop_id, ow); + loop_id = -1; + } + + return false; // no more! +} + +// dump visit functions for constructors +static void dump_visit_functions(PHY_GRAPH* phy_graph, + AUG_GRAPH* aug_graph, +#ifdef APS2SCALA + ostream& os) +#else /* APS2SCALA */ + output_streams& oss) +#endif /* APS2SCALA */ +{ + Declaration tlm = aug_graph->match_rule; + Match m = top_level_match_m(tlm); + Block block = matcher_body(m); + STATE* s = phy_graph->global_state; + +#ifndef APS2SCALA + ostream& hs = oss.hs; + ostream& cpps = oss.cpps; + // ostream& is = oss.is; + ostream& os = inline_definitions ? 
hs : cpps; + +#endif /* APS2SCALA */ + int pgn = PHY_GRAPH_NUM(phy_graph); + int j = Declaration_info(aug_graph->syntax_decl)->instance_index; + CTO_NODE* total_order = aug_graph->total_order; + + /* Count the children */ + int nch = 0; + for (Declaration ch = aug_graph->first_rhs_decl; ch != 0; ch = DECL_NEXT(ch)) + ++nch; + + int phase; + + vector > default_instance_assignments( + aug_graph->instances.length, std::set()); + vector > instance_assignment = + make_instance_assignment(aug_graph, block, default_instance_assignments, true /* include defaults */); + + // the following loop is controlled in two ways: + // (1) if total order is zero, there are no visits at all. + // (2) otherwise, total_order never changes, + // but eventually when scheduling a phase, we find out + // that it is the last phase and we break the loop + for (phase = 1; phase <= phy_graph->max_phase; phase++) { +#ifdef APS2SCALA + os << indent() << "def visit_" << pgn << "_" << phase << "_" << j + << "(anchor : T_" << decl_name(phy_graph->phylum); + + if (s->loop_required) { + os << ", changed : AtomicBoolean"; + } + + os << ") : Unit = anchor match {\n"; + ++nesting_level; + os << indent() << "case " << matcher_pat(m) << " => {\n"; +#else /* APS2SCALA */ + oss << header_return_type(0) << "void " + << header_function_name("visit_") << pgn << "_" << phase << "_" << j + << "(C_PHYLUM::Node* anchor)" << header_end(); + INDEFINITION; + os << " {\n"; +#endif /* APS2SCALA */ + ++nesting_level; +#ifndef APS2SCALA + os << matcher_bindings("anchor", m); + os << "\n"; +#endif /* APS2SCALA */ + + if (include_comments) { + os << indent() << "// Implementing visit function for " + << aug_graph_name(aug_graph) << " phase: " << phase << "\n"; + } + + CONDITION cond; + cond.positive = 0; + cond.negative = 0; + + OutputWriter ow(os); + implement_visit_function(aug_graph, phase, total_order, instance_assignment, + nch, &cond, -1, s->loop_required, -1, true, &ow); + + --nesting_level; +#ifdef APS2SCALA + os << 
indent() << "}\n"; + --nesting_level; +#endif /* APS2SCALA */ + os << indent() << "}\n"; + } +} + +// The following function is only for Scala code generation +static void dump_constructor_owner(Declaration pd, ostream& os) { + switch (Declaration_KEY(pd)) { + default: + aps_error(pd, "cannot attribute this phylum"); + break; + case KEYphylum_formal: + os << "t_" << decl_name(pd) << DEREF; + break; + case KEYphylum_decl: + switch (Type_KEY(phylum_decl_type(pd))) { + default: + aps_error(pd, "cannot attribute this phylum"); + break; + case KEYno_type: + break; + case KEYtype_inst: + os << "t_" << decl_name(pd) << DEREF; + break; + case KEYtype_use: + dump_constructor_owner(USE_DECL(type_use_use(phylum_decl_type(pd))), + os); + break; + } + break; + case KEYtype_renaming: + switch (Type_KEY(phylum_decl_type(pd))) { + default: + aps_error(pd, "cannot attribute this phylum"); + break; + case KEYtype_use: + dump_constructor_owner(USE_DECL(type_use_use(phylum_decl_type(pd))), + os); + break; + } + break; + } +} + +#ifdef APS2SCALA +static void dump_visit_functions(PHY_GRAPH* pg, ostream& os) +#else /* APS2SCALA */ +static void dump_visit_functions(PHY_GRAPH* pg, output_streams& oss) +#endif /* APS2SCALA */ +{ + STATE* s = pg->global_state; + int pgn = PHY_GRAPH_NUM(pg); + +#ifdef APS2SCALA + ostream& oss = os; +#else + ostream& hs = oss.hs; + ostream& cpps = oss.cpps; + // ostream& is = oss.is; + ostream& os = inline_definitions ? 
hs : cpps; + +#endif /* APS2SCALA */ + vector aug_graphs; // not in order + + for (int i = 0; i < s->match_rules.length; ++i) { + AUG_GRAPH* ag = &s->aug_graphs[i]; + if (ag->lhs_decl && Declaration_info(ag->lhs_decl)->node_phy_graph == pg) { + aug_graphs.push_back(ag); + } + } + + int num_cons = aug_graphs.size(); + + if (num_cons == 0) { + fatal_error("no top-level-match match for phylum %s", + decl_name(pg->phylum)); + } + + // The match clauses may be in a different order + // that the constructor declarations in the tree module + int cons_num = 0; + Declaration cmodule = + (Declaration)tnode_parent(aug_graphs.front()->syntax_decl); + while (ABSTRACT_APS_tnode_phylum(cmodule) != KEYDeclaration || + Declaration_KEY(cmodule) != KEYmodule_decl) + cmodule = (Declaration)tnode_parent(cmodule); + Block cblock = module_decl_contents(cmodule); + for (Declaration d = first_Declaration(block_body(cblock)); d; + d = DECL_NEXT(d)) { + if (Declaration_KEY(d) == KEYconstructor_decl) { + for (int j = 0; j < num_cons; ++j) { + if (aug_graphs[j]->syntax_decl == d) { + Declaration_info(d)->instance_index = cons_num; + ++cons_num; + break; + } + } + } + } + + if (num_cons != cons_num) { + fatal_error("Can't find all constructors"); + } + + for (int ph = 1; ph <= pg->max_phase; ++ph) { +#ifdef APS2SCALA + os << indent() << "def visit_" << pgn << "_" << ph << "(node : T_" + << decl_name(pg->phylum); + + if (s->loop_required) { + os << ", changed : AtomicBoolean"; + } + + os << ") : Unit = node match {\n"; +#else /* APS2SCALA */ + oss << header_return_type(0) << "void " + << header_function_name("visit_") << pgn << "_" << ph + << "(C_PHYLUM::Node* node)" << header_end(); + INDEFINITION; + os << " {\n"; +#endif /* APS2SCALA */ + ++nesting_level; +#ifndef APS2SCALA + os << indent() << "switch (node->cons->get_index()) {\n"; +#endif /* APS2SCALA */ + for (int j = 0; j < num_cons; ++j) { +#ifdef APS2SCALA + Declaration cd = aug_graphs[j]->syntax_decl; + os << indent() << "case "; + 
dump_constructor_owner(pg->phylum, os); + os << "p_" << decl_name(cd) << "(_"; + Declarations fs = function_type_formals(constructor_decl_type(cd)); + for (Declaration f = first_Declaration(fs); f; f = DECL_NEXT(f)) { + os << ",_"; + } + os << ") => " + << "visit_" << pgn << "_" << ph << "_" + << Declaration_info(cd)->instance_index << "(node"; + + if (s->loop_required) { + os << ", changed"; + } + + os << ");\n"; +#else /* APS2SCALA */ + os << indent() << "case " << j << ":\n"; + ++nesting_level; + os << indent() << "visit_" << pgn << "_" << ph << "_" << j << "(node"; + if (s->loop_required) { + os << ", changed"; + } + os << ");\n"; + os << indent() << "break;\n"; + --nesting_level; +#endif /* APS2SCALA */ + } +#ifndef APS2SCALA + os << indent() << "default:\n"; + ++nesting_level; + os << indent() << "throw std::runtime_error(\"bad constructor index\");\n"; +#endif /* APS2SCALA */ + --nesting_level; +#ifdef APS2SCALA + os << indent() << "};\n"; +#else /* APS2SCALA */ + os << indent() << "}\n"; + --nesting_level; + os << indent() << "}\n"; +#endif /* APS2SCALA */ + } + + // Now spit out visit procedures for each constructor + for (int i = 0; i < num_cons; ++i) { + dump_visit_functions(pg, aug_graphs[i], oss); + } + + oss << "\n"; // some blank lines +} + +#ifdef APS2SCALA +static void dump_visit_functions(STATE* s, ostream& os) +#else /* APS2SCALA */ +static void dump_visit_functions(STATE* s, output_streams& oss) +#endif /* APS2SCALA */ +{ +#ifdef APS2SCALA + ostream& oss = os; +#else /* !APS2SCALA */ + ostream& hs = oss.hs; + ostream& cpps = oss.cpps; + ostream& os = inline_definitions ? 
hs : cpps; + +#endif /* APS2SCALA */ + // first dump all visit functions for each phylum: + int nphy = s->phyla.length; + for (int j = 0; j < nphy; ++j) { + PHY_GRAPH* pg = &s->phy_graphs[j]; + if (Declaration_KEY(pg->phylum) == KEYphylum_decl) { + dump_visit_functions(pg, oss); + } + } + + Declaration sp = s->start_phylum; +#ifdef APS2SCALA + os << indent() << "def visit() : Unit = {\n"; +#else /* APS2SCALA */ + oss << header_return_type(0) << "void " << header_function_name("visit") + << "()" << header_end(); + INDEFINITION; + os << " {\n"; +#endif /* APS2SCALA */ + ++nesting_level; +#ifdef APS2SCALA + os << indent() << "val roots = t_" << decl_name(sp) << ".nodes;\n"; +#else /* APS2SCALA */ + os << indent() << "Phylum* phylum = this->t_" + << decl_name(sp) //! bug sometimes + << "->get_phylum();\n"; + os << indent() << "int n_roots = phylum->size();\n"; + os << "\n"; // blank line +#endif /* APS2SCALA */ + + // printf("sp: %s and pointer %ld %d\n", decl_name(sp), (long) + // (Declaration_info(s->start_phylum)->node_phy_graph), + // tnode_line_number(s->)); + int phase = Declaration_info(s->module)->node_phy_graph->max_phase; + + vector > default_instance_assignments( + s->global_dependencies.instances.length, std::set()); + vector > instance_assignment = make_instance_assignment( + &s->global_dependencies, module_decl_contents(s->module), + default_instance_assignments, true /* include defaults */); + + CONDITION cond; + cond.positive = 0; + cond.negative = 0; + + OutputWriter ow(os); + implement_visit_function( + &s->global_dependencies, phase, s->global_dependencies.total_order, + instance_assignment, 1, &cond, -1, true, -1, false, &ow); + --nesting_level; + os << indent() << "}\n"; +} + +static void* dump_scheduled_local(void* pbs, void* node) { + ostream& bs = *(ostream*)pbs; + if (ABSTRACT_APS_tnode_phylum(node) == KEYDeclaration) { + Declaration d = (Declaration)node; + if (Declaration_KEY(d) == KEYvalue_decl) { + static int unique = 0; + 
LOCAL_UNIQUE_PREFIX(d) = ++unique; + Declaration_info(d)->decl_flags |= LOCAL_VALUE_FLAG; +#ifdef APS2SCALA + bs << indent() << "var " + << " v" << unique << "_" << decl_name(d) << " : " << value_decl_type(d) + << " = null.asInstanceOf[" << value_decl_type(d) << "]" + << ";\n"; +#else /* APS2SCALA */ + bs << indent() << value_decl_type(d) << " v" << unique << "_" + << decl_name(d) << ";\n"; +#endif /* APS2SCALA */ + } + } + return pbs; +} + +static void dump_scheduled_function_body(Declaration fd, STATE* s, ostream& bs) { + const char* name = decl_name(fd); + Type ft = function_decl_type(fd); + + // dump any local values: + traverse_Declaration(dump_scheduled_local, &bs, fd); + + int index; + for (index = 0; index < s->match_rules.length; ++index) + if (s->match_rules.array[index] == fd) + break; + + if (index >= s->match_rules.length) + fatal_error("Cannot find function %s in top-level-matches", name); + + int pindex; + for (pindex = 0; pindex < s->phyla.length; ++pindex) + if (s->phyla.array[pindex] == fd) + break; + + if (pindex >= s->phyla.length) + fatal_error("Cannot find function %s in phyla", name); + + AUG_GRAPH* aug_graph = &s->aug_graphs[index]; + CTO_NODE* schedule = aug_graph->total_order; + + vector > default_instance_assignments( + aug_graph->instances.length, std::set()); + vector > instance_assignment = make_instance_assignment( + aug_graph, function_decl_body(fd), default_instance_assignments, true /* include defaults */); + + CONDITION cond; + cond.positive = 0; + cond.negative = 0; + + int max_phase = + Declaration_info(aug_graph->lhs_decl)->node_phy_graph->max_phase; + + OutputWriter ow(bs); + bool cont = implement_visit_function(aug_graph, max_phase, schedule, + instance_assignment, 0, &cond, -1, false, + -1, false, &ow); + + Declaration returndecl = first_Declaration(function_type_return_values(ft)); + if (returndecl == 0) { + bs << indent() << "return;\n"; + } else { + bs << indent() << "return v" << LOCAL_UNIQUE_PREFIX(returndecl) << "_" + 
<< decl_name(returndecl) << ";\n"; + } + + if (cont) { + std::cout << "Function " << name << " should not require a second pass!\n"; + int phase = 2; + bs << " /*\n"; + bs << " // phase 2\n"; + + while (implement_visit_function(aug_graph, phase, schedule, + instance_assignment, 0, &cond, -1, false, + -1, true, &ow)) { + if (include_comments) { + bs << indent() << "// phase " << ++phase << "\n"; + } + } + bs << indent() << "*/\n"; + } +} + +class StaticScc : public Implementation { + public: + typedef Implementation::ModuleInfo Super; + class ModuleInfo : public Super { + public: + ModuleInfo(Declaration mdecl) : Implementation::ModuleInfo(mdecl) {} + + void note_top_level_match(Declaration tlm, GEN_OUTPUT& oss) { + Super::note_top_level_match(tlm, oss); + } + + void note_local_attribute(Declaration ld, GEN_OUTPUT& oss) { + Super::note_local_attribute(ld, oss); + Declaration_info(ld)->decl_flags |= LOCAL_ATTRIBUTE_FLAG; + } + + void note_attribute_decl(Declaration ad, GEN_OUTPUT& oss) { + Declaration_info(ad)->decl_flags |= ATTRIBUTE_DECL_FLAG; + Super::note_attribute_decl(ad, oss); + } + + void note_var_value_decl(Declaration vd, GEN_OUTPUT& oss) { + Super::note_var_value_decl(vd, oss); + } + +#ifdef APS2SCALA + void implement(ostream& os){ +#else /* APS2SCALA */ + void implement(output_streams& oss) { +#endif /* APS2SCALA */ + STATE* s = (STATE*)Declaration_info(module_decl)->analysis_state; + +#ifdef APS2SCALA + ostream& oss = os; +#else + ostream& hs = oss.hs; + ostream& cpps = oss.cpps; + ostream& os = inline_definitions ? 
hs : cpps; + // char *name = decl_name(module_decl); +#endif /* APS2SCALA */ + + Declarations ds = block_body(module_decl_contents(module_decl)); + + dump_visit_functions(s, oss); + + // Implement finish routine: +#ifdef APS2SCALA + os << indent() << "override def finish() : Unit = {\n"; +#else /* APS2SCALA */ + hs << indent() << "void finish()"; + if (!inline_definitions) { + hs << ";\n"; + cpps << "void " << oss.prefix << "finish()"; + } + INDEFINITION; + os << " {\n"; +#endif /* APS2SCALA */ + ++nesting_level; + os << indent() << "visit();\n"; + + // types actually should be scheduled... + for (Declaration d = first_Declaration(ds); d; d = DECL_NEXT(d)) { + const char* kind = NULL; + switch (Declaration_KEY(d)) { + case KEYphylum_decl: + case KEYtype_decl: + switch (Type_KEY(some_type_decl_type(d))) { + case KEYno_type: + case KEYtype_inst: + kind = "t_"; + break; + default: + break; + } + default: + break; + } + if (kind != NULL) { + const char* n = decl_name(d); + os << indent() << kind << n << DEREF << "finish();\n"; + } + } +#ifdef APS2SCALA + os << indent() << "super.finish();\n"; +#endif /* ! 
APS2SCALA */ + --nesting_level; + os << indent() << "}\n"; + + clear_implementation_marks(module_decl); + } +}; + +Super* get_module_info(Declaration m) { + return new ModuleInfo(m); +} + +void implement_function_body(Declaration f, ostream& os) { + Declaration module = (Declaration)tnode_parent(f); + while (module && (ABSTRACT_APS_tnode_phylum(module) != KEYDeclaration || + Declaration_KEY(module) != KEYmodule_decl)) + module = (Declaration)tnode_parent(module); + if (module) { + STATE* s = (STATE*)Declaration_info(module)->analysis_state; + dump_scheduled_function_body(f, s, os); + } else { + dynamic_impl->implement_function_body(f, os); + } +} + +void implement_value_use(Declaration vd, ostream& os) { + int flags = Declaration_info(vd)->decl_flags; + if (flags & LOCAL_ATTRIBUTE_FLAG) { + os << "a" << LOCAL_UNIQUE_PREFIX(vd) << "_" << decl_name(vd) << DEREF + << "get(anchor)"; + } else if (flags & ATTRIBUTE_DECL_FLAG) { + os << "a" + << "_" << decl_name(vd) << DEREF << "get"; + } else if (flags & LOCAL_VALUE_FLAG) { + os << "v" << LOCAL_UNIQUE_PREFIX(vd) << "_" << decl_name(vd); + } else { + aps_error(vd, "internal_error: What is special about this?"); + } +} +} +; + +Implementation* static_scc_impl = new StaticScc(); diff --git a/codegen/synth-impl.cc b/codegen/synth-impl.cc new file mode 100644 index 00000000..13288907 --- /dev/null +++ b/codegen/synth-impl.cc @@ -0,0 +1,1783 @@ +#include +#include + +#include +#include +#include +extern "C" { +#include + +#include "aps-ag.h" +} +#include +#include +#include +#include +#include +#include + +#include "dump.h" +#include "implement.h" + +#define LOCAL_VALUE_FLAG (1 << 28) + +#ifdef APS2SCALA +#define DEREF "." +#else +#define DEREF "->" +#endif + +static const string LOOP_VAR = "isInsideFixedPoint"; +static const string PREV_LOOP_VAR = "prevIsInsideFixedPoint"; + +// visit procedures are called: +// visit_n_m +// where n is the number of the phy_graph and m is the phase. 
+// This does a dispatch to visit_n_m_p +// where p is the production number (0-based constructor index) +#define KEY_BLOCK_ITEM_CONDITION 1 +#define KEY_BLOCK_ITEM_INSTANCE 2 + +struct block_item_base { + int key; + INSTANCE* instance; + struct block_item_base* prev; +}; + +typedef struct block_item_base BlockItem; + +struct block_item_condition { + int key; /* KEY_BLOCK_ITEM_CONDITION */ + INSTANCE* instance; + BlockItem* prev; + Declaration condition; + BlockItem* next_positive; + BlockItem* next_negative; +}; + +struct block_item_instance { + int key; /* KEY_BLOCK_ITEM_INSTANCE */ + INSTANCE* instance; + BlockItem* prev; + BlockItem* next; +}; + +vector current_blocks; +BlockItem* current_scope_block; +vector dumped_conditional_block_items; +vector dumped_instances; +static bool tracking_fiber_convergence = false; + +// Given a block, it prints its linearized schedule as comments in the output stream. +static void print_linearized_block(BlockItem* block, ostream& os) { + if (block != NULL) { + os << indent() << block->instance << "\n"; + if (block->key == KEY_BLOCK_ITEM_CONDITION) { + struct block_item_condition* cond = (struct block_item_condition*)block; + + if (cond->prev != NULL && cond->prev->key != KEY_BLOCK_ITEM_CONDITION) { + os << indent() << cond->prev->instance << "\n"; + } + + os << indent() << "IF\n"; + nesting_level++; + print_linearized_block(cond->next_positive, os); + nesting_level--; + os << indent() << "ELSE\n"; + nesting_level++; + print_linearized_block(cond->next_negative, os); + nesting_level--; + } else { + print_linearized_block(((struct block_item_instance*)block)->next, os); + } + } +} + +static vector sort_instances(AUG_GRAPH* aug_graph) { + vector result; + + int n = aug_graph->instances.length; + int i; + for (i = 0; i < n; i++) { + INSTANCE* instance = &aug_graph->instances.array[i]; + if (!if_rule_p(instance->fibered_attr.attr)) { + result.push_back(instance); + } + } + + for (i = 0; i < n; i++) { + INSTANCE* instance = 
&aug_graph->instances.array[i]; + if (if_rule_p(instance->fibered_attr.attr)) { + result.push_back(instance); + } + } + + return result; +} + +// Given an augmented dependency graph, it linearize it recursively +static BlockItem* linearize_block_helper(AUG_GRAPH* aug_graph, + const vector& sorted_instances, + bool* scheduled, + CONDITION* cond, + BlockItem* prev, + int remaining, + INSTANCE* aug_graph_instance) { + // impossible merge condition + if (CONDITION_IS_IMPOSSIBLE(*cond)) { + return NULL; + } + + int j; + int n = aug_graph->instances.length; + + for (auto it = sorted_instances.begin(); it != sorted_instances.end(); it++) { + INSTANCE* instance = *it; + int i = instance->index; + + if (scheduled[i]) { + continue; + } + + // impossible merge condition, cannot schedule this instance + if (MERGED_CONDITION_IS_IMPOSSIBLE(*cond, instance_condition(instance))) { + scheduled[i] = true; + BlockItem* result = linearize_block_helper(aug_graph, sorted_instances, scheduled, cond, prev, remaining - 1, aug_graph_instance); + scheduled[i] = false; + return result; + } + + // if there is no dependency between this instance and the augmented dependency instance that we want to linearize for, + // then this instance should not be included in the linearization linked-list + if (aug_graph_instance != instance && !edgeset_kind(aug_graph->graph[instance->index * n + aug_graph_instance->index])) { + scheduled[i] = true; + BlockItem* result = linearize_block_helper(aug_graph, sorted_instances, scheduled, cond, prev, remaining - 1, aug_graph_instance); + scheduled[i] = false; + return result; + } + + // printf("trying to schedule: "); + // print_instance(instance, stdout); + // printf("\n"); + + bool ready_to_schedule = true; + for (j = 0; j < n && ready_to_schedule; j++) { + INSTANCE* other_instance = &aug_graph->instances.array[j]; + + // already scheduled dependency + if (scheduled[j]) { + continue; + } + + // impossible merge condition, ignore this dependency + if 
(MERGED_CONDITION_IS_IMPOSSIBLE(instance_condition(instance), instance_condition(other_instance))) { + continue; + } + + // not a direct dependency + if (!(edgeset_kind(aug_graph->graph[j * n + i]) & DEPENDENCY_MAYBE_DIRECT)) { + continue; + } + + ready_to_schedule = false; + break; + } + + // if all dependencies are ready to schedule + if (!ready_to_schedule) { + continue; + } + + BlockItem* item_base; + scheduled[i] = true; + + if (if_rule_p(instance->fibered_attr.attr)) { + struct block_item_condition* item = (struct block_item_condition*)malloc(sizeof(struct block_item_condition)); + item_base = (BlockItem*)item; + + item->key = KEY_BLOCK_ITEM_CONDITION; + item->instance = instance; + item->condition = instance->fibered_attr.attr; + item->prev = prev; + + int cmask = 1 << (if_rule_index(instance->fibered_attr.attr)); + cond->positive |= cmask; + item->next_positive = linearize_block_helper(aug_graph, sorted_instances, scheduled, cond, item_base, remaining - 1, aug_graph_instance); + cond->positive &= ~cmask; + cond->negative |= cmask; + item->next_negative = linearize_block_helper(aug_graph, sorted_instances, scheduled, cond, item_base, remaining - 1, aug_graph_instance); + cond->negative &= ~cmask; + } else { + struct block_item_instance* item = (struct block_item_instance*)malloc(sizeof(struct block_item_instance)); + item_base = (BlockItem*)item; + item->key = KEY_BLOCK_ITEM_INSTANCE; + item->instance = instance; + item->prev = prev; + item->next = linearize_block_helper(aug_graph, sorted_instances, scheduled, cond, item_base, remaining - 1, aug_graph_instance); + } + + scheduled[i] = false; + + return item_base; + } + + if (remaining != 0) { + fatal_error("failed to schedule some instances, remaining: %d", remaining); + } + + return NULL; +} + +// Given an augmented dependency graph, it linearizes +// the direct dependency schedule. 
+static BlockItem* linearize_block(AUG_GRAPH* aug_graph, INSTANCE* aug_graph_instance) { + int n = aug_graph->instances.length; + bool* scheduled = (bool*)alloca(sizeof(bool) * n); + memset(scheduled, 0, sizeof(bool) * n); + + CONDITION cond = {0, 0}; + vector sorted_instances = sort_instances(aug_graph); + + return linearize_block_helper(aug_graph, sorted_instances, scheduled, &cond, NULL, n, aug_graph_instance); +} + +// Given an instance it traverses the direct dependency schedule +// trying to find the instance and if it sees the condition +// along the way, it returns that condition. +static BlockItem* find_surrounding_block(BlockItem* block, INSTANCE* instance) { + while (block != NULL) { + if (block->key == KEY_BLOCK_ITEM_CONDITION) { + return block; + } else if (block->key == KEY_BLOCK_ITEM_INSTANCE) { + if (block->instance == instance) { + return block; + } else { + block = ((struct block_item_instance*)block)->next; + } + } + } + + return NULL; +} + +enum instance_direction custom_instance_direction(INSTANCE* i) { + enum instance_direction dir = fibered_attr_direction(&i->fibered_attr); + if (i->node == NULL) { + return dir; + } else if (DECL_IS_LHS(i->node)) { + return dir; + } else if (DECL_IS_RHS(i->node)) { + return dir; + } else if (DECL_IS_LOCAL(i->node)) { + return instance_local; + } else { + fatal_error("%d: unknown attributed node", tnode_line_number(i->node)); + return dir; /* keep CC happy */ + } +} + +static bool instance_is_local(INSTANCE* instance) { + if (instance->fibered_attr.fiber == NULL) { + return custom_instance_direction(instance) == instance_local; + } else { + return fibered_attr_direction(&instance->fibered_attr) == instance_local; + } +} + +static bool instance_is_synthesized(INSTANCE* instance) { + if (instance->fibered_attr.fiber == NULL) { + return custom_instance_direction(instance) == instance_outward; + } else { + return fibered_attr_direction(&instance->fibered_attr) == instance_outward; + } +} + +static bool 
instance_is_inherited(INSTANCE* instance) { + if (instance->fibered_attr.fiber == NULL) { + return custom_instance_direction(instance) == instance_inward; + } else { + return fibered_attr_direction(&instance->fibered_attr) == instance_inward; + } +} + +static bool instance_is_pure_shared_info(INSTANCE* instance) { + return instance->fibered_attr.fiber == NULL && ATTR_DECL_IS_SHARED_INFO(instance->fibered_attr.attr); +} + +static std::vector collect_phylum_graph_attr_dependencies(PHY_GRAPH* phylum_graph, + INSTANCE* sink_instance) { + std::vector result; + + int i; + int n = phylum_graph->instances.length; + + for (i = 0; i < n; i++) { + INSTANCE* source_instance = &phylum_graph->instances.array[i]; + if (!instance_is_pure_shared_info(source_instance) && source_instance->index != sink_instance->index && + phylum_graph->mingraph[source_instance->index * n + sink_instance->index]) { + result.push_back(source_instance); + } + } + + return result; +} + +static bool is_function_decl_attribute(INSTANCE* instance) { + if (instance->node != NULL && ABSTRACT_APS_tnode_phylum(instance->node) == KEYDeclaration) { + switch (Declaration_KEY(instance->node)) { + case KEYpragma_call: { + Declaration fdecl = Declaration_info(instance->node)->proxy_fdecl; + switch (Declaration_KEY(fdecl)) { + case KEYfunction_decl: + return true; + default: + break; + } + break; + } + default: + break; + } + } + + return false; +} + +static std::vector collect_aug_graph_attr_dependencies(AUG_GRAPH* aug_graph, + INSTANCE* sink_instance) { + std::vector result; + + int i; + int n = aug_graph->instances.length; + + for (i = 0; i < n; i++) { + INSTANCE* source_instance = &aug_graph->instances.array[i]; + if (!instance_is_pure_shared_info(source_instance) && source_instance->index != sink_instance->index && + !is_function_decl_attribute(source_instance) && + edgeset_kind(aug_graph->graph[source_instance->index * n + sink_instance->index])) { + result.push_back(source_instance); + } + } + + return result; 
+} + +static vector collect_lhs_aug_graphs(STATE* state, PHY_GRAPH* pgraph) { + vector result; + + int i; + int n = state->match_rules.length; + for (i = 0; i < n; i++) { + AUG_GRAPH* aug_graph = &state->aug_graphs[i]; + PHY_GRAPH* aug_graph_pgraph = Declaration_info(aug_graph->lhs_decl)->node_phy_graph; + + if (aug_graph_pgraph == pgraph) { + switch (Declaration_KEY(aug_graph->lhs_decl)) { + case KEYsome_function_decl: + continue; + default: + break; + } + + result.push_back(aug_graph); + } + } + + return result; +} + +static bool find_instance(AUG_GRAPH* aug_graph, + Declaration node, + FIBERED_ATTRIBUTE& fiber_attr, + INSTANCE** instance_out) { + int i; + for (i = 0; i < aug_graph->instances.length; i++) { + INSTANCE* instance = &aug_graph->instances.array[i]; + if (instance->node == node && instance->fibered_attr.attr == fiber_attr.attr) { + if (fibered_attr_equal(&instance->fibered_attr, &fiber_attr)) { + *instance_out = instance; + return true; + } + } + } + + return false; +} + +string attr_to_string(Declaration attr) { + if (ATTR_DECL_IS_SHARED_INFO(attr)) { + return "sharedinfo"; + } else { + return decl_name(attr); + } +} + +string fiber_to_string(FIBER fiber) { + std::stringstream ss; + + while (fiber != NULL && fiber->field != NULL) { + std::string field = decl_name(fiber->field); + field.erase(std::remove(field.begin(), field.end(), '!'), field.end()); + + ss << field; + fiber = fiber->shorter; + if (fiber->field != NULL) { + ss << "_"; + } + } + + return ss.str(); +} + +string instance_to_string(INSTANCE* in, bool force_include_node_type = false, bool trim_node = false) { + vector result; + + Declaration node = in->node; + if (force_include_node_type && node == NULL) { + node = current_aug_graph->lhs_decl; + } + + if (!trim_node && node != NULL) { + if (Declaration_KEY(node) == KEYpragma_call) { + result.push_back(symbol_name(pragma_call_name(node))); + } else { + result.push_back(decl_name(node)); + } + } + + if (in->fibered_attr.attr != NULL) { + 
result.push_back(attr_to_string(in->fibered_attr.attr)); + } + + if (in->fibered_attr.fiber != NULL) { + result.push_back(fiber_to_string(in->fibered_attr.fiber)); + } + + return std::accumulate(std::next(result.begin()), result.end(), result[0], + [](std::string a, std::string b) { return a + "_" + b; }); +} + +static string instance_to_string_with_nodetype(Declaration polymorphic, INSTANCE* in) { + Declaration attr = in->fibered_attr.attr; + std::stringstream ss; + + if (Declaration_KEY(attr) == KEYvalue_decl && LOCAL_UNIQUE_PREFIX(attr) != 0) { + ss << "a" << LOCAL_UNIQUE_PREFIX(attr) << "_"; + } + + ss << decl_name(polymorphic) << "_" << instance_to_string(in, false, false); + + return ss.str(); +} + +static string instance_to_attr(INSTANCE* in) { + Declaration attr = in->fibered_attr.attr; + Declaration field = in->fibered_attr.fiber != NULL ? in->fibered_attr.fiber->field : NULL; + std::stringstream ss; + + if (Declaration_KEY(attr) == KEYvalue_decl && LOCAL_UNIQUE_PREFIX(attr) != 0) { + ss << "a" << LOCAL_UNIQUE_PREFIX(attr); + } else { + ss << "a"; + } + + if (attr != NULL && !ATTR_DECL_IS_SHARED_INFO(attr)) { + ss << "_" << decl_name(attr); + } + + if (field != NULL) { + std::string field_str = decl_name(field); + field_str.erase(std::remove(field_str.begin(), field_str.end(), '!'), field_str.end()); + ss << "_" << field_str; + } + + return ss.str(); +} + +static bool check_is_match_formal(void* node) { + Declaration formal_decl = NULL; + bool is_formal = check_surrounding_decl(node, KEYnormal_formal, &formal_decl); + + void* match = NULL; + bool is_inside_match = check_surrounding_node(node, KEYMatch, &match); + + return is_formal && is_inside_match; +} + +// Unified filter for determining which dependencies should be included +// as function parameters and call arguments in synthesized eval functions. 
+static bool should_skip_synth_dependency(INSTANCE* source_instance) { + if (source_instance->fibered_attr.fiber != NULL) { + return true; + } + if (if_rule_p(source_instance->fibered_attr.attr)) { + return true; + } + if (check_is_match_formal(source_instance->fibered_attr.attr)) { + return true; + } + return false; +} + +static std::vector build_synth_functions_state(STATE* s) { + std::vector synth_function_states; + int i, j; + int aug_graph_count = s->match_rules.length; + + for (i = 0; i < s->phyla.length; i++) { + PHY_GRAPH* pg = &s->phy_graphs[i]; + if (Declaration_KEY(pg->phylum) != KEYphylum_decl) { + continue; + } + + for (j = 0; j < pg->instances.length; j++) { + INSTANCE* in = &pg->instances.array[j]; + bool is_fiber = in->fibered_attr.fiber != NULL; + bool is_shared_info = ATTR_DECL_IS_SHARED_INFO(in->fibered_attr.attr); + + if (!instance_is_synthesized(in)) { + continue; + } + + SYNTH_FUNCTION_STATE* state = new SYNTH_FUNCTION_STATE(); + state->fdecl_name = instance_to_string_with_nodetype(pg->phylum, in); + state->source = in; + state->is_phylum_instance = true; + state->source_phy_graph = pg; + state->is_fiber_evaluation = is_fiber || is_shared_info; + + if (state->is_fiber_evaluation && state->source->node != NULL) { + printf("Warning: Synthesizing fiber evaluation for instance with attributed node, which is not supported yet. 
Instance: "); + print_instance(in, stdout); + printf("\n"); + } + + state->regular_dependencies = collect_phylum_graph_attr_dependencies(pg, in); + state->aug_graphs = collect_lhs_aug_graphs(s, pg); + + synth_function_states.push_back(state); + } + } + + for (i = 0; i < aug_graph_count; i++) { + AUG_GRAPH* aug_graph = &s->aug_graphs[i]; + + switch (Declaration_KEY(aug_graph->lhs_decl)) { + case KEYsome_function_decl: + continue; + default: + break; + } + + for (j = 0; j < aug_graph->instances.length; j++) { + INSTANCE* instance = &aug_graph->instances.array[j]; + bool is_local = instance_direction(instance) == instance_local; + bool is_fiber = instance->fibered_attr.fiber != NULL; + bool is_shared_info = ATTR_DECL_IS_SHARED_INFO(instance->fibered_attr.attr); + + if (is_local && !is_fiber && !if_rule_p(instance->fibered_attr.attr)) { + switch (Declaration_KEY(instance->fibered_attr.attr)) { + case KEYformal: + continue; + default: + break; + } + + Declaration attr = instance->fibered_attr.attr; + PHY_GRAPH* pg = Declaration_info(aug_graph->lhs_decl)->node_phy_graph; + Declaration tdecl = canonical_type_decl(canonical_type(value_decl_type(attr))); + + SYNTH_FUNCTION_STATE* state = new SYNTH_FUNCTION_STATE(); + state->fdecl_name = instance_to_string_with_nodetype(tdecl, instance); + state->source = instance; + state->is_phylum_instance = false; + state->source_phy_graph = pg; + state->is_fiber_evaluation = is_fiber || is_shared_info; + state->regular_dependencies = collect_aug_graph_attr_dependencies(aug_graph, instance); + + vector aug_graphs; + aug_graphs.push_back(aug_graph); + state->aug_graphs = aug_graphs; + + synth_function_states.push_back(state); + } + } + } + + return synth_function_states; +} + +static void destroy_synth_function_states(const vector& states) { + for (auto it = states.begin(); it != states.end(); it++) { + delete (*it); + } +} + +static void dump_attribute_type(INSTANCE* in, ostream& os) { + CanonicalType* ctype = 
canonical_type(infer_some_value_decl_type(in->fibered_attr.attr)); + switch (ctype->key) { + case KEY_CANONICAL_USE: { + os << "T_" << decl_name(canonical_type_decl(ctype)); + break; + } + case KEY_CANONICAL_FUNC: { + struct Canonical_function_type* fdecl_ctype = (struct Canonical_function_type*)ctype; + os << "T_" << decl_name(canonical_type_decl(fdecl_ctype->return_type)); + break; + } + default: + break; + } +} + +class FiberDependencyDumper { +public: + static void dump(AUG_GRAPH* aug_graph, INSTANCE* sink, ostream& os) { + + int i, j; + int n = aug_graph->instances.length; + + vector relevant_instances; + + // collect relevant fiber dependencies + for (i = 0; i < n; i++) { + INSTANCE* in = &aug_graph->instances.array[i]; + if (in->node != NULL && Declaration_KEY(in->node) == KEYpragma_call) { + continue; + } + + if (edgeset_kind(aug_graph->graph[in->index * n + sink->index])) { + if (in->fibered_attr.fiber != NULL) { + if (instance_is_synthesized(in) || instance_is_local(in)) { + relevant_instances.push_back(in); + } + } + } + } + + if (relevant_instances.empty()) { + return; + } + + bool* scheduled = (bool*)alloca(sizeof(bool) * n); + memset(scheduled, 0, sizeof(bool) * n); + + SccGraph scc_graph; + scc_graph_initialize(&scc_graph, static_cast(relevant_instances.size())); + + // add vertices to the SCC graph + for (auto it = relevant_instances.begin(); it != relevant_instances.end(); it++) { + INSTANCE* in = *it; + scc_graph_add_vertex(&scc_graph, in); + } + + // add edges to the SCC graph + for (auto it1 = relevant_instances.begin(); it1 != relevant_instances.end(); it1++) { + INSTANCE* in1 = *it1; + for (auto it2 = relevant_instances.begin(); it2 != relevant_instances.end(); it2++) { + INSTANCE* in2 = *it2; + if (in1->index == in2->index) { + continue; + } + + if (edgeset_kind(aug_graph->graph[in1->index * n + in2->index])) { + scc_graph_add_edge(&scc_graph, in1, in2); + } + } + } + + SCC_COMPONENTS* components = scc_graph_components(&scc_graph); + + if 
(include_comments) { + os << indent() << "/* Fiber dependency SCC components:\n"; + nesting_level++; + for (i = 0; i < components->length; i++) { + SCC_COMPONENT* component = components->array[i]; + os << indent() << "Component " << i << ":\n"; + nesting_level++; + for (j = 0; j < component->length; j++) { + INSTANCE* in = (INSTANCE*)component->array[j]; + os << indent() << in << "\n"; + } + nesting_level--; + } + nesting_level--; + os << indent() << "*/\n"; + } + + dump_scc_helper(aug_graph, components, scheduled, os); + + scc_graph_destroy(&scc_graph); + } + +private: + static void dump_scc_helper(AUG_GRAPH* aug_graph, SCC_COMPONENTS* components, bool* scheduled, ostream& os) { + int component_count = components->length; + int i; + for (i = 0; i < component_count; i++) { + SCC_COMPONENT* component = find_next_ready_component(aug_graph, components, scheduled); + + dump_scc_helper_dump(aug_graph, component, scheduled, os); + } + + for (i = 0; i < component_count; i++) { + SCC_COMPONENT* component = components->array[i]; + if (!already_scheduled(aug_graph, component, scheduled)) { + fatal_error("some instances were not scheduled"); + } + } + } + + static void dump_scc_helper_dump(AUG_GRAPH* aug_graph, SCC_COMPONENT* component, bool* scheduled, ostream& os) { + int i; + + if (already_scheduled(aug_graph, component, scheduled)) { + return; + } + + int n = aug_graph->instances.length; + if (component->length == 0) { + return; + } + + for (i = 0; i < component->length; i++) { + INSTANCE* in = (INSTANCE*)component->array[i]; + + if (scheduled[in->index]) { + continue; + } + + bool dependency_ready = true; + for (int j = 0; j < component->length && dependency_ready; j++) { + INSTANCE* dependency_instance = (INSTANCE*)component->array[j]; + if (dependency_instance == in) { + continue; + } + + if (!scheduled[dependency_instance->index] && + edgeset_kind(aug_graph->graph[dependency_instance->index * n + in->index]) & DEPENDENCY_MAYBE_DIRECT) { + dependency_ready = false; + } 
+ } + + if (!dependency_ready) { + continue; + } + + scheduled[in->index] = true; + os << indent(); + impl->dump_synth_instance(in, os); + dumped_conditional_block_items.clear(); + dumped_instances.clear(); + os << "\n"; + + dump_scc_helper_dump(aug_graph, component, scheduled, os); + } + } + + static bool already_scheduled(AUG_GRAPH* aug_graph, SCC_COMPONENT* component, bool* scheduled) { + for (int i = 0; i < component->length; i++) { + INSTANCE* in = (INSTANCE*)component->array[i]; + if (!scheduled[in->index]) { + return false; + } + } + return true; + } + + static SCC_COMPONENT* find_next_ready_component(AUG_GRAPH* aug_graph, + SCC_COMPONENTS* components, + bool* scheduled) { + + int n = aug_graph->instances.length; + + for (int i = 0; i < components->length; i++) { + SCC_COMPONENT* component = components->array[i]; + + // if all instances in the component are scheduled, skip it + if (already_scheduled(aug_graph, component, scheduled)) { + continue; + } + + bool component_ready = true; + for (int j = 0; j < component->length && component_ready; j++) { + INSTANCE* in = (INSTANCE*)component->array[j]; + + for (int k = 0; k < components->length && component_ready; k++) { + SCC_COMPONENT* other_component = components->array[k]; + if (other_component == component) { + continue; + } + + if (already_scheduled(aug_graph, other_component, scheduled)) { + continue; + } + + for (int l = 0; l < other_component->length; l++) { + INSTANCE* other_in = (INSTANCE*)other_component->array[l]; + if (edgeset_kind(aug_graph->graph[other_in->index * n + in->index])) { + component_ready = false; + break; + } + } + } + } + + if (component_ready) { + return component; + } + } + + fatal_error("no more components to schedule"); + return NULL; + } +}; + +#ifdef APS2SCALA +static void dump_synth_functions(STATE* s, ostream& os) +#else /* APS2SCALA */ +static void dump_synth_functions(STATE* s, output_streams& oss) +#endif /* APS2SCALA */ +{ +#ifdef APS2SCALA + ostream& oss = os; +#else /* 
!APS2SCALA */ + ostream& hs = oss.hs; + ostream& cpps = oss.cpps; + ostream& os = inline_definitions ? hs : cpps; + +#endif /* APS2SCALA */ + // first dump all visit functions for each phylum: + + os << "\n"; + + int i; + int aug_graph_count = s->match_rules.length; + current_state = s; + synth_functions_states = build_synth_functions_state(s); + bool needs_fixed_point = s->loop_required; + + for (auto state_it = synth_functions_states.begin(); state_it != synth_functions_states.end(); state_it++) { + SYNTH_FUNCTION_STATE* synth_functions_state = *state_it; + current_synth_functions_state = synth_functions_state; + + if (include_comments) { + os << indent() << "// " << synth_functions_state->source << " (" + << (synth_functions_state->is_phylum_instance ? "phylum" : "aug-graph") << ")\n"; + } + if (synth_functions_state->is_fiber_evaluation) { + os << indent() << "val evaluated_map_" << synth_functions_state->fdecl_name + // << " = scala.collection.mutable.Map[T_" << decl_name(synth_functions_state->source_phy_graph->phylum) + << " = scala.collection.mutable.Map[Int" + << ", Boolean]()" + << "\n\n"; + } + + os << indent() << "def eval_" << synth_functions_state->fdecl_name << "("; + os << "node: T_" << decl_name(synth_functions_state->source_phy_graph->phylum); + + for (auto it = synth_functions_state->regular_dependencies.begin(); + it != synth_functions_state->regular_dependencies.end(); it++) { + INSTANCE* source_instance = *it; + if (should_skip_synth_dependency(source_instance)) { + continue; + } + + // for locals, it needs prefix in formals, not for fibers or regular attributes + + os << ",\n"; + os << indent(nesting_level+1); + os << "v_"; + + if (!synth_functions_state->is_phylum_instance) { + os << instance_to_string(source_instance, false, false) << ": "; + } else { + os << instance_to_string(source_instance, false, true) << ": "; + } + + dump_attribute_type(source_instance, os); + } + + os << ")"; + + if (needs_fixed_point) { + os << "(implicit " << 
LOOP_VAR << ": Boolean, changed: AtomicBoolean)"; + } + + os << ": "; + + if (synth_functions_state->is_fiber_evaluation) { + os << "Unit"; + } else { + dump_attribute_type(synth_functions_state->source, os); + } + os << " = {\n"; + nesting_level++; + + // don't cache if we are in the loop. + if (needs_fixed_point) { + os << indent() << "if (!" << LOOP_VAR << ") {\n"; + nesting_level++; + } + + if (synth_functions_state->is_fiber_evaluation) { + os << indent() << "evaluated_map_" << synth_functions_state->fdecl_name + << ".getOrElse(node.nodeNumber, false) match {\n"; + os << indent(nesting_level + 1) << "case true => "; + os << "return ()\n"; + } else { + os << indent() << instance_to_attr(synth_functions_state->source) + << ".checkNode(node).status match {\n"; + os << indent(nesting_level + 1) << "case Evaluation.ASSIGNED => "; + os << "return " << instance_to_attr(synth_functions_state->source) << ".get(node)\n"; + } + + os << indent(nesting_level + 1) << "case _ => ()\n"; + os << indent() << "};\n"; + + if (needs_fixed_point) { + nesting_level--; + os << indent() << "}\n"; + } + + if (synth_functions_state->is_fiber_evaluation) { + os << indent() << "node match {\n"; + } else { + os << indent() << "val result = node match {\n"; + } + nesting_level++; + + for (auto it = synth_functions_state->aug_graphs.begin(); it != synth_functions_state->aug_graphs.end(); it++) { + AUG_GRAPH* aug_graph = *it; + int n = aug_graph->instances.length; + + current_aug_graph = aug_graph; + current_blocks.push_back(matcher_body(top_level_match_m(aug_graph->match_rule))); + + os << indent() << "case " << matcher_pat(top_level_match_m(aug_graph->match_rule)) << " => {\n"; + nesting_level++; + + INSTANCE* aug_graph_instance = NULL; + if (synth_functions_state->is_phylum_instance) { + if (!find_instance(aug_graph, aug_graph->lhs_decl, synth_functions_state->source->fibered_attr, &aug_graph_instance)) { + fatal_error("something is wrong with instances in aug graph %s", 
aug_graph_name(aug_graph)); + } + } else { + aug_graph_instance = synth_functions_state->source; + } + + // Linearize the current scope block but make sure IF statements or conditional instances + // that have nothing to do with this instance don't appear in linearization + current_scope_block = linearize_block(aug_graph, aug_graph_instance); + + if (include_comments) { + os << indent() << "/* Linearized schedule:\n"; + nesting_level++; + print_linearized_block(current_scope_block, os); + nesting_level--; + os << indent() << "*/\n"; + } + + int src_idx = synth_functions_state->source->index; + string src_attr = instance_to_attr(synth_functions_state->source); + + bool declared_is_circular = instance_circular(aug_graph_instance); + bool depends_on_itself = edgeset_kind(aug_graph->graph[src_idx * n + src_idx]) != 0; + + if (!declared_is_circular && depends_on_itself) { + aps_warning(aug_graph_instance->node, "Instance %s depends on itself but is not declared circular", instance_to_string(aug_graph_instance).c_str()); + } + + bool dump_fixed_point_loop = declared_is_circular && !instance_is_pure_shared_info(synth_functions_state->source); + string node_get = ATTR_DECL_IS_SHARED_INFO(synth_functions_state->source->fibered_attr.attr) ? "" : "node"; + string node_assign = ATTR_DECL_IS_SHARED_INFO(synth_functions_state->source->fibered_attr.attr) ? 
"" : "node, "; + + // Open fixed-point loop using shared changed flag for convergence tracking + if (dump_fixed_point_loop) { + os << indent() << "{\n"; + nesting_level++; + os << indent() << "val " << PREV_LOOP_VAR << src_idx << " = " << LOOP_VAR << ";\n"; + os << indent() << "val prevChanged" << src_idx << " = changed;\n"; + os << indent() << "val newChanged" << src_idx << " = new AtomicBoolean(false);\n"; + if (include_comments) { + os << indent() << "var iterCount" << src_idx << " = 0;\n"; + } + os << indent() << "do {\n"; + nesting_level++; + os << indent() << "newChanged" << src_idx << ".set(false);\n"; + if (synth_functions_state->is_fiber_evaluation) { + tracking_fiber_convergence = true; + } + os << indent() << "implicit val " << LOOP_VAR << ": Boolean = true;\n"; + os << indent() << "implicit val changed: AtomicBoolean = newChanged" << src_idx << ";\n"; + } + + // Fiber dependencies (inside loop when looping) + if (synth_functions_state->is_fiber_evaluation) { + if (include_comments && !dump_fixed_point_loop) { + os << "\n"; + } + FiberDependencyDumper::dump(aug_graph, aug_graph_instance, os); + } + + // Non-fiber instance computation + if (!synth_functions_state->is_fiber_evaluation) { + if (dump_fixed_point_loop) { + os << indent() << src_attr << ".assign(" << node_assign; + impl->dump_synth_instance(aug_graph_instance, os); + os << ", changed);\n"; + } else { + os << indent(); + impl->dump_synth_instance(aug_graph_instance, os); + os << "\n"; + } + } + + // Close fixed-point loop + if (dump_fixed_point_loop) { + tracking_fiber_convergence = false; + if (include_comments) { + os << indent() << "iterCount" << src_idx << " += 1;\n"; + os << indent() << "Debug.out(\"fixed-point " << synth_functions_state->fdecl_name + << " node=\" + node + \" iteration=\" + iterCount" << src_idx << ");\n"; + } + nesting_level--; + os << indent() << "} while (newChanged" << src_idx << ".get && !" 
<< PREV_LOOP_VAR << src_idx << ")\n"; + os << indent() << "prevChanged" << src_idx << ".compareAndSet(false, newChanged" << src_idx << ".get);\n"; + if (!synth_functions_state->is_fiber_evaluation) { + os << indent() << src_attr << ".get(" << node_get << ")\n"; + } + nesting_level--; + os << indent() << "}\n"; + } + + current_blocks.clear(); + dumped_conditional_block_items.clear(); + dumped_instances.clear(); + + nesting_level--; + os << indent() << "}\n"; + } + + os << indent() << "case _ => throw new RuntimeException(\"failed pattern matching: \" + node)\n"; + + nesting_level--; + os << indent() << "};\n"; + + if (synth_functions_state->is_fiber_evaluation) { + os << indent() << "evaluated_map_" << synth_functions_state->fdecl_name << ".update(node.nodeNumber, true);\n"; + } else { + os << indent() << instance_to_attr(synth_functions_state->source) << ".assign(node, result);\n"; + os << indent() << instance_to_attr(synth_functions_state->source) << ".get(node);\n"; + } + + if (!synth_functions_state->is_fiber_evaluation) { + os << indent() << "result\n"; + } + + nesting_level--; + os << indent() << "}\n\n"; + } + + destroy_synth_function_states(synth_functions_states); +} + +class SynthScc : public Implementation { + public: + typedef Implementation::ModuleInfo Super; + class ModuleInfo : public Super { + public: + ModuleInfo(Declaration mdecl) : Implementation::ModuleInfo(mdecl) {} + + void note_top_level_match(Declaration tlm, GEN_OUTPUT& oss) { Super::note_top_level_match(tlm, oss); } + + void note_local_attribute(Declaration ld, GEN_OUTPUT& oss) { + Super::note_local_attribute(ld, oss); + Declaration_info(ld)->decl_flags |= LOCAL_ATTRIBUTE_FLAG; + } + + void note_attribute_decl(Declaration ad, GEN_OUTPUT& oss) { + Declaration_info(ad)->decl_flags |= ATTRIBUTE_DECL_FLAG; + Super::note_attribute_decl(ad, oss); + } + + void note_var_value_decl(Declaration vd, GEN_OUTPUT& oss) { Super::note_var_value_decl(vd, oss); } + +#ifdef APS2SCALA + void implement(ostream& 
os){ +#else /* APS2SCALA */ + void implement(output_streams& oss) { +#endif /* APS2SCALA */ + STATE* s = (STATE*)Declaration_info(module_decl) -> analysis_state; + +#ifdef APS2SCALA + ostream& oss = os; +#else + ostream& hs = oss.hs; + ostream& cpps = oss.cpps; + ostream& os = inline_definitions ? hs : cpps; + // char *name = decl_name(module_decl); +#endif /* APS2SCALA */ + + dump_synth_functions(s, oss); + + bool needs_fixed_point = s->original_state_dependency != 0; + + // Implement finish routine: +#ifdef APS2SCALA + os << indent() << "override def finish() : Unit = {\n"; +#else /* APS2SCALA */ + hs << indent() << "void finish()"; + if (!inline_definitions) { + hs << ";\n"; + cpps << "void " << oss.prefix << "finish()"; + } + INDEFINITION; + os << " {\n"; +#endif /* APS2SCALA */ + ++nesting_level; + + PHY_GRAPH* start_phy_graph = summary_graph_for(s, s->start_phylum); + + if (needs_fixed_point) { + os << indent() << "implicit val " << LOOP_VAR << ": Boolean = false;\n"; + os << indent() << "implicit val changed: AtomicBoolean = new AtomicBoolean(false);\n"; + } + os << indent() << "for (root <- t_" << decl_name(s->start_phylum) << ".nodes) {\n"; + ++nesting_level; + int i; + for (i = 0; i < start_phy_graph->instances.length; i++) { + INSTANCE* in = &start_phy_graph->instances.array[i]; + + if (!instance_is_synthesized(in)) + continue; + + string eval_name = instance_to_string_with_nodetype(s->start_phylum, &start_phy_graph->instances.array[i]); + os << indent() << "eval_" << eval_name << "(root);\n"; + } + --nesting_level; + os << indent() << "}\n"; + +#ifdef APS2SCALA + os << indent() << "super.finish();\n"; +#endif /* ! 
APS2SCALA */ + --nesting_level; + os << indent() << "};\n"; + + clear_implementation_marks(module_decl); + } +}; + +Super* get_module_info(Declaration m) { + return new ModuleInfo(m); +} + +void implement_function_body(Declaration f, ostream& os) { + dynamic_impl->implement_function_body(f, os); +} + +void implement_value_use(Declaration vd, ostream& os) { + int flags = Declaration_info(vd)->decl_flags; + if (flags & LOCAL_ATTRIBUTE_FLAG) { + int instance_index = Declaration_info(vd)->instance_index; + INSTANCE* instance = ¤t_aug_graph->instances.array[instance_index]; + + Type ty = value_decl_type(vd); + Declaration ctype_decl = canonical_type_decl(canonical_type(ty)); + string target_name = instance_to_string_with_nodetype(ctype_decl, instance); + + os << "eval_" << target_name << "(\n"; + int saved_nesting = nesting_level; + nesting_level = std::max(nesting_level + 2, 1); + os << indent() << "node"; + + // Find matching synth function state and use its dependencies + // for consistent parameter passing with the function signature + for (auto state_it = synth_functions_states.begin(); state_it != synth_functions_states.end(); state_it++) { + SYNTH_FUNCTION_STATE* synth_function_state = *state_it; + if (synth_function_state->fdecl_name == target_name) { + for (auto dep_it = synth_function_state->regular_dependencies.begin(); + dep_it != synth_function_state->regular_dependencies.end(); dep_it++) { + INSTANCE* source_instance = *dep_it; + if (should_skip_synth_dependency(source_instance)) { + continue; + } + os << ",\n" << indent(); + impl->dump_synth_instance(source_instance, os); + } + break; + } + } + + nesting_level = saved_nesting; + os << "\n" << indent() << ")"; + } else if (flags & ATTRIBUTE_DECL_FLAG) { + if (ATTR_DECL_IS_INH(vd)) { + os << "v_" << decl_name(vd); + } else { + os << "a" << "_" << decl_name(vd) << DEREF << "get"; + } + + // os << "a" << "_" << decl_name(vd) << DEREF << "get"; + } else if (flags & LOCAL_VALUE_FLAG) { + os << "v" << 
LOCAL_UNIQUE_PREFIX(vd) << "_" << decl_name(vd); + } else { + aps_error(vd, "internal_error: What is special about this?"); + } +} + +static Expression default_init(Default def) { + switch (Default_KEY(def)) { + case KEYsimple: + return simple_value(def); + case KEYcomposite: + return composite_initial(def); + default: + return 0; + } +} + +/* Return new array with instance assignments for block. + * If "from" is not NULL, then initialize the new array + * with it. + */ +static vector > make_instance_assignment() { + int n = current_aug_graph->instances.length; + + vector > from(n); + + for (int i = 0; i < n; ++i) { + INSTANCE* in = ¤t_aug_graph->instances.array[i]; + Declaration ad = in->fibered_attr.attr; + if (ad != 0 && in->fibered_attr.fiber == 0 && ABSTRACT_APS_tnode_phylum(ad) == KEYDeclaration) { + // get default! + switch (Declaration_KEY(ad)) { + case KEYattribute_decl: + from[in->index].insert(default_init(attribute_decl_default(ad))); + break; + case KEYvalue_decl: + from[in->index].insert(default_init(value_decl_default(ad))); + break; + default: + break; + } + } + } + + // start from the outer-most and override it with the most inner scope + for (auto it = current_blocks.begin(); it != current_blocks.end(); it++) { + Block block = *it; + vector > array(from); + + // Step #1 clear any existing assignments and insert normal assignments + // Step #2 insert collection assignments + int step = 1; + while (step <= 2) { + Declarations ds = block_body(block); + for (Declaration d = first_Declaration(ds); d; d = DECL_NEXT(d)) { + switch (Declaration_KEY(d)) { + case KEYnormal_assign: { + if (INSTANCE* in = Expression_info(assign_rhs(d))->value_for) { + if (in->index >= n) { + fatal_error("bad index [normal_assign] for instance"); + } + array[in->index].clear(); + if (assign_rhs(d) == NULL) { + printf("Warning: assignment to %s is empty\n", instance_to_string(in).c_str()); + } + + array[in->index].insert(assign_rhs(d)); + } + break; + } + case 
KEYcollect_assign: { + if (INSTANCE* in = Expression_info(assign_rhs(d))->value_for) { + if (in->index >= n) { + fatal_error("bad index [collection_assign] for instance"); + } + + if (step == 1) { + array[in->index].clear(); + } else { + array[in->index].insert(assign_rhs(d)); + } + } + break; + } + default: + break; + } + } + + step++; + } + + // and repeat if any + from = array; + } + + return from; +} + +void dump_assignment(INSTANCE* in, Expression rhs, ostream& o) { + Declaration ad = in != NULL ? in->fibered_attr.attr : NULL; + Symbol asym = ad ? def_name(declaration_def(ad)) : 0; + bool node_is_syntax = in->node == current_aug_graph->lhs_decl; + + if (in->fibered_attr.fiber != NULL) { + if (rhs == NULL) { + if (include_comments) { + o << "// " << in << "\n"; + } + return; + } + + Declaration assign = (Declaration)tnode_parent(rhs); + Expression lhs = assign_lhs(assign); + Declaration field = 0; + // dump the object containing the field + switch (Expression_KEY(lhs)) { + case KEYvalue_use: + // shared global collection + field = USE_DECL(value_use_use(lhs)); +#ifdef APS2SCALA + o << "a_" << decl_name(field) << "."; + if (debug) + o << "assign"; + else + o << "set"; + o << "(" << rhs; + if (tracking_fiber_convergence) { + o << ", changed"; + } + o << ")"; +#else /* APS2SCALA */ + o << "v_" << decl_name(field) << "="; + switch (Default_KEY(value_decl_default(field))) { + case KEYcomposite: + o << composite_combiner(value_decl_default(field)); + break; + default: + o << as_val(value_decl_type(field)) << "->v_combine"; + break; + } + o << "(v_" << decl_name(field) << "," << rhs << ");\n"; +#endif /* APS2SCALA */ + break; + case KEYfuncall: + field = field_ref_p(lhs); + if (field == 0) + fatal_error("what sort of assignment lhs: %d", tnode_line_number(assign)); + o << "a_" << decl_name(field) << DEREF; + if (debug) + o << "assign"; + else + o << "set"; + o << "(" << field_ref_object(lhs) << "," << rhs; + if (tracking_fiber_convergence) { + o << ", changed"; + } + 
o << ");\n"; + break; + default: + fatal_error("what sort of assignment lhs: %d", tnode_line_number(assign)); + } + return; + } + + if (in->node == 0 && ad != NULL) { + if (rhs) { + if (Declaration_info(ad)->decl_flags & LOCAL_ATTRIBUTE_FLAG) { + o << "a" << LOCAL_UNIQUE_PREFIX(ad) << "_" << asym << DEREF; + if (debug) + o << "assign"; + else + o << "set"; + o << "(anchor," << rhs << ");\n"; + } else { + int i = LOCAL_UNIQUE_PREFIX(ad); + if (i == 0) { +#ifdef APS2SCALA + if (!def_is_constant(value_decl_def(ad))) { + if (include_comments) { + o << "// v_" << asym << " is assigned/initialized by default.\n"; + } + } else { + if (include_comments) { + o << "// v_" << asym << " is initialized in module.\n"; + } + } +#else + o << "v_" << asym << " = " << rhs << ";\n"; +#endif + } else { + o << "v" << i << "_" << asym << " = " << rhs << "; // local\n"; + } + } + } else { + if (Declaration_KEY(ad) == KEYvalue_decl && !direction_is_collection(value_decl_direction(ad))) { + aps_warning(ad, "Local attribute %s is apparently undefined", decl_name(ad)); + } + if (include_comments) { + o << "// " << in << " is ready now\n"; + } + } + return; + } else if (node_is_syntax) { + if (ATTR_DECL_IS_SHARED_INFO(ad)) { + if (include_comments) { + o << "// shared info for " << decl_name(in->node) << " is ready.\n"; + } + } else if (ATTR_DECL_IS_UP_DOWN(ad)) { + if (include_comments) { + o << "// " << decl_name(in->node) << "." 
<< decl_name(ad) << " implicit.\n"; + } + } else if (rhs) { + if (Declaration_KEY(in->node) == KEYfunction_decl) { + if (direction_is_collection(value_decl_direction(ad))) { + std::cout << "Not expecting collection here!\n"; + o << "v_" << asym << " = somehow_combine(v_" << asym << "," << rhs << ");\n"; + } else { + int i = LOCAL_UNIQUE_PREFIX(ad); + if (i == 0) + o << "v_" << asym << " = " << rhs << "; // function\n"; + else + o << "v" << i << "_" << asym << " = " << rhs << ";\n"; + } + } else { + o << "a_" << asym << DEREF; + if (debug) + o << "assign"; + else + o << "set"; + o << "(v_" << decl_name(in->node) << "," << rhs << ");\n"; + } + } else { + aps_warning(in->node, "Attribute %s.%s is apparently undefined", decl_name(in->node), + symbol_name(asym)); + + if (include_comments) { + o << "// " << in << " is ready.\n"; + } + } + return; + } else if (Declaration_KEY(in->node) == KEYvalue_decl) { + if (rhs) { + // assigning field of object + o << "a_" << asym << DEREF; + if (debug) + o << "assign"; + else + o << "set"; + o << "(v_" << decl_name(in->node) << "," << rhs << ");\n"; + } else { + if (include_comments) { + o << "// " << in << " is ready now.\n"; + } + } + return; + } +} + +void dump_rhs_instance_helper(AUG_GRAPH* aug_graph, BlockItem* item, INSTANCE* instance, ostream& o) { + if (item == NULL) { + if (include_comments) { + o << "// " << instance << " is ready now.\n"; + } + return; + } + + if (item->key == KEY_BLOCK_ITEM_INSTANCE) { + struct block_item_instance* bi = (struct block_item_instance*)item; + vector > all_assignments = make_instance_assignment(); + std::set relevant_assignments = all_assignments[instance->index]; + bool any_assignment_dump = false; + + if (bi->instance == instance || bi->next == NULL) { + if (!relevant_assignments.empty()) { + for (auto it = relevant_assignments.begin(); it != relevant_assignments.end(); it++) { + Expression rhs = *it; + if (rhs == NULL) { + continue; + } + + any_assignment_dump = true; + + if 
(instance->fibered_attr.fiber != NULL) { + dump_assignment(instance, rhs, o); + } else { + // just dump RHS since synth functions are only interested in RHS, not side-effect + dump_Expression(rhs, o); + } + } + + if (!any_assignment_dump) { + fatal_error("should have dumped an assignment here"); + } + + return; + } + + if (instance->fibered_attr.fiber != NULL) { + // Shared info attribute wasn't assigned in this block, dump its default + auto direction = fibered_attr_direction(&instance->fibered_attr); + auto directionStr = ""; + switch (direction) + { + case instance_inward: + directionStr = "instance_inward"; + break; + case instance_outward: + directionStr = "instance_outward"; + break; + case instance_local: + directionStr = "instance_local"; + break; + default: + break; + } + + o << "/* did not find any assignment for this fiber attribute " << instance << " -> " << directionStr << " <-" <<" */"; + return; + } else { + print_instance(instance, stdout); + printf(" is a non-fiber instance, but no assignment found in this block. 
%d\n", if_rule_p(instance->fibered_attr.attr)); + fatal_error("crashed since non-fiber instance is missing an assignment"); + } + } else { + dump_rhs_instance_helper(aug_graph, bi->next, instance, o); + return; + } + } else if (item->key == KEY_BLOCK_ITEM_CONDITION) { + struct block_item_condition* cond = (struct block_item_condition*)item; + bool visited_if_stmt = std::find(dumped_conditional_block_items.begin(), dumped_conditional_block_items.end(), item) != dumped_conditional_block_items.end(); + dumped_conditional_block_items.push_back(item); + + switch (ABSTRACT_APS_tnode_phylum(cond->condition)) + { + case KEYDeclaration: + { + Declaration if_stmt = (Declaration)cond->condition; + if (Declaration_KEY(if_stmt) != KEYif_stmt) { + fatal_error("expected if statement, got %s %d", decl_name(if_stmt), Declaration_info(if_stmt)); + } + + if (!edgeset_kind(current_aug_graph->graph[cond->instance->index * current_aug_graph->instances.length + instance->index])) { + printf("\n"); + print_instance(cond->instance, stdout); + printf(" does not affect "); + print_instance(instance, stdout); + printf("\n"); + fatal_error("crashed since instance not affected by condition"); + } + + if (!visited_if_stmt) { + o << "if ("; + dump_Expression(if_stmt_cond(if_stmt), o); + o << ") {\n"; + nesting_level++; + } + current_blocks.push_back(if_stmt_if_true(if_stmt)); + if (!visited_if_stmt) { + o << indent(); + } + + vector dumped_instanced_positive(dumped_instances); + dump_rhs_instance_helper(aug_graph, cond->next_positive, instance, o); + dumped_instances = dumped_instanced_positive; + + if (!visited_if_stmt) { + current_blocks.pop_back(); + o << "\n"; + nesting_level--; + o << indent() << "} else {\n"; + nesting_level++; + } + current_blocks.push_back(if_stmt_if_false(if_stmt)); + if (!visited_if_stmt) { + o << indent(); + } + + vector dumped_instanced_negative(dumped_instances); + dump_rhs_instance_helper(aug_graph, cond->next_negative, instance, o); + dumped_instances = 
dumped_instanced_negative; + + current_blocks.pop_back(); + if (!visited_if_stmt) { + nesting_level--; + o << "\n"; + o << indent() << "}"; + } + break; + } + case KEYMatch: + { + Match m = (Match)cond->condition; + Pattern p = matcher_pat(m); + Declaration header = Match_info(m)->header; + // if first match in case, we evaluate variable: + if (m == first_Match(case_stmt_matchers(header))) { + Expression e = case_stmt_expr(header); +#ifdef APS2SCALA + // Type ty = infer_expr_type(e); + o << "{\n"; + nesting_level++; + o << indent() << "val node" << instance->index << " = " << e << ";\n"; +#else /* APS2SCALA */ + Type ty = infer_expr_type(e); + o << indent() << ty << " node" << instance->index << " = " << e << ";\n"; +#endif /* APS2SCALA */ + } +#ifdef APS2SCALA + o << indent() << "node" << instance->index << " match {\n"; + nesting_level++; + o << indent() << "case " << p << " => {\n"; +#else /* APS2SCALA */ + o << indent() << "if ("; + dump_Pattern_cond(p, "node" + std::to_string(instance->index), o); + o << ") {\n"; +#endif /* APS2SCALA */ + nesting_level += 1; +#ifndef APS2SCALA + dump_Pattern_bindings(p, o); +#endif /* APS2SCALA */ + Block if_true; + Block if_false; + if_true = matcher_body(m); + if (MATCH_NEXT(m)) { + if_false = 0; //? Why not the nxt match ? 
+ } else { + if_false = case_stmt_default(header); + } + + current_blocks.push_back(if_true); + o << indent(); + dump_rhs_instance_helper(aug_graph, cond->next_positive, instance, o); + o << "\n"; + current_blocks.pop_back(); + +#ifdef APS2SCALA + nesting_level--; + o << indent() << "}\n"; + o << indent() << "case _ => {\n"; + nesting_level++; +#else /* APS2SCALA */ + o << "} else {\n"; +#endif /* APS2SCALA */ + current_blocks.push_back(if_false); + o << indent(); + dump_rhs_instance_helper(aug_graph, cond->next_negative, instance, o); + o << "\n"; + current_blocks.pop_back(); + + nesting_level--; +#ifdef APS2SCALA + o << indent() << "}\n"; + nesting_level--; + o << indent() << "}\n"; + if (m == first_Match(case_stmt_matchers(header))) { + nesting_level--; + o << indent() << "}"; + } +#else /* APS2SCALA */ + o << indent() << "}\n"; +#endif /* APS2SCALA */ + + break; + } + default: + fatal_error("unhandled if statement type"); + break; + } + } +} + +virtual void dump_synth_instance(INSTANCE* instance, ostream& o) override { + bool already_dumped = false; + if (std::find(dumped_instances.begin(), dumped_instances.end(), instance) != dumped_instances.end()) { + already_dumped = true; + } else { + dumped_instances.push_back(instance); + } + + AUG_GRAPH* aug_graph = current_aug_graph; + BlockItem* block = find_surrounding_block(current_scope_block, instance); + + Declaration node = instance->node; + bool is_parent_instance = current_aug_graph->lhs_decl == instance->node; + + bool is_synthesized = instance_is_synthesized(instance); + bool is_inherited = instance_is_inherited(instance); + bool is_circular = edgeset_kind(current_aug_graph->graph[instance->index * current_aug_graph->instances.length + instance->index]); + bool is_match_formal = check_is_match_formal(instance->fibered_attr.attr); + bool is_available = is_match_formal || is_inherited; + + if (is_circular && already_dumped && !is_available) { + o << "/* circular dependency detected for " << instance << ", 
dumping as attribute access */ "; + + o << instance_to_attr(instance) << ".get("; + if (instance->node == NULL) { + o << "node"; + } else { + o << "v_" << decl_name(instance->node); + } + + o << ")"; + return; + } else if (is_match_formal) { + o << "v_" << instance_to_string(instance, false, current_synth_functions_state->is_phylum_instance); + } else if (is_inherited) { + if (is_parent_instance) { + o << "v_" << instance_to_string(instance, false, current_synth_functions_state->is_phylum_instance); + } else { + // we need to find the assignment and dump the RHS recursive call + dump_rhs_instance_helper(aug_graph, block, instance, o); + } + } else if (is_synthesized) { + if (is_parent_instance) { + dump_rhs_instance_helper(aug_graph, block, instance, o); + } else { + for (auto it = synth_functions_states.begin(); it != synth_functions_states.end(); it++) { + SYNTH_FUNCTION_STATE* synth_function_state = *it; + if (fibered_attr_equal(&synth_function_state->source->fibered_attr, &instance->fibered_attr)) { + o << "eval_" << synth_function_state->fdecl_name << "(\n"; + int saved_nesting = nesting_level; + nesting_level = std::max(nesting_level + 2, 2); + o << indent() << "v_" << decl_name(node); + + std::vector dependencies = synth_function_state->regular_dependencies; + for (auto it = dependencies.begin(); it != dependencies.end(); it++) { + INSTANCE* source_instance = *it; + + if (should_skip_synth_dependency(source_instance)) { + continue; + } + + for (int i = 0; i < current_aug_graph->instances.length; i++) { + INSTANCE* in = ¤t_aug_graph->instances.array[i]; + if (in->node == node && fibered_attr_equal(&in->fibered_attr, &source_instance->fibered_attr)) { + o << ",\n" << indent(); + dump_synth_instance(in, o); + } + } + } + nesting_level = saved_nesting; + + o << "\n" << indent() << ")"; + return; + } + } + + printf("failed to find synth function for instance "); + print_instance(instance, stdout); + printf("\n"); + fatal_error("internal error: failed to find 
synth function for instance"); + } + } else { + dump_rhs_instance_helper(aug_graph, block, instance, o); + } +} +} +; + +Implementation* synth_impl = new SynthScc(); diff --git a/cs854.Dockerfile b/cs854.Dockerfile new file mode 100644 index 00000000..32773c4e --- /dev/null +++ b/cs854.Dockerfile @@ -0,0 +1,16 @@ +FROM boylanduwm/cool-compiler:latest + +WORKDIR /usr/stage/aps + +COPY . . + +RUN apk add --no-cache flex +RUN make && \ + make install && \ + cd base/scala && \ + make aps-library.jar && \ + make install +RUN cp /usr/stage/aps/bin/* /usr/local/bin && \ + cp /usr/stage/aps/lib/* /usr/local/lib + +WORKDIR /root diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 00000000..3eec47da --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1,3 @@ +*.aux +*.log +*.pdf diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 00000000..ef365dfb --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,4 @@ +all : quasi.pdf + +%.pdf : %.tex + pdflatex $< diff --git a/doc/quasi.tex b/doc/quasi.tex new file mode 100644 index 00000000..6db16f1e --- /dev/null +++ b/doc/quasi.tex @@ -0,0 +1,568 @@ +\documentclass[12pt]{article} + +\usepackage{fullpage} +\usepackage{pict2e} + +\usepackage{alltt} +\usepackage{amssymb} +\usepackage{amsthm} + +\usepackage{hyperref} + +\def\implies{\Rightarrow} + +\theoremstyle{definition} +\newtheorem{definition}{Definition}[section] + +\theoremstyle{theorem} +\newtheorem{consequence}{Consequence}[section] +\newtheorem{theorem}{Theorem}[section] + +\def\aset#1{\left\{{#1}\right\}} + +\unitlength 1mm + +\begin{document} + +\title{Quasi Ordering for Dependency Scheduling in the Presence of Cycles} +\author{John Boyland and Amir Hesamian} +\date{DRAFT \today} + +\maketitle + +\begin{abstract} + Unlike many graphs, a dependency graph may have loops (edges from a + vertex to itself). The presence of a loop makes a difference to the + graph, since it represents a self-dependency. 
Thus representing + (the transitive closure of) a dependency graph as a partial order + over strongly connected components loses some information: are the + single-element components involved in a self-dependency or not? + This information is needed for an evaluation (scheduling) + mechanism. We propose a new algebraic structure: the \emph{quasi + order}, for which we propose partial and total versions. Any + transitive dependency relation can be represented as a quasi partial + order, and any schedule of such a relation can be represented as a \emph{quasi total order}. +\end{abstract} + +\section{Motivation} + +Consider the following set of equations that should be mutually +satisfied, and its dependency graph shown on the right: +\begin{quote} + \begin{minipage}{2.75in} +\begin{verbatim} +A = m() +B = n(A) +C = p(A,F) +D = q(A,B,C) +E = r(B,D,E) +F = s(D) +G = t(D,F) +\end{verbatim} + \end{minipage}\hfill + \begin{minipage}{2.75in} + \begin{picture}(40,44)(0,-4) + \put(10,40){\makebox(0,0){A}} + \put(30,40){\makebox(0,0){B}} + \put(0,20){\makebox(0,0){C}} + \put(20,20){\makebox(0,0){D}} + \put(40,20){\makebox(0,0){E}} + \put(10,0){\makebox(0,0){F}} + \put(30,0){\makebox(0,0){G}} + \put(9,38){\vector(-1,-2){8}} + \put(11,38){\vector(1,-2){8}} + \put(12,40){\vector(1,0){16}} + \put(29,38){\vector(-1,-2){8}} + \put(31,38){\vector(1,-2){8}} + \put(2,20){\vector(1,0){16}} + \put(22,20){\vector(1,0){16}} + \put(19,18){\vector(-1,-2){8}} + \put(21,18){\vector(1,-2){8}} + \put(9,2){\vector(-1,2){8}} + \put(12,0){\vector(1,0){16}} + \qbezier(39,18)(35,10)(40,10) + \qbezier(40,10)(45,10)(41,18) + \put(41,18){\vector(-1,2){0}} + \end{picture} + \end{minipage} +\end{quote} +From the dependency graph and to a lesser extent from the original +equations, it is clear that ``E'' depends upon itself. 
There is also
+a cyclic dependency between ``C,'' ``D'' and ``F.'' Thus
+topologically sorting the strongly connected components
+and then evaluating them in this order will not
+suffice to provide a solution. No solution may exist. But if the
+values of the variables involved in cycles (\(\aset{\textrm{C},
+ \textrm{D}, \textrm{E}, \textrm{F}}\)) are drawn from domains ordered
+in ``partial
+orders'' and the
+function ``p'' is ``monotone'' in its second argument, ``q'' is monotone in
+its third argument, ``r'' in its third argument and ``s'' in its
+(only) argument, and if the respective domains have distinguished
+``bottom'' values and satisfy the
+``ascending chain'' condition, then the values can be computed
+according to the following schedule:
+\begin{quote}
+  \def\{{\char123}
+  \def\}{\char125}
+\begin{alltt}
+A := m();
+B := n(A);
+C' := \(\bot\); D' := \(\bot\); F' := \(\bot\);
+do \{ C := C'; D := D'; F := F';
+     C' := p(A,F);
+     D' := q(A,B,C);
+     F' := s(D);
+\} while (C \(\neq\) C' \(\vee\) D \(\neq\) D' \(\vee\) F \(\neq\) F');
+E' := \(\bot\);
+do \{ E := E';
+     E' := r(B,D,E);
+\} while (E \(\neq\) E');
+G := t(D,F);
+\end{alltt}
+\end{quote}
+In the evaluation, whenever we have a variable involved in a cyclic
+dependency, we evaluate repeatedly from the ``bottom'' value until a
+fixed point is reached. For mutually dependent cyclic dependencies,
+all variables are evaluated together in a group (as with ``C,'' ``D''
+and ``F''). Monotonicity ensures that the evaluation will not
+oscillate between values, and the ascending chain condition ensures that
+the process will eventually terminate.
+
+The variables not involved in cyclic dependencies (e.g., ``A,'' ``B''
+and ``G'') are \emph{not} evaluated in a loop, and indeed it would
+waste evaluation time to do so.
+
+The conventional way to handle possibly cyclic dependencies is to use
+a strongly-connected subcomponents analysis of the dependency graph.
+This analysis does not however distinguish the case of ``E'' (a vertex
+that depends on itself, but is not involved in a cycle with any other
+vertex) from ``B'' (a vertex that is involved in no cycles at all).
+
+This paper provides a foundation for a practice that distinguishes the
+case of non-cyclic vertices from vertices involved cyclically only with
+themselves.
+It defines a ``quasi order'' to cover the transitive closure of a
+dependency graph and a ``quasi total order'' to handle the linearization
+of a quasi order.
+
+\section{Example}
+
+If we perform transitive closure on the dependency graph (repeated
+for convenience on the left), and then display the resulting relation
+in transitively reduced form, we have the relation on the right:
+\begin{quote}
+\begin{picture}(140,64)(0,-20)
+  \put(10,40){\makebox(0,0){A}}
+  \put(30,40){\makebox(0,0){B}}
+  \put(0,20){\makebox(0,0){C}}
+  \put(20,20){\makebox(0,0){D}}
+  \put(40,20){\makebox(0,0){E}}
+  \put(10,0){\makebox(0,0){F}}
+  \put(30,0){\makebox(0,0){G}}
+  \put(9,38){\vector(-1,-2){8}}
+  \put(11,38){\vector(1,-2){8}}
+  \put(12,40){\vector(1,0){16}}
+  \put(29,38){\vector(-1,-2){8}}
+  \put(31,38){\vector(1,-2){8}}
+  \put(2,20){\vector(1,0){16}}
+  \put(22,20){\vector(1,0){16}}
+  \put(19,18){\vector(-1,-2){8}}
+  \put(21,18){\vector(1,-2){8}}
+  \put(9,2){\vector(-1,2){8}}
+  \put(12,0){\vector(1,0){16}}
+  \qbezier(39,18)(35,10)(40,10)
+  \qbezier(40,10)(45,10)(41,18)
+  \put(41,18){\vector(-1,2){0}}
+  %
+  \put(80,20){\makebox(0,0){A}}
+  \put(100,20){\makebox(0,0){B}}
+  \put(120,20){\makebox(0,0){\{C,D,F\}}}
+  \put(140,40){\makebox(0,0){\{E\}}}
+  \put(140,0){\makebox(0,0){G}}
+  \put(82,20){\vector(1,0){16}}
+  \put(102,20){\vector(1,0){10}}
+  \put(124,24){\vector(1,1){13}}
+  \put(124,16){\vector(1,-1){14}}
+\end{picture}
+\end{quote}
+The right-hand side is a picture of a ``quasi order.''
+
+While any transitive relation is a quasi order,
+the picture shows our purpose: cyclically dependent vertices are grouped
+in sets, 
while vertices not so involved are not placed in sets. As we +prove in the formal part of this paper, a quasi order is isomorphic +to a partial order over the union of the non-cyclic vertices with the +set of strongly connected sets of cyclic vertices. + +It is well known (order-extension theorem) that any partial order can +be linearized into a total order.\footnote{% + The proof of this result depends on the axiom of choice, and so may + not hold for certain infinite partial orders, but in this paper we + are only interested in finite orders.} +The same concept applies for quasi orders except that we do not want +the linearization to combine vertices together that were not already +combined. Rather we end up with a quasi order such as the following: +\begin{quote} + \begin{picture}(80,10)(40,-25) + \put(40,-20){\makebox(0,0){A}} + \put(60,-20){\makebox(0,0){B}} + \put(80,-20){\makebox(0,0){\{C,D,F\}}} + \put(100,-20){\makebox(0,0){\{E\}}} + \put(120,-20){\makebox(0,0){G}} + \put(42,-20){\vector(1,0){16}} + \put(62,-20){\vector(1,0){10}} + \put(88,-20){\vector(1,0){8}} + \put(104,-20){\vector(1,0){14}} + \end{picture} +\end{quote} +Such a quasi order is called a ``quasi total order.'' As with partial +orders, there may be multiple linearizations. 
The other possible +linearization is: +\begin{quote} + \begin{picture}(80,10)(40,-25) + \put(40,-20){\makebox(0,0){A}} + \put(60,-20){\makebox(0,0){B}} + \put(80,-20){\makebox(0,0){\{C,D,F\}}} + \put(100,-20){\makebox(0,0){G}} + \put(120,-20){\makebox(0,0){\{E\}}} + \put(42,-20){\vector(1,0){16}} + \put(62,-20){\vector(1,0){10}} + \put(88,-20){\vector(1,0){10}} + \put(102,-20){\vector(1,0){14}} + \end{picture} +\end{quote} +But we do not want to change the cyclic nature of the original quasi +order, and thus our linearization will not produce a quasi total order +such as the following: +\begin{quote} + \begin{picture}(80,10)(40,-25) + \put(40,-20){\makebox(0,0){A}} + \put(60,-20){\makebox(0,0){B}} + \put(80,-20){\makebox(0,0){\{C,D,F\}}} + \put(100,-20){\makebox(0,0){\{E,G\}}} + \put(42,-20){\vector(1,0){16}} + \put(62,-20){\vector(1,0){10}} + \put(88,-20){\vector(1,0){6}} + \end{picture} +\end{quote} + + +The following section provides a solid foundation to the intuitive +concepts described so far. + +\section{Formalities} + +\subsection{Preliminaries} + +First, let's recall some basic definitions: +\begin{itemize} +\item A (binary) relation $R$ over a set $S$ \(\left(R=(\sqsubseteq,S)\right)\) is + \begin{itemize} + \item \emph{reflexive} if \(\forall_{x\in S}\: x \sqsubseteq x\) + \item \emph{irreflexive} if \(\forall_{x\in S}\: x \not\sqsubseteq x\) + \item \emph{symmetric} if \(\forall_{x,y \in S}\: x \sqsubseteq y \implies y \sqsubseteq x \). + \item \emph{anti-symmetric} if \(\forall_{x,y \in S}\: x \sqsubseteq y \wedge y \sqsubseteq x \implies x = y\). + \item \emph{total} \(\forall_{x \neq y \in S}\: x \sqsubseteq y \vee y \sqsubseteq x\). + \item \emph{transitive} if \( \forall_{x,y,z \in S}\: x \sqsubseteq y \wedge y \sqsubseteq z \implies x \sqsubseteq z\). + \end{itemize} +\item A relation $R$ is a (strict) \emph{partial order} if it is both irreflexive and transitive. 
+  Note, as a consequence, a partial order is anti-symmetric, indeed has no symmetries at all.
+  \item A relation $R$ is a \emph{preorder} if it is both reflexive and transitive.
+  \item A relation $R$ is a \emph{total order} if it is a partial order and additionally total. Analogously, a \emph{total preorder} is a preorder that is total.
+  \item We define $R_1 \subseteq R_2$ for \( R_i = (\sqsubseteq_i,S_i)\) as
+    \[
+    \forall_{a,b \in S_1} a \sqsubseteq_1 b \implies \left( a,b \in S_2 \wedge
+    a \sqsubseteq_2 b \right)
+    \]
+  \item We define the union of a family of relations: \( R = \bigcup_i R_i
+    \) where \( R_i = (\sqsubseteq_i, S_i) \) as \( (\sqsubseteq,S) \)
+    where
+    \begin{eqnarray*}
+      S &=& \bigcup_i S_i \\
+      a \sqsubseteq b &\textrm{iff}& \exists_i\: a,b \in S_i \wedge a
+      \sqsubseteq_i b
+    \end{eqnarray*}
+  \item We define the restriction of a relation
+    \(R=(\sqsubseteq,S)\) to a subset $S'\subseteq S$ (written
+    $R\mid_{S'}$) as \(R' =
+    (\sqsubseteq',S')\) where
+    \[
+    a \sqsubseteq' b \textrm{ if and only if } a,b \in S' \wedge a
+    \sqsubseteq b
+    \]
+\end{itemize}
+
+\begin{definition}
+  A \emph{transitive closure} of a relation $R$ over a set $S$ is a
+  smallest transitive relation $R'$ over the same set $S$ such that
+  $R \subseteq R'$.
+\end{definition}
+\begin{theorem}
+  A transitive closure is always defined and is unique. Therefore, we
+  speak of \emph{the} transitive closure of a relation.
+\end{theorem}
+
+We will use a variety of infix operators (e.g. \( <, \leq, \sim, \lesssim \)) as relations.
+
+\subsection{Definitions}
+
+\def\SimHat#1{\stackrel{\sim}{#1}}
+\def\NotSimHat#1{\stackrel{\not\sim}{#1}}
+
+\begin{definition}
+  A \emph{quasi (partial) order} is any relation $Q=(\lesssim,S)$ that is transitive.
+  A \emph{quasi total order} is a quasi order that is total. 
+  Respective to the quasi order $Q$, we write \( x \sim y \) if and only if \( x \lesssim y \wedge y \lesssim x \), and we write \( x \lnsim y \) if and only if \( x \lesssim y \wedge y \not\lesssim x \). We define:
+  \[ \SimHat{S} = \aset{x \mid x \sim x} \]
+  \[ \NotSimHat{S} = \aset{x \mid x \not\sim x } \]
+\end{definition}
+\begin{theorem}
+  For a quasi order $Q=(\lesssim,S)$, \((\sim,\SimHat{S})\) is an equivalence relation and
+  \((\lesssim,\NotSimHat{S})\) is a (strict) partial order, and total
+  if $Q$ is total.
+\end{theorem}
+\begin{proof}
+
+  \verb| |
+
+  \begin{enumerate}
+  \item
+    From its definition, $\sim$ is symmetric. It is transitive since if
+    \( x \sim y \wedge y \sim z \), we have \( x \lesssim y \wedge y
+    \lesssim x \wedge y \lesssim z \wedge z \lesssim y \), from which
+    by transitivity of $\lesssim$, we have \( x \lesssim z \wedge z
+    \lesssim x \) from which \( x \sim z \). Finally, $\sim$ is
+    reflexive over \(\SimHat{S}\) by definition.
+  \item
+    By definition $\lesssim$ is transitive and it is irreflexive over
+    $\NotSimHat{S}$ by definition of that set. Thus it is a partial
+    order over $\NotSimHat{S}$. Furthermore, if $Q$ is total, then
+    so is \((\lesssim,\NotSimHat{S})\).
+  \end{enumerate}
+\end{proof}
+
+Total orders are incompatible: they serve as the bottom (incomparable)
+elements in a (semi-)lattice of partial orders. But every partial order is a
+quasi order, and when we form the lattice of quasi orders, we can
+combine total orders into quasi-orders. The lattice has a proper
+bottom element: the ``complete'' binary relation which is a quasi
+total order. The following result demonstrates the ability of quasi
+total orders to encompass multiple total orders:
+
+\begin{theorem}
+  The transitive closure of a non-empty union of total orders over the
+  same set $S$ is a
+  quasi total order over $S$.
+\end{theorem}
+\begin{proof}
+  Let \( Q \) be the transitive closure of \(R = \bigcup_i R_i\). 
Clearly
+  $Q$ is transitive (by definition of transitive closure). And it is
+  total since any two elements of $S$ are related in every one of the
+  (at least one) total orders. Thus it is a quasi total order.
+\end{proof}
+
+\subsection{The Cycle-Free Representation}
+
+It is convenient to distill a quasi order down to a partial order
+using equivalence classes. This construction is similar to the
+strongly-connected component representation of a preorder, but
+distinguishes elements that have self-edges from those that do not.
+
+\begin{definition}
+  The \emph{cycle-free} representation of a quasi-order
+  $Q=(\lesssim,S)$ (written $[Q]$) is the relation \( [Q] = \left(<,
+  \NotSimHat{S} \cup \SimHat{S}\!\!/{\mathord\sim}\right) \) over the irreflexive
+  subset of the base set together with the quotient set of the
+  reflexive set where
+  \begin{eqnarray*}
+    x < y &\textrm{iff}& x \lnsim y \\
+    {[\aset{u,\ldots}]} < y &\textrm{iff}& u \lnsim y \\
+    x < [\aset{v,\ldots}] & \textrm{iff}& x \lnsim v \\
+    {[\aset{u,\ldots}]} < [\aset{v,\ldots}] & \textrm{iff} & u \lnsim
+    v
+  \end{eqnarray*}
+\end{definition}
+\begin{theorem}
+  The cycle-free representation of $Q$ is well-defined, unique and is a
+  partial order, and additionally is total if and only if $Q$ is total.
+\end{theorem}
+\begin{proof}
+  We prove the four claims sequentially:
+  \begin{itemize}
+  \item $[Q]$ is well defined.\par
+    To be well defined, we need the representation element used to
+    define $<$ to be unimportant: all members of the set yield the
+    same result. In particular, we need that for any element $x \in
+    S$ and any equivalence class
+    $E \in {\SimHat{S}\!\!/{\mathord\sim}}$, if $\exists v\in E: x \lnsim v$
+    then $\forall v \in E: x \lnsim v$ and also the reverse property
+    $\exists u\in E: u \lnsim x$ implies
+    $\forall u\in E: u \lnsim x$.
+
+    To prove the first, let $x \in S$ and $v \in E$ where
+    $x \lnsim v$. 
Suppose we have an arbitrary $v' \sim v$, then + $v \lesssim v'$ and thus by transitivity, $x \lesssim v'$. + Suppose, contrary-wise, $v' \lesssim x$, then by equivalence and + transitivity, we have $v \lesssim x$ which contradicts our + assumption. Therefore $x \lnsim v'$. + + The reverse property is proved analogously. + + \item $[Q]$ is unique. In other words, + if $[Q] = [Q']$ then $Q = Q'$. + + Suppose $[Q] = [Q']$ for quasi orders $Q$ and $Q'$, + and further suppose $x \lesssim y$. We will prove that + $x \lesssim' y$ and thus since no generality was lost, we have $Q = Q'$. + + If $x \sim y$, then $x,y \in E$ where $E$ is an equivalence class. + This equivalence class is an element of $[Q]$ and thus must also be + in $[Q']$ and thus $x \sim' y$ and the result is proved. + + If on the other hand, we have $x \lnsim y$, then consider whether + each is in $\SimHat{S}$ (for $Q$). Suppose $x \in E_1$, + $y \in E_2$ equivalence classes of $\sim$. Then these equivalence + classes are in $[Q]$ with $E_1 < E_2$, and thus in $[Q']$ (with + $E_1 <' E_2$) and we have (by our previous result) that + $x \lnsim' y$. Then if $x \in \NotSimHat{S}$ and $y \in E$, we + have $x < E$ and thus $x <' E$ and thus $x \lnsim' y$. The other + cases are analogous. + + \item $[Q]$ is a partial order. + + $[Q]$ must be irreflexive. Otherwise we would have $x \lnsim x$ + for some $x \in S$ which is a contradiction. + + Furthermore $[Q]$ is transitive, since $\lnsim$ is transitive. + + \item $[Q]$ is total if and only if $Q$ is total. + \begin{description} + \item[if] Suppose $Q$ is total. Then consider two elements of + the base set of $[Q]$. + + If we have two equivalence classes, then if they are the same, + we have nothing to prove for totality. If they are different, + then by the totality of $Q$, for some representatives $x$ and + $y$ of the respective equivalence classes, + we must have $x \lesssim y$ or $y \lesssim x$. 
We cannot have + both because otherwise we would have $x \sim y$ and they would + be in the same equivalence class. Thus we have $x \lnsim y$ or + $y \lnsim x$ which shows that the two equivalence classes are + related in $[Q]$. + + If we have an element $x \in \NotSimHat{S}$ and an equivalence + class $E$, then for a representative $y \in E$, we can make the + same argument as above to show we have totality. + + Finally if we are considering \( x \neq y\) both in \( + \NotSimHat{S} \), then + we can again make the same argument. + + \item[only if] Suppose $[Q]$ is total, then consider $x\neq y$. + + Suppose $x$ and $y$ are in equivalence classes $E_1$ and $E_2$ + respectively. If $E_1 = E_2$, then $x \lesssim y$ + (and $y \lesssim x$). Otherwise, since $[Q]$ is total, we must + have either $E_1 < E_2$ or $E_2 < E_1$. In the first case, this + means $x \lnsim y$, and in the second case $y \lnsim x$. + In either case, $x$ + and $y$ are related. + + Suppose $x$ is in an equivalence class $E$ and $y$ is not. + Then by the totality of $[Q]$, either $E < y$ or + $y < E$. By the definition of $<$, this means either $x \lnsim + y$ or \( y \lnsim x\). + + The case of $x$ not being in an equivalence class while $y$ + is in an equivalence class $E$ is analogously proved. + + Finally suppose that neither $x$ nor $y$ is in an equivalence + class. Then by totality of $[Q]$, they must be related by $Q$. + \end{description} + \end{itemize} +\end{proof} + +Indeed the construction can be carried out in the other direction too. +To do so, we define an ungainly term for the result: +\begin{definition} + For a set $S$, a \emph{semi partition partial order} is a partial + order over a set \(S^{\sharp} = S_0 \cup \aset{S_1,\ldots}\), where \(S = + S_0 \cup S_1 \cup \ldots \) ($S_i\neq \emptyset$ for all + positive $i$) is a partition of $S$ where only the first subset can + be empty. 
The elements of
+  \(S^{\sharp}\) are the elements of $S_0$ (which may be empty) plus
+  the remaining sets of the partition (which may have no further sets).
+\end{definition}
+\begin{theorem}
+  For any set $S$, there is a bijection from the set of quasi-orders of
+  $S$ to the set of semi partition partial orders.
+\end{theorem}
+\begin{proof}
+  For a quasi order $Q=(\lesssim,S)$, $[Q]$ is a semi partition
+  partial order for $S$ where the partition is $\NotSimHat{S}$ plus
+  the equivalence classes of $\SimHat{S}$. And we have shown that the
+  construction is unique (one-to-one). It remains to show that it is
+  ``onto'' (surjective).
+
+  Let $P=(<,S^{\sharp})$ be a semi partition partial order for
+  $S = S_0 \cup S_1 \cup \ldots$ . We
+  construct a quasi order $Q=(\lesssim,S)$ such that $P = [Q]$.
+  The relation $\lesssim$ is defined as follows:
+  \[
+  x \lesssim y \textrm{ iff }
+  \left\{
+  \begin{array}{cl}
+    \textrm{true}& \textrm{if } x,y \in S_i, i > 0 \\
+    S_i < S_j & \textrm{if } x \in S_i, i > 0, y \in S_j, i \neq j > 0 \\
+    x < S_j & \textrm{if } x \in S_0, y \in S_j, j > 0 \\
+    S_i < y & \textrm{if } x \in S_i, i > 0, y \in S_0 \\
+    x < y & \textrm{if } x, y \in S_0
+  \end{array}\right.
+  \]
+  The transitivity of $Q$ follows from the transitivity of $P$, and
+  clearly $[Q] = P$.
+\end{proof}
+
+\subsection{Linearizing a Quasi-Total Order}
+
+As a consequence of this construction, we can linearize a quasi order
+to a quasi total order without disturbing the equivalence classes: we
+convert to the cycle-free representation, topologically sort that, and
+then convert back to a quasi (now total) order.
+
+We can actually do this directly using a minor variant of Tarjan's
+strongly-connected components algorithm\footnote{%
+\url{https://en.wikipedia.org/wiki/Tarjan\%27s_strongly_connected_components_algorithm}
+}
+on the original
+dependency relation (which $Q$ would be the transitive closure of). 
+The variation is that in the original algorithm loops are effectively +ignored, but in the variant, we record the existence of a loop. +Tarjan's algorithm produces the topological sorting of +the strongly-connected components in reverse order; the variant +produces the semi-partition total order by just returning an +unpartitioned vertex if its SCC consists of only itself and it has no +loop (self edge), and otherwise returning a partition (set of +vertices) as in the original algorithm. + +\section{Conclusion} + +The concept of a ``quasi order'' described here provides a variation +on partial orders useful for handling possible cyclic dependencies. +Unlike partial orders, it can handle cyclic dependencies, and unlike +preorders, it can distinguish elements that depend on themselves from +those that do not. + +A ``quasi-total order'' similarly usefully characterizes a schedule +that may distinguish segments that require repetition from those that do +not. + +\end{document} + +% LocalWords: maketitle emph qbezier Boyland Amir Hesamian preorder +% LocalWords: Monotonicity irreflexive iff bijection surjective +% LocalWords: linearize hfill alltt subseteq +% LocalWords: sqsubseteq eqnarray sqsubseteq textrm +% LocalWords: stackrel lesssim lesssim lnsim ldots forall neq +% LocalWords: emptyset diff --git a/examples/FJ.aps b/examples/FJ.aps new file mode 100644 index 00000000..825150ed --- /dev/null +++ b/examples/FJ.aps @@ -0,0 +1,40 @@ +-- An abstract syntax tree definition for Featherweight Java +-- (See Pierce et al, 2002) +-- John Boyland +-- March 2002 + +with "symbol"; +module FJ[] begin + + phylum Program; + phylum Classes := SEQUENCE[Class]; + phylum Class; + phylum Features := SEQUENCE[Feature]; + phylum Feature; + phylum Type; + phylum Formals := SEQUENCE[Formal]; + phylum Formal; + phylum Expression; + phylum Actuals := SEQUENCE[Expression]; + + constructor program(classes : Classes) : Program; + + constructor class_decl(name : Symbol; superclass : Symbol; + 
contents : Features); + + -- no constructor constructor, since in FJ, it is sugar + + constructor field(name : Symbol; (type) : Type) : Feature; + constructor method(name : Symbol; formals : Formals; return_type : Type; + body : Expression) : Feature; + + constructor class_type(name : Symbol) : Type; + + constructor formal(name : Symbol; (type) : Type) : Formal; + + constructor new_expr((type) : Type; actuals : Actuals) : Expression; + constructor call(receiver : Expression; name : Symbol; + actuals : Actuals) : Expression; + constructor select(object : Expression; name : Symbol); + constructor this_expr() : Expression; +end; diff --git a/examples/Makefile b/examples/Makefile new file mode 100644 index 00000000..ed357827 --- /dev/null +++ b/examples/Makefile @@ -0,0 +1,27 @@ +DEBUG_FLAGS = -DCOTo +APSSCHED=../bin/apssched +APS2SCALA=../bin/aps2scala +BASE=.:../base + +default: + @echo "Try 'make simple-oag.sched' to schedule AG in simple-oag" + +%.sched : %.aps + ${APSSCHED} ${DEBUG_FLAGS} -p ${BASE} $* + +%.scc.sched : %.aps + ${APSSCHED} -C ${DEBUG_FLAGS} -p ${BASE} $* + +%.scc.scala : %.aps + ${APS2SCALA} -C ${DEBUG_FLAGS} -p ${BASE} $* + +%.synth.scala : %.aps + ${APS2SCALA} -F ${DEBUG_FLAGS} -p ${BASE} $* + +%.scala : %.aps + ${APS2SCALA} ${DEBUG_FLAGS} -p ${BASE} $* + +%.debug: %.aps + gdb --args "${APSSCHED}" "${DEBUG_FLAGS}" "-p" "${BASE}" "$*" + +.PHONY: default %.sched diff --git a/examples/balanced-binding.aps b/examples/balanced-binding.aps new file mode 100644 index 00000000..8c0f4d3b --- /dev/null +++ b/examples/balanced-binding.aps @@ -0,0 +1,169 @@ +with "balanced"; +-- Example from fiber paper. +module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin + + --- the local object types and their + --- "fields" (attributes in APS). 
+ + -- contours: + phylum Contour; + constructor contour() : Contour; + type Scope := remote Contour; + attribute Contour.enclosing : Scope; -- normally would be a constructor field + type Entities := BAG[EntityRef]; + collection attribute Contour.entities : Entities; + + pragma fiber_cyclic(enclosing); + pragma field_strict(enclosing); + + -- entities: AKA declarations + phylum Entity; + type EntityRef := remote Entity; + constructor entity() : Entity; + attribute Entity.entity_name : String; + attribute Entity.entity_shape : Shape; + collection attribute Entity.entity_used : OrLattice; + + pragma fiber_untracked(entity_name,entity_shape); + pragma field_strict(entity_name); + + -- shapes: used just for object identity + phylum ShapeStructure; + constructor shape(name : String) : ShapeStructure; + type Shape := remote ShapeStructure; + + --- the globals + type Messages := BAG[String]; + var collection msgs : Messages; + + root_scope : Scope := nil; + + not_found : EntityRef := nil; + + no_shape : Shape := nil; + int_shape : Shape := shape("integer"); + str_shape : Shape := shape("string"); + + pragma fiber_untracked(root_scope,not_found,int_shape,str_shape,no_shape); + pragma field_strict(root_scope,not_found,int_shape,str_shape,no_shape); + + --- Attributes on "nonterminals" (i.e. 
imported and fixed phyla) + + -- don't use polymorphism: + attribute Block.block_scope : Scope; + attribute Decls.decls_scope : Scope; + attribute Decl.decl_scope : Scope; + attribute Stmts.stmts_scope : Scope; + attribute Stmt.stmt_scope : Scope; + attribute Expr.expr_scope : Scope; + + attribute Type.type_shape : Shape; + attribute Expr.expr_shape : Shape; + + pragma inherited(block_scope,decls_scope,decl_scope, + stmts_scope,stmt_scope,expr_scope); + pragma synthesized(type_shape,expr_shape); + + match ?p:Program=program(?b:Block) begin + b.block_scope := root_scope; + end; + + match ?b:Block=block(?ds:Decls,?ss:Stmts) begin + c : Contour := contour(); + c.enclosing := b.block_scope; + ds.decls_scope := c; + ss.stmts_scope := c; + end; + + match ?:Decls=no_decls() begin + end; + + match ?ds:Decls=single_decls(?d:Decl) begin + d.decl_scope := ds.decls_scope; + end; + + match ?ds0:Decls=append_decls(?ds1:Decls,?ds2:Decls) begin + ds1.decls_scope := ds0.decls_scope; + ds2.decls_scope := ds0.decls_scope; + end; + + match ?d:Decl=decl(?id:String,?ty:Type) begin + e : Entity := entity(); + d.decl_scope.entities :> {e}; + e.entity_name := id; + e.entity_shape := ty.type_shape; + if not e.entity_used then + msgs :> {id ++ " is unused"}; + endif; + end; + + --attribute Type.tmp : Messages; + --pragma synthesized(tmp); + + match ?t:Type=integer() begin + t.type_shape := int_shape; + --t.tmp := msgs; + end; + + match ?t:Type=string() begin + t.type_shape := str_shape; + end; + + match ?:Stmts=no_stmts() begin + end; + + match ?ss0:Stmts=single_stmts(?s:Stmt) begin + s.stmt_scope := ss0.stmts_scope; + end; + + match ?ss0:Stmts=append_stmts(?ss1:Stmts,?ss2:Stmts) begin + ss1.stmts_scope := ss0.stmts_scope; + ss2.stmts_scope := ss0.stmts_scope; + end; + + match ?s:Stmt=block_stmt(?b:Block) begin + b.block_scope := s.stmt_scope; + end; + + match ?s:Stmt=assign_stmt(?e1:Expr,?e2:Expr) begin + e1.expr_scope := s.stmt_scope; + e2.expr_scope := s.stmt_scope; + if e1.expr_shape /= 
e2.expr_shape then + msgs :> {Integer$string(lineno(s))++": type mismatch"}; + endif; + end; + + match ?e:Expr=intconstant(?:Integer) begin + e.expr_shape := int_shape; + end; + + match ?e:Expr=strconstant(?:String) begin + e.expr_shape := str_shape; + end; + + match ?e:Expr=variable(?id:String) begin + decl : EntityRef; + decl := lookup(id,e.expr_scope); + if decl = not_found then + msgs :> {id ++ " not declared"}; + e.expr_shape := no_shape; + else + decl.entity_used :> true; + e.expr_shape := decl.entity_shape; + endif; + end; + + procedure lookup(name : String; scope : Scope) : EntityRef begin + if scope = root_scope then + result := not_found; + else + case scope.entities begin + match {...,?e:EntityRef if e.entity_name=name,...} begin + result := e; + end; + else + result := lookup(name,scope.enclosing); + end; + endif; + end; +end; diff --git a/examples/balanced.aps b/examples/balanced.aps new file mode 100644 index 00000000..d79ab436 --- /dev/null +++ b/examples/balanced.aps @@ -0,0 +1,36 @@ +-- Simple programming language from fiber paper. 
+module SIMPLE[] begin + pragma root_phylum(type Program); + -- the phyla: + phylum Program; + phylum Block; + phylum Decls; -- we don't use sequences here: + phylum Decl; + phylum Type; + phylum Stmts; + phylum Stmt; + phylum Expr; + + constructor program(block : Block) : Program; + constructor block(decls : Decls; stmts : Stmts) : Block; + constructor no_decls() : Decls; + constructor single_decls(d : Decl) : Decls; + constructor append_decls(ds1, ds2 : Decls) : Decls; + constructor decl(id : String; ty : Type) : Decl; + constructor integer() : Type; + constructor string() : Type; + constructor no_stmts() : Stmts; + constructor single_stmts(s : Stmt) : Stmts; + constructor append_stmts(ss1, ss2 : Stmts) : Stmts; + constructor block_stmt(block : Block) : Stmt; + constructor assign_stmt(e1,e2 : Expr) : Stmt; + constructor intconstant(i : Integer) : Expr; + constructor strconstant(s : String) : Expr; + constructor variable(id : String) : Expr; +end; + +[T :: PHYLUM[]] function lineno(x : T) : Integer; + + + + diff --git a/examples/below-fiber-cycle.aps b/examples/below-fiber-cycle.aps new file mode 100644 index 00000000..fe6f0786 --- /dev/null +++ b/examples/below-fiber-cycle.aps @@ -0,0 +1,68 @@ +with "tiny"; +module BELOW_FIBER_CYCLE[T :: var TINY[]] extends T begin + phylum Context; + constructor context(depth : Integer) : Context; + + type ContextPtr := remote Context; + attribute Context.ptr1 : ContextPtr; + attribute Context.ptr2 : ContextPtr; + + attribute Wood.scope : ContextPtr; + attribute Wood.half : ContextPtr; + attribute Wood.syn : Integer; + + attribute Root.answer : Integer; + + pragma synthesized (syn, answer, half); + pragma inherited (scope); + + match ?this=root(?w) begin + this.answer := w.syn; + c : Context := context(0); + c.ptr1 := w.half; + c.ptr2 := nil; + w.scope := c; + end; + + match ?this=branch(?w1,?w2) begin + this.syn := w1.syn + w2.syn; + c : Context := context(scope_depth(this.scope)+1); + c.ptr1 := this.scope; + c.ptr2 := w2.half; 
+ w1.scope := c; + w2.scope := c; + this.half := w1.half; + end; + + match ?this=leaf(?n) begin + this.half := this.scope; + this.syn := scope_depth(index_scope(this.scope,n)); + end; + + var function index_scope(sc : ContextPtr; i : Integer) : ContextPtr begin + if sc = nil or i = 0 then + result := sc; + else + case sc begin + match context(?) begin + j : Integer := i / 2; + if i = 2 * j then + result := index_scope(sc.ptr1,j); + else + result := index_scope(sc.ptr2,j); + endif; + end; + end; + endif; + end; + + function scope_depth(sc : ContextPtr) : Integer begin + case sc begin + match ?c=context(?n) begin + result := n; + end; + else + result := 0; + end; + end; +end; diff --git a/examples/below-single-fiber-cycle.aps b/examples/below-single-fiber-cycle.aps new file mode 100644 index 00000000..44e9495a --- /dev/null +++ b/examples/below-single-fiber-cycle.aps @@ -0,0 +1,60 @@ +with "tiny"; +module BELOW_SINGLE_FIBER_CYCLE[T :: var TINY[]] extends T begin + phylum Context; + constructor context(depth : Integer) : Context; + + type ContextPtr := remote Context; + attribute Context.ptr : ContextPtr; + + attribute Wood.scope : ContextPtr; + attribute Wood.half : ContextPtr; + attribute Wood.syn : Integer; + + attribute Root.answer : Integer; + + pragma synthesized (syn, answer, half); + pragma inherited (scope); + + match ?this=root(?w) begin + this.answer := w.syn; + c : Context := context(0); + c.ptr := w.half; + w.scope := c; + end; + + match ?this=branch(?w1,?w2) begin + this.syn := w1.syn + w2.syn; + c : Context := context(scope_depth(this.scope)+1); + c.ptr := this.scope; + w1.scope := c; + w2.scope := w1.half; + this.half := w2.half; + end; + + match ?this=leaf(?n) begin + this.half := this.scope; + this.syn := scope_depth(index_scope(this.scope,n)); + end; + + var function index_scope(sc : ContextPtr; i : Integer) : ContextPtr begin + if sc = nil or i = 0 then + result := sc; + else + case sc begin + match context(?) 
begin + result := index_scope(sc.ptr,i-1); + end; + end; + endif; + end; + + function scope_depth(sc : ContextPtr) : Integer begin + case sc begin + match ?c=context(?n) begin + result := n; + end; + else + result := 0; + end; + end; +end; diff --git a/examples/bigger-binding.aps b/examples/bigger-binding.aps index fd9eab74..e8bb45d6 100644 --- a/examples/bigger-binding.aps +++ b/examples/bigger-binding.aps @@ -62,7 +62,7 @@ module CODE_GENERATION[T :: var BIGGER[]] extends T begin attribute Type.type_shape : Shape; attribute Expr.expr_shape : Shape; - collection size : Integer :> 0, (+); + var collection size : Integer :> 0, (+); pragma inherited(block_scope,decls_scope,decl_scope, stmts_scope,stmt_scope,expr_scope); diff --git a/examples/broad-fiber-cycle.aps b/examples/broad-fiber-cycle.aps new file mode 100644 index 00000000..cb6faa8d --- /dev/null +++ b/examples/broad-fiber-cycle.aps @@ -0,0 +1,75 @@ +with "tiny"; +module BROAD_FIBER_CYCLE[T :: var TINY[]] extends T begin + phylum Context; + constructor context(depth : Integer) : Context; + + type ContextPtr := remote Context; + attribute Context.ptr1 : ContextPtr; + attribute Context.ptr2 : ContextPtr; + + attribute Wood.scope1 : ContextPtr; + attribute Wood.scope2 : ContextPtr; + attribute Wood.half : ContextPtr; + attribute Wood.syn : Integer; + + attribute Root.answer : Integer; + + pragma synthesized (syn, answer, half); + pragma inherited (scope1, scope2); + + match ?this=root(?w) begin + this.answer := w.syn; + c : Context := context(0); + c.ptr1 := nil; + c.ptr2 := nil; + w.scope1 := c; + w.scope2 := w.half; + end; + + match ?this=branch(?w1,?w2) begin + this.syn := w1.syn + w2.syn; + c1 : Context := context(scope_depth(this.scope1)+1); + c2 : Context := context(scope_depth(this.scope2)+1); + c1.ptr1 := this.scope1; + c1.ptr2 := w2.half; + c2.ptr1 := w1.half; + c2.ptr2 := this.scope2; + w1.scope1 := c1; + w2.scope1 := c1; + w1.scope2 := c2; + w2.scope2 := c2; + this.half := w2.half; + end; + + match 
?this=leaf(?n) begin + this.half := this.scope1; + this.syn := scope_depth(index_scope(this.scope2,n)); + end; + + var function index_scope(sc : ContextPtr; i : Integer) : ContextPtr begin + if sc = nil or i = 0 then + result := sc; + else + case sc begin + match context(?) begin + j : Integer := i / 2; + if i = 2 * j then + result := index_scope(sc.ptr1,j); + else + result := index_scope(sc.ptr2,j); + endif; + end; + end; + endif; + end; + + function scope_depth(sc : ContextPtr) : Integer begin + case sc begin + match ?c=context(?n) begin + result := n; + end; + else + result := 0; + end; + end; +end; diff --git a/examples/cpp/Makefile b/examples/cpp/Makefile index af539caa..c498ab12 100644 --- a/examples/cpp/Makefile +++ b/examples/cpp/Makefile @@ -1,6 +1,6 @@ APSTOP= ../.. CPP = g++ -CPPFLAGS = -ftemplate-depth-31 -I ${APSTOP}/base/cpp +CPPFLAGS = -ftemplate-depth=63 -I ${APSTOP}/base/cpp APSCPP = ${APSTOP}/bin/apscpp APSCPPFLAGS = -p ..:${APSTOP}/base -G BASECPP = ${APSTOP}/lib/basecpp.a @@ -35,6 +35,10 @@ simple-driver.o : simple.h simple-binding3.h simple-driver : simple-driver.o simple-binding3.o simple.o ${CPP} ${CPPFLAGS} simple-driver.o simple-binding3.o simple.o ${BASECPP} -o $@ +test-coll-driver.o : tiny.h test-coll.h +test-coll-driver: test-coll-driver.o tiny.o test-coll.o + ${CPP} ${CPPFLAGS} test-coll-driver.o tiny.o test-coll.o ${BASECPP} -o $@ + APSMODS = test.aps tiny.aps test-coll.aps test-cicular.aps \ simple.aps \ simple-binding3.aps dynamic-binding.aps classic-binding.aps diff --git a/examples/cpp/classic-driver.cpp b/examples/cpp/classic-driver.cpp index 78c9ec51..c8280c3c 100644 --- a/examples/cpp/classic-driver.cpp +++ b/examples/cpp/classic-driver.cpp @@ -30,7 +30,7 @@ int main() C_SIMPLE* simple = new C_SIMPLE(); C_SIMPLE::T_Decls ds = simple->v_xcons_decls(simple->v_no_decls(), - simple->v_decl("x",simple->v_integer())); + simple->v_decl("x",simple->v_integer_type())); C_SIMPLE::T_Stmt s = 
simple->v_assign_stmt(simple->v_variable("x"),simple->v_variable("y")); C_SIMPLE::T_Stmts ss = simple->v_xcons_stmts(simple->v_no_stmts(),s); diff --git a/examples/cpp/dynamic-driver.cpp b/examples/cpp/dynamic-driver.cpp index fcba8bd5..eb7ef5f2 100644 --- a/examples/cpp/dynamic-driver.cpp +++ b/examples/cpp/dynamic-driver.cpp @@ -38,10 +38,10 @@ int main() C_SIMPLE::T_Decls ds = add_decl(add_decl(add_decl(add_decl(simple->v_no_decls(), - "v",simple->v_integer()), - "w",simple->v_string()), - "w",simple->v_integer()), - "x",simple->v_integer()); + "v",simple->v_integer_type()), + "w",simple->v_string_type()), + "w",simple->v_integer_type()), + "x",simple->v_integer_type()); C_SIMPLE::T_Stmt s = simple->v_assign_stmt(simple->v_variable("x"),simple->v_variable("y")); C_SIMPLE::T_Stmts ss = simple->v_xcons_stmts(simple->v_no_stmts(),s); diff --git a/examples/cpp/simple-driver.cpp b/examples/cpp/simple-driver.cpp index 2bdf10b9..b3268607 100644 --- a/examples/cpp/simple-driver.cpp +++ b/examples/cpp/simple-driver.cpp @@ -38,10 +38,10 @@ int main() C_SIMPLE::T_Decls ds = add_decl(add_decl(add_decl(add_decl(simple->v_no_decls(), - "v",simple->v_integer()), - "w",simple->v_string()), - "w",simple->v_integer()), - "x",simple->v_integer()); + "v",simple->v_integer_type()), + "w",simple->v_string_type()), + "w",simple->v_integer_type()), + "x",simple->v_integer_type()); C_SIMPLE::T_Stmt s = simple->v_assign_stmt(simple->v_variable("x"),simple->v_variable("y")); C_SIMPLE::T_Stmts ss = simple->v_xcons_stmts(simple->v_no_stmts(),s); diff --git a/examples/cpp/test-coll-driver.cpp b/examples/cpp/test-coll-driver.cpp index 6d2a35c2..77e759c0 100644 --- a/examples/cpp/test-coll-driver.cpp +++ b/examples/cpp/test-coll-driver.cpp @@ -16,16 +16,22 @@ int main() C_SEQUENCE seq_type(m.t_Wood); C_TINY::T_Wood w = m.v_branch(m.v_leaf(3),m.v_leaf(4)); + C_TINY::T_Root r = m.v_root(w); Debug::out(cout); m.finish(); - cout << "sum is " << m.v_sum << endl; + C_TEST_COLL m2(&m); + + 
m2.finish(); + + cout << "sum is " << m2.v_sum << endl; cout << "leaves is " << COLL - (m.t_Integers,t_Integer).to_string(m.v_leaves) + (m2.t_Integers,t_Integer).to_string(m2.v_leaves) << endl; + cout << "result is " << m2.v_result(r) << endl; } catch (exception& e) { cout << "Got error: " << e.what() << endl; } diff --git a/examples/cycle-series.aps b/examples/cycle-series.aps new file mode 100644 index 00000000..986e7d46 --- /dev/null +++ b/examples/cycle-series.aps @@ -0,0 +1,37 @@ +with "tiny"; +module CYCLE_SERIES[T :: var TINY[]] extends T begin + type IntegerSet := SET[Integer]; + type IntegerSetLattice := UNION_LATTICE[Integer,IntegerSet]; + + circular attribute Wood.ins : IntegerSetLattice; + circular attribute Wood.out : IntegerSetLattice; + + attribute Root.answer : IntegerSet; + + pragma synthesized(answer, out); + pragma inherited(ins); + + match ?r=root(?w) begin + w.ins := {0}; + r.answer := w.out; + end; + + match ?w=branch(?x,?y) begin + c : IntegerSet := y.out; + x.ins := y.out; + y.ins := x.out \/ { 0 }; + w.out := w.ins /\~ c; + end; + + match ?l=leaf(?x) begin + circular collection c : IntegerSetLattice; + c :> l.ins; + for v in l.ins begin + if (v < x) then + c :> {v+1}; + endif; + end; + l.out := c; + end; + +end; diff --git a/examples/cycle.aps b/examples/cycle.aps new file mode 100644 index 00000000..7d53826e --- /dev/null +++ b/examples/cycle.aps @@ -0,0 +1,20 @@ +with "tiny"; + +module THREE[T :: var TINY[]] extends T begin + + attribute Wood.i : Integer := 0; + attribute Wood.s : Integer := 0; + + pragma inherited(i); + pragma synthesized(s); + + match ?l=leaf(?n) begin + l.s := l.i + n; + end; + match ?b=branch(?x,?y) begin + b.s := 0; + x.i := y.s + b.i; + y.i := x.s + b.i; + end; +end; + diff --git a/examples/farrow-lv-tree.aps b/examples/farrow-lv-tree.aps new file mode 100644 index 00000000..bad03e19 --- /dev/null +++ b/examples/farrow-lv-tree.aps @@ -0,0 +1,26 @@ +with "symbol"; +with "table"; + +-- Farrow live-variable analysis 
tree +module FARROW_LV_TREE[] begin + phylum Stmt; + phylum Stmts; + phylum Expression; + phylum Program; + + constructor stmt_assign(s: Symbol; e: Expression) : Stmt; + constructor stmt_if(e: Expression; s1, s2: Stmts) : Stmt; + constructor stmt_while(e: Expression; s: Stmts) : Stmt; + constructor stmts_append(s: Stmt; ss: Stmts) : Stmts; + constructor stmts_empty() : Stmts; + constructor expr_var(s: Symbol) : Expression; + constructor expr_add(e1: Expression; e2: Expression) : Expression; + constructor expr_subtract(e1: Expression; e2: Expression) : Expression; + constructor expr_equals(e1: Expression; e2: Expression) : Expression; + constructor expr_not_equals(e1: Expression; e2: Expression) : Expression; + constructor expr_less_than(e1: Expression; e2: Expression) : Expression; + constructor expr_lit(s: Symbol) : Expression; + constructor program(ss: Stmts) : Program; + + pragma root_phylum(type Program); +end; diff --git a/examples/farrow-lv.aps b/examples/farrow-lv.aps new file mode 100644 index 00000000..c5f6af2a --- /dev/null +++ b/examples/farrow-lv.aps @@ -0,0 +1,84 @@ +with "symbol"; +with "table"; +with "farrow-lv-tree"; + +-- Farrow live-variable analysis circular-AG +module FARROW_LV[T :: var FARROW_LV_TREE[]] extends T begin + + type Symbols := SET[Symbol]; + type SymbolLattice := UNION_LATTICE[Symbol, Symbols]; + + circular attribute Stmt.stmt_live : SymbolLattice; + circular attribute Stmt.stmt_out : SymbolLattice; + pragma inherited(stmt_out); + pragma synthesized(stmt_live); + + circular attribute Stmts.stmts_live : SymbolLattice; + circular attribute Stmts.stmts_out : SymbolLattice; + pragma inherited(stmts_out); + pragma synthesized(stmts_live); + + circular attribute Program.program_live : SymbolLattice; + pragma synthesized(program_live); + + attribute Expression.expression_inside : SymbolLattice; + pragma synthesized(expression_inside); + + match ?self:Stmt=stmt_assign(?id: Symbol, ?e: Expression) begin + self.stmt_live := (self.stmt_out /\~ 
{ id }) \/ e.expression_inside; + end; + + match ?self:Stmt=stmt_if(?e: Expression, ?s1: Stmts, ?s2: Stmts) begin + self.stmt_live := e.expression_inside \/ s1.stmts_live \/ s2.stmts_live; + s1.stmts_out := self.stmt_out; + s2.stmts_out := self.stmt_out; + end; + + match ?self:Stmt=stmt_while(?e: Expression, ?s: Stmts) begin + self.stmt_live := self.stmt_out \/ e.expression_inside \/ s.stmts_live; + s.stmts_out := self.stmt_out \/ e.expression_inside \/ s.stmts_live; + end; + + match ?self:Stmts=stmts_append(?s: Stmt, ?ss: Stmts) begin + self.stmts_live := s.stmt_live; + s.stmt_out := ss.stmts_live; + ss.stmts_out := self.stmts_out; + end; + + match ?self:Stmts=stmts_empty() begin + self.stmts_live := self.stmts_out; + end; + + match ?self:Expression=expr_var(?s: Symbol) begin + self.expression_inside := { s }; + end; + + match ?self:Expression=expr_add(?e1: Expression, ?e2: Expression) begin + self.expression_inside := e1.expression_inside \/ e2.expression_inside; + end; + + match ?self:Expression=expr_subtract(?e1: Expression, ?e2: Expression) begin + self.expression_inside := e1.expression_inside \/ e2.expression_inside; + end; + + match ?self:Expression=expr_equals(?e1: Expression, ?e2: Expression) begin + self.expression_inside := e1.expression_inside \/ e2.expression_inside; + end; + + match ?self:Expression=expr_not_equals(?e1: Expression, ?e2: Expression) begin + self.expression_inside := e1.expression_inside \/ e2.expression_inside; + end; + + match ?self:Expression=expr_less_than(?e1: Expression, ?e2: Expression) begin + self.expression_inside := e1.expression_inside \/ e2.expression_inside; + end; + + match ?self:Expression=expr_lit(?s: Symbol) begin + self.expression_inside := { }; + end; + + match ?self:Program=program(?ss: Stmts) begin + ss.stmts_out := {}; + self.program_live := ss.stmts_live; + end; +end; diff --git a/examples/farrow-lv.lex b/examples/farrow-lv.lex new file mode 100644 index 00000000..1e02b7df --- /dev/null +++ 
b/examples/farrow-lv.lex @@ -0,0 +1,29 @@ +%{ + /* Farrow's live-variable analysis lexer */ +%} + +%% + +";" { return SEMICOLON; } +"=" { return EQ; } +"+" { return PLUS; } +"-" { return MINUS; } +"==" { return EQEQ; } +"!=" { return NEQ; } +"<" { return LT; } + +"IF" { return IF; } +"WHILE" { return WHILE; } +"THEN" { return THEN; } +"ELSE" { return ELSE; } +"DO" { return DO; } +"END" { return END; } + +[a-z][_A-Za-z0-9]* { return ID(yytext); } +[1-9][0-9]* { return LITERAL(yytext); } + +[\s\r\n] { /* ignore spaces */ } + +"//".* { /* ignore comments */ } + +<> { return YYEOFT; } diff --git a/examples/farrow-lv.y b/examples/farrow-lv.y new file mode 100644 index 00000000..d60cbc0d --- /dev/null +++ b/examples/farrow-lv.y @@ -0,0 +1,62 @@ +/* Farrow's live-variable analysis parser */ + +/* + * This fragment is intended to be used either by bison or ScalaBison, + * but will need language specific parts to compile as C/C++ or Scala + */ + +%token ID LITERAL +%token SEMICOLON EQ +%token EQEQ NEQ PLUS MINUS LT +%token IF THEN ELSE +%token WHILE DO END + +%type stmt +%type stmts +%type expr +%type program + +%left EQ +%left IF WHILE +%left LT +%left EQEQ NEQ +%left PLUS MINUS + +%% + +program : stmts + { $$ = program($1); } + ; + +stmts : /* NOTHING */ + { $$ = stmts_empty(); } + | stmt SEMICOLON stmts + { $$ = stmts_append($1, $3); } + ; + +stmt : ID EQ expr + { $$ = stmt_assign($1, $3); } + | IF expr THEN stmts ELSE stmts END + { $$ = stmt_if($2, $4, $6); } + | WHILE expr DO stmts END + { $$ = stmt_while($2, $4); } + ; + +expr : ID + { $$ = expr_var($1); } + | LITERAL + { $$ = expr_lit($1); } + | expr PLUS expr + { $$ = expr_add($1, $3); } + | expr MINUS expr + { $$ = expr_subtract($1, $3); } + | expr EQEQ expr + { $$ = expr_equals($1, $3); } + | expr NEQ expr + { $$ = expr_not_equals($1, $3); } + | expr LT expr + { $$ = expr_less_than($1, $3); } + ; + +%% + diff --git a/examples/farrow-ubd-fiber.aps b/examples/farrow-ubd-fiber.aps new file mode 100644 index 
00000000..84bd278b --- /dev/null +++ b/examples/farrow-ubd-fiber.aps @@ -0,0 +1,144 @@ +-- Variant of farrow-ubd.aps that uses circular global variable instead. +with "symbol"; +with "table"; +with "flat"; +with "farrow-ubd-tree"; + +-- Farrow use-before-declaration analysis +module FARROW_UBD_FIBER[T :: var FARROW_UBD_TREE[]] extends T begin + + type IntegerLattice := FLAT_LATTICE[Integer]; + type DeclPairLattice := TABLE_LATTICE[Symbol, IntegerLattice]; + + type Symbols := SET[Symbol]; + type Messages := SET[String]; + + var circular collection defs : DeclPairLattice; + + attribute Declaration.decl_name : Symbol; + attribute Declaration.decl_errs : Messages; + pragma synthesized(decl_name, decl_errs); + + attribute Declarations.decls_names : Symbols; + attribute Declarations.decls_errs : Messages; + pragma synthesized(decls_names, decls_errs); + + circular attribute Expression.expr_val : IntegerLattice; + attribute Expression.expr_errs : Messages; + pragma synthesized(expr_val, expr_errs); + + circular attribute Term.term_val : IntegerLattice; + attribute Term.term_errs : Messages; + pragma synthesized(term_val, term_errs); + + circular attribute Operation.op_val : IntegerLattice; + circular attribute Operation.op_lval : IntegerLattice; + circular attribute Operation.op_rval : IntegerLattice; + pragma inherited(op_lval, op_rval); + pragma synthesized(op_val); + + attribute Program.program_errs : Messages; + pragma synthesized(program_errs); + + match ?self:Declarations=scope(?ds: Declarations) begin + self.decls_errs := ds.decls_errs; + self.decls_names := ds.decls_names; + end; + + match ?self:Declaration=decl_assign(?s: Symbol, ?e: Expression) begin + defs :> DeclPairLattice$table_entry(s, e.expr_val); + self.decl_name := s; + self.decl_errs := e.expr_errs; + end; + + match ?self:Declarations=decls_empty() begin + self.decls_errs := {}; + self.decls_names := {}; + end; + + match ?self:Declarations=decls_append(?ds: Declarations, ?d: Declaration) begin + if 
d.decl_name in ds.decls_names then + self.decls_errs := { "identifier " ++ symbol_name(d.decl_name) ++ " is multiply defined" } \/ ds.decls_errs \/ d.decl_errs; + self.decls_names := ds.decls_names; + else + self.decls_names := {d.decl_name} \/ ds.decls_names; + self.decls_errs := ds.decls_errs \/ d.decl_errs; + endif; + end; + + match ?self:Expression=expr_term(?t: Term) begin + self.expr_val := t.term_val; + self.expr_errs := t.term_errs; + end; + + match ?self:Operation=op_add() begin + if self.op_lval = IntegerLattice$bottom or self.op_rval = IntegerLattice$bottom then + self.op_val := IntegerLattice$bottom; + else + self.op_val := IntegerLattice$applyf2(Integer$plus, self.op_lval, self.op_rval); + endif; + end; + + match ?self:Operation=op_mul() begin + if self.op_lval = IntegerLattice$bottom or self.op_rval = IntegerLattice$bottom then + self.op_val := IntegerLattice$bottom; + else + self.op_val := IntegerLattice$applyf2(Integer$times, self.op_lval, self.op_rval); + endif; + end; + + match ?self:Operation=op_sub() begin + if self.op_lval = IntegerLattice$bottom or self.op_rval = IntegerLattice$bottom then + self.op_val := IntegerLattice$bottom; + else + self.op_val := IntegerLattice$applyf2(Integer$minus, self.op_lval, self.op_rval); + endif; + end; + + match ?self:Operation=op_div() begin + if self.op_lval = IntegerLattice$bottom or self.op_rval = IntegerLattice$bottom then + self.op_val := IntegerLattice$bottom; + else + self.op_val := IntegerLattice$applyf2(Integer$divide, self.op_lval, self.op_rval); + endif; + end; + + match ?self:Expression=expr_apply(?e: Expression, ?op: Operation, ?t: Term) begin + self.expr_val := op.op_val; + op.op_lval := e.expr_val; + op.op_rval := t.term_val; + self.expr_errs := e.expr_errs \/ t.term_errs; + end; + + match ?self:Term=term_variable(?s: Symbol) begin + circular variable_value: IntegerLattice := IntegerLattice$bottom; + circular variable_defined: OrLattice := false; + + case DeclPairLattice$select(defs, s) begin + 
match DeclPairLattice$table_entry(?,?value) begin + variable_value := value; + variable_defined := true; + end; + end; + + if variable_value = IntegerLattice$bottom then + if variable_defined then + self.term_errs := { "identifier " ++ symbol_name(s) ++ " is defined but has an unknown value" }; + else + self.term_errs := { "unknown identifier of " ++ symbol_name(s) }; + endif; + else + self.term_errs := { }; + endif; + self.term_val := variable_value; + end; + + match ?self:Term=term_literal(?i: Integer) begin + self.term_val := IntegerLattice$lift(i); + self.term_errs := {}; + end; + + match ?self:Program=program(?ds: Declarations) begin + self.program_errs := ds.decls_errs; + end; +end; diff --git a/examples/farrow-ubd-tree.aps b/examples/farrow-ubd-tree.aps new file mode 100644 index 00000000..8deb2498 --- /dev/null +++ b/examples/farrow-ubd-tree.aps @@ -0,0 +1,28 @@ +with "symbol"; +with "table"; + +-- Farrow use-before-declaration analysis tree +module FARROW_UBD_TREE[] begin + phylum Declaration; + phylum Declarations; + phylum Expression; + phylum Term; + phylum Operation; + phylum Program; + + constructor scope(ds: Declarations) : Declarations; + constructor decl_assign(s: Symbol; e: Expression) : Declaration; + constructor decls_empty() : Declarations; + constructor decls_append(ds: Declarations; d: Declaration) : Declarations; + constructor expr_term(t: Term) : Expression; + constructor op_add() : Operation; + constructor op_mul() : Operation; + constructor op_sub() : Operation; + constructor op_div() : Operation; + constructor expr_apply(e: Expression; op: Operation; t: Term) : Expression; + constructor term_variable(s: Symbol) : Term; + constructor term_literal(i: Integer) : Term; + constructor program(ds: Declarations) : Program; + + pragma root_phylum(type Program); +end; diff --git a/examples/farrow-ubd.aps b/examples/farrow-ubd.aps new file mode 100644 index 00000000..d066cd98 --- /dev/null +++ b/examples/farrow-ubd.aps @@ -0,0 +1,160 @@ +with 
"symbol"; +with "table"; +with "flat"; +with "farrow-ubd-tree"; + +-- Farrow use-before-declaration analysis +module FARROW_UBD[T :: var FARROW_UBD_TREE[]] extends T begin + + type IntegerLattice := FLAT_LATTICE[Integer]; + type DeclPairLattice := TABLE_LATTICE[Symbol, IntegerLattice]; + + type Messages := SET[String]; + + attribute Declaration.decl_name : Symbol; + circular collection attribute Declaration.decl_pair : DeclPairLattice; + circular collection attribute Declaration.decl_env : DeclPairLattice; + attribute Declaration.decl_errs : Messages; + pragma inherited(decl_env); + pragma synthesized(decl_name, decl_pair, decl_errs); + + circular collection attribute Declarations.decls_defs : DeclPairLattice; + circular collection attribute Declarations.decls_env : DeclPairLattice; + attribute Declarations.decls_errs : Messages; + pragma inherited(decls_env); + pragma synthesized(decls_defs, decls_errs); + + circular attribute Expression.expr_val : IntegerLattice; + circular collection attribute Expression.expr_env : DeclPairLattice; + attribute Expression.expr_errs : Messages; + pragma inherited(expr_env); + pragma synthesized(expr_val, expr_errs); + + circular attribute Term.term_val : IntegerLattice; + circular collection attribute Term.term_env : DeclPairLattice; + attribute Term.term_errs : Messages; + pragma inherited(term_env); + pragma synthesized(term_val, term_errs); + + circular attribute Operation.op_val : IntegerLattice; + circular attribute Operation.op_lval : IntegerLattice; + circular attribute Operation.op_rval : IntegerLattice; + pragma inherited(op_lval, op_rval); + pragma synthesized(op_val); + + attribute Program.program_errs : Messages; + pragma synthesized(program_errs); + + match ?self:Declarations=scope(?ds: Declarations) begin + self.decls_defs := ds.decls_defs; + self.decls_errs := ds.decls_errs; + ds.decls_env := ds.decls_defs; + end; + + match ?self:Declaration=decl_assign(?s: Symbol, ?e: Expression) begin + self.decl_pair :> 
DeclPairLattice$table_entry(s, e.expr_val); + self.decl_name := s; + e.expr_env := self.decl_env; + self.decl_errs := e.expr_errs; + end; + + match ?self:Declarations=decls_empty() begin + self.decls_defs := DeclPairLattice$bottom; + self.decls_errs := {}; + end; + + match ?self:Declarations=decls_append(?ds: Declarations, ?d: Declaration) begin + ds.decls_env := self.decls_env; + d.decl_env := self.decls_env; + + case DeclPairLattice$select(ds.decls_defs, d.decl_name) begin + match DeclPairLattice$table_entry(?,?value) begin + self.decls_errs := { "identifier " ++ symbol_name(d.decl_name) ++ " is multiply defined" } \/ ds.decls_errs \/ d.decl_errs; + self.decls_defs := ds.decls_defs; + end; + else + self.decls_defs := DeclPairLattice$join(ds.decls_defs, d.decl_pair); + self.decls_errs := ds.decls_errs \/ d.decl_errs; + end; + end; + + match ?self:Expression=expr_term(?t: Term) begin + self.expr_val := t.term_val; + self.expr_errs := t.term_errs; + t.term_env := self.expr_env; + end; + + match ?self:Operation=op_add() begin + if self.op_lval = IntegerLattice$bottom or self.op_rval = IntegerLattice$bottom then + self.op_val := IntegerLattice$bottom; + else + self.op_val := IntegerLattice$applyf2(Integer$plus, self.op_lval, self.op_rval); + endif; + end; + + match ?self:Operation=op_mul() begin + if self.op_lval = IntegerLattice$bottom or self.op_rval = IntegerLattice$bottom then + self.op_val := IntegerLattice$bottom; + else + self.op_val := IntegerLattice$applyf2(Integer$times, self.op_lval, self.op_rval); + endif; + end; + + match ?self:Operation=op_sub() begin + if self.op_lval = IntegerLattice$bottom or self.op_rval = IntegerLattice$bottom then + self.op_val := IntegerLattice$bottom; + else + self.op_val := IntegerLattice$applyf2(Integer$minus, self.op_lval, self.op_rval); + endif; + end; + + match ?self:Operation=op_div() begin + if self.op_lval = IntegerLattice$bottom or self.op_rval = IntegerLattice$bottom then + self.op_val := IntegerLattice$bottom; + else + 
self.op_val := IntegerLattice$applyf2(Integer$divide, self.op_lval, self.op_rval); + endif; + end; + + match ?self:Expression=expr_apply(?e: Expression, ?op: Operation, ?t: Term) begin + self.expr_val := op.op_val; + op.op_lval := e.expr_val; + op.op_rval := t.term_val; + e.expr_env := self.expr_env; + t.term_env := self.expr_env; + self.expr_errs := e.expr_errs \/ t.term_errs; + end; + + match ?self:Term=term_variable(?s: Symbol) begin + circular variable_value: IntegerLattice := IntegerLattice$bottom; + circular variable_defined: OrLattice := false; + + case DeclPairLattice$select(self.term_env, s) begin + match DeclPairLattice$table_entry(?,?value) begin + variable_value := value; + variable_defined := true; + end; + end; + + if variable_value = IntegerLattice$bottom then + if variable_defined then + self.term_errs := { "identifier " ++ symbol_name(s) ++ " is defined but has an unknown value" }; + else + self.term_errs := { "unknown identifier of " ++ symbol_name(s) }; + endif; + else + self.term_errs := { }; + endif; + self.term_val := variable_value; + end; + + match ?self:Term=term_literal(?i: Integer) begin + self.term_val := IntegerLattice$lift(i); + self.term_errs := {}; + end; + + match ?self:Program=program(?ds: Declarations) begin + ds.decls_env := DeclPairLattice$bottom; + self.program_errs := ds.decls_errs; + end; +end; diff --git a/examples/farrow-ubd.lex b/examples/farrow-ubd.lex new file mode 100644 index 00000000..b1bb3407 --- /dev/null +++ b/examples/farrow-ubd.lex @@ -0,0 +1,23 @@ +%{ + /* Farrow's use-before-declaration analysis lexer */ +%} + +%% + +";" { return SEMICOLON; } +"=" { return EQ; } +"+" { return PLUS; } +"-" { return MINUS; } +"/" { return DIV; } +"*" { return MUL; } +"{" { return OPN_BRACE; } +"}" { return CLS_BRACE; } + +[a-z][_A-Za-z0-9]* { return ID(yytext); } +0|[1-9][0-9]* { return LITERAL(yytext); } + +[\s\r\n] { /* ignore spaces */ } + +"//".* { /* ignore comments */ } + +<> { return YYEOFT; } diff --git 
a/examples/farrow-ubd.y b/examples/farrow-ubd.y new file mode 100644 index 00000000..e7c800a8 --- /dev/null +++ b/examples/farrow-ubd.y @@ -0,0 +1,65 @@ +/* Farrow's live-variable analysis parser */ + +/* + * This fragment is intended to be used either by bison or ScalaBison, + * but will need language specific parts to compile as C/C++ or Scala + */ + +%token ID LITERAL +%token SEMICOLON EQ OPN_BRACE CLS_BRACE +%token PLUS MINUS MUL DIV + +%type decl +%type decls +%type expr +%type term +%type op +%type program + +%left EQ +%left OPN_BRACE CLS_BRACE +%left PLUS MINUS +%left MUL DIV + +%% + +program : decls + { $$ = program(scope($1)); } + ; + +decls : /* NOTHING */ + { $$ = decls_empty(); } + | decls decl + { $$ = decls_append($1, $2); } + | OPN_BRACE decls CLS_BRACE + { $$ = scope($2); } + ; + +decl : ID EQ expr SEMICOLON + { $$ = decl_assign($1, $3); } + ; + +term : ID + { $$ = term_variable($1); } + | LITERAL + { $$ = term_literal($1); } + ; + +expr : term + { $$ = expr_term($1); } + | expr op term + { $$ = expr_apply($1, $2, $3); } + ; + +op : PLUS + { $$ = op_add(); } + | MINUS + { $$ = op_sub(); } + | MUL + { $$ = op_mul(); } + | DIV + { $$ = op_div(); } + ; + +%% + diff --git a/examples/first.aps b/examples/first.aps new file mode 100644 index 00000000..d1c04812 --- /dev/null +++ b/examples/first.aps @@ -0,0 +1,82 @@ +with "symbol"; +with "table"; +with "grammar"; + +module FIRST[T :: var GRAMMAR[]] extends T begin + + type Symbols := SET[Symbol]; + type SymbolLattice := UNION_LATTICE[Symbol, Symbols]; + + type DeclarationTable := TABLE_LATTICE[Symbol, SymbolLattice]; + var circular collection firstTable : DeclarationTable; + + circular collection attribute Item.item_first : SymbolLattice := { }; + pragma synthesized(item_first); + + circular collection attribute Items.items_first : SymbolLattice; + pragma synthesized(items_first); + + attribute Grammar.grammar_first : DeclarationTable; + pragma synthesized(grammar_first); + + epsilon : Symbol := 
make_symbol("epsilon"); + + match ?self:Item=terminal(?s:Symbol) begin + self.item_first := { s }; + end; + + match ?self:Item=nonterminal(?s:Symbol) begin + case DeclarationTable$select(firstTable, s) begin + match DeclarationTable$table_entry(?,?item_first_objs) begin + self.item_first :> item_first_objs; + end; + end; + end; + + match ?self:Production=prod(?nt:Symbol, ?items: Items) begin + circular temp : SymbolLattice := items.items_first; + + firstTable :> DeclarationTable$table_entry(nt, temp); + end; + + match ?self:Grammar=grammar(?prods: Productions) begin + self.grammar_first := firstTable; + end; + + match ?self : Items = Items$none() begin + self.items_first :> { epsilon }; + end; + + match ?self : Items = Items$single(?item : Item) begin + self.items_first :> item.item_first; + end; + + match ?self : Items = Items$append(?items1,?items2 : Items) begin + self.items_first := black_dot(items1.items_first, items2.items_first); + end; + + match ?self : Productions = Productions$none() begin + + end; + + match ?self : Productions = Productions$single(?stmt : Production) begin + + end; + + match ?self : Productions = Productions$append(?stmts1,?stmts2 : Productions) begin + + end; + + var function contains_epsilon(s_set: Symbols) :Boolean begin + result := Symbols$member(epsilon, s_set); + end; + + var function black_dot(s1 :Symbols; s2 :Symbols) : Symbols begin + if contains_epsilon(s1) then + result := (s1 /\~ { epsilon }) \/ s2; + else + result := s1; + endif; + end; + +end; diff --git a/examples/follow.aps b/examples/follow.aps new file mode 100644 index 00000000..576a7083 --- /dev/null +++ b/examples/follow.aps @@ -0,0 +1,86 @@ +with "symbol"; +with "table"; +with "grammar"; + +module FOLLOW[T :: var GRAMMAR[]] extends T begin + + type Symbols := SET[Symbol]; + type SymbolLattice := UNION_LATTICE[Symbol, Symbols]; + + type DeclarationTable := TABLE_LATTICE[Symbol, SymbolLattice]; + var circular collection predictTable : DeclarationTable; + var circular 
collection followTable : DeclarationTable; + + circular attribute Item.item_predict : SymbolLattice := { }; + circular attribute Item.item_follow : SymbolLattice; + pragma synthesized(item_predict); + pragma inherited (item_follow); + + circular attribute Items.items_follow : SymbolLattice := { }; + circular attribute Items.items_predict : SymbolLattice := { }; + pragma synthesized(items_predict); + pragma inherited (items_follow); + + circular attribute Grammar.grammar_follow : DeclarationTable; + pragma synthesized(grammar_follow); + + epsilon : Symbol := make_symbol("epsilon"); + + match ?self:Item=terminal(?s:Symbol) begin + self.item_predict := { s }; + end; + + match ?self:Item=nonterminal(?s:Symbol) begin + followTable :> DeclarationTable$table_entry(s, self.item_follow); + + case DeclarationTable$select(predictTable, s) begin + match DeclarationTable$table_entry(?,?item_predict_objs) begin + self.item_predict := item_predict_objs; + end; + end; + end; + + match ?self:Production=prod(?nt:Symbol, ?items: Items) begin + case DeclarationTable$select(followTable, nt) begin + match DeclarationTable$table_entry(?,?item_follow_objs) begin + items.items_follow := item_follow_objs; + end; + end; + + predictTable :> DeclarationTable$table_entry(nt, items.items_predict); + end; + + match ?self:Grammar=grammar(?prods: Productions) begin + self.grammar_follow := followTable; + end; + + match ?self : Items = Items$none() begin + self.items_predict := self.items_follow; + end; + + match ?self : Items = Items$single(?item : Item) begin + self.items_predict := item.item_predict; + + item.item_follow := self.items_follow; + end; + + match ?self : Items = Items$append(?items1,?items2 : Items) begin + items1.items_follow := items2.items_predict; + + items2.items_follow := self.items_follow; + + self.items_predict := items1.items_predict; + end; + + match ?self : Productions = Productions$none() begin + + end; + + match ?self : Productions = Productions$single(?stmt : 
Production) begin + + end; + + match ?self : Productions = Productions$append(?stmts1,?stmts2 : Productions) begin + + end; +end; diff --git a/examples/grammar.aps b/examples/grammar.aps new file mode 100644 index 00000000..a4458271 --- /dev/null +++ b/examples/grammar.aps @@ -0,0 +1,19 @@ +with "symbol"; +with "table"; + +module GRAMMAR[] begin + phylum Grammar; + + phylum Item; + phylum Items := SEQUENCE[Item]; + + phylum Production; + phylum Productions := SEQUENCE[Production]; + + constructor terminal(s: Symbol) : Item; + constructor nonterminal(s: Symbol) : Item; + constructor prod(nt: Symbol; children: Items) : Production; + constructor grammar(prods: Productions) : Grammar; + + pragma root_phylum(type Grammar); +end; diff --git a/examples/grammar.lex b/examples/grammar.lex new file mode 100644 index 00000000..345ee9be --- /dev/null +++ b/examples/grammar.lex @@ -0,0 +1,21 @@ +%{ + /* Scanner for 'grammar' */ +%} + +%% + +":" { return COLON; } + +";" { return SEMICOLON; } + +"|" { return PIPE; } + +[a-z][\w]* { return TERMINAL(yytext); } + +[A-Z][\w]* { return NONTERMINAL(yytext); } + +[\s\r\n] { /* ignore spaces */ } + +"//".* { /* ignore comments */ } + +<> { return YYEOFT; } diff --git a/examples/grammar.y b/examples/grammar.y new file mode 100644 index 00000000..2a0fdd10 --- /dev/null +++ b/examples/grammar.y @@ -0,0 +1,51 @@ +/* Complete parser for grammar.y */ + +/* + * This fragment is intended to be used either by bison or ScalaBison, + * but will need language specific parts to compile as C/C++ or Scala + */ + +%token TERMINAL NONTERMINAL +%token SEMICOLON PIPE COLON + +%type grammar +%type items +%type item +%type items_many +%type productions production +%% + +grammar : productions + { $$ = grammar($1); } + ; + +productions : /* NOTHING */ + { $$ = productions_none(); } + | productions production + { $$ = productions_append($1, $2); } + ; + +production : NONTERMINAL COLON items_many SEMICOLON + { $$ = prods($1, $3); } + ; + +items_many : items + { 
$$ = items_many_single($1); } + | items_many PIPE items + { $$ = items_many_append($1, items_many_single($3)); } + ; + +items : /* NOTHING */ + { $$ = items_none(); } + | items item + { $$ = items_append($1, items_single($2)); } + ; + +item : TERMINAL + { $$ = terminal($1); } + | NONTERMINAL + { $$ = nonterminal($1); } + ; + +%% + diff --git a/examples/if.aps b/examples/if.aps new file mode 100644 index 00000000..14ecf4ca --- /dev/null +++ b/examples/if.aps @@ -0,0 +1,8 @@ +[T] function (if)(b : Boolean; then_value,else_value : T) : T begin + if b then + result := then_value; + else + result := else_value; + endif; +end; + diff --git a/examples/local-fiber-cycle.aps b/examples/local-fiber-cycle.aps new file mode 100644 index 00000000..97f78271 --- /dev/null +++ b/examples/local-fiber-cycle.aps @@ -0,0 +1,68 @@ +with "tiny"; +module LOCAL_FIBER_CYCLE[T :: var TINY[]] extends T begin + phylum Context; + constructor context(depth : Integer) : Context; + + type ContextPtr := remote Context; + attribute Context.ptr1 : ContextPtr; + attribute Context.ptr2 : ContextPtr; + + attribute Wood.scope : ContextPtr; + attribute Wood.syn : Integer; + + attribute Root.answer : Integer; + + pragma synthesized (syn, answer); + pragma inherited (scope); + + match ?this=root(?w) begin + this.answer := w.syn; + c : Context := context(0); + c.ptr1 := nil; + c.ptr2 := nil; + w.scope := c; + end; + + match ?this=branch(?w1,?w2) begin + this.syn := w1.syn + w2.syn; + c1 : Context := context(scope_depth(this.scope)); + c2 : Context := context(scope_depth(this.scope)); + c1.ptr1 := this.scope; + c1.ptr2 := c2; + c2.ptr1 := c1; + c2.ptr2 := this.scope; + w1.scope := c1; + w2.scope := c2; + end; + + match ?this=leaf(?n) begin + this.syn := scope_depth(index_scope(this.scope,n)); + end; + + var function index_scope(sc : ContextPtr; i : Integer) : ContextPtr begin + if sc = nil or i = 0 then + result := sc; + else + case sc begin + match context(?) 
begin + j : Integer := i / 2; + if i = 2 * j then + result := index_scope(sc.ptr1,j); + else + result := index_scope(sc.ptr2,j); + endif; + end; + end; + endif; + end; + + function scope_depth(sc : ContextPtr) : Integer begin + case sc begin + match ?c=context(?n) begin + result := n; + end; + else + result := 0; + end; + end; +end; diff --git a/examples/multi-cycle.aps b/examples/multi-cycle.aps new file mode 100644 index 00000000..bb9a105e --- /dev/null +++ b/examples/multi-cycle.aps @@ -0,0 +1,39 @@ +with "tiny"; +module MULTI_CYCLE[T :: var TINY[]] extends T begin + type IntegerLattice := MAX_LATTICE[Integer](-1000); + + circular attribute Wood.i1 : IntegerLattice; + circular attribute Wood.i2 : IntegerLattice; + + circular attribute Wood.s1 : IntegerLattice; + circular attribute Wood.s2 : IntegerLattice; + + attribute Root.answer : Integer; + + pragma synthesized(answer, s1, s2); + pragma inherited(i1, i2); + + match ?r=root(?w) begin + w.i1 := IntegerLattice$join(0,w.s1); + w.i2 := IntegerLattice$meet(0,w.s2); + r.answer := w.s1 + w.s2; + end; + + match ?w=branch(?x,?y) begin + x.i1 := w.i1; + w.s1 := x.s1; + a : Integer := - w.i1; + x.i2 := y.s1; + y.i1 := IntegerLattice$join(a,x.s2); + y.i2 := w.i2; + w.s2 := IntegerLattice$meet(-y.s1,y.s2); -- simple dep on y.s1 + b : Integer := - w.i2; + end; + + match ?l=leaf(?x) begin + c : Integer := x * 2; + l.s1 := IntegerLattice$join(c, l.i1); + l.s2 := IntegerLattice$meet(-c, l.i2); + end; + +end; diff --git a/examples/nested-cycles.aps b/examples/nested-cycles.aps new file mode 100644 index 00000000..9a5a724c --- /dev/null +++ b/examples/nested-cycles.aps @@ -0,0 +1,103 @@ +with "simple"; +-- Example to demonstrate the need for nested cyclic looping. +-- This example doesn't have any real intuitive meaning. 
+module NESTED_CYCLES[T :: var SIMPLE[]] extends T begin + + -- Sets of names: + type Names := SET[String]; + type NamesLattice := UNION_LATTICE[String,Names]; + + var collection all_names : Names; + + circular attribute Stmt.stmt_assigned_in : NamesLattice; + circular attribute Stmts.stmts_assigned_in : NamesLattice; + circular attribute Decl.decl_assigned_in : NamesLattice; + circular attribute Decls.decls_assigned_in : NamesLattice; + + circular attribute Stmt.stmt_assigned_out : NamesLattice; + circular attribute Stmts.stmts_assigned_out : NamesLattice; + + attribute Expr.names_used : Names; + attribute Block.outer_names : Names; + + pragma synthesized(names_used, + stmt_assigned_out, + stmts_assigned_out); + + pragma inherited(stmt_assigned_in, + stmts_assigned_in, + decl_assigned_in, + decls_assigned_in, + outer_names); + + type Messages := SET[String]; + var collection msgs : Messages; + + match ?p:Program=program(?b:Block) begin + b.outer_names := Names${}; + end; + + match ?b:Block=block(?ds:Decls,?ss:Stmts) begin + ds.decls_assigned_in := ss.stmts_assigned_out; + -- notice simple (non-monotone) use of b.outer_names + ss.stmts_assigned_in := ss.stmts_assigned_out /\~ b.outer_names; + end; + + match ?:Decls=no_decls() begin + end; + + match ?ds0:Decls=xcons_decls(?ds1:Decls,?d:Decl) begin + ds1.decls_assigned_in := ds0.decls_assigned_in; + d.decl_assigned_in := ds0.decls_assigned_in; + end; + + match ?d:Decl=decl(?id:String,?ty:Type) begin + all_names :> Names${id}; + if id not in d.decl_assigned_in then + msgs :> {id ++ " was not 'assigned.'"}; + endif; + end; + + match ?t:Type=integer_type() begin + end; + + match ?t:Type=string_type() begin + end; + + match ?ss:Stmts=no_stmts() begin + ss.stmts_assigned_out := ss.stmts_assigned_in; + end; + + match ?ss0:Stmts=xcons_stmts(?ss1:Stmts,?s:Stmt) begin + ss1.stmts_assigned_in := ss0.stmts_assigned_in; + s.stmt_assigned_in := ss1.stmts_assigned_out; + ss0.stmts_assigned_out := s.stmt_assigned_out; + end; + + 
match ?s:Stmt=block_stmt(?b:Block) begin + s.stmt_assigned_out := s.stmt_assigned_in; + b.outer_names := s.stmt_assigned_in; + end; + + match ?s:Stmt=assign_stmt(?e1:Expr,?e2:Expr) begin + circular collection out : NamesLattice; + out :> s.stmt_assigned_in; + -- monotone use of s.stmt_assigned_in in this condition: + if e2.names_used <= s.stmt_assigned_in then + out :> e1.names_used; + endif; + s.stmt_assigned_out := out; + end; + + match ?e:Expr=intconstant(?:Integer) begin + e.names_used := Names${}; + end; + + match ?e:Expr=strconstant(?:String) begin + e.names_used := Names${}; + end; + + match ?e:Expr=variable(?id:String) begin + e.names_used := Names${id}; + end; +end; diff --git a/examples/ntuple-lattice.aps b/examples/ntuple-lattice.aps new file mode 100644 index 00000000..2c9421d2 --- /dev/null +++ b/examples/ntuple-lattice.aps @@ -0,0 +1,34 @@ +with "tiny"; + +module TINY_TUPLE_LATTICE[T :: var TINY[]] extends T begin + + type IntegerSet := SET[Integer]; + type IntegerCircularSet := UNION_LATTICE[Integer, IntegerSet]; + type IntegerCircularSetList := LIST[IntegerCircularSet]; + type NTupleLattice := TUPLE_LATTICE[IntegerCircularSet, IntegerCircularSetList]; + + circular attribute Wood.wood_syn : NTupleLattice; + attribute Root.root_syn : NTupleLattice; + + circular attribute Wood.wood_inh : NTupleLattice; + + pragma inherited(wood_inh); + pragma synthesized(wood_syn, root_syn); + + match ?self=root(?w) begin + w.wood_inh := w.wood_syn; + self.root_syn := w.wood_syn; + end; + + match ?self=branch(?w1,?w2) begin + w1.wood_inh := self.wood_inh; + w2.wood_inh := self.wood_inh; + self.wood_syn := NTupleLattice$join(w1.wood_syn, w2.wood_syn); + end; + + match ?self=leaf(?value) begin + value_list: NTupleLattice := IntegerCircularSetList${ IntegerSet${ value } }; + self.wood_syn := NTupleLattice$join(NTupleLattice$meet(value_list, self.wood_inh), value_list); + end; + +end; diff --git a/examples/nullable.aps b/examples/nullable.aps new file mode 100644 index 
00000000..fdd31c5c --- /dev/null +++ b/examples/nullable.aps @@ -0,0 +1,64 @@ +with "symbol"; +with "table"; +with "grammar"; + +module NULLABLE[T :: var GRAMMAR[]] extends T begin + + type NullableTable := TABLE_LATTICE[Symbol, OrLattice]; + var circular collection nullableTable : NullableTable; + + circular attribute Item.item_nullable : OrLattice := false; + pragma synthesized(item_nullable); + + circular attribute Items.items_nullable : OrLattice; + pragma synthesized(items_nullable); + + circular attribute Grammar.grammar_nullable : NullableTable; + pragma synthesized(grammar_nullable); + + epsilon : Symbol := make_symbol("epsilon"); + + match ?self:Item=terminal(?s:Symbol) begin + self.item_nullable := s = epsilon; + end; + + match ?self:Item=nonterminal(?s:Symbol) begin + case NullableTable$select(nullableTable, s) begin + match NullableTable$table_entry(?,?item_nullable_boolean) begin + self.item_nullable := item_nullable_boolean; + end; + end; + end; + + match ?self:Production=prod(?nt:Symbol, ?items: Items) begin + nullableTable :> NullableTable$table_entry(nt, items.items_nullable); + end; + + match ?self:Grammar=grammar(?prods: Productions) begin + self.grammar_nullable := nullableTable; + end; + + match ?self : Items = Items$none() begin + self.items_nullable := true; + end; + + match ?self : Items = Items$single(?item : Item) begin + self.items_nullable := item.item_nullable; + end; + + match ?self : Items = Items$append(?items1,?items2 : Items) begin + self.items_nullable := items1.items_nullable and items2.items_nullable; + end; + + match ?self : Productions = Productions$none() begin + + end; + + match ?self : Productions = Productions$single(?stmt : Production) begin + + end; + + match ?self : Productions = Productions$append(?stmts1,?stmts2 : Productions) begin + + end; +end; diff --git a/examples/remote-binding.aps b/examples/remote-binding.aps new file mode 100644 index 00000000..a774ec7b --- /dev/null +++ b/examples/remote-binding.aps @@ -0,0 
+1,151 @@ +with "simple"; +-- Example from fiber paper. +-- using remote attributes rather than objects. +module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin + + --- the local object types and their + --- "fields" (attributes in APS). + + type Scope := remote Block; + type Entities := BAG[EntityRef]; + collection attribute Block.entities : Entities; + + pragma synthesized(entities); + + -- entities: AKA declarations + type EntityRef := remote Decl; + collection attribute Decl.decl_used : OrLattice; + + pragma synthesized(decl_used); + + var function entity_shape(e : EntityRef) : Shape begin + case e begin + match decl(?, ?ty) begin + result := ty.type_shape; + end; + end; + end; + + -- shapes: used just for object identity + phylum ShapeStructure; + constructor shape(name : String) : ShapeStructure; + type Shape := remote ShapeStructure; + + --- the globals + type Messages := BAG[String]; + var collection msgs : Messages; + + root_scope : Scope := nil; + + not_found : EntityRef := nil; + + int_shape : Shape := shape("integer"); + str_shape : Shape := shape("string"); + no_shape : Shape := shape(""); + + + --- Attributes on "nonterminals" (i.e. 
imported and fixed phyla) + + -- don't use polymorphism: + attribute Block.block_scope : Scope; + attribute Decls.decls_scope : Scope; + attribute Decl.decl_scope : Scope; + attribute Stmts.stmts_scope : Scope; + attribute Stmt.stmt_scope : Scope; + attribute Expr.expr_scope : Scope; + + attribute Type.type_shape : Shape; + attribute Expr.expr_shape : Shape; + + pragma inherited(block_scope,decls_scope,decl_scope, + stmts_scope,stmt_scope,expr_scope); + pragma synthesized(type_shape,expr_shape); + + match ?p:Program=program(?b:Block) begin + b.block_scope := root_scope; + end; + + match ?b:Block=block(?ds:Decls,?ss:Stmts) begin + ds.decls_scope := b; + ss.stmts_scope := b; + end; + + match ?:Decls=no_decls() begin + end; + + match ?ds0:Decls=xcons_decls(?ds1:Decls,?d:Decl) begin + ds1.decls_scope := ds0.decls_scope; + d.decl_scope := ds0.decls_scope; + end; + + match ?d:Decl=decl(?id:String,?ty:Type) begin + d.decl_scope.entities :> {d}; + if not d.decl_used then + msgs :> {id ++ " is unused"}; + endif; + end; + + match ?t:Type=integer_type() begin + t.type_shape := int_shape; + end; + + match ?t:Type=string_type() begin + t.type_shape := str_shape; + end; + + match ?:Stmts=no_stmts() begin + end; + + match ?ss0:Stmts=xcons_stmts(?ss1:Stmts,?s:Stmt) begin + ss1.stmts_scope := ss0.stmts_scope; + s.stmt_scope := ss0.stmts_scope; + end; + + match ?s:Stmt=block_stmt(?b:Block) begin + b.block_scope := s.stmt_scope; + end; + + match ?s:Stmt=assign_stmt(?e1:Expr,?e2:Expr) begin + e1.expr_scope := s.stmt_scope; + e2.expr_scope := s.stmt_scope; + if e1.expr_shape /= e2.expr_shape then + msgs :> {"type mismatch"}; + endif; + end; + + match ?e:Expr=intconstant(?:Integer) begin + e.expr_shape := int_shape; + end; + + match ?e:Expr=strconstant(?:String) begin + e.expr_shape := str_shape; + end; + + match ?e:Expr=variable(?id:String) begin + decl : EntityRef; + decl := lookup(id,e.expr_scope); + if decl = not_found then + msgs :> {id ++ " not declared"}; + e.expr_shape := 
no_shape; + else + decl.decl_used :> true; + e.expr_shape := decl.entity_shape; + endif; + end; + + var function lookup(name : String; scope : Scope) : EntityRef begin + if scope = root_scope then + result := not_found; + else + case scope.entities begin + match {...,?e:EntityRef=decl(?n:String if n=name,?:Type),...} begin + result := e; + end; + else + enc : Scope; + enc := scope.block_scope; + result := lookup(name,enc); + end; + endif; + end; +end; diff --git a/examples/scala/.gitignore b/examples/scala/.gitignore new file mode 100644 index 00000000..0ac9f57a --- /dev/null +++ b/examples/scala/.gitignore @@ -0,0 +1,11 @@ +*.scala +!*-driver.scala +!*.handcode.scala +!tests/ +!*.lex.scala +!*.y.scala +*.lex +*.y +*.output +!*Base.scala +*.scala~ diff --git a/examples/scala/FarrowLv.lex.scala b/examples/scala/FarrowLv.lex.scala new file mode 100644 index 00000000..e5f0d687 --- /dev/null +++ b/examples/scala/FarrowLv.lex.scala @@ -0,0 +1,47 @@ +%% + +%class FarrowLvScanner +%type FarrowLvTokens.YYToken +%implements Iterator[FarrowLvTokens.YYToken] + +%line + +%{ + var lookahead : FarrowLvTokens.YYToken = null; + + override def hasNext() : Boolean = { + if (null == lookahead) lookahead = yylex(); + lookahead match { + case x:FarrowLvTokens.YYEOF => false; + case x:FarrowLvTokens.YYToken => true; + } + }; + + override def next() : FarrowLvTokens.YYToken = { + if (null == lookahead) lookahead = yylex(); + var result : FarrowLvTokens.YYToken = lookahead; + lookahead = null; + result + }; + + def getLineNumber() : Int = yyline+1; + + def YYCHAR(s : String) = FarrowLvTokens.YYCHAR(s.charAt(0)); + def ID(s : String) = FarrowLvTokens.ID(Symbol(s)); + def LITERAL(s : String) = FarrowLvTokens.LITERAL(Symbol(s)); + def EQ = FarrowLvTokens.EQ(); + def EQEQ = FarrowLvTokens.EQEQ(); + def NEQ = FarrowLvTokens.NEQ(); + def LT = FarrowLvTokens.LT(); + def SEMICOLON = FarrowLvTokens.SEMICOLON(); + def WHILE = FarrowLvTokens.WHILE(); + def IF = FarrowLvTokens.IF(); + def THEN = 
FarrowLvTokens.THEN(); + def ELSE = FarrowLvTokens.ELSE(); + def DO = FarrowLvTokens.DO(); + def END = FarrowLvTokens.END(); + def PLUS = FarrowLvTokens.PLUS(); + def MINUS = FarrowLvTokens.MINUS(); + def YYEOFT = FarrowLvTokens.YYEOF(); + +%} diff --git a/examples/scala/FarrowLv.y.scala b/examples/scala/FarrowLv.y.scala new file mode 100644 index 00000000..53c97cd6 --- /dev/null +++ b/examples/scala/FarrowLv.y.scala @@ -0,0 +1,25 @@ +var scanner : FarrowLvScanner = null; +var filename : String = ""; +var result : Program = null; + +def get_result() : Program = result; + +def reset(sc : FarrowLvScanner, fn : String) : Unit = { + filename = fn; + scanner = sc; + yyreset(sc) +}; + +override def get_line_number() : Int = scanner.getLineNumber(); + +/* This function is called automatically when Bison detects a parse error. */ +def yyerror(message : String) : Unit = { + print (filename + ":" + scanner.getLineNumber() + ": " + + message + ", at or near " + yycur + "\n"); +}; + +override def program(ss:Stmts) : Program = { + var res = super.program(ss); + result = res; + res +} diff --git a/examples/scala/FarrowLvParserBase.scala b/examples/scala/FarrowLvParserBase.scala new file mode 100644 index 00000000..b241275f --- /dev/null +++ b/examples/scala/FarrowLvParserBase.scala @@ -0,0 +1,96 @@ +class FarrowLvParserBase { + def get_line_number() : Int = 0; + + def set_node_numbers() : Unit = { + PARSE.lineNumber = get_line_number() + }; + + object m_Tree extends M_FARROW_LV_TREE("FarrowLvTree") {}; + val t_Tree = m_Tree.t_Result; + type T_Tree = m_Tree.T_Result; + + def getTree() : M_FARROW_LV_TREE = m_Tree; + + type Stmt = t_Tree.T_Stmt; + type Stmts = t_Tree.T_Stmts; + type Expression = t_Tree.T_Expression; + type Program = t_Tree.T_Program; + + def program(ss : Stmts) : Program = { + set_node_numbers(); + var n = t_Tree.v_program(ss); + n + }; + + def stmt_assign(s: Symbol, e: Expression) : Stmt = { + set_node_numbers(); + var n = t_Tree.v_stmt_assign(s, e); + n + }; + + 
def stmt_if(e: Expression, s1: Stmts, s2: Stmts) : Stmt = { + set_node_numbers(); + var n = t_Tree.v_stmt_if(e, s1, s2); + n + }; + + def stmt_while(e: Expression, s: Stmts) : Stmt = { + set_node_numbers(); + var n = t_Tree.v_stmt_while(e, s); + n + }; + + def stmts_append(s: Stmt, ss: Stmts) : Stmts = { + set_node_numbers(); + var n = t_Tree.v_stmts_append(s, ss); + n + }; + + def stmts_empty() : Stmts = { + set_node_numbers(); + var n = t_Tree.v_stmts_empty(); + n + }; + + def expr_var(s: Symbol) : Expression = { + set_node_numbers(); + var n = t_Tree.v_expr_var(s); + n + }; + + def expr_add(e1: Expression, e2: Expression) : Expression = { + set_node_numbers(); + var n = t_Tree.v_expr_add(e1, e2); + n + }; + + def expr_subtract(e1: Expression, e2: Expression) : Expression = { + set_node_numbers(); + var n = t_Tree.v_expr_subtract(e1, e2); + n + }; + + def expr_equals(e1: Expression, e2: Expression) : Expression = { + set_node_numbers(); + var n = t_Tree.v_expr_equals(e1, e2); + n + }; + + def expr_not_equals(e1: Expression, e2: Expression) : Expression = { + set_node_numbers(); + var n = t_Tree.v_expr_not_equals(e1, e2); + n + }; + + def expr_less_than(e1: Expression, e2: Expression) : Expression = { + set_node_numbers(); + var n = t_Tree.v_expr_less_than(e1, e2); + n + }; + + def expr_lit(s: Symbol) : Expression = { + set_node_numbers(); + var n = t_Tree.v_expr_lit(s); + n + }; +} diff --git a/examples/scala/FarrowUbd.lex.scala b/examples/scala/FarrowUbd.lex.scala new file mode 100644 index 00000000..5bfa13c6 --- /dev/null +++ b/examples/scala/FarrowUbd.lex.scala @@ -0,0 +1,42 @@ +%% + +%class FarrowUbdScanner +%type FarrowUbdTokens.YYToken +%implements Iterator[FarrowUbdTokens.YYToken] + +%line + +%{ + var lookahead : FarrowUbdTokens.YYToken = null; + + override def hasNext() : Boolean = { + if (null == lookahead) lookahead = yylex(); + lookahead match { + case x:FarrowUbdTokens.YYEOF => false; + case x:FarrowUbdTokens.YYToken => true; + } + }; + + override def 
next() : FarrowUbdTokens.YYToken = { + if (null == lookahead) lookahead = yylex(); + var result : FarrowUbdTokens.YYToken = lookahead; + lookahead = null; + result + }; + + def getLineNumber() : Int = yyline+1; + + def YYCHAR(s : String) = FarrowUbdTokens.YYCHAR(s.charAt(0)); + def ID(s : String) = FarrowUbdTokens.ID(Symbol(s)); + def LITERAL(s : String) = FarrowUbdTokens.LITERAL(Symbol(s)); + def EQ = FarrowUbdTokens.EQ(); + def SEMICOLON = FarrowUbdTokens.SEMICOLON(); + def PLUS = FarrowUbdTokens.PLUS(); + def MINUS = FarrowUbdTokens.MINUS(); + def MUL = FarrowUbdTokens.MUL(); + def DIV = FarrowUbdTokens.DIV(); + def OPN_BRACE = FarrowUbdTokens.OPN_BRACE(); + def CLS_BRACE = FarrowUbdTokens.CLS_BRACE(); + def YYEOFT = FarrowUbdTokens.YYEOF(); + +%} diff --git a/examples/scala/FarrowUbd.y.scala b/examples/scala/FarrowUbd.y.scala new file mode 100644 index 00000000..35013187 --- /dev/null +++ b/examples/scala/FarrowUbd.y.scala @@ -0,0 +1,25 @@ +var scanner : FarrowUbdScanner = null; +var filename : String = ""; +var result : Program = null; + +def get_result() : Program = result; + +def reset(sc : FarrowUbdScanner, fn : String) : Unit = { + filename = fn; + scanner = sc; + yyreset(sc) +}; + +override def get_line_number() : Int = scanner.getLineNumber(); + +/* This function is called automatically when Bison detects a parse error. 
*/ +def yyerror(message : String) : Unit = { + print (filename + ":" + scanner.getLineNumber() + ": " + + message + ", at or near " + yycur + "\n"); +}; + +override def program(ds:Declarations) : Program = { + var res = super.program(ds); + result = res; + res +} diff --git a/examples/scala/FarrowUbdParserBase.scala b/examples/scala/FarrowUbdParserBase.scala new file mode 100644 index 00000000..471ac080 --- /dev/null +++ b/examples/scala/FarrowUbdParserBase.scala @@ -0,0 +1,98 @@ +class FarrowUbdParserBase { + def get_line_number() : Int = 0; + + def set_node_numbers() : Unit = { + PARSE.lineNumber = get_line_number() + }; + + object m_Tree extends M_FARROW_UBD_TREE("FarrowUbdTree") {}; + val t_Tree = m_Tree.t_Result; + type T_Tree = m_Tree.T_Result; + + def getTree() : M_FARROW_UBD_TREE = m_Tree; + + type Declaration = t_Tree.T_Declaration; + type Declarations = t_Tree.T_Declarations; + type Expression = t_Tree.T_Expression; + type Term = t_Tree.T_Term; + type Operation = t_Tree.T_Operation; + type Program = t_Tree.T_Program; + + def program(ds : Declarations) : Program = { + set_node_numbers(); + var n = t_Tree.v_program(ds); + n + }; + + def scope(ds: Declarations) : Declarations = { + set_node_numbers(); + var n = t_Tree.v_scope(ds); + n + }; + + def decl_assign(s: Symbol, e: Expression) : Declaration = { + set_node_numbers(); + var n = t_Tree.v_decl_assign(s, e); + n + }; + + def decls_empty() : Declarations = { + set_node_numbers(); + var n = t_Tree.v_decls_empty(); + n + }; + + def decls_append(ds: Declarations, d: Declaration) : Declarations = { + set_node_numbers(); + var n = t_Tree.v_decls_append(ds, d); + n + }; + + def expr_term(t: Term) : Expression = { + set_node_numbers(); + var n = t_Tree.v_expr_term(t); + n + }; + + def op_add() : Operation = { + set_node_numbers(); + var n = t_Tree.v_op_add(); + n + }; + + def op_mul() : Operation = { + set_node_numbers(); + var n = t_Tree.v_op_mul(); + n + }; + + def op_sub() : Operation = { + set_node_numbers(); 
+ var n = t_Tree.v_op_sub(); + n + }; + + def op_div() : Operation = { + set_node_numbers(); + var n = t_Tree.v_op_div(); + n + }; + + def expr_apply(e: Expression, op: Operation, t: Term) : Expression = { + set_node_numbers(); + var n = t_Tree.v_expr_apply(e, op, t); + n + }; + + def term_variable(s: Symbol) : Term = { + set_node_numbers(); + var n = t_Tree.v_term_variable(s); + n + }; + + def term_literal(s: Symbol) : Term = { + set_node_numbers(); + var n = t_Tree.v_term_literal(java.lang.Integer.parseInt(s.name)); + n + }; +} diff --git a/examples/scala/Grammar.lex.scala b/examples/scala/Grammar.lex.scala new file mode 100644 index 00000000..dfba1133 --- /dev/null +++ b/examples/scala/Grammar.lex.scala @@ -0,0 +1,37 @@ +%% + +%class GrammarScanner +%type GrammarTokens.YYToken +%implements Iterator[GrammarTokens.YYToken] + +%line + +%{ + var lookahead : GrammarTokens.YYToken = null; + + override def hasNext() : Boolean = { + if (null == lookahead) lookahead = yylex(); + lookahead match { + case x:GrammarTokens.YYEOF => false; + case x:GrammarTokens.YYToken => true; + } + }; + + override def next() : GrammarTokens.YYToken = { + if (null == lookahead) lookahead = yylex(); + var result : GrammarTokens.YYToken = lookahead; + lookahead = null; + result + }; + + def getLineNumber() : Int = yyline+1; + + def YYCHAR(s : String) = GrammarTokens.YYCHAR(s.charAt(0)); + def TERMINAL(s : String) = GrammarTokens.TERMINAL(Symbol(s)); + def NONTERMINAL(s : String) = GrammarTokens.NONTERMINAL(Symbol(s)); + def PIPE = GrammarTokens.PIPE(); + def COLON = GrammarTokens.COLON(); + def SEMICOLON = GrammarTokens.SEMICOLON(); + def YYEOFT = GrammarTokens.YYEOF(); + +%} diff --git a/examples/scala/Grammar.y.scala b/examples/scala/Grammar.y.scala new file mode 100644 index 00000000..9cbfa177 --- /dev/null +++ b/examples/scala/Grammar.y.scala @@ -0,0 +1,25 @@ +var scanner : GrammarScanner = null; +var filename : String = ""; +var result : Grammar = null; + +def get_result() : Grammar = 
result; + +def reset(sc : GrammarScanner, fn : String) : Unit = { + filename = fn; + scanner = sc; + yyreset(sc) +}; + +override def get_line_number() : Int = scanner.getLineNumber(); + +/* This function is called automatically when Bison detects a parse error. */ +def yyerror(message : String) : Unit = { + print (filename + ":" + scanner.getLineNumber() + ": " + + message + ", at or near " + yycur + "\n"); +}; + +override def grammar(b:Productions) : Grammar = { + var res = super.grammar(b); + result = res; + res +} diff --git a/examples/scala/GrammarParserBase.scala b/examples/scala/GrammarParserBase.scala new file mode 100644 index 00000000..09c87c57 --- /dev/null +++ b/examples/scala/GrammarParserBase.scala @@ -0,0 +1,96 @@ +class GrammarParserBase { + def get_line_number() : Int = 0; + + def set_node_numbers() : Unit = { + PARSE.lineNumber = get_line_number() + }; + + object m_Tree extends M_GRAMMAR("GrammarTree") {}; + val t_Tree = m_Tree.t_Result; + type T_Tree = m_Tree.T_Result; + + def getTree() : M_GRAMMAR = m_Tree; + + type Item = t_Tree.T_Item; + type Items = t_Tree.T_Items; + type Production = t_Tree.T_Production; + type Productions = t_Tree.T_Productions; + type Grammar = t_Tree.T_Grammar; + type ItemsMany = Seq[Items]; + + def grammar(prods : Productions) : Grammar = { + set_node_numbers(); + var n = t_Tree.v_grammar(prods); + n + }; + + def prod(nt: Symbol, rhs: Items) : Production = { + set_node_numbers(); + var n = t_Tree.v_prod(nt, rhs); + n + }; + + def prods(nt: Symbol, rhs_many: Seq[Items]) : Productions = { + set_node_numbers(); + var n = rhs_many + .map(prod(nt, _)) + .map(productions_single(_)) + .fold(productions_none())(productions_append(_, _)); + n + }; + def nonterminal(s: Symbol) : Item = { + set_node_numbers(); + var n = t_Tree.v_nonterminal(s); + n + }; + def terminal(s: Symbol) : Item = { + set_node_numbers(); + var n = t_Tree.v_terminal(s); + n + }; + + def productions_none() : Productions = { + set_node_numbers(); + var n = 
t_Tree.t_Productions.v_none(); + n + }; + + def productions_single(v1: Production) : Productions = { + set_node_numbers(); + var n = t_Tree.t_Productions.v_single(v1); + n + }; + + def productions_append(v1: Productions, v2: Productions) : Productions = { + set_node_numbers(); + var n = t_Tree.t_Productions.v_append(v1, v2); + n + }; + + def items_none() : Items = { + set_node_numbers(); + var n = t_Tree.t_Items.v_none(); + n + }; + + def items_single(v1: Item) : Items = { + set_node_numbers(); + var n = t_Tree.t_Items.v_single(v1); + n + }; + + def items_append(v1: Items, v2: Items) : Items = { + set_node_numbers(); + var n = t_Tree.t_Items.v_append(v1, v2); + n + }; + + def items_many_single(v1: Items) : ItemsMany = { + Seq(v1) + }; + + def items_many_append(v1: ItemsMany, v2: ItemsMany) : ItemsMany = { + v1 ++ v2 + }; + +} diff --git a/examples/scala/Makefile b/examples/scala/Makefile index 868e3c66..d3358995 100644 --- a/examples/scala/Makefile +++ b/examples/scala/Makefile @@ -1,46 +1,198 @@ APSTOP= ../.. 
SCALAC= scalac -SCALAV= 2.10 +SCALA= scala +SCALAV= 2.12 APSLIB = ${APSTOP}/lib/aps-library-${SCALAV}.jar SCALAFLAGS= -cp .:${APSLIB} SCALACFLAGS= ${SCALAFLAGS} APS2SCALA = ${APSTOP}/bin/aps2scala APS2SCALAFLAGS = -p ..:${APSTOP}/base -G -SCALAGEN = simple.scala classic-binding.scala \ - test-coll.scala test-use-coll.scala test-cycle.scala +SCALAGEN = simple.scala classic-binding.scala tiny.scala broad-fiber-cycle.scala \ + test-coll.scala test-use-coll.scala test-cycle.scala use-global.scala \ + grammar.scala first.scala follow.scala nullable.scala \ + farrow-lv-tree.scala farrow-lv.static.scala \ + farrow-ubd-tree.scala farrow-ubd.static.scala -.PHONY: all clean +MISCGEN = nested-cycles.scala SimpleParser.scala SimpleScanner.scala \ + GrammarTokens.scala SimpleTokens.scala GrammarScanner.scala GrammarParser.scala \ + FarrowLvTokens.scala FarrowLvTokens.scala FarrowLvScanner.scala FarrowLvParser.scala farrow-lv.scala \ + FarrowUbdTokens.scala FarrowUbdTokens.scala FarrowUbdScanner.scala FarrowUbdParser.scala farrow-ubd.scala \ + farrow-ubd-fiber.scala \ + *.output *.lex *.y *.scala~ + +.PHONY: all clean phony_explicit all : ${SCALAGEN} -all : simple_implicit.class classic_binding_implicit.class Classic.class +all : grammar_implicit.class simple_implicit.class classic_binding_implicit.class Classic.class +all : GrammarScanner.class GrammarTokens.class SimpleScanner.class SimpleTokens.class +all : GrammarParserBase.class GrammarParser.class SimpleParserBase.class SimpleParser.class all : test_coll_implicit.class TestCollDriver.class all : test_cycle_implicit.class TestCycleDriver.class all : test_use_coll_implicit.class TestUseCollDriver.class +all : grammar_implicit.class GrammarDriver.class +all : FarrowLv_implicit.class FarrowLvDriver.class +all : FarrowUbd_implicit.class FarrowUbdDriver.class +all : FarrowUbdFiber_implicit.class FarrowUbdFiberDriver.class +all : nested_cycles_implicit.class NestedCyclesDriver.class +all : broad_fiber_cycle_implicit.class 
BroadFiberCycleDriver.class .PHONY: run -run: Classic.run TestCollDriver.run TestCycleDriver.run +run: Classic.run TestCollDriver.run TestCycleDriver.run NestedCyclesDriver.run + +phony_explicit: + +%.static.scala : ../%.aps ${APS2SCALA} + ${APS2SCALA} ${APS2SCALAFLAGS} -S -C $* %.scala : ../%.aps ${APS2SCALA} ${APS2SCALA} ${APS2SCALAFLAGS} $* -%.scala : RCS/%.scala,v - co $< - %_implicit.class : %.scala ${SCALAC} ${SCALACFLAGS} $*.scala +FarrowUbd.lex : ../farrow-ubd.lex FarrowUbd.lex.scala + cat FarrowUbd.lex.scala ../farrow-ubd.lex > $@ + +FarrowUbdScanner.scala : FarrowUbd.lex + java -jar ${SCALAFLEX} --scala $< + +FarrowUbd.y : ../farrow-ubd.y FarrowUbd.y.scala + cat $< FarrowUbd.y.scala > $@ + +FarrowUbdParser.scala FarrowUbdTokens.scala : FarrowUbd.y ${SCALABISON} + bison -v FarrowUbd.y + rm FarrowUbd.tab.c + ${SCALA} -classpath ${SCALABISON} edu.uwm.cs.scalabison.RunGenerator FarrowUbd.y + +FarrowUbdScanner.class : FarrowUbdScanner.scala FarrowUbdTokens.class + ${SCALAC} ${SCALACFLAGS} $< + +FarrowUbdParser.class : FarrowUbdParser.scala FarrowUbdScanner.class FarrowUbdParserBase.class + ${SCALAC} ${SCALACFLAGS} $< + +FarrowUbdParserBase.class : FarrowUbdParserBase.scala FarrowUbd_implicit.class + ${SCALAC} ${SCALACFLAGS} $< + +FarrowUbdTokens.class : FarrowUbdTokens.scala + ${SCALAC} ${SCALACFLAGS} $< + +FarrowUbd_implicit.class : farrow-ubd-tree.scala farrow-ubd.scala + ${SCALAC} ${SCALACFLAGS} $^ + +FarrowUbdFiber_implicit.class : farrow-ubd-tree.scala farrow-ubd-fiber.scala + ${SCALAC} ${SCALACFLAGS} $^ + +FarrowUbdDriver.class : FarrowUbd_implicit.class FarrowUbdParser.class + ${SCALAC} ${SCALACFLAGS} farrow-ubd-driver.scala + +FarrowUbdFiberDriver.class : FarrowUbdFiber_implicit.class FarrowUbdParser.class + ${SCALAC} ${SCALACFLAGS} farrow-ubd-fiber-driver.scala + +FarrowLv.lex : ../farrow-lv.lex FarrowLv.lex.scala + cat FarrowLv.lex.scala ../farrow-lv.lex > $@ + +FarrowLvScanner.scala : FarrowLv.lex + java -jar ${SCALAFLEX} --scala $< + +FarrowLv.y : 
../farrow-lv.y FarrowLv.y.scala + cat $< FarrowLv.y.scala > $@ + +FarrowLvParser.scala FarrowLvTokens.scala : FarrowLv.y ${SCALABISON} + bison -v FarrowLv.y + rm FarrowLv.tab.c + ${SCALA} -classpath ${SCALABISON} edu.uwm.cs.scalabison.RunGenerator FarrowLv.y + +FarrowLvScanner.class : FarrowLvScanner.scala FarrowLvTokens.class + ${SCALAC} ${SCALACFLAGS} $< + +FarrowLvParser.class : FarrowLvParser.scala FarrowLvScanner.class FarrowLvParserBase.class + ${SCALAC} ${SCALACFLAGS} $< + +FarrowLvParserBase.class : FarrowLvParserBase.scala FarrowLv_implicit.class + ${SCALAC} ${SCALACFLAGS} $< + +FarrowLvTokens.class : FarrowLvTokens.scala + ${SCALAC} ${SCALACFLAGS} $< + +FarrowLv_implicit.class : farrow-lv-tree.scala farrow-lv.scala + ${SCALAC} ${SCALACFLAGS} $^ + +FarrowLvDriver.class : FarrowLv_implicit.class FarrowLvParser.class + ${SCALAC} ${SCALACFLAGS} farrow-lv-driver.scala + +Grammar.lex : ../grammar.lex Grammar.lex.scala + cat Grammar.lex.scala ../grammar.lex > $@ + +GrammarScanner.scala : Grammar.lex + java -jar ${SCALAFLEX} --scala $< + +Grammar.y : ../grammar.y Grammar.y.scala + cat $< Grammar.y.scala > $@ + +GrammarParser.scala GrammarTokens.scala : Grammar.y ${SCALABISON} + bison -v Grammar.y + rm Grammar.tab.c + ${SCALA} -classpath ${SCALABISON} edu.uwm.cs.scalabison.RunGenerator Grammar.y + +GrammarScanner.class : GrammarScanner.scala GrammarTokens.class + ${SCALAC} ${SCALACFLAGS} $< + +GrammarParser.class : GrammarParser.scala GrammarScanner.class GrammarParserBase.class + ${SCALAC} ${SCALACFLAGS} $< + +GrammarParserBase.class : GrammarParserBase.scala grammar_implicit.class + ${SCALAC} ${SCALACFLAGS} $< + +GrammarTokens.class : GrammarTokens.scala + ${SCALAC} ${SCALACFLAGS} $< + +Simple.lex : ../simple.lex Simple.lex.scala + cat Simple.lex.scala ../simple.lex > $@ + +SimpleScanner.scala : Simple.lex + java -jar ${SCALAFLEX} --scala $< + +Simple.y : ../simple.y Simple.y.scala + cat $< Simple.y.scala > $@ + +SimpleParser.scala SimpleTokens.scala : Simple.y 
${SCALABISON} + bison -v Simple.y + rm Simple.tab.c + ${SCALA} -classpath ${SCALABISON} edu.uwm.cs.scalabison.RunGenerator Simple.y + +SimpleScanner.class : SimpleScanner.scala SimpleTokens.class + ${SCALAC} ${SCALACFLAGS} $< + +SimpleParser.class : SimpleParser.scala SimpleParserBase.class SimpleScanner.class + ${SCALAC} ${SCALACFLAGS} $< + +SimpleParserBase.class : SimpleParserBase.scala simple_implicit.class + ${SCALAC} ${SCALACFLAGS} $< + +SimpleTokens.class : SimpleTokens.scala + ${SCALAC} ${SCALACFLAGS} $< + +nested-cycles.class: nested-cycles.static.scala simple_implicit.class + ${SCALAC} ${SCALACFLAGS} nested-cycles.scala + +.PHONY: nested_cycles_implicit.class +nested_cycles_implicit.class : nested-cycles.class simple_implicit.class ; + +NestedCyclesDriver.class: nested_cycles_implicit.class SimpleParser.class + ${SCALAC} ${SCALACFLAGS} nested-cycles-driver.scala + classic_binding_implicit.class : classic-binding.scala ${SCALAC} ${SCALACFLAGS} $< -Classic.class : classic-driver.scala +Classic.class : classic-driver.scala SimpleParser.class ${SCALAC} ${SCALACFLAGS} $< Classic.class classic_binding_implicit.class : simple_implicit.class Classic.class : classic_binding_implicit.class -test_coll_implicit.class : test-coll.scala +test_coll_implicit.class : test-coll.scala tiny_implicit.class ${SCALAC} ${SCALACFLAGS} $< test_use_coll_implicit.class : test-use-coll.scala @@ -58,11 +210,35 @@ test_cycle_implicit.class : test-cycle.scala TestCycleDriver.class : test_cycle_implicit.class test-cycle-driver.scala ${SCALAC} ${SCALACFLAGS} test-cycle-driver.scala +grammar_implicit.class : grammar.scala first.scala follow.scala nullable.scala + ${SCALAC} ${SCALACFLAGS} $^ + +GrammarDriver.class : grammar_implicit.class GrammarParser.class + ${SCALAC} ${SCALACFLAGS} grammar-driver.scala + +TinyParser.class : tiny-parser.handcode.scala tiny_implicit.class + ${SCALAC} ${SCALACFLAGS} tiny-parser.handcode.scala + +use_global_implicit.class : use-global.scala 
tiny_implicit.class + ${SCALAC} ${SCALACFLAGS} use-global.scala + +UseGlobal.class : use_global_implicit.class TinyParser.class +UseGlobal.class : use-global-driver.scala + ${SCALAC} ${SCALACFLAGS} $< + +broad_fiber_cycle_implicit.class : tiny_implicit.class +broad_fiber_cycle_implicit.class : broad-fiber-cycle.scala + ${SCALAC} ${SCALACFLAGS} $< + +BroadFiberCycleDriver.class : broad_fiber_cycle_implicit.class tiny_implicit.class +BroadFiberCycleDriver.class : broad-fiber-cycle-driver.scala + ${SCALAC} ${SCALACFLAGS} $< + .PHONY: %.run %.run : %.class - scala ${SCALAFLAGS} $* + scala ${SCALAFLAGS} $* ${ARGS} clean: - rm -f *.class ${SCALAGEN} + rm -f *.class ${SCALAGEN} ${MISCGEN} diff --git a/examples/scala/README b/examples/scala/README new file mode 100644 index 00000000..c5e80b80 --- /dev/null +++ b/examples/scala/README @@ -0,0 +1,4 @@ +make ARGS="farrow-lv.program" FarrowLvDriver.run +make ARGS="farrow-ubd.program" FarrowUbdDriver.run +make ARGS="farrow-ubd.program" FarrowUbdFiberDriver.run +make ARGS="grammar.cfg" GrammarDriver.run diff --git a/examples/scala/Simple.lex.scala b/examples/scala/Simple.lex.scala new file mode 100644 index 00000000..faccbd86 --- /dev/null +++ b/examples/scala/Simple.lex.scala @@ -0,0 +1,37 @@ +%% + +%class SimpleScanner +%type SimpleTokens.YYToken +%implements Iterator[SimpleTokens.YYToken] + +%line + +%{ + var lookahead : SimpleTokens.YYToken = null; + + override def hasNext() : Boolean = { + if (null == lookahead) lookahead = yylex(); + lookahead match { + case x:SimpleTokens.YYEOF => false; + case x:SimpleTokens.YYToken => true; + } + }; + + override def next() : SimpleTokens.YYToken = { + if (null == lookahead) lookahead = yylex(); + var result : SimpleTokens.YYToken = lookahead; + lookahead = null; + result + }; + + def getLineNumber() : Int = yyline+1; + + def YYCHAR(s : String) = SimpleTokens.YYCHAR(s.charAt(0)); + def INT = SimpleTokens.INT(); + def STRING = SimpleTokens.STRING(); + def ID(s : String) = 
SimpleTokens.ID(s); + def INT_LITERAL(i:String) = SimpleTokens.INT_LITERAL(Integer.parseInt(i)); + def STR_LITERAL(s:String) = SimpleTokens.STR_LITERAL(s); + def YYEOFT = SimpleTokens.YYEOF(); + +%} diff --git a/examples/scala/Simple.y.scala b/examples/scala/Simple.y.scala new file mode 100644 index 00000000..b93e36f3 --- /dev/null +++ b/examples/scala/Simple.y.scala @@ -0,0 +1,25 @@ +var scanner : SimpleScanner = null; +var filename : String = ""; +var result : Program = null; + +def get_result() : Program = result; + +def reset(sc : SimpleScanner, fn : String) : Unit = { + filename = fn; + scanner = sc; + yyreset(sc) +}; + +override def get_line_number() : Int = scanner.getLineNumber(); + +/* This function is called automatically when Bison detects a parse error. */ +def yyerror(message : String) : Unit = { + print (filename + ":" + scanner.getLineNumber() + ": " + + message + ", at or near " + yycur + "\n"); +}; + +override def program(b:Block) : Program = { + var res = super.program(b); + result = res; + res +} diff --git a/examples/scala/SimpleParserBase.scala b/examples/scala/SimpleParserBase.scala new file mode 100644 index 00000000..ab51e4db --- /dev/null +++ b/examples/scala/SimpleParserBase.scala @@ -0,0 +1,95 @@ +class SimpleParserBase { + def get_line_number() : Int = 0; + + def set_node_numbers() : Unit = { + PARSE.lineNumber = get_line_number() + }; + + object m_Tree extends M_SIMPLE("SimpleTree") {}; + val t_Tree = m_Tree.t_Result; + type T_Tree = m_Tree.T_Result; + + def getTree() : M_SIMPLE = m_Tree; + + type Program = t_Tree.T_Program; + type Block = t_Tree.T_Block; + type Decl = t_Tree.T_Decl; + type Decls = t_Tree.T_Decls; + type Type = t_Tree.T_Type; + type Stmt = t_Tree.T_Stmt; + type Stmts = t_Tree.T_Stmts; + type Expr = t_Tree.T_Expr; + + def program(b : Block) : Program = { + set_node_numbers(); + var n = t_Tree.v_program(b); + n + }; + + def block(ds: Decls, ss : Stmts) : Block = { + set_node_numbers(); + var n = t_Tree.v_block(ds,ss); + n 
+ }; + def no_decls() : Decls = { + set_node_numbers(); + var n = t_Tree.v_no_decls(); + n + }; + def xcons_decls(ds : Decls, d : Decl) : Decls = { + set_node_numbers(); + var n = t_Tree.v_xcons_decls(ds,d); + n + }; + def decl(id : String, ty : Type) : Decl = { + set_node_numbers(); + var n = t_Tree.v_decl(id,ty); + n + }; + def integer_type() : Type = { + set_node_numbers(); + var n = t_Tree.v_integer_type(); + n + }; + def string_type() : Type = { + set_node_numbers(); + var n = t_Tree.v_string_type(); + n + }; + def no_stmts() : Stmts = { + set_node_numbers(); + var n = t_Tree.v_no_stmts(); + n + }; + def xcons_stmts(ss : Stmts, s : Stmt) : Stmts = { + set_node_numbers(); + var n = t_Tree.v_xcons_stmts(ss,s); + n + }; + def block_stmt(block : Block) : Stmt = { + set_node_numbers(); + var n = t_Tree.v_block_stmt(block); + n + }; + def assign_stmt(e1:Expr, e2 : Expr) : Stmt = { + set_node_numbers(); + var n = t_Tree.v_assign_stmt(e1,e2); + n + }; + def intconstant(i : Integer) : Expr = { + set_node_numbers(); + var n = t_Tree.v_intconstant(i); + n + }; + def strconstant(s : String) : Expr = { + set_node_numbers(); + var n = t_Tree.v_strconstant(s); + n + }; + def variable(id : String) : Expr = { + set_node_numbers(); + var n = t_Tree.v_variable(id); + n + }; + +} diff --git a/examples/scala/broad-fiber-cycle-driver.scala b/examples/scala/broad-fiber-cycle-driver.scala new file mode 100644 index 00000000..2e224ff2 --- /dev/null +++ b/examples/scala/broad-fiber-cycle-driver.scala @@ -0,0 +1,19 @@ +object BroadFiberCycleDriver extends App +{ + val m = new M_TINY("Tiny"); + type T_Tiny = m.T_Result; + val t_Tiny = m.t_Result; + + val w = t_Tiny.v_root(t_Tiny.v_branch(t_Tiny.v_leaf(3),t_Tiny.v_leaf(4))); + + Debug.activate(); + + m.finish(); + + val m2 = new M_BROAD_FIBER_CYCLE("Test broad",m); + val w2 = w.asInstanceOf[m2.T_Root]; + + m2.finish(); + + println("answer is " + m2.v_answer); +} diff --git a/examples/scala/classic-driver.scala 
b/examples/scala/classic-driver.scala index 45326b39..25de7038 100644 --- a/examples/scala/classic-driver.scala +++ b/examples/scala/classic-driver.scala @@ -1,25 +1,43 @@ /* Driver to run classic driver code */ -object Classic extends Application { - val m_simple = new M_SIMPLE("Simple"); +object Classic extends App { + var simple_tree : M_SIMPLE = null; + var p: Any = _; + if (args.length == 0) { + simple_tree = new M_SIMPLE("Simple"); + val t_simple = simple_tree.t_Result; + val ds = + t_simple.v_xcons_decls(t_simple.v_no_decls(), + t_simple.v_decl("x",t_simple.v_integer_type())); + val s = + t_simple.v_assign_stmt(t_simple.v_variable("x"), + t_simple.v_variable("y")); + val ss = t_simple.v_xcons_stmts(t_simple.v_no_stmts(),s); + p = t_simple.v_program(t_simple.v_block(ds,ss)); + } else { + val ss = new SimpleScanner(new java.io.FileReader(args(0))); + val sp = new SimpleParser(); + sp.reset(ss, args(0)); + if (!sp.yyparse()) { + println("Errors found.\n"); + System.exit(1); + } + simple_tree = sp.getTree(); + p = simple_tree.t_Program; + } + + val m_simple = simple_tree; val m_binding = new M_NAME_RESOLUTION[m_simple.T_Result]("Binding",m_simple.t_Result); val t_binding = m_binding.t_Result; - val t_simple = t_binding; // = m_simple.t_Result; // scala type problems - val ds = - t_simple.v_xcons_decls(t_simple.v_no_decls(), - t_simple.v_decl("x",t_simple.v_integer_type())); - val s = - t_simple.v_assign_stmt(t_simple.v_variable("x"),t_simple.v_variable("y")); - val ss = t_simple.v_xcons_stmts(t_simple.v_no_stmts(),s); - - val p = t_simple.v_program(t_simple.v_block(ds,ss)); Debug.activate(); m_simple.finish(); m_binding.finish(); println("Messages:"); - for (m <- t_binding.v_prog_msgs(p)) { + + var program: t_binding.T_Program = p.asInstanceOf[t_binding.T_Program]; + for (m <- t_binding.v_prog_msgs(program)) { println(m); } } diff --git a/examples/scala/farrow-lv-driver.scala b/examples/scala/farrow-lv-driver.scala new file mode 100644 index 00000000..8beb6e22 
--- /dev/null +++ b/examples/scala/farrow-lv-driver.scala @@ -0,0 +1,24 @@ +object FarrowLvDriver extends App { + + val symb = new M_SYMBOL("Symbol") + symb.finish() + + val ss = new FarrowLvScanner(new java.io.FileReader(args(0))); + val sp = new FarrowLvParser(); + sp.reset(ss, args(0)); + if (!sp.yyparse()) { + println("Errors found.\n"); + System.exit(1); + } + var farrow_lv_tree = sp.getTree(); + val p = farrow_lv_tree.t_Program; + + val m_farrow_lv_tree = farrow_lv_tree; + val m_farrow_lv = new M_FARROW_LV[m_farrow_lv_tree.T_Result]("FarrowLv", m_farrow_lv_tree.t_Result); + + Debug.activate(); + + m_farrow_lv_tree.finish(); + m_farrow_lv.finish(); + +} diff --git a/examples/scala/farrow-lv.program b/examples/scala/farrow-lv.program new file mode 100644 index 00000000..4c93fdc8 --- /dev/null +++ b/examples/scala/farrow-lv.program @@ -0,0 +1,10 @@ +// Euclidean algorithm (GCD) +x = 10; +y = 50; +WHILE x != y DO + IF b < a THEN + a = a - b; + ELSE + b = b - a; + END; +END; diff --git a/examples/scala/farrow-ubd-driver.scala b/examples/scala/farrow-ubd-driver.scala new file mode 100644 index 00000000..bb055eb6 --- /dev/null +++ b/examples/scala/farrow-ubd-driver.scala @@ -0,0 +1,24 @@ +object FarrowUbdDriver extends App { + + val symb = new M_SYMBOL("Symbol") + symb.finish() + + val ss = new FarrowUbdScanner(new java.io.FileReader(args(0))); + val sp = new FarrowUbdParser(); + sp.reset(ss, args(0)); + if (!sp.yyparse()) { + println("Errors found.\n"); + System.exit(1); + } + var farrow_ubd_tree = sp.getTree(); + val p = farrow_ubd_tree.t_Program; + + val m_farrow_ubd_tree = farrow_ubd_tree; + val m_farrow_ubd = new M_FARROW_UBD[m_farrow_ubd_tree.T_Result]("FarrowUbd", m_farrow_ubd_tree.t_Result); + + Debug.activate(); + + m_farrow_ubd_tree.finish(); + m_farrow_ubd.finish(); + +} diff --git a/examples/scala/farrow-ubd-fiber-driver.scala b/examples/scala/farrow-ubd-fiber-driver.scala new file mode 100644 index 00000000..0f29de7f --- /dev/null +++ 
b/examples/scala/farrow-ubd-fiber-driver.scala @@ -0,0 +1,24 @@ +object FarrowUbdFiberDriver extends App { + + val symb = new M_SYMBOL("Symbol") + symb.finish() + + val ss = new FarrowUbdScanner(new java.io.FileReader(args(0))); + val sp = new FarrowUbdParser(); + sp.reset(ss, args(0)); + if (!sp.yyparse()) { + println("Errors found.\n"); + System.exit(1); + } + var farrow_ubd_tree = sp.getTree(); + val p = farrow_ubd_tree.t_Program; + + val m_farrow_ubd_tree = farrow_ubd_tree; + val m_farrow_ubd = new M_FARROW_UBD_FIBER[m_farrow_ubd_tree.T_Result]("FarrowUbdFiber", m_farrow_ubd_tree.t_Result); + + Debug.activate(); + + m_farrow_ubd_tree.finish(); + m_farrow_ubd.finish(); + +} diff --git a/examples/scala/farrow-ubd.program b/examples/scala/farrow-ubd.program new file mode 100644 index 00000000..f62eeb03 --- /dev/null +++ b/examples/scala/farrow-ubd.program @@ -0,0 +1,5 @@ +// Validating Farrow's use-before-declaration +x = 10; +y = x + 20; +x = z / 30; +z = 4 + unkown; diff --git a/examples/scala/grammar-driver.scala b/examples/scala/grammar-driver.scala new file mode 100644 index 00000000..c4185215 --- /dev/null +++ b/examples/scala/grammar-driver.scala @@ -0,0 +1,28 @@ +object GrammarDriver extends App { + + val symb = new M_SYMBOL("Symbol") + symb.finish() + + val ss = new GrammarScanner(new java.io.FileReader(args(0))); + val sp = new GrammarParser(); + sp.reset(ss, args(0)); + if (!sp.yyparse()) { + println("Errors found.\n"); + System.exit(1); + } + var grammar_tree = sp.getTree(); + val p = grammar_tree.t_Grammar; + + val m_grammar = grammar_tree; + val m_first = new M_FIRST[m_grammar.T_Result]("First", m_grammar.t_Result); + val m_follow = new M_FOLLOW[m_grammar.T_Result]("Follow", m_grammar.t_Result); + val m_nullable = new M_NULLABLE[m_grammar.T_Result]("Nullable", m_grammar.t_Result); + + Debug.activate(); + + m_grammar.finish(); + m_first.finish(); + m_follow.finish(); + m_nullable.finish(); + +} diff --git a/examples/scala/grammar.cfg 
b/examples/scala/grammar.cfg new file mode 100644 index 00000000..e0b14423 --- /dev/null +++ b/examples/scala/grammar.cfg @@ -0,0 +1,6 @@ +// Simple made up grammar +Z : Y X ; +Y : y ; +X : x + | Z z + | ; diff --git a/examples/scala/nested-cycles-driver.scala b/examples/scala/nested-cycles-driver.scala new file mode 100644 index 00000000..d93a21db --- /dev/null +++ b/examples/scala/nested-cycles-driver.scala @@ -0,0 +1,41 @@ +/* Driver to run nested cycles driver code */ + +object NestedCyclesDriver extends App { + var simple_tree : M_SIMPLE = null; + var p: Any = _; + if (args.length == 0) { + simple_tree = new M_SIMPLE("Simple"); + val t_simple = simple_tree.t_Result; + val ds = + t_simple.v_xcons_decls(t_simple.v_no_decls(), + t_simple.v_decl("x",t_simple.v_integer_type())); + val s = + t_simple.v_assign_stmt(t_simple.v_variable("x"), + t_simple.v_variable("y")); + val ss = t_simple.v_xcons_stmts(t_simple.v_no_stmts(),s); + p = t_simple.v_program(t_simple.v_block(ds,ss)); + } else { + val ss = new SimpleScanner(new java.io.FileReader(args(0))); + val sp = new SimpleParser(); + sp.reset(ss, args(0)); + if (!sp.yyparse()) { + println("Errors found.\n"); + System.exit(1); + } + simple_tree = sp.getTree(); + p = simple_tree.t_Program; + } + + val m_simple = simple_tree; + val m_binding = new M_NESTED_CYCLES[m_simple.T_Result]("NestedCycles",m_simple.t_Result); + val t_binding = m_binding.t_Result; + + Debug.activate(); + + m_simple.finish(); + m_binding.finish(); + println("Messages:"); + for (i <- t_binding.v_msgs) { + println(i) + } +} diff --git a/examples/scala/test-coll-driver.scala b/examples/scala/test-coll-driver.scala index 74201b47..8f0156cc 100644 --- a/examples/scala/test-coll-driver.scala +++ b/examples/scala/test-coll-driver.scala @@ -1,15 +1,21 @@ -object TestCollDriver extends Application +object TestCollDriver extends App { - val m = new M_TEST_COLL("Tiny"); + val m = new M_TINY("Tiny"); type T_Tiny = m.T_Result; val t_Tiny = m.t_Result; - val w = 
t_Tiny.v_branch(t_Tiny.v_leaf(3),t_Tiny.v_leaf(4)); + val w = t_Tiny.v_root(t_Tiny.v_branch(t_Tiny.v_leaf(3),t_Tiny.v_leaf(4))); Debug.activate(); m.finish(); - println("sum is " + t_Tiny.v_sum); - println("leaves is " + t_Tiny.v_leaves); + val m2 = new M_TEST_COLL("Test Coll",m); + val w2 = w.asInstanceOf[m2.T_Root]; + + m2.finish(); + + println("sum is " + m2.v_sum); + println("leaves is " + m2.v_leaves); + println("result is " + m2.v_result(w2)); } diff --git a/examples/scala/test-cycle-driver.scala b/examples/scala/test-cycle-driver.scala index 5d1358d4..739bdfc7 100644 --- a/examples/scala/test-cycle-driver.scala +++ b/examples/scala/test-cycle-driver.scala @@ -1,6 +1,6 @@ -object TestCycleDriver extends Application +object TestCycleDriver extends App { - val m = new M_TEST_CYCLE("Tiny"); + val m = new M_TINY("Tiny"); type T_Tiny = m.T_Result; val t_Tiny = m.t_Result; @@ -10,5 +10,10 @@ object TestCycleDriver extends Application m.finish(); - println("leaves is " + t_Tiny.v_leaves); + val m2 = new M_TEST_CYCLE("Test Cycle",m); + val w2 = w.asInstanceOf[m2.T_Root]; + + m2.finish(); + + println("leaves is " + m2.v_leaves); } diff --git a/examples/scala/test-use-coll-driver.scala b/examples/scala/test-use-coll-driver.scala new file mode 100644 index 00000000..c502123b --- /dev/null +++ b/examples/scala/test-use-coll-driver.scala @@ -0,0 +1,21 @@ +object TestUseCollDriver extends App +{ + val m = new M_TINY("Tiny"); + type T_Tiny = m.T_Result; + val t_Tiny = m.t_Result; + + val w = t_Tiny.v_root(t_Tiny.v_branch(t_Tiny.v_leaf(3),t_Tiny.v_leaf(4))); + + Debug.activate(); + + m.finish(); + + val m2 = new M_TEST_USE_COLL("Test Use Coll",m); + val w2 = w.asInstanceOf[m2.T_Root]; + + m2.finish(); + + println("sum is " + m2.v_sum); + println("leaves is " + m2.v_leaves); + println("result is " + m2.v_result(w2)); +} diff --git a/examples/scala/tests/.gitignore b/examples/scala/tests/.gitignore new file mode 100644 index 00000000..3ab5bb35 --- /dev/null +++ 
b/examples/scala/tests/.gitignore @@ -0,0 +1,4 @@ +!*Spec.scala +!*Util.scala +tiny.scala +grammar.scala diff --git a/examples/scala/tests/FirstSpec.scala b/examples/scala/tests/FirstSpec.scala new file mode 100644 index 00000000..012ec944 --- /dev/null +++ b/examples/scala/tests/FirstSpec.scala @@ -0,0 +1,25 @@ +object FirstSpec extends Spec { + + import GrammarUtil._ + + def testBasic() = { + // Arrange + var map = List[(String, List[String])]() + map +:= ("Z", List("Y", "X")) + map +:= ("Y", List("y")) + map +:= ("X", List("x")) + map +:= ("X", List("Z", "z")) + + // Act + val first = new M_FIRST("First", buildGrammar(map)) + first.finish() + + val firstTable = (for ((key, value) <- first.v_firstTable) yield (key.name, value.map(x => x.name).toSet)).toMap + + // Assert + assertEquals(3, firstTable.size, "Validate firstTable size") + assertEquals(Set("x", "y"), firstTable("X"), "Validate first of X"); + assertEquals(Set("y"), firstTable("Y"), "Validate first of Y"); + assertEquals(Set("y"), firstTable("Z"), "Validate first of Z"); + } +} diff --git a/examples/scala/tests/FollowSpec.scala b/examples/scala/tests/FollowSpec.scala new file mode 100644 index 00000000..2aa6331f --- /dev/null +++ b/examples/scala/tests/FollowSpec.scala @@ -0,0 +1,25 @@ +object FollowSpec extends Spec { + + import GrammarUtil._ + + def testBasic() = { + // Arrange + var map = List[(String, List[String])]() + map +:= ("Z", List("Y", "X")) + map +:= ("Y", List("y")) + map +:= ("X", List("x")) + map +:= ("X", List("Z", "z")) + + // Act + val first = new M_FOLLOW("Follow", buildGrammar(map)) + first.finish() + + val followTable = (for ((key, value) <- first.v_followTable) yield (key.name, value.map(x => x.name).toSet)).toMap + + // Assert + assertEquals(3, followTable.size, "Validate followTable size") + assertEquals(Set("z"), followTable("X"), "Validate follow of X"); + assertEquals(Set("x", "y"), followTable("Y"), "Validate follow of Y"); + assertEquals(Set("z"), followTable("Z"), 
"Validate follow of Z"); + } +} diff --git a/examples/scala/tests/GrammarUtil.scala b/examples/scala/tests/GrammarUtil.scala new file mode 100644 index 00000000..3ca7a3e4 --- /dev/null +++ b/examples/scala/tests/GrammarUtil.scala @@ -0,0 +1,45 @@ +import scala.reflect.{ClassTag, classTag} + +object GrammarUtil { + + def createItems(ls: List[String])(implicit grammar: M_GRAMMAR, symb: M_SYMBOL, map: List[(String, List[String])]): grammar.t_Items.T_Result = { + implicit val m = grammar.t_Items + asAPSSequence(ls.map(x => { + if (map.exists { case (nt, _) => nt == x }) + grammar.v_nonterminal(symb.v_create(x)) + else + grammar.v_terminal(symb.v_create(x)) + })) + } + + def createProductions(ls: List[(String, List[String])])(implicit grammar: M_GRAMMAR, symb: M_SYMBOL): grammar.t_Productions.T_Result = { + implicit val m = grammar.t_Productions + asAPSSequence[grammar.t_Production.T_Result](ls.map { case (key, values) => + grammar.v_prod(symb.v_create(key), createItems(values)(grammar, symb, ls) + .asInstanceOf[grammar.t_Items.T_Result]) + }.toList) + } + + def asAPSSequence[T <: Node : ClassTag](ls: List[T])(implicit m: M_SEQUENCE[T]): m.T_Result = { + ls match { + case Nil => m.v_none() + case x :: Nil => m.v_single(x) + case x :: rest => + m.v_append( + asAPSSequence(List(x)), + asAPSSequence(rest)) + } + } + + def buildGrammar(map: List[(String, List[String])]) = { + implicit val symb = new M_SYMBOL("Symbol") + symb.finish() + + implicit val grammar = new M_GRAMMAR("Grammar") + val productions = createProductions(map).asInstanceOf[grammar.t_Productions.T_Result] + grammar.v_grammar(productions) + grammar.finish() + + grammar + } +} diff --git a/examples/scala/tests/Makefile b/examples/scala/tests/Makefile new file mode 100644 index 00000000..93872598 --- /dev/null +++ b/examples/scala/tests/Makefile @@ -0,0 +1,44 @@ +SPECS=FirstSpec FollowSpec NullableSpec NTupleLatticeSpec TestSynthSpec +EXAMPLES_PATH=../.. 
+ROOT_PATH=../${EXAMPLES_PATH} +SCALAV=2.12 +APSLIB=${ROOT_PATH}/lib/aps-library-${SCALAV}.jar +SCALA_FLAGS=.:${APSLIB} +APS2SCALA=${ROOT_PATH}/bin/aps2scala + +.PHONY: all +all: $(addsuffix .run, $(SPECS)) + +.PHONY: clean +clean: + rm -f *.class grammar.scala first.scala follow.scala tiny.scala ntuple-lattice.scala test-synth.scala nullable.scala + +%.class: %.scala + scalac -cp ${SCALA_FLAGS} $< + +first.scala follow.scala nullable.scala ntuple-lattice.scala: + ${APS2SCALA} -DCOT -C -p ${EXAMPLES_PATH}:${ROOT_PATH}/base -S $(basename $(@F)) + +%.scala: + ${APS2SCALA} -DCOT -p ${EXAMPLES_PATH}:${ROOT_PATH}/base $* + +%.run: %.class + scala -cp ${SCALA_FLAGS} $(basename $<) + +GrammarUtil.class: grammar.class + scalac -cp ${SCALA_FLAGS} GrammarUtil.scala + +FirstSpec.class: Spec.class GrammarUtil.class first.class + scalac -cp ${SCALA_FLAGS} FirstSpec.scala + +FollowSpec.class: Spec.class GrammarUtil.class follow.class + scalac -cp ${SCALA_FLAGS} FollowSpec.scala + +NullableSpec.class: Spec.class GrammarUtil.class nullable.class + scalac -cp ${SCALA_FLAGS} NullableSpec.scala + +NTupleLatticeSpec.class: Spec.class tiny.class ntuple-lattice.class + scalac -cp ${SCALA_FLAGS} NTupleLatticeSpec.scala + +TestSynthSpec.class: Spec.class tiny.class test-synth.class + scalac -cp ${SCALA_FLAGS} TestSynthSpec.scala diff --git a/examples/scala/tests/NTupleLatticeSpec.scala b/examples/scala/tests/NTupleLatticeSpec.scala new file mode 100644 index 00000000..260235b1 --- /dev/null +++ b/examples/scala/tests/NTupleLatticeSpec.scala @@ -0,0 +1,28 @@ +object NTupleLatticeSpec extends Spec { + + def testBasic() = { + // Arrange + val tree = new M_TINY("tiny"); + tree.v_root( + tree.v_branch( + tree.v_branch( + tree.v_leaf(1), + tree.v_leaf(1), + ), + tree.v_branch( + tree.v_leaf(2), + tree.v_leaf(2), + ), + ) + ); + + // Act + val module = new M_TINY_TUPLE_LATTICE("ntuple-lattice", tree); + module.finish(); + + val attr = module.v_root_syn(module.t_Root.nodes(0)); + + // Assert + 
assertEquals(List(Set(1, 2)), attr, "flattened tree"); + } +} diff --git a/examples/scala/tests/NullableSpec.scala b/examples/scala/tests/NullableSpec.scala new file mode 100644 index 00000000..a0895a13 --- /dev/null +++ b/examples/scala/tests/NullableSpec.scala @@ -0,0 +1,29 @@ +object NullableSpec extends Spec { + + import GrammarUtil._ + + def testBasic() = { + // Arrange + var map = List[(String, List[String])]() + map +:= ("Z", List("Y", "X")) + map +:= ("Y", List()) + map +:= ("X", List()) + map +:= ("A", List("Z", "a")) + map +:= ("B", List("b", "B")) + map +:= ("B", List()) + + // Act + val first = new M_NULLABLE("Nullable", buildGrammar(map)) + first.finish() + + val nullableTable = (for ((key, value) <- first.v_nullableTable) yield (key.name, value)).toMap + + // Assert + assertEquals(5, nullableTable.size, "Validate nullableTable size") + assertEquals(true, nullableTable("X"), "Validate nullable of X"); + assertEquals(true, nullableTable("Y"), "Validate nullable of Y"); + assertEquals(true, nullableTable("Z"), "Validate nullable of Z"); + assertEquals(false, nullableTable("A"), "Validate nullable of A"); + assertEquals(true, nullableTable("B"), "Validate nullable of B"); + } +} diff --git a/examples/scala/tests/Spec.scala b/examples/scala/tests/Spec.scala new file mode 100644 index 00000000..3270fba2 --- /dev/null +++ b/examples/scala/tests/Spec.scala @@ -0,0 +1,21 @@ +class Spec extends App { + + def assertTrue(result: Boolean, message: String) = { + assertEquals(true, result, message); + } + + def assertEquals[T](expected: T, actual: T, message: String) = { + if (expected != actual) { + throw new Error(f"${message} ~> expected: ${expected} but got: ${actual}") + } + } + + { + println(f"Running test file: ${getClass.getSimpleName}") + for (method <- getClass.getMethods if method.getName.startsWith("test")) { + println(f"> Running ${method.getName}") + method.invoke(this) + println(f"> Finished ${method.getName}") + } + } +} diff --git 
a/examples/scala/tests/TestSynthSpec.scala b/examples/scala/tests/TestSynthSpec.scala new file mode 100644 index 00000000..34061897 --- /dev/null +++ b/examples/scala/tests/TestSynthSpec.scala @@ -0,0 +1,40 @@ +object TestSynthSpec extends Spec { + + def testBasic() = { + // Arrange + val tree = new M_TINY("tiny"); + tree.v_root( + tree.v_branch( + tree.v_branch( + tree.v_branch( + tree.v_leaf(3), + tree.v_leaf(5), + ), + tree.v_branch( + tree.v_leaf(3), + tree.v_leaf(5), + ), + ), + tree.v_branch( + tree.v_branch( + tree.v_leaf(5), + tree.v_leaf(5), + ), + tree.v_branch( + tree.v_leaf(3), + tree.v_leaf(3), + ), + ), + ) + ); + + // Act + val module = new M_TEST_SYNTH("synth", tree); + module.finish(); + + val attr = module.v_syn(module.t_Root.nodes(0)); + + // Assert + assertEquals(3, attr, "final attribute should be 3"); + } +} diff --git a/examples/scala/tiny-parser.handcode.scala b/examples/scala/tiny-parser.handcode.scala new file mode 100644 index 00000000..5e1bd1ff --- /dev/null +++ b/examples/scala/tiny-parser.handcode.scala @@ -0,0 +1,32 @@ +import java.util.StringTokenizer; + +/** + * Class to parse "tiny" trees from strings of comma-separated + * integer values, left associative with parens as needed. 
+ * For example: 1,2,(3,4) which is the same as (1,2),(3,4) + */ +class TinyParser(m_tree : M_TINY) { + val t_Tree = m_tree.t_Result; + + def asRoot(s : String) : t_Tree.T_Root = { + val tok = new StringTokenizer(s,"(,)",true); + parseRoot(tok) + }; + + def parseRoot(t : StringTokenizer) : t_Tree.T_Root = + t_Tree.f_root(parseWood(t)); + + def parseWood(t : StringTokenizer) : t_Tree.T_Wood = { + val tok = t.nextToken(); + var start : t_Tree.T_Wood = null; + if (tok == "(") { + start = parseWood(t) + } else { + start = t_Tree.f_leaf(tok.toInt) + }; + while (t.hasMoreTokens() && t.nextToken() == ",") { + start = t_Tree.f_branch(start,parseWood(t)) + }; + start + }; +} diff --git a/examples/scala/use-global-driver.scala b/examples/scala/use-global-driver.scala new file mode 100644 index 00000000..4055b652 --- /dev/null +++ b/examples/scala/use-global-driver.scala @@ -0,0 +1,25 @@ +object UseGlobal +{ + def main(args : Array[String]) : Unit = { + if (args.length == 0) { + doWith("3,(1,4)") + } else { + for (arg <- args) { + doWith(arg) + }; + } + + }; + + def doWith(s : String) : Unit = { + val t = new M_TINY("Tiny"); + val p = new TinyParser(t); + val m = new M_USE_GLOBAL("UseGlobal",t); + val r = p.asRoot(s).asInstanceOf[m.t_Result.T_Root]; + t.finish(); + Debug.activate(); + m.finish(); + val t_Tiny = m.t_Result; + println("done is " + m.v_done(r)); + }; +} diff --git a/examples/simple-binding.aps b/examples/simple-binding.aps index 3e2ef951..b31f5bcc 100644 --- a/examples/simple-binding.aps +++ b/examples/simple-binding.aps @@ -96,12 +96,12 @@ module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin --attribute Type.tmp : Messages; --pragma synthesized(tmp); - match ?t:Type=integer() begin + match ?t:Type=integer_type() begin t.type_shape := int_shape; --t.tmp := msgs; end; - match ?t:Type=string() begin + match ?t:Type=string_type() begin t.type_shape := str_shape; end; @@ -148,7 +148,7 @@ module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin endif; end; - 
function lookup(name : String; scope : Scope) : EntityRef begin + var function lookup(name : String; scope : Scope) : EntityRef begin if scope = root_scope then result := not_found; else diff --git a/examples/simple-binding1.aps b/examples/simple-binding1.aps index 76f271af..73cca0d7 100644 --- a/examples/simple-binding1.aps +++ b/examples/simple-binding1.aps @@ -140,7 +140,7 @@ module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin endif; end; - function lookup(name : String; scope : Scope) : EntityRef begin + var function lookup(name : String; scope : Scope) : EntityRef begin if scope = root_scope then result := not_found; else diff --git a/examples/simple-binding2.aps b/examples/simple-binding2.aps index 921d60f4..7e238214 100644 --- a/examples/simple-binding2.aps +++ b/examples/simple-binding2.aps @@ -13,8 +13,8 @@ module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin type Entities := BAG[EntityRef]; collection attribute Contour.entities : Entities; - procedure enclosing(e : Scope) : Scope begin - case e begin + procedure enclosing(s : Scope) : Scope begin + case s begin match contour(?e:Scope) begin result := e; end; @@ -29,8 +29,8 @@ module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin procedure entity_shape(e : EntityRef) : Shape begin case e begin - match entity(? : String, ?shape : Shape) begin - result := shape; + match entity(? 
: String, ?s : Shape) begin + result := s; end; end; end; @@ -48,6 +48,7 @@ module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin not_found : EntityRef := nil; + no_shape : Shape := Shape$nil; int_shape : Shape := shape("integer"); str_shape : Shape := shape("string"); @@ -95,11 +96,11 @@ module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin endif; end; - match ?t:Type=integer() begin + match ?t:Type=integer_type() begin t.type_shape := int_shape; end; - match ?t:Type=string() begin + match ?t:Type=string_type() begin t.type_shape := str_shape; end; @@ -136,6 +137,7 @@ module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin decl := lookup(id,e.expr_scope); if decl = not_found then msgs :> {id ++ " not declared"}; + e.expr_shape := no_shape; else decl.entity_used :> true; e.expr_shape := decl.entity_shape; diff --git a/examples/simple-binding3.aps b/examples/simple-binding3.aps index c9ec22c4..65a7ef55 100644 --- a/examples/simple-binding3.aps +++ b/examples/simple-binding3.aps @@ -97,12 +97,12 @@ module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin --attribute Type.tmp : Messages; --pragma synthesized(tmp); - match ?t:Type=integer() begin + match ?t:Type=integer_type() begin t.type_shape := int_shape; --t.tmp := msgs; end; - match ?t:Type=string() begin + match ?t:Type=string_type() begin t.type_shape := str_shape; end; @@ -171,7 +171,7 @@ module NAME_RESOLUTION[T :: var SIMPLE[]] extends T begin end; end; - function lookup(name : String; scope : Scope) : EntityRef begin + var function lookup(name : String; scope : Scope) : EntityRef begin if scope = root_scope then result := not_found; else diff --git a/examples/simple-coag.aps b/examples/simple-coag.aps index 5a8de210..b8b417da 100644 --- a/examples/simple-coag.aps +++ b/examples/simple-coag.aps @@ -43,4 +43,34 @@ module SIMPLE_OAG[T :: var SIMPLE[]] extends T begin match ?d:Decl=decl(?id:String,?ty:Type) begin d.bs := d.bi + 1; end; + + match ?p:Program=program(?b:Block) begin + end; + + match 
?t:Type=integer_type() begin + end; + + match ?t:Type=string_type() begin + end; + + match ?:Stmts=no_stmts() begin + end; + + match ?ss0:Stmts=xcons_stmts(?ss1:Stmts,?s:Stmt) begin + end; + + match ?s:Stmt=block_stmt(?b:Block) begin + end; + + match ?s:Stmt=assign_stmt(?e1:Expr,?e2:Expr) begin + end; + + match ?e:Expr=intconstant(?:Integer) begin + end; + + match ?e:Expr=strconstant(?:String) begin + end; + + match ?e:Expr=variable(?id:String) begin + end; end; diff --git a/examples/simple-oag.aps b/examples/simple-oag.aps index 44b7effb..96ed2587 100644 --- a/examples/simple-oag.aps +++ b/examples/simple-oag.aps @@ -35,4 +35,34 @@ module SIMPLE_OAG[T :: var SIMPLE[]] extends T begin match ?d:Decl=decl(?id:String,?ty:Type) begin d.bs := d.bi + 1; end; + + match ?p:Program=program(?b:Block) begin + end; + + match ?t:Type=integer_type() begin + end; + + match ?t:Type=string_type() begin + end; + + match ?:Stmts=no_stmts() begin + end; + + match ?ss0:Stmts=xcons_stmts(?ss1:Stmts,?s:Stmt) begin + end; + + match ?s:Stmt=block_stmt(?b:Block) begin + end; + + match ?s:Stmt=assign_stmt(?e1:Expr,?e2:Expr) begin + end; + + match ?e:Expr=intconstant(?:Integer) begin + end; + + match ?e:Expr=strconstant(?:String) begin + end; + + match ?e:Expr=variable(?id:String) begin + end; end; diff --git a/examples/simple-snc.aps b/examples/simple-snc.aps index 0fa7c4dd..fb224df6 100644 --- a/examples/simple-snc.aps +++ b/examples/simple-snc.aps @@ -16,11 +16,47 @@ module SIMPLE_SNC[T :: var SIMPLE[]] extends T begin e1.i2 := e1.s1; e2.i2 := 0; e2.i1 := e2.s2; - s.total := e1.i2 + e2.s1; + s.total := e1.s2 + e2.s1; end; match ?e:Expr=intconstant(?i:Integer) begin e.s1 := e.i1; e.s2 := e.i2; end; + + match ?b:Block=block(?ds:Decls,?ss:Stmts) begin + end; + + match ?ds:Decls=no_decls() begin + end; + + match ?ds0:Decls=xcons_decls(?ds1:Decls,?d:Decl) begin + end; + + match ?d:Decl=decl(?id:String,?ty:Type) begin + end; + + match ?p:Program=program(?b:Block) begin + end; + + match 
?t:Type=integer_type() begin + end; + + match ?t:Type=string_type() begin + end; + + match ?:Stmts=no_stmts() begin + end; + + match ?ss0:Stmts=xcons_stmts(?ss1:Stmts,?s:Stmt) begin + end; + + match ?s:Stmt=block_stmt(?b:Block) begin + end; + + match ?e:Expr=strconstant(?:String) begin + end; + + match ?e:Expr=variable(?id:String) begin + end; end; diff --git a/examples/simple-syn.aps b/examples/simple-syn.aps index 73a6b6f2..4cec1630 100644 --- a/examples/simple-syn.aps +++ b/examples/simple-syn.aps @@ -10,5 +10,40 @@ module SIMPLE_SYN[T :: var SIMPLE[]] extends T begin match ?ds0:Decls=xcons_decls(?ds1:Decls,?d:Decl) begin ds0.decl_count := ds1.decl_count + 1; end; + + match ?b:Block=block(?ds:Decls,?ss:Stmts) begin + end; + + match ?d:Decl=decl(?id:String,?ty:Type) begin + end; + + match ?p:Program=program(?b:Block) begin + end; + + match ?t:Type=integer_type() begin + end; + match ?t:Type=string_type() begin + end; + + match ?:Stmts=no_stmts() begin + end; + + match ?ss0:Stmts=xcons_stmts(?ss1:Stmts,?s:Stmt) begin + end; + + match ?s:Stmt=block_stmt(?b:Block) begin + end; + + match ?s:Stmt=assign_stmt(?e1:Expr,?e2:Expr) begin + end; + + match ?e:Expr=intconstant(?:Integer) begin + end; + + match ?e:Expr=strconstant(?:String) begin + end; + + match ?e:Expr=variable(?id:String) begin + end; end; diff --git a/examples/simple.lex b/examples/simple.lex new file mode 100644 index 00000000..022091b8 --- /dev/null +++ b/examples/simple.lex @@ -0,0 +1,20 @@ +%{ + /* An incomplete scanner for 'simple' */ +%} + +%% + +[{};=] { return YYCHAR(yytext); } + +"int" { return INT; } +"string" { return STRING; } + +[a-zA-Z_][a-zA-Z_0-9]* { return ID(yytext); } + +[0-9]+ { return INT_LITERAL(yytext); } + +\"([^\"\n\\]|\\(.|\n))*\" { return STR_LITERAL(yytext); } + +[ \t\r\n]+ {} + +<> { return YYEOFT; } diff --git a/examples/simple.y b/examples/simple.y new file mode 100644 index 00000000..be3d7937 --- /dev/null +++ b/examples/simple.y @@ -0,0 +1,71 @@ +/* An incomplete parser for 
simple.y */ + +/* + * This fragment is intended to be used either by bison or ScalaBison, + * but will need language specific parts to compile as C/C++ or Scala + */ + +%token INT +%token STRING + +%token ID +%token INT_LITERAL +%token STR_LITERAL + +%type program +%type block +%type decls +%type decl +%type type +%type stmts +%type stmt +%type expr + +%% + +program : block + { $$ = program($1); } + ; + +block : '{' decls stmts '}' + { $$ = block($2, $3); } + ; + +decls : /* NOTHING */ + { $$ = no_decls(); } + | decls decl + { $$ = xcons_decls($1, $2); } + ; + +decl : type ID ';' + { $$ = decl($2, $1); } + ; + +type : INT + { $$ = integer_type; } + | STRING + { $$ = string_type; } + ; + +stmts : /* NOTHING */ + { $$ = no_stmts(); } + | stmts stmt + { $$ = xcons_stmts($1,$2); } + ; + +stmt: block + { $$ = block_stmt($1); } + | expr '=' expr ';' + { $$ = assign_stmt($1,$3); } + ; + +expr : ID + { $$ = variable($1); } + | INT_LITERAL + { $$ = intconstant($1); } + | STR_LITERAL + { $$ = strconstant($1); } + ; + +%% + diff --git a/examples/test-assign.aps b/examples/test-assign.aps new file mode 100644 index 00000000..89eba715 --- /dev/null +++ b/examples/test-assign.aps @@ -0,0 +1,23 @@ +with "tiny"; + +module TEST_COLL[T :: TINY[]] extends T begin + type Integers := SET[Integer]; + + attribute Root.result : Integer; + attribute Wood.total : Integer; + pragma synthesized(result,total); + + match ?p=root(?w) begin + p.result := 100; + if w.total > 0 then + p.result := w.total; + endif; + end; + match ?l=leaf(?x) begin + l.total := x; + end; + match ?b=branch(?x,?y) begin + b.total := x.total + y.total; + end; + +end; diff --git a/examples/test-canonicals.aps b/examples/test-canonicals.aps new file mode 100644 index 00000000..6da979e9 --- /dev/null +++ b/examples/test-canonicals.aps @@ -0,0 +1,955 @@ +module GEN[T :: BASIC[]] +begin + ------------------------------------------------------- + type Misc0 := Integer; + type Misc1 := Real; + type Misc2 := String; + type 
Misc3 := Range; + type Misc4 := OrLattice; + type Misc5 := MAX_LATTICE[Integer](0); + type Misc6 := IEEEsingle; + type Misc7 := IEEEdouble; + + pragma test_canonical_type(type Misc0, "Integer"); + pragma test_canonical_type(type Misc1, "IEEEdouble"); + pragma test_canonical_type(type Misc2, "String"); + pragma test_canonical_type(type Misc3, "Range"); + pragma test_canonical_type(type Misc4, "OrLattice"); + pragma test_canonical_type(type Misc5, "Misc5"); + pragma test_canonical_type(type Misc6, "IEEEsingle"); + pragma test_canonical_type(type Misc7, "IEEEdouble"); + + pragma test_canonical_base_type(type Misc0, "Integer"); + pragma test_canonical_base_type(type Misc1, "IEEEdouble"); + pragma test_canonical_base_type(type Misc2, "String"); + pragma test_canonical_base_type(type Misc3, "Range"); + pragma test_canonical_base_type(type Misc4, "Boolean"); + pragma test_canonical_base_type(type Misc5, "Integer"); + pragma test_canonical_base_type(type Misc6, "IEEEsingle"); + pragma test_canonical_base_type(type Misc7, "IEEEdouble"); + + pragma test_canonical_signature(type Misc0, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); + pragma test_canonical_signature(type Misc1, "{TYPE[],IEEE[]}"); + pragma test_canonical_signature(type Misc2, "{TYPE[],LIST[Character],STRING[]}"); + pragma test_canonical_signature(type Misc3, "{TYPE[],LIST[Integer]}"); + pragma test_canonical_signature(type Misc4, "{BASIC[],PRINTABLE[],TYPE[],BOOLEAN[],MAKE_LATTICE[Boolean]}"); + pragma test_canonical_signature(type Misc5, "{BASIC[],PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[],MAKE_LATTICE[Integer],MAX_LATTICE[Integer]}"); + pragma test_canonical_signature(type Misc6, "{TYPE[],IEEE[]}"); + pragma test_canonical_signature(type Misc7, "{TYPE[],IEEE[]}"); + + ------------------------------------------------------- + + -- simple (non-polymorphic) + type A := T; + type B := Integer; + type C; + type D := Result; + + pragma test_canonical_type(type A, "T"); + pragma 
test_canonical_type(type B, "Integer"); + pragma test_canonical_type(type C, "C"); + pragma test_canonical_type(type D, "Result"); + + pragma test_canonical_base_type(type A, "T"); + pragma test_canonical_base_type(type B, "Integer"); + pragma test_canonical_base_type(type C, "C"); + pragma test_canonical_base_type(type D, "Result"); + + pragma test_canonical_signature(type A, "{BASIC[]}"); + pragma test_canonical_signature(type B, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); + pragma test_canonical_signature(type C, "{TYPE[]}"); + pragma test_canonical_signature(type D, "{TYPE[]}"); + + ------------------------------------------------------- + + -- type use 1 level + type E := A; + type F := B; + type G := C; + type H := D; + + pragma test_canonical_type(type E, "T"); + pragma test_canonical_type(type F, "Integer"); + pragma test_canonical_type(type G, "C"); + pragma test_canonical_type(type H, "Result"); + + pragma test_canonical_base_type(type E, "T"); + pragma test_canonical_base_type(type F, "Integer"); + pragma test_canonical_base_type(type G, "C"); + pragma test_canonical_base_type(type H, "Result"); + + pragma test_canonical_signature(type E, "{BASIC[]}"); + pragma test_canonical_signature(type F, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); + pragma test_canonical_signature(type G, "{TYPE[]}"); + pragma test_canonical_signature(type H, "{TYPE[]}"); + + ------------------------------------------------------- + + -- type use 2 level + type E2 := E; + type F2 := F; + type G2 := G; + type H2 := H; + + pragma test_canonical_type(type E2, "T"); + pragma test_canonical_type(type F2, "Integer"); + pragma test_canonical_type(type G2, "C"); + pragma test_canonical_type(type H2, "Result"); + + pragma test_canonical_base_type(type E2, "T"); + pragma test_canonical_base_type(type F2, "Integer"); + pragma test_canonical_base_type(type G2, "C"); + pragma test_canonical_base_type(type H2, "Result"); + + pragma test_canonical_signature(type E2, 
"{BASIC[]}"); + pragma test_canonical_signature(type F2, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); + pragma test_canonical_signature(type G2, "{TYPE[]}"); + pragma test_canonical_signature(type H2, "{TYPE[]}"); + + ------------------------------------------------------- + + -- polymorphic (generating) + type I := GEN[A]; + type J := GEN[B]; + type K := GEN[C]; + type L := GEN[D]; + + pragma test_canonical_type(type I, "I"); + pragma test_canonical_type(type J, "J"); + pragma test_canonical_type(type K, "K"); + pragma test_canonical_type(type L, "L"); + + pragma test_canonical_base_type(type I, "I"); + pragma test_canonical_base_type(type J, "J"); + pragma test_canonical_base_type(type K, "K"); + pragma test_canonical_base_type(type L, "L"); + + pragma test_canonical_signature(type I, "{TYPE[],GEN[T]}"); + pragma test_canonical_signature(type J, "{TYPE[],GEN[Integer]}"); + pragma test_canonical_signature(type K, "{TYPE[],GEN[C]}"); + pragma test_canonical_signature(type L, "{TYPE[],GEN[Result]}"); + + ------------------------------------------------------- + + -- polymorphic (generating) 1 level type use + type I1 := GEN[E]; + type J1 := GEN[F]; + type K1 := GEN[G]; + type L1 := GEN[H]; + + pragma test_canonical_type(type I1, "I1"); + pragma test_canonical_type(type J1, "J1"); + pragma test_canonical_type(type K1, "K1"); + pragma test_canonical_type(type L1, "L1"); + + pragma test_canonical_base_type(type I1, "I1"); + pragma test_canonical_base_type(type J1, "J1"); + pragma test_canonical_base_type(type K1, "K1"); + pragma test_canonical_base_type(type L1, "L1"); + + pragma test_canonical_signature(type I1, "{TYPE[],GEN[T]}"); + pragma test_canonical_signature(type J1, "{TYPE[],GEN[Integer]}"); + pragma test_canonical_signature(type K1, "{TYPE[],GEN[C]}"); + pragma test_canonical_signature(type L1, "{TYPE[],GEN[Result]}"); + + ------------------------------------------------------- + + -- polymorphic (generating) 2 level type use + type I2 := GEN[E2]; + 
type J2 := GEN[F2]; + type K2 := GEN[G2]; + type L2 := GEN[H2]; + + pragma test_canonical_type(type I2, "I2"); + pragma test_canonical_type(type J2, "J2"); + pragma test_canonical_type(type K2, "K2"); + pragma test_canonical_type(type L2, "L2"); + + pragma test_canonical_base_type(type I2, "I2"); + pragma test_canonical_base_type(type J2, "J2"); + pragma test_canonical_base_type(type K2, "K2"); + pragma test_canonical_base_type(type L2, "L2"); + + pragma test_canonical_signature(type I2, "{TYPE[],GEN[T]}"); + pragma test_canonical_signature(type J2, "{TYPE[],GEN[Integer]}"); + pragma test_canonical_signature(type K2, "{TYPE[],GEN[C]}"); + pragma test_canonical_signature(type L2, "{TYPE[],GEN[Result]}"); + + ------------------------------------------------------- + + -- polymorphic (non-generating) + type M := EXT[A]; + type N := EXT[B]; + type O := EXT[C]; + type P := EXT[D]; + + pragma test_canonical_type(type M, "M"); + pragma test_canonical_type(type N, "N"); + pragma test_canonical_type(type O, "O"); + pragma test_canonical_type(type P, "P"); + + pragma test_canonical_base_type(type M, "T"); + pragma test_canonical_base_type(type N, "Integer"); + pragma test_canonical_base_type(type O, "C"); + pragma test_canonical_base_type(type P, "Result"); + + pragma test_canonical_signature(type M, "{BASIC[],EXT[T]}"); + pragma test_canonical_signature(type N, "{BASIC[],PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[],EXT[Integer]}"); + pragma test_canonical_signature(type O, "{BASIC[],TYPE[],EXT[C]}"); + pragma test_canonical_signature(type P, "{BASIC[],TYPE[],EXT[Result]}"); + + ------------------------------------------------------- + + -- polymorphic (non-generating) 1 level type use + type M1 := EXT[E]; + type N1 := EXT[F]; + type O1 := EXT[G]; + type P1 := EXT[H]; + + pragma test_canonical_type(type M1, "M1"); + pragma test_canonical_type(type N1, "N1"); + pragma test_canonical_type(type O1, "O1"); + pragma test_canonical_type(type P1, "P1"); + + pragma 
test_canonical_base_type(type M1, "T"); + pragma test_canonical_base_type(type N1, "Integer"); + pragma test_canonical_base_type(type O1, "C"); + pragma test_canonical_base_type(type P1, "Result"); + + pragma test_canonical_signature(type M1, "{BASIC[],EXT[T]}"); + pragma test_canonical_signature(type N1, "{BASIC[],PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[],EXT[Integer]}"); + pragma test_canonical_signature(type O1, "{BASIC[],TYPE[],EXT[C]}"); + pragma test_canonical_signature(type P1, "{BASIC[],TYPE[],EXT[Result]}"); + + ------------------------------------------------------- + + -- polymorphic (non-generating) 2 level type use + type M2 := EXT[E2]; + type N2 := EXT[F2]; + type O2 := EXT[G2]; + type P2 := EXT[H2]; + + pragma test_canonical_type(type M2, "M2"); + pragma test_canonical_type(type N2, "N2"); + pragma test_canonical_type(type O2, "O2"); + pragma test_canonical_type(type P2, "P2"); + + pragma test_canonical_base_type(type M2, "T"); + pragma test_canonical_base_type(type N2, "Integer"); + pragma test_canonical_base_type(type O2, "C"); + pragma test_canonical_base_type(type P2, "Result"); + + pragma test_canonical_signature(type M2, "{BASIC[],EXT[T]}"); + pragma test_canonical_signature(type N2, "{BASIC[],PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[],EXT[Integer]}"); + pragma test_canonical_signature(type O2, "{BASIC[],TYPE[],EXT[C]}"); + pragma test_canonical_signature(type P2, "{BASIC[],TYPE[],EXT[Result]}"); + + ------------------------------------------------------- + + -- type inst nested + type Q := GEN[T]; + type R := EXT[T]; + + pragma test_canonical_type(type Q, "Q"); + pragma test_canonical_type(type R, "R"); + + pragma test_canonical_base_type(type Q, "Q"); + pragma test_canonical_base_type(type R, "T"); + + pragma test_canonical_signature(type Q, "{TYPE[],GEN[T]}"); + pragma test_canonical_signature(type R, "{BASIC[],EXT[T]}"); + + ------------------------------------------------------- + + -- type inst Q $ simple + type QA := Q$A; + 
type QB := Q$B; + type QC := Q$C; + type QD := Q$D; + + pragma test_canonical_type(type QA, "T"); + pragma test_canonical_type(type QB, "Integer"); + pragma test_canonical_type(type QC, "Q$C"); + pragma test_canonical_type(type QD, "Q"); + + pragma test_canonical_base_type(type QA, "T"); + pragma test_canonical_base_type(type QB, "Integer"); + pragma test_canonical_base_type(type QC, "Q$C"); + pragma test_canonical_base_type(type QD, "Q"); + + pragma test_canonical_signature(type QA, "{BASIC[]}"); + pragma test_canonical_signature(type QB, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); + pragma test_canonical_signature(type QC, "{TYPE[]}"); + pragma test_canonical_signature(type QD, "{TYPE[],GEN[T]}"); + + ------------------------------------------------------- + + -- type inst R $ simple + type RA := R$A; + type RB := R$B; + type RC := R$C; + type RD := R$D; + + pragma test_canonical_type(type RA, "T"); + pragma test_canonical_type(type RB, "Integer"); + pragma test_canonical_type(type RC, "R$C"); + pragma test_canonical_type(type RD, "R"); + + pragma test_canonical_base_type(type RA, "T"); + pragma test_canonical_base_type(type RB, "Integer"); + pragma test_canonical_base_type(type RC, "R$C"); + pragma test_canonical_base_type(type RD, "T"); + + pragma test_canonical_signature(type RA, "{BASIC[]}"); + pragma test_canonical_signature(type RB, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); + pragma test_canonical_signature(type RC, "{TYPE[]}"); + pragma test_canonical_signature(type RD, "{BASIC[],EXT[T]}"); + + ------------------------------------------------------- + + -- type inst Q $ type use 2 level + type QE2 := Q$E; + type QF2 := Q$F; + type QG2 := Q$G; + type QH2 := Q$H; + + pragma test_canonical_type(type QE2, "T"); + pragma test_canonical_type(type QF2, "Integer"); + pragma test_canonical_type(type QG2, "Q$C"); + pragma test_canonical_type(type QH2, "Q"); + + pragma test_canonical_base_type(type QE2, "T"); + pragma 
test_canonical_base_type(type QF2, "Integer"); + pragma test_canonical_base_type(type QG2, "Q$C"); + pragma test_canonical_base_type(type QH2, "Q"); + + pragma test_canonical_signature(type QE2, "{BASIC[]}"); + pragma test_canonical_signature(type QF2, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); + pragma test_canonical_signature(type QG2, "{TYPE[]}"); + pragma test_canonical_signature(type QH2, "{TYPE[],GEN[T]}"); + + ------------------------------------------------------- + + -- type inst R $ type use 2 level + type RE2 := R$E2; + type RF2 := R$F2; + type RG2 := R$G2; + type RH2 := R$H2; + + pragma test_canonical_type(type RE2, "T"); + pragma test_canonical_type(type RF2, "Integer"); + pragma test_canonical_type(type RG2, "R$C"); + pragma test_canonical_type(type RH2, "R"); + + pragma test_canonical_base_type(type RE2, "T"); + pragma test_canonical_base_type(type RF2, "Integer"); + pragma test_canonical_base_type(type RG2, "R$C"); + pragma test_canonical_base_type(type RH2, "T"); + + pragma test_canonical_signature(type RE2, "{BASIC[]}"); + pragma test_canonical_signature(type RF2, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); + pragma test_canonical_signature(type RG2, "{TYPE[]}"); + pragma test_canonical_signature(type RH2, "{BASIC[],EXT[T]}"); + + ------------------------------------------------------- + + -- type inst Q $ polymorphic (generating) 2 level + type QI2 := Q$I2; + type QJ2 := Q$J2; + type QK2 := Q$K2; + type QL2 := Q$L2; + + pragma test_canonical_type(type QI2, "Q$I2"); + pragma test_canonical_type(type QJ2, "Q$J2"); + pragma test_canonical_type(type QK2, "Q$K2"); + pragma test_canonical_type(type QL2, "Q$L2"); + + pragma test_canonical_base_type(type QI2, "Q$I2"); + pragma test_canonical_base_type(type QJ2, "Q$J2"); + pragma test_canonical_base_type(type QK2, "Q$K2"); + pragma test_canonical_base_type(type QL2, "Q$L2"); + + pragma test_canonical_signature(type QI2, "{TYPE[],GEN[T]}"); + pragma test_canonical_signature(type QJ2, 
"{TYPE[],GEN[Integer]}"); + pragma test_canonical_signature(type QK2, "{TYPE[],GEN[Q$C]}"); + pragma test_canonical_signature(type QL2, "{TYPE[],GEN[Q]}"); + + ------------------------------------------------------- + + -- type inst R $ polymorphic (generating) 2 level + type RI2 := R$I2; + type RJ2 := R$J2; + type RK2 := R$K2; + type RL2 := R$L2; + + pragma test_canonical_type(type RI2, "R$I2"); + pragma test_canonical_type(type RJ2, "R$J2"); + pragma test_canonical_type(type RK2, "R$K2"); + pragma test_canonical_type(type RL2, "R$L2"); + + pragma test_canonical_base_type(type RI2, "R$I2"); + pragma test_canonical_base_type(type RJ2, "R$J2"); + pragma test_canonical_base_type(type RK2, "R$K2"); + pragma test_canonical_base_type(type RL2, "R$L2"); + + pragma test_canonical_signature(type RI2, "{TYPE[],GEN[T]}"); + pragma test_canonical_signature(type RJ2, "{TYPE[],GEN[Integer]}"); + pragma test_canonical_signature(type RK2, "{TYPE[],GEN[R$C]}"); + pragma test_canonical_signature(type RL2, "{TYPE[],GEN[T]}"); + + ------------------------------------------------------- + + -- type inst Q $ polymorphic (non-generating) 2 level + type QM2 := Q$M2; + type QN2 := Q$N2; + type QO2 := Q$O2; + type QP2 := Q$P2; + + pragma test_canonical_type(type QM2, "Q$M2"); + pragma test_canonical_type(type QN2, "Q$N2"); + pragma test_canonical_type(type QO2, "Q$O2"); + pragma test_canonical_type(type QP2, "Q$P2"); + + pragma test_canonical_base_type(type QM2, "T"); + pragma test_canonical_base_type(type QN2, "Integer"); + pragma test_canonical_base_type(type QO2, "Q$C"); + pragma test_canonical_base_type(type QP2, "Q"); + + pragma test_canonical_signature(type QM2, "{BASIC[],EXT[T]}"); + pragma test_canonical_signature(type QN2, "{BASIC[],PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[],EXT[Integer]}"); + pragma test_canonical_signature(type QO2, "{BASIC[],TYPE[],EXT[Q$C]}"); + pragma test_canonical_signature(type QP2, "{BASIC[],TYPE[],GEN[T],EXT[Q]}"); + + 
------------------------------------------------------- + + -- type inst R $ polymorphic (non-generating) 2 level + type RM2 := R$M2; + type RN2 := R$N2; + type RO2 := R$O2; + type RP2 := R$P2; + + pragma test_canonical_type(type RM2, "R$M2"); + pragma test_canonical_type(type RN2, "R$N2"); + pragma test_canonical_type(type RO2, "R$O2"); + pragma test_canonical_type(type RP2, "R$P2"); + + pragma test_canonical_base_type(type RM2, "T"); + pragma test_canonical_base_type(type RN2, "Integer"); + pragma test_canonical_base_type(type RO2, "R$C"); + pragma test_canonical_base_type(type RP2, "T"); + + pragma test_canonical_signature(type RM2, "{BASIC[],EXT[T]}"); + pragma test_canonical_signature(type RN2, "{BASIC[],PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[],EXT[Integer]}"); + pragma test_canonical_signature(type RO2, "{BASIC[],TYPE[],EXT[R$C]}"); + pragma test_canonical_signature(type RP2, "{BASIC[],EXT[T]}"); + + ------------------------------------------------------- + + -- function types + type FT1 := function(a: A ; b : B ; c: C ; d: D) : D; + type FT2 := function(a: E ; b : F ; c: G ; d: H) : H; + type FT3 := function(a: E2 ; b : F2 ; c: G2 ; d: H2) : H2; + type FT4 := function(a: I ; b : J ; c: K ; d: L) : L2; + type FT5 := function(a: I2 ; b : J2 ; c: K2 ; d: L2) : L2; + type FT6 := function(a: M ; b : N ; c: O ; d: P) : P; + type FT7 := function(a: M1 ; b : N1 ; c: O1 ; d: P1) : P1; + type FT8 := function(a: M2 ; b : N2 ; c: O2 ; d: P2) : P2; + + pragma test_canonical_type(type FT1, "(T,Integer,C,Result)=>Result"); + pragma test_canonical_type(type FT2, "(T,Integer,C,Result)=>Result"); + pragma test_canonical_type(type FT3, "(T,Integer,C,Result)=>Result"); + pragma test_canonical_type(type FT4, "(I,J,K,L)=>L2"); + pragma test_canonical_type(type FT5, "(I2,J2,K2,L2)=>L2"); + pragma test_canonical_type(type FT6, "(M,N,O,P)=>P"); + pragma test_canonical_type(type FT7, "(M1,N1,O1,P1)=>P1"); + pragma test_canonical_type(type FT8, "(M2,N2,O2,P2)=>P2"); + + pragma 
test_canonical_base_type(type FT1, "(T,Integer,C,Result)=>Result"); + pragma test_canonical_base_type(type FT2, "(T,Integer,C,Result)=>Result"); + pragma test_canonical_base_type(type FT3, "(T,Integer,C,Result)=>Result"); + pragma test_canonical_base_type(type FT4, "(I,J,K,L)=>L2"); + pragma test_canonical_base_type(type FT5, "(I2,J2,K2,L2)=>L2"); + pragma test_canonical_base_type(type FT6, "(T,Integer,C,Result)=>Result"); + pragma test_canonical_base_type(type FT7, "(T,Integer,C,Result)=>Result"); + pragma test_canonical_base_type(type FT8, "(T,Integer,C,Result)=>Result"); + + pragma test_canonical_signature(type FT1, "{}"); + pragma test_canonical_signature(type FT2, "{}"); + pragma test_canonical_signature(type FT3, "{}"); + pragma test_canonical_signature(type FT4, "{}"); + pragma test_canonical_signature(type FT5, "{}"); + pragma test_canonical_signature(type FT6, "{}"); + pragma test_canonical_signature(type FT7, "{}"); + pragma test_canonical_signature(type FT8, "{}"); + +end; + +module EXT[T :: BASIC[]] extends T +begin + -- simple (non-polymorphic) + type A := T; + type B := Integer; + type C; + type D := Result; + + -- type use 1 level + type E := A; + type F := B; + type G := C; + type H := D; + + -- type use 2 level + type E2 := E; + type F2 := F; + type G2 := G; + type H2 := H; + + -- polymorphic (generating) + type I := GEN[A]; + type J := GEN[B]; + type K := GEN[C]; + type L := GEN[D]; + + -- polymorphic (generating) 1 level type use + type I1 := GEN[E]; + type J1 := GEN[F]; + type K1 := GEN[G]; + type L1 := GEN[H]; + + -- polymorphic (generating) 2 level type use + type I2 := GEN[E2]; + type J2 := GEN[F2]; + type K2 := GEN[G2]; + type L2 := GEN[H2]; + + -- polymorphic (non-generating) + type M := EXT[A]; + type N := EXT[B]; + type O := EXT[C]; + type P := EXT[D]; + + -- polymorphic (non-generating) 1 level type use + type M1 := EXT[E]; + type N1 := EXT[F]; + type O1 := EXT[G]; + type P1 := EXT[H]; + + -- polymorphic (non-generating) 2 level type use 
+ type M2 := EXT[E2]; + type N2 := EXT[F2]; + type O2 := EXT[G2]; + type P2 := EXT[H2]; + + -- function types + type FT1 := function(a: A ; b : B ; c: C ; d: D) : D; + type FT2 := function(a: E ; b : F ; c: G ; d: H) : H; + type FT3 := function(a: E2 ; b : F2 ; c: G2 ; d: H2) : H2; + type FT4 := function(a: I ; b : J ; c: K ; d: L) : L2; + type FT5 := function(a: I2 ; b : J2 ; c: K2 ; d: L2) : L2; + type FT6 := function(a: M ; b : N ; c: O ; d: P) : P; + type FT7 := function(a: M1 ; b : N1 ; c: O1 ; d: P1) : P1; + type FT8 := function(a: M2 ; b : N2 ; c: O2 ; d: P2) : P2; +end; + +-- type inst +type G0 := GEN[String]; +type E0 := EXT[String]; + +pragma test_canonical_type(type G0, "G0"); +pragma test_canonical_type(type E0, "E0"); + +pragma test_canonical_base_type(type G0, "G0"); +pragma test_canonical_base_type(type E0, "String"); + +pragma test_canonical_signature(type G0, "{TYPE[],GEN[String]}"); +pragma test_canonical_signature(type E0, "{BASIC[],TYPE[],LIST[Character],EXT[String],STRING[]}"); + +------------------------------------------------------- + +-- G0 $ simple +type G0A := G0$A; +type G0B := G0$B; +type G0C := G0$C; +type G0D := G0$D; + +pragma test_canonical_type(type G0A, "String"); +pragma test_canonical_type(type G0B, "Integer"); +pragma test_canonical_type(type G0C, "G0$C"); +pragma test_canonical_type(type G0D, "G0"); + +pragma test_canonical_base_type(type G0A, "String"); +pragma test_canonical_base_type(type G0B, "Integer"); +pragma test_canonical_base_type(type G0C, "G0$C"); +pragma test_canonical_base_type(type G0D, "G0"); + +pragma test_canonical_signature(type G0A, "{TYPE[],LIST[Character],STRING[]}"); +pragma test_canonical_signature(type G0B, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); +pragma test_canonical_signature(type G0C, "{TYPE[]}"); +pragma test_canonical_signature(type G0D, "{TYPE[],GEN[String]}"); + +------------------------------------------------------- + +-- E0 $ simple +type E0A := E0$A; +type E0B := E0$B; +type 
E0C := E0$C; +type E0D := E0$D; + +pragma test_canonical_type(type E0A, "String"); +pragma test_canonical_type(type E0B, "Integer"); +pragma test_canonical_type(type E0C, "E0$C"); +pragma test_canonical_type(type E0D, "E0"); + +pragma test_canonical_base_type(type E0A, "String"); +pragma test_canonical_base_type(type E0B, "Integer"); +pragma test_canonical_base_type(type E0C, "E0$C"); +pragma test_canonical_base_type(type E0D, "String"); + +pragma test_canonical_signature(type E0A, "{TYPE[],LIST[Character],STRING[]}"); +pragma test_canonical_signature(type E0B, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); +pragma test_canonical_signature(type E0C, "{TYPE[]}"); +pragma test_canonical_signature(type E0D, "{BASIC[],TYPE[],LIST[Character],EXT[String],STRING[]}"); + +------------------------------------------------------- + +-- G0 $ type use 1 level +type G0E := G0$E; +type G0F := G0$F; +type G0G := G0$G; +type G0H := G0$H; + +pragma test_canonical_type(type G0E, "String"); +pragma test_canonical_type(type G0F, "Integer"); +pragma test_canonical_type(type G0G, "G0$C"); +pragma test_canonical_type(type G0H, "G0"); + +pragma test_canonical_base_type(type G0E, "String"); +pragma test_canonical_base_type(type G0F, "Integer"); +pragma test_canonical_base_type(type G0G, "G0$C"); +pragma test_canonical_base_type(type G0H, "G0"); + +pragma test_canonical_signature(type G0E, "{TYPE[],LIST[Character],STRING[]}"); +pragma test_canonical_signature(type G0F, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); +pragma test_canonical_signature(type G0G, "{TYPE[]}"); +pragma test_canonical_signature(type G0H, "{TYPE[],GEN[String]}"); + +------------------------------------------------------- + +-- E0 $ type use 1 level +type E0E := E0$E; +type E0F := E0$F; +type E0G := E0$G; +type E0H := E0$H; + +pragma test_canonical_type(type E0E, "String"); +pragma test_canonical_type(type E0F, "Integer"); +pragma test_canonical_type(type E0G, "E0$C"); +pragma test_canonical_type(type 
E0H, "E0"); + +pragma test_canonical_base_type(type E0E, "String"); +pragma test_canonical_base_type(type E0F, "Integer"); +pragma test_canonical_base_type(type E0G, "E0$C"); +pragma test_canonical_base_type(type E0H, "String"); + +pragma test_canonical_signature(type E0E, "{TYPE[],LIST[Character],STRING[]}"); +pragma test_canonical_signature(type E0F, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); +pragma test_canonical_signature(type E0G, "{TYPE[]}"); +pragma test_canonical_signature(type E0H, "{BASIC[],TYPE[],LIST[Character],EXT[String],STRING[]}"); + +------------------------------------------------------- + +-- G0 $ type use 2 level +type G0E2 := G0$E2; +type G0F2 := G0$F2; +type G0G2 := G0$G2; +type G0H2 := G0$H2; + +pragma test_canonical_type(type G0E2, "String"); +pragma test_canonical_type(type G0F2, "Integer"); +pragma test_canonical_type(type G0G2, "G0$C"); +pragma test_canonical_type(type G0H2, "G0"); + +pragma test_canonical_base_type(type G0E2, "String"); +pragma test_canonical_base_type(type G0F2, "Integer"); +pragma test_canonical_base_type(type G0G2, "G0$C"); +pragma test_canonical_base_type(type G0H2, "G0"); + +pragma test_canonical_signature(type G0E2, "{TYPE[],LIST[Character],STRING[]}"); +pragma test_canonical_signature(type G0F2, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); +pragma test_canonical_signature(type G0G2, "{TYPE[]}"); +pragma test_canonical_signature(type G0H2, "{TYPE[],GEN[String]}"); + +------------------------------------------------------- + +-- E0 $ type use 2 level +type E0E2 := E0$E2; +type E0F2 := E0$F2; +type E0G2 := E0$G2; +type E0H2 := E0$H2; + +pragma test_canonical_type(type E0E2, "String"); +pragma test_canonical_type(type E0F2, "Integer"); +pragma test_canonical_type(type E0G2, "E0$C"); +pragma test_canonical_type(type E0H2, "E0"); + +pragma test_canonical_base_type(type E0E2, "String"); +pragma test_canonical_base_type(type E0F2, "Integer"); +pragma test_canonical_base_type(type E0G2, "E0$C"); 
+pragma test_canonical_base_type(type E0H2, "String"); + +pragma test_canonical_signature(type E0E2, "{TYPE[],LIST[Character],STRING[]}"); +pragma test_canonical_signature(type E0F2, "{PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[]}"); +pragma test_canonical_signature(type E0G2, "{TYPE[]}"); +pragma test_canonical_signature(type E0H2, "{BASIC[],TYPE[],LIST[Character],EXT[String],STRING[]}"); + +------------------------------------------------------- + +-- G0 $ polymorphic (generating) 2 level type use +type G0I2 := G0$I2; +type G0J2 := G0$J2; +type G0K2 := G0$K2; +type G0L2 := G0$L2; + +pragma test_canonical_type(type G0I2, "G0$I2"); +pragma test_canonical_type(type G0J2, "G0$J2"); +pragma test_canonical_type(type G0K2, "G0$K2"); +pragma test_canonical_type(type G0L2, "G0$L2"); + +pragma test_canonical_base_type(type G0I2, "G0$I2"); +pragma test_canonical_base_type(type G0J2, "G0$J2"); +pragma test_canonical_base_type(type G0K2, "G0$K2"); +pragma test_canonical_base_type(type G0L2, "G0$L2"); + +pragma test_canonical_signature(type G0I2, "{TYPE[],GEN[String]}"); +pragma test_canonical_signature(type G0J2, "{TYPE[],GEN[Integer]}"); +pragma test_canonical_signature(type G0K2, "{TYPE[],GEN[G0$C]}"); +pragma test_canonical_signature(type G0L2, "{TYPE[],GEN[G0]}"); + +------------------------------------------------------- + +-- E0 $ polymorphic (generating) 2 level type use +type E0I2 := E0$I2; +type E0J2 := E0$J2; +type E0K2 := E0$K2; +type E0L2 := E0$L2; + +pragma test_canonical_type(type E0I2, "E0$I2"); +pragma test_canonical_type(type E0J2, "E0$J2"); +pragma test_canonical_type(type E0K2, "E0$K2"); +pragma test_canonical_type(type E0L2, "E0$L2"); + +pragma test_canonical_base_type(type E0I2, "E0$I2"); +pragma test_canonical_base_type(type E0J2, "E0$J2"); +pragma test_canonical_base_type(type E0K2, "E0$K2"); +pragma test_canonical_base_type(type E0L2, "E0$L2"); + +pragma test_canonical_signature(type E0I2, "{TYPE[],GEN[String]}"); +pragma 
test_canonical_signature(type E0J2, "{TYPE[],GEN[Integer]}"); +pragma test_canonical_signature(type E0K2, "{TYPE[],GEN[E0$C]}"); +pragma test_canonical_signature(type E0L2, "{TYPE[],GEN[String]}"); + +------------------------------------------------------- + +-- G0 $ polymorphic (non-generating) 2 level type use +type G0M2 := G0$M2; +type G0N2 := G0$N2; +type G0O2 := G0$O2; +type G0P2 := G0$P2; + +pragma test_canonical_type(type G0M2, "G0$M2"); +pragma test_canonical_type(type G0N2, "G0$N2"); +pragma test_canonical_type(type G0O2, "G0$O2"); +pragma test_canonical_type(type G0P2, "G0$P2"); + +pragma test_canonical_base_type(type G0M2, "String"); +pragma test_canonical_base_type(type G0N2, "Integer"); +pragma test_canonical_base_type(type G0O2, "G0$C"); +pragma test_canonical_base_type(type G0P2, "G0"); + +pragma test_canonical_signature(type G0M2, "{BASIC[],TYPE[],LIST[Character],EXT[String],STRING[]}"); +pragma test_canonical_signature(type G0N2, "{BASIC[],PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[],EXT[Integer]}"); +pragma test_canonical_signature(type G0O2, "{BASIC[],TYPE[],EXT[G0$C]}"); +pragma test_canonical_signature(type G0P2, "{BASIC[],TYPE[],GEN[String],EXT[G0]}"); + +------------------------------------------------------- + +-- E0 $ polymorphic (non-generating) 2 level type use +type E0M2 := E0$M2; +type E0N2 := E0$N2; +type E0O2 := E0$O2; +type E0P2 := E0$P2; + +pragma test_canonical_type(type E0M2, "E0$M2"); +pragma test_canonical_type(type E0N2, "E0$N2"); +pragma test_canonical_type(type E0O2, "E0$O2"); +pragma test_canonical_type(type E0P2, "E0$P2"); + +pragma test_canonical_base_type(type E0M2, "String"); +pragma test_canonical_base_type(type E0N2, "Integer"); +pragma test_canonical_base_type(type E0O2, "E0$C"); +pragma test_canonical_base_type(type E0P2, "String"); + +pragma test_canonical_signature(type E0M2, "{BASIC[],TYPE[],LIST[Character],EXT[String],STRING[]}"); +pragma test_canonical_signature(type E0N2, 
"{BASIC[],PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[],EXT[Integer]}"); +pragma test_canonical_signature(type E0O2, "{BASIC[],TYPE[],EXT[E0$C]}"); +pragma test_canonical_signature(type E0P2, "{BASIC[],TYPE[],LIST[Character],EXT[String],STRING[]}"); + +------------------------------------------------------- + +-- G0 $ polymorphic (non-generating) 2 level type use $ polymorphic (non-generating) 2 level type use +type G0M2M2 := G0$M2$M2; +type G0N2N2 := G0$N2$N2; +type G0O2O2 := G0$O2$O2; +type G0P2P2 := G0$P2$P2; + +pragma test_canonical_type(type G0M2M2, "G0$M2$M2"); +pragma test_canonical_type(type G0N2N2, "G0$N2$N2"); +pragma test_canonical_type(type G0O2O2, "G0$O2$O2"); +pragma test_canonical_type(type G0P2P2, "G0$P2$P2"); + +pragma test_canonical_base_type(type G0M2M2, "String"); +pragma test_canonical_base_type(type G0N2N2, "Integer"); +pragma test_canonical_base_type(type G0O2O2, "G0$O2$C"); +pragma test_canonical_base_type(type G0P2P2, "G0"); + +pragma test_canonical_signature(type G0M2M2, "{BASIC[],TYPE[],LIST[Character],EXT[String],STRING[]}"); +pragma test_canonical_signature(type G0N2N2, "{BASIC[],PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[],EXT[Integer]}"); +pragma test_canonical_signature(type G0O2O2, "{BASIC[],TYPE[],EXT[G0$O2$C]}"); +pragma test_canonical_signature(type G0P2P2, "{BASIC[],TYPE[],GEN[String],EXT[G0]}"); + +------------------------------------------------------- + +-- E0 $ polymorphic (non-generating) 2 level type use $ polymorphic (non-generating) 2 level type use +type E0M2M2 := E0$M2$M2; +type E0N2N2 := E0$N2$N2; +type E0O2O2 := E0$O2$O2; +type E0P2P2 := E0$P2$P2; + +pragma test_canonical_type(type E0M2M2, "E0$M2$M2"); +pragma test_canonical_type(type E0N2N2, "E0$N2$N2"); +pragma test_canonical_type(type E0O2O2, "E0$O2$O2"); +pragma test_canonical_type(type E0P2P2, "E0$P2$P2"); + +pragma test_canonical_base_type(type E0M2M2, "String"); +pragma test_canonical_base_type(type E0N2N2, "Integer"); +pragma 
test_canonical_base_type(type E0O2O2, "E0$O2$C"); +pragma test_canonical_base_type(type E0P2P2, "String"); + +pragma test_canonical_signature(type E0M2M2, "{BASIC[],TYPE[],LIST[Character],EXT[String],STRING[]}"); +pragma test_canonical_signature(type E0N2N2, "{BASIC[],PRINTABLE[],ORDERED[],NUMERIC[],TYPE[],INTEGER[],EXT[Integer]}"); +pragma test_canonical_signature(type E0O2O2, "{BASIC[],TYPE[],EXT[E0$O2$C]}"); +pragma test_canonical_signature(type E0P2P2, "{BASIC[],TYPE[],LIST[Character],EXT[String],STRING[]}"); + +------------------------------------------------------- + +-- G0 $ polymorphic (generating) 2 level type use $ polymorphic (generating) 2 level type use +type G0I2I2 := G0$I2$I2; +type G0J2J2 := G0$J2$J2; +type G0K2K2 := G0$K2$K2; +type G0L2L2 := G0$L2$L2; + +pragma test_canonical_type(type G0I2I2, "G0$I2$I2"); +pragma test_canonical_type(type G0J2J2, "G0$J2$J2"); +pragma test_canonical_type(type G0K2K2, "G0$K2$K2"); +pragma test_canonical_type(type G0L2L2, "G0$L2$L2"); + +pragma test_canonical_base_type(type G0I2I2, "G0$I2$I2"); +pragma test_canonical_base_type(type G0J2J2, "G0$J2$J2"); +pragma test_canonical_base_type(type G0K2K2, "G0$K2$K2"); +pragma test_canonical_base_type(type G0L2L2, "G0$L2$L2"); + +pragma test_canonical_signature(type G0I2I2, "{TYPE[],GEN[String]}"); +pragma test_canonical_signature(type G0J2J2, "{TYPE[],GEN[Integer]}"); +pragma test_canonical_signature(type G0K2K2, "{TYPE[],GEN[G0$K2$C]}"); +pragma test_canonical_signature(type G0L2L2, "{TYPE[],GEN[G0$L2]}"); + +------------------------------------------------------- + +-- E0 $ polymorphic (generating) 2 level type use $ polymorphic (generating) 2 level type use +type E0I2I2 := E0$I2$I2; +type E0J2J2 := E0$J2$J2; +type E0K2K2 := E0$K2$K2; +type E0L2L2 := E0$L2$L2; + +pragma test_canonical_type(type E0I2I2, "E0$I2$I2"); +pragma test_canonical_type(type E0J2J2, "E0$J2$J2"); +pragma test_canonical_type(type E0K2K2, "E0$K2$K2"); +pragma test_canonical_type(type E0L2L2, "E0$L2$L2"); 
+ +pragma test_canonical_base_type(type E0I2I2, "E0$I2$I2"); +pragma test_canonical_base_type(type E0J2J2, "E0$J2$J2"); +pragma test_canonical_base_type(type E0K2K2, "E0$K2$K2"); +pragma test_canonical_base_type(type E0L2L2, "E0$L2$L2"); + +pragma test_canonical_signature(type E0I2I2, "{TYPE[],GEN[String]}"); +pragma test_canonical_signature(type E0J2J2, "{TYPE[],GEN[Integer]}"); +pragma test_canonical_signature(type E0K2K2, "{TYPE[],GEN[E0$K2$C]}"); +pragma test_canonical_signature(type E0L2L2, "{TYPE[],GEN[E0$L2]}"); + +------------------------------------------------------- + +-- Qual function type from generating module +type G0FT1 := G0$FT1; +type G0FT2 := G0$FT2; +type G0FT3 := G0$FT3; +type G0FT4 := G0$FT4; +type G0FT5 := G0$FT5; +type G0FT6 := G0$FT6; +type G0FT7 := G0$FT7; +type G0FT8 := G0$FT8; + +pragma test_canonical_type(type G0FT1, "(String,Integer,G0$C,G0)=>G0"); +pragma test_canonical_type(type G0FT2, "(String,Integer,G0$C,G0)=>G0"); +pragma test_canonical_type(type G0FT3, "(String,Integer,G0$C,G0)=>G0"); +pragma test_canonical_type(type G0FT4, "(G0$I,G0$J,G0$K,G0$L)=>G0$L2"); +pragma test_canonical_type(type G0FT5, "(G0$I2,G0$J2,G0$K2,G0$L2)=>G0$L2"); +pragma test_canonical_type(type G0FT6, "(G0$M,G0$N,G0$O,G0$P)=>G0$P"); +pragma test_canonical_type(type G0FT7, "(G0$M1,G0$N1,G0$O1,G0$P1)=>G0$P1"); +pragma test_canonical_type(type G0FT8, "(G0$M2,G0$N2,G0$O2,G0$P2)=>G0$P2"); + +pragma test_canonical_base_type(type G0FT1, "(String,Integer,G0$C,G0)=>G0"); +pragma test_canonical_base_type(type G0FT2, "(String,Integer,G0$C,G0)=>G0"); +pragma test_canonical_base_type(type G0FT3, "(String,Integer,G0$C,G0)=>G0"); +pragma test_canonical_base_type(type G0FT4, "(G0$I,G0$J,G0$K,G0$L)=>G0$L2"); +pragma test_canonical_base_type(type G0FT5, "(G0$I2,G0$J2,G0$K2,G0$L2)=>G0$L2"); +pragma test_canonical_base_type(type G0FT6, "(String,Integer,G0$C,G0)=>G0"); +pragma test_canonical_base_type(type G0FT7, "(String,Integer,G0$C,G0)=>G0"); +pragma 
test_canonical_base_type(type G0FT8, "(String,Integer,G0$C,G0)=>G0"); + +------------------------------------------------------- + +-- Qual function type from non-generating module +type E0FT1 := E0$FT1; +type E0FT2 := E0$FT2; +type E0FT3 := E0$FT3; +type E0FT4 := E0$FT4; +type E0FT5 := E0$FT5; +type E0FT6 := E0$FT6; +type E0FT7 := E0$FT7; +type E0FT8 := E0$FT8; + +pragma test_canonical_type(type E0FT1, "(String,Integer,E0$C,E0)=>E0"); +pragma test_canonical_type(type E0FT2, "(String,Integer,E0$C,E0)=>E0"); +pragma test_canonical_type(type E0FT3, "(String,Integer,E0$C,E0)=>E0"); +pragma test_canonical_type(type E0FT4, "(E0$I,E0$J,E0$K,E0$L)=>E0$L2"); +pragma test_canonical_type(type E0FT5, "(E0$I2,E0$J2,E0$K2,E0$L2)=>E0$L2"); +pragma test_canonical_type(type E0FT6, "(E0$M,E0$N,E0$O,E0$P)=>E0$P"); +pragma test_canonical_type(type E0FT7, "(E0$M1,E0$N1,E0$O1,E0$P1)=>E0$P1"); +pragma test_canonical_type(type E0FT8, "(E0$M2,E0$N2,E0$O2,E0$P2)=>E0$P2"); + +pragma test_canonical_base_type(type E0FT1, "(String,Integer,E0$C,String)=>String"); +pragma test_canonical_base_type(type E0FT2, "(String,Integer,E0$C,String)=>String"); +pragma test_canonical_base_type(type E0FT3, "(String,Integer,E0$C,String)=>String"); +pragma test_canonical_base_type(type E0FT4, "(E0$I,E0$J,E0$K,E0$L)=>E0$L2"); +pragma test_canonical_base_type(type E0FT5, "(E0$I2,E0$J2,E0$K2,E0$L2)=>E0$L2"); +pragma test_canonical_base_type(type E0FT6, "(String,Integer,E0$C,String)=>String"); +pragma test_canonical_base_type(type E0FT7, "(String,Integer,E0$C,String)=>String"); +pragma test_canonical_base_type(type E0FT8, "(String,Integer,E0$C,String)=>String"); + +------------------------------------------------------- + +-- Qual use from canonical type validation + +phylum CoolClassPhylum; +type CoolClass := remote CoolClassPhylum; +pragma test_canonical_type(CoolClass$nil, "CoolClassPhylum"); \ No newline at end of file diff --git a/examples/test-coll.aps b/examples/test-coll.aps index 86859c97..183331f8 100644 
--- a/examples/test-coll.aps +++ b/examples/test-coll.aps @@ -1,17 +1,18 @@ -module TEST_COLL[] begin - phylum Wood; - - constructor branch(x,y : Wood) : Wood; - constructor leaf(x : Integer) : Wood; - +with "tiny"; + +module TEST_COLL[T :: TINY[]] extends T begin type Integers := SET[Integer]; var collection sum : Integer :> 0, (+); var collection leaves : Integers; + attribute Root.result : Integer; + pragma synthesized(result); + match ?p=root(?w) begin + p.result := sum; + end; match ?l=leaf(?x) begin sum :> x; - leaves :> {x}; end; match ?b=branch(?x,?y) begin end; diff --git a/examples/test-cycle.aps b/examples/test-cycle.aps index f240c6d4..fc51c6c9 100644 --- a/examples/test-cycle.aps +++ b/examples/test-cycle.aps @@ -1,9 +1,6 @@ -module TEST_CYCLE[] begin - phylum Wood; - - constructor branch(x,y : Wood) : Wood; - constructor leaf(x : Integer) : Wood; - +with "tiny"; +module TEST_CYCLE[T :: var TINY[]] extends T begin + type Integers := SET[Integer]; type IntegerLattice := UNION_LATTICE[Integer,Integers]; diff --git a/examples/test-for.aps b/examples/test-for.aps new file mode 100644 index 00000000..f14e2de3 --- /dev/null +++ b/examples/test-for.aps @@ -0,0 +1,31 @@ +with "tiny"; +-- Example to test implementation of for-in loops +module TEST_FOR[T :: var TINY[]] extends T begin + + -- Sets of names: + type Leaves := SET[Integer]; + + type Biggest := MAX_LATTICE[Integer](-1000); + + var collection all_leaves : Leaves; + + collection attribute Root.answer : Biggest; + + pragma synthesized(answer); + + match ?r=root(?w) begin + for all_leaves begin + match {...,?leaf,...} begin + r.answer :> leaf; + end; + end; + end; + + match ?w=branch(?x,?y) begin + end; + + match ?l=leaf(?x) begin + all_leaves :> {x}; + end; + +end; diff --git a/examples/test-forin.aps b/examples/test-forin.aps new file mode 100644 index 00000000..05b3d179 --- /dev/null +++ b/examples/test-forin.aps @@ -0,0 +1,29 @@ +with "tiny"; +-- Example to test implementation of for-in loops +module 
TEST_FORIN[T :: var TINY[]] extends T begin + + -- Sets of names: + type Leaves := SET[Integer]; + + type Biggest := MAX_LATTICE[Integer](-1000); + + var collection all_leaves : Leaves; + + collection attribute Root.answer : Biggest; + + pragma synthesized(answer); + + match ?r=root(?w) begin + for leaf in all_leaves begin + r.answer :> leaf; + end; + end; + + match ?w=branch(?x,?y) begin + end; + + match ?l=leaf(?x) begin + all_leaves :> {x}; + end; + +end; diff --git a/examples/test-remote.aps b/examples/test-remote.aps new file mode 100644 index 00000000..705d4a8c --- /dev/null +++ b/examples/test-remote.aps @@ -0,0 +1,49 @@ +-- TEST-REMOTE.APS +-- August 2022 +-- Test extending remote attribution to uses of attributes of remote nodes. + +-- This test uses some circumlocutions to hide what we are doing: +-- - using "RWood" instead of "remote Wood" gets past one check +-- - using "other.s" rather than "l.w.s" gets past another syntactic check +-- But semantically the +example still does something that the +-- scheduler cannot handle. 
In the version of APS that this file +-- was first checked in on, apssched will happily schedule this file, +-- incorrectly evaluating x.s before y.i despite the fact that +-- x.s depends indirectly on y.i + +with "tiny"; +module TEST_REMOTE[T :: var TINY[]] extends T begin + type RWood := remote Wood; + + attribute Wood.s : Integer; + attribute Wood.i : Integer; + attribute Wood.w : RWood; + + attribute Root.r : Integer; + + pragma inherited(i, w); + pragma synthesized(s, r); + + match ?self=root(?t) begin + self.r := t.s; + t.i := 0; + t.w := Wood$nil; + end; + + match ?self=branch(?x, ?y) begin + x.w := y; + y.w := self.w; + x.i := self.i + 1; + y.i := self.i + 2; + self.s := x.s + y.s; + end; + + match ?l=leaf(?n) begin + other : RWood := l.w; + if other = nil then + l.s := n + l.i; + else + l.s := l.i + other.i + other.s; + endif; + end; +end; \ No newline at end of file diff --git a/examples/test-synth.aps b/examples/test-synth.aps new file mode 100644 index 00000000..7e7c73df --- /dev/null +++ b/examples/test-synth.aps @@ -0,0 +1,57 @@ +with "tiny"; + +module TEST_SYNTH[T :: TINY[]] extends T begin + var collection sum : Integer :> 0, (+); + + attribute Root.syn : Integer; + pragma synthesized(syn); + + attribute Wood.i1 : Integer := 10; + attribute Wood.i2 : Integer; + pragma inherited(i1, i2); + + attribute Wood.s1 : Integer := 100; + attribute Wood.s2 : Integer; + pragma synthesized(s1, s2); + + function hf(x : Integer; y : Integer) : Boolean begin + q : Integer := x / y; + result := q * y = x; + end; + + match ?r=root(?w) begin + if hf(sum,3) then + r.syn := 3; + w.i1 := 2; + endif; + if hf(sum,5) then + r.syn := 5; + w.i2 := 4; + endif; + w.i2 := w.s1; + r.syn := 2; + end; + + match ?w=leaf(?x) begin + if hf(x,2) then + sum :> 2; + w.s1 := 2 * w.i1; + endif; + if hf(x,3) then + sum :> 3; + w.s2 := x; + endif; + sum :> 5; + w.s2 := 1; + end; + + match ?w=branch(?x,?y) begin + y.i1 := y.s2; + y.i2 := 2; + x.i2 := y.s1; + w.s1 := w.i1; + w.s2 := w.i2; + 
sum :> x.s2; + sum :> x.s1; + end; +end; diff --git a/examples/test-use-coll.aps b/examples/test-use-coll.aps new file mode 100644 index 00000000..758a98e5 --- /dev/null +++ b/examples/test-use-coll.aps @@ -0,0 +1,32 @@ +with "tiny"; + +module TEST_USE_COLL[T :: var TINY[]] extends T +begin + type Integers := SET[Integer]; + + var collection sum : Integer :> 0, (+); + var collection leaves : Integers; + + attribute Wood.sumsum : Integer; + attribute Root.result : Integer; + attribute Root.result2 : Integers := leaves; + + pragma synthesized(sumsum,result,result2); + + var function f1() f1r : Integer := sum; + + var function f2() f2r : Integer := f1(); + + match ?l=leaf(?x) begin + sum :> x; + leaves :> {x}; + l.sumsum := f2(); + end; + match ?b=branch(?x,?y) begin + b.sumsum := x.sumsum + y.sumsum; + end; + match ?p=root(?b) begin + p.result := b.sumsum; + end; + +end; diff --git a/examples/test-use-object-coll.aps b/examples/test-use-object-coll.aps new file mode 100644 index 00000000..ea3db663 --- /dev/null +++ b/examples/test-use-object-coll.aps @@ -0,0 +1,32 @@ +with "tiny"; + +module USE_OBJECT_COLL[T :: var TINY[]] extends T +begin + phylum Object; + constructor object(x : Integer) : Object; + + var collection attribute Object.col : Integer :> 0, (+); + + type Obj := remote Object; + + attribute Wood.obj : Obj; + pragma inherited(obj); + + attribute Root.total : Integer; + pragma synthesized(total); + + match ?p=root(?w) begin + o : Object := object(42); + w.obj := o; + p.total := o.col; + end; + + match ?self=branch(?w1,?w2) begin + w1.obj := self.obj; + w2.obj := self.obj; + end; + + match ?self=leaf(?n) begin + self.obj.col :> n; + end; +end; diff --git a/examples/test-use-object.aps b/examples/test-use-object.aps new file mode 100644 index 00000000..6e55f395 --- /dev/null +++ b/examples/test-use-object.aps @@ -0,0 +1,40 @@ +with "tiny"; + +module USE_OBJECT[T :: var TINY[]] extends T +begin + phylum Object; + constructor object(x : Integer) : Object; + + 
attribute Object.field : Integer := 42; + + type Obj := remote Object; + + attribute Wood.obj : Obj; + attribute Wood.num : Integer; + attribute Wood.isBranch : Boolean := false; + pragma inherited(obj); + pragma synthesized(num, isBranch); + + attribute Root.total : Integer; + pragma synthesized(total); + + match ?p=root(?w) begin + o : Object := object(42); + w.obj := o; + p.total := w.num; + if w.isBranch then + o.field := 7; + endif; + end; + + match ?self=branch(?w1,?w2) begin + w1.obj := self.obj; + w2.obj := self.obj; + self.isBranch := true; + self.num := w1.num + w2.num; + end; + + match ?self=leaf(?n) begin + self.num := n + self.obj.field; + end; +end; diff --git a/examples/tiny-coll-cond.aps b/examples/tiny-coll-cond.aps new file mode 100644 index 00000000..939dc9ef --- /dev/null +++ b/examples/tiny-coll-cond.aps @@ -0,0 +1,34 @@ +with "tiny"; + +module TINY_COLL_COND[T :: var TINY[]] extends T +begin + attribute Wood.depth : Integer; + collection attribute Wood.sum : Integer :> 0, (+); + + pragma inherited(depth); + pragma synthesized(sum); + + function isEven(n : Integer) : Boolean := (n / 2) * 2 = n; + + match ?l=leaf(?x) begin + if l.depth > 2 then + l.sum :> x; + endif; + end; + + match ?b=branch(?x,?y) begin + if b.depth > 3 then + b.sum :> x.sum; + endif; + if isEven(b.depth) then + b.sum :> y.sum; + endif; + x.depth := b.depth + 1; + y.depth := b.depth + 1; + end; + + match ?p=root(?b) begin + b.depth := 1; + end; + +end; diff --git a/examples/tiny-coll.aps b/examples/tiny-coll.aps new file mode 100644 index 00000000..6937789b --- /dev/null +++ b/examples/tiny-coll.aps @@ -0,0 +1,22 @@ +with "tiny"; + +module TINY_COLL[T :: var TINY[]] extends T +begin + type Integers := SET[Integer]; + + collection attribute Wood.sum : Integer :> 0, (+); + + pragma synthesized(sum); + + match ?l=leaf(?x) begin + l.sum :> x; + end; + match ?b=branch(?x,?y) begin + b.sum :> x.sum; + b.sum :> y.sum; + end; + + match ?p=root(?b) begin + end; + +end; diff --git 
a/examples/tiny.aps b/examples/tiny.aps new file mode 100644 index 00000000..e54fe02a --- /dev/null +++ b/examples/tiny.aps @@ -0,0 +1,11 @@ +module TINY[] begin + phylum Root; + phylum Wood; + + pragma root_phylum(type Root); + + constructor root(w : Wood) : Root; + constructor branch(x,y : Wood) : Wood; + constructor leaf(x : Integer) : Wood; + +end; diff --git a/examples/type-binding.aps b/examples/type-binding.aps index 63c30225..0a00b16b 100644 --- a/examples/type-binding.aps +++ b/examples/type-binding.aps @@ -184,7 +184,7 @@ module TYPE_BINDING[T :: var TYPE_DECL[]] extends T begin e.expr_shape := element_type(a.expr_shape); end; - function element_type(at : Shape) : Shape begin + var function element_type(at : Shape) : Shape begin case at begin match vector_shape(?et:Shape) begin result := et; diff --git a/examples/use-global.aps b/examples/use-global.aps new file mode 100644 index 00000000..ed8efd27 --- /dev/null +++ b/examples/use-global.aps @@ -0,0 +1,23 @@ +with "tiny"; +-- Very simple example of using a global collection attribute +module USE_GLOBAL[T :: var TINY[]] extends T begin + + var collection max_value : Integer :> 0, max; + attribute Wood.result : Integer := 0; + pragma synthesized(result); + attribute Root.done : Integer; + pragma synthesized(done); + + match ?r:Root=root(?w:Wood) begin + r.done := w.result; + end; + + match ?b:Wood=branch(?w1:Wood,?w2:Wood) begin + b.result := w1.result + w2.result; + end; + + match ?l=leaf(?v:Integer) begin + max_value :> v; + l.result := max_value; + end; +end; diff --git a/parse/.gitignore b/parse/.gitignore new file mode 100644 index 00000000..a34372fb --- /dev/null +++ b/parse/.gitignore @@ -0,0 +1,2 @@ +aps.tab.* +aps-lex.* diff --git a/parse/Makefile b/parse/Makefile index f033cff1..6ba6289c 100644 --- a/parse/Makefile +++ b/parse/Makefile @@ -1,5 +1,6 @@ CC=gcc -CFLAGS=-Wall -g -DYYDEBUG -DYY_SKIP_YYWRAP +LINTER_FLAGS= -Wno-pointer-to-int-cast -Wno-unused-function -Wno-int-to-pointer-cast 
+CFLAGS=-Wall -g -DYYDEBUG -DYY_SKIP_YYWRAP ${LINTER_FLAGS} APSLIBOBJS= alloc.o string.o symbol.o \ aps-tree.o aps-traverse.o aps-util.o \ @@ -23,7 +24,7 @@ aps.tab.h aps.tab.c : aps.y bison -d -p aps_yy aps.y install : aps-lib.o - mv aps-lib.o ../lib + mv aps-lib.o ../lib/. clean: rm -f *.o aps.tab.* aps2lisp core aps-lex.c diff --git a/parse/alloc.c b/parse/alloc.c index 7b4e8d66..ea74919c 100644 --- a/parse/alloc.c +++ b/parse/alloc.c @@ -3,7 +3,7 @@ #include "jbb-alloc.h" -#define BLOCKSIZE 1000000 +#define BLOCKSIZE 10000000 struct block { struct block *next, *prev; char contents[BLOCKSIZE]; diff --git a/parse/aps-lex.h b/parse/aps-lex.h index a1574629..82d4f166 100644 --- a/parse/aps-lex.h +++ b/parse/aps-lex.h @@ -1,5 +1,8 @@ #ifndef APS_LEX_H #define APS_LEX_H +#include +#include "jbb-symbol.h" +#include "jbb-string.h" /* define yy things used in both aps.y and aps.lex */ @@ -25,8 +28,9 @@ extern int yylex(); extern int yydebug; #define YY_USER_ACTION if (yydebug) {printf("Lexed: \"%s\"\n",yytext);} -extern void set_infix(Symbol sym, int kind); -extern void set_code_name(Symbol sym, String s); -extern String get_code_name(Symbol sym); +extern void init_lexer(FILE *f); +extern void set_infix(SYMBOL sym, int kind); +extern void set_code_name(SYMBOL sym, STRING s); +extern STRING get_code_name(SYMBOL sym); #endif diff --git a/parse/aps-tree-dump.handcode.i b/parse/aps-tree-dump.handcode.i index 0bf333b6..a6e0d74d 100644 --- a/parse/aps-tree-dump.handcode.i +++ b/parse/aps-tree-dump.handcode.i @@ -1,5 +1,5 @@ void dump_lisp_Symbol(Symbol s) { - char *name = symbol_name(s); + const char *name = symbol_name(s); printf(" aps-boot::|"); while (*name != '\0') { switch (*name) { diff --git a/parse/aps-tree.h b/parse/aps-tree.h index b47245ab..35b2ce2b 100644 --- a/parse/aps-tree.h +++ b/parse/aps-tree.h @@ -1,3 +1,6 @@ +#ifndef APS_TREE_H +#define APS_TREE_H + /* Generated by a program written by John Boyland */ #include "jbb-tree.h" #include "aps-tree.handcode.h" @@ 
-816,3 +819,4 @@ extern Expression controlled_expr(Expression); extern Declaration controlled_formal(Expression); extern Expression controlled_set(Expression); +#endif diff --git a/parse/aps-util.c b/parse/aps-util.c index 085764c0..c9a8c059 100644 --- a/parse/aps-util.c +++ b/parse/aps-util.c @@ -260,3 +260,21 @@ Block some_function_decl_body(Declaration _node) { case KEYprocedure_decl: return procedure_decl_body(_node); } } + +Expression some_case_stmt_expr(Declaration node) { + switch (Declaration_KEY(node)) { + default: fatal_error("some_case_stmt_expr: called with something on line %d", + tnode_line_number(node)); + case KEYcase_stmt: return case_stmt_expr(node); + case KEYfor_stmt: return for_stmt_expr(node); + } +} + +Matches some_case_stmt_matchers(Declaration node) { + switch (Declaration_KEY(node)) { + default: fatal_error("some_case_stmt_matchers: called with something on line %d", + tnode_line_number(node)); + case KEYcase_stmt: return case_stmt_matchers(node); + case KEYfor_stmt: return for_stmt_matchers(node); + } +} diff --git a/parse/aps-util.h b/parse/aps-util.h index 31f55b66..c9b22239 100644 --- a/parse/aps-util.h +++ b/parse/aps-util.h @@ -87,9 +87,14 @@ extern Def some_function_decl_def(Declaration); extern Type some_function_decl_type(Declaration); extern Block some_function_decl_body(Declaration); +#define KEYsome_case_stmt KEYcase_stmt: \ + case KEYfor_stmt +extern Expression some_case_stmt_expr(Declaration); +extern Matches some_case_stmt_matchers(Declaration); + /* for loop for accessing elements (only works in C++ with stl) */ #define FOR_SEQUENCE(etype,id,stype,seq,body) \ - { stack _st; _st.push(seq); \ + { std::stack _st; _st.push(seq); \ while (!_st.empty()) { \ stype _t = _st.top(); _st.pop(); \ switch(stype##_KEY(_t)) { \ diff --git a/parse/apsc.c b/parse/apsc.c index 1208fc35..f2c84ce4 100644 --- a/parse/apsc.c +++ b/parse/apsc.c @@ -12,7 +12,7 @@ int aps_parse_error = 0; int info_size = 0; -main(int argc,char **argv) { +int 
main(int argc,char **argv) { extern FILE *aps_yyin; extern char *aps_yyfilename; if (argc != 2) usage(); @@ -41,6 +41,7 @@ main(int argc,char **argv) { dump_lisp_Program(the_tree); printf("\n"); exit(0); + return 0; } int yywrap() diff --git a/parse/jbb.h b/parse/jbb.h index 0cf19271..401ba3ea 100644 --- a/parse/jbb.h +++ b/parse/jbb.h @@ -57,7 +57,8 @@ extern char *strcpy(), *strcat(); #endif #ifndef assert #ifndef _crash -#define _crash() (*(int *)0 = -1) +#include +#define _crash() (abort(),0) #endif #ifndef _assert #define _assert(ex) \ diff --git a/parse/string.c b/parse/string.c index 2adb037e..54ae037b 100644 --- a/parse/string.c +++ b/parse/string.c @@ -8,9 +8,9 @@ * We store regular constant strings as * character pointers (unless they have high bits set) * Otherwise we use the first character as a type code: - * « (left european quote) for constant strings - * © (copyright sign) for concatenated strings - * ¥ (Yen sign) for integer strings + * « (left european quote) for constant strings + * © (copyright sign) for concatenated strings + * Â¥ (Yen sign) for integer strings */ struct jbb_string { @@ -26,7 +26,7 @@ struct special_string { #define AS_SPECIAL(p,x) struct special_string *p=(struct special_string *)(x) -#define CONSTANT_STRING (128+'+') +#define CONSTANT_STRING (((unsigned char)(128+(unsigned char)'+'))) struct constant_string { struct special_string header; char *value; @@ -34,7 +34,7 @@ struct constant_string { #define AS_CONSTANT(p,x) \ struct constant_string *p=(struct constant_string *)(x) -#define CONC_STRING (128+')') +#define CONC_STRING (((unsigned char)(128+(unsigned char)')'))) struct conc_string { struct special_string header; STRING str1, str2; @@ -91,7 +91,7 @@ STRING make_saved_string(char *s) { return make_string(strcpy((char *)HALLOC(strlen(s)+1),s)); } -static int digits(n,base) { +static int digits(int n, int base) { int digits = 0; if (n < 0) { ++digits; diff --git a/utilities/Makefile b/utilities/Makefile new file mode 
100644 index 00000000..ea489ac9 --- /dev/null +++ b/utilities/Makefile @@ -0,0 +1,25 @@ +CC= gcc +AR= ar +CFLAGS= -g + +.PHONY: default all clean + +all : utilities.o +prime.o : prime.h +hashcons.o : prime.o hashcons.h +stack.o : stack.h +scc.o : stack.o scc.h +hashtable.o : prime.o hashtable.h + +install: utilities.o +utilities.o: prime.o hashcons.o stack.o scc.o hashtable.o + $(AR) -cr $@ $^ + +%.o: %.c + $(CC) $(CFLAGS) -c $< -o $@ + +.PHONY: clean install + +realclean: clean +clean: + -rm -f *.o diff --git a/utilities/hashcons.c b/utilities/hashcons.c new file mode 100644 index 00000000..79feee5e --- /dev/null +++ b/utilities/hashcons.c @@ -0,0 +1,321 @@ +#include "hashcons.h" +#include +#include +#include +#include "prime.h" + +#define HC_INITIAL_BASE_SIZE 61 +#define MAX_DENSITY 0.5 +#define DOUBLE_SIZE(x) (((x) << 1) + 1) + +/** + * Initializes a table + * @param hc table + * @param capacity new capacity + */ +void hc_initialize(HASH_CONS_TABLE hc, const int capacity) +{ + hc->capacity = capacity; + hc->table = calloc(hc->capacity, sizeof(void *)); + hc->size = 0; +} + +/** + * Finds the candidate index intended to get inserted or searched in table + * @param hc table + * @param item the item looking to be added or removed + * @return + */ +static int hc_candidate_index(HASH_CONS_TABLE hc, void *item) +{ + long hash = hc->hashf(item) & LONG_MAX; + int index = hash % hc->capacity; + int step_size = hash % (hc->capacity - 2) + 1; + + while (true) + { + if (hc->table[index] == NULL || hc->equalf(hc->table[index], item)) + { + return index; + } + + index = (index + step_size) % hc->capacity; + } +} + +/** + * Insert an item into table + * @param hc table + * @param item the item intended to get inserted into the table + */ +static void hc_insert_at(HASH_CONS_TABLE hc, void *item, int index) +{ + hc->table[index] = item; + hc->size++; +} + +/** + * Insert an item into table + * @param hc table + * @param item the item intended to get inserted into the table + */ 
+static void hc_insert(HASH_CONS_TABLE hc, void *item) +{ + int index = hc_candidate_index(hc, item); + + hc_insert_at(hc, item, index); +} + +/** + * Search an item in table + * @param hc table + * @param item the item intended to get searched in the table + * @return possible index of the item + */ +static int hc_search(HASH_CONS_TABLE hc, void *item) +{ + int index = hc_candidate_index(hc, item); + + return index; +} + +/** + * Resizes the table given new capacity + * @param hc table + * @param capacity new capacity + */ +static void hc_resize(HASH_CONS_TABLE hc, const int capacity) +{ + void **old_table = hc->table; + int old_capacity = hc->capacity; + hc_initialize(hc, capacity); + + for (int i = 0; i < old_capacity; i++) + { + void *item = old_table[i]; + if (item != NULL) + { + hc_insert(hc, item); + } + } + + free(old_table); +} + +/** + * Insert an item into table if item is not already in table or just returns the existing item + * @param item the item + * @param temp_size item size + * @param hc table + * @return item just got inserted into the table or existing item + */ +void *hash_cons_get(void *item, size_t temp_size, HASH_CONS_TABLE hc) +{ + if (hc->table == NULL) + { + hc_initialize(hc, HC_INITIAL_BASE_SIZE); + } + + int candidate_index = hc_search(hc, item); + + if (hc->table[candidate_index] != NULL) + { + return hc->table[candidate_index]; + } + + void *result = malloc(temp_size); + memcpy(result, item, temp_size); + + hc_insert_at(hc, result, candidate_index); + + if (hc->size > hc->capacity * MAX_DENSITY) + { + const int new_capacity = next_twin_prime(DOUBLE_SIZE(hc->capacity)); + hc_resize(hc, new_capacity); + } + + return result; +} + +/** + * Hashes hashcons set + * @param untyped InferredSignature + * @return hash integer value + */ +static long hashcons_set_hash(void *untyped) +{ + HASH_CONS_SET set = (HASH_CONS_SET)untyped; + + int i; + long hash = 17; + for (i = 0; i < set->num_elements; i++) hash |= ((long)set->elements[i]); + + return 
hash; +} + +/** + * Equality test for hashcons set + * @param untyped1 untyped hashcons set + * @param untyped2 untyped hashcons set + * @return boolean indicating the result of equality + */ +static bool hashcons_set_equal(void *untyped1, void *untyped2) +{ + HASH_CONS_SET set_a = (HASH_CONS_SET)untyped1; + HASH_CONS_SET set_b = (HASH_CONS_SET)untyped2; + + if (set_a->num_elements != set_b->num_elements) return false; + + int i; + for (i = 0; i < set_a->num_elements; i++) + { + if (set_a->elements[i] != set_b->elements[i]) return false; + } + + return true; +} + +/** + * Used to hold hashconsed sets + */ +static struct hash_cons_table hashcons_set_table = { hashcons_set_hash, hashcons_set_equal }; + +/** + * Take a temporary set and hash cons it, returning the set that results + * NOTE: The elements array will be sorted by address to ensure a canonical representation. + * @param set hashcons set + * @return new hashcons set that includes the item + */ +HASH_CONS_SET new_hash_cons_set(HASH_CONS_SET set) +{ + size_t struct_size = sizeof(struct hash_cons_set) + set->num_elements * sizeof(void *); + HASH_CONS_SET sorted_set = (HASH_CONS_SET)alloca(struct_size); + sorted_set->num_elements = 0; + + int i, j; + for (i = 0; i < set->num_elements; i++) + { + long key = (long) set->elements[i]; + int j = i - 1; + + while (j >= 0 && (long)sorted_set->elements[j] > key) + { + sorted_set->elements[j + 1] = sorted_set->elements[j]; + j--; + } + + sorted_set->elements[j + 1] = set->elements[i]; + sorted_set->num_elements++; + } + + void *memory = hash_cons_get(sorted_set, struct_size, &hashcons_set_table); + return (HASH_CONS_SET)memory; +} + +/** + * Return the empty set + * @return hashconsed empty set + */ +HASH_CONS_SET get_hash_cons_empty_set() +{ + struct hash_cons_set empty_set = (struct hash_cons_set) { 0 }; + + void *memory = hash_cons_get(&empty_set, sizeof(empty_set), &hashcons_set_table); + return (HASH_CONS_SET)memory; +} + +/** + * Adds an element to the hashcons 
set, returning the set that results + * @param item item to be added to the set + * @param set hashcons set + * @return new hashcons set that includes the item + */ +HASH_CONS_SET add_hash_cons_set(void *item, HASH_CONS_SET set) +{ + size_t struct_size = sizeof(struct hash_cons_set) + 1 * sizeof(void *); + HASH_CONS_SET single_element_set = (HASH_CONS_SET)alloca(struct_size); + single_element_set->num_elements = 1; + single_element_set->elements[0] = item; + + return union_hash_const_set(single_element_set, set); +} + +/** + * Unions two hashcons set, returning the set that results + * @param set_a hashcons set A + * @param set_b hashcons set B + * @return new hashcons set that includes the item + */ +HASH_CONS_SET union_hash_const_set(HASH_CONS_SET set_a, HASH_CONS_SET set_b) +{ + int updated_count = set_a->num_elements + set_b->num_elements; + size_t item_size = sizeof(void *); + size_t struct_size = sizeof(struct hash_cons_set) + updated_count * item_size; + HASH_CONS_SET sorted_set = (HASH_CONS_SET)alloca(struct_size); + sorted_set->num_elements = updated_count; + + int i = 0, j = 0, k = 0; + while (i < set_a->num_elements && j < set_b->num_elements) + { + if (set_a->elements[i] == set_b->elements[j]) + { + sorted_set->elements[k] = set_a->elements[i]; + sorted_set->num_elements--; + struct_size -= item_size; + i++; + j++; + } + else if (set_a->elements[i] < set_b->elements[j]) + { + sorted_set->elements[k] = set_a->elements[i]; + i++; + } + else + { + sorted_set->elements[k] = set_b->elements[j]; + j++; + } + + k++; + } + + while (i < set_a->num_elements) sorted_set->elements[k++] = set_a->elements[i++]; + + while (j < set_b->num_elements) sorted_set->elements[k++] = set_b->elements[j++]; + + void *memory = hash_cons_get(sorted_set, struct_size, &hashcons_set_table); + return (HASH_CONS_SET)memory; +} + +/** + * Hash string and returns a hash value + * Source: http://www.cse.yorku.ca/~oz/hash.html + * @param string + * @return integer hash value + */ +long 
hash_string(char *str) +{ + long hash = 5381; + long c; + + while ((c = *str++)) + { + hash = ((hash << 5) + hash) + c; + } + + return hash; +} + +/** + * Combine two hash values into one hash value + * @param hash1 + * @param hash2 + * @return combined hash + */ +long hash_mix(long h1, long h2) +{ + long hash = 17; + hash = hash * 31 + h1; + hash = hash * 31 + h2; + return hash; +} diff --git a/utilities/hashcons.h b/utilities/hashcons.h new file mode 100644 index 00000000..b4acc356 --- /dev/null +++ b/utilities/hashcons.h @@ -0,0 +1,77 @@ +#ifndef HASHCONS_H +#define HASHCONS_H + +#include +#include + +typedef long (*Hash_Cons_Hash)(void *); +typedef bool (*Hash_Cons_Equal)(void *, void *); + +typedef struct hash_cons_table { + Hash_Cons_Hash hashf; + Hash_Cons_Equal equalf; + int size; + int capacity; + void **table; +} * HASH_CONS_TABLE; + +typedef struct hash_cons_set { + int num_elements; + void *elements[]; +} * HASH_CONS_SET; + +/** + * Return the empty set + * @return hashconsed empty set + */ +HASH_CONS_SET get_hash_cons_empty_set(); + +/** + * Take a temporary set and hash cons it, returning the set that results + * NOTE: The elements array will be sorted + * by address to ensure a canonical representation. 
+ * @param set hashcons set + * @return new hashcons set that includes the item + */ +HASH_CONS_SET new_hash_cons_set(HASH_CONS_SET set); + +/** + * Adds an element to the hashcons set, returning the set that results + * @param item item to be added to the set + * @param set hashcons set + * @return new hashcons set that includes the item + */ +HASH_CONS_SET add_hash_cons_set(void *item, HASH_CONS_SET set); + +/** + * Unions two hashcons set, returning the set that results + * @param set_a hashcons set A + * @param set_b hashcons set B + * @return new hashcons set that includes the item + */ +HASH_CONS_SET union_hash_const_set(HASH_CONS_SET set_a, HASH_CONS_SET set_b); + +/** + * Get item if there is one otherwise create one + * @param temp_item it is a temporary or perhaps stack allocated creation of item + * @param temp_size how many bytes it is + * @param hashcons table + */ +void *hash_cons_get(void *temp_item, size_t temp_size, HASH_CONS_TABLE table); + +/** + * Hash string + * @param string + * @return integer hash value + */ +long hash_string(char *str); + +/** + * Combine two hash values into one + * @param hash1 + * @param hash2 + * @return combined hash + */ +long hash_mix(long h1, long h2); + +#endif \ No newline at end of file diff --git a/utilities/hashtable.c b/utilities/hashtable.c new file mode 100644 index 00000000..99d0e324 --- /dev/null +++ b/utilities/hashtable.c @@ -0,0 +1,205 @@ +#include "hashtable.h" +#include +#include +#include +#include +#include "prime.h" + +#define MAX_DENSITY 0.5 +#define DOUBLE_SIZE(x) (((x) << 1) + 1) + +/** + * Finds the candidate index intended to get inserted or searched in hash table + * @param item the item looking to be added or removed + * @param table hash table + * @return candidate index to insert or search for hash entry + */ +static int hash_table_candidate_index(HASH_TABLE* table, void* key) { + long hash = table->hashf(key) & LONG_MAX; + int index = hash % table->capacity; + int step_size = hash % 
(table->capacity - 2) + 1; + + while (true) { + if (table->table[index].key == NULL || + table->equalf(table->table[index].key, key)) { + return index; + } + + index = (index + step_size) % table->capacity; + } +} + +/** + * Insert an item into table + * @param key key to insert at given index + * @param value value to insert at given index + * @param index index to insert the hash entry + * @param item the item intended to get inserted into the table + */ +static void hash_table_insert_at(HASH_TABLE* table, + void* key, + void* value, + const int index) { + table->table[index].key = key; + table->table[index].value = value; + table->size++; +} + +/** + * Searches for hash entry index in the table + * @param item the key of the hash entry + * @return possible index of the hash entry + */ +static int hash_table_search(HASH_TABLE* table, void* key) { + int index = hash_table_candidate_index(table, key); + + return index; +} + +/** + * Resizes the hash table given new larger capacity + * @param capacity new capacity + * @param table hash table + */ +static void hash_table_resize(HASH_TABLE* table, const int capacity) { + HASH_TABLE_ENTRY* old_table = table->table; + int old_capacity = table->capacity; + hash_table_initialize(table, capacity, table->hashf, table->equalf); + + for (int i = 0; i < old_capacity; i++) { + HASH_TABLE_ENTRY item = old_table[i]; + if (item.key != NULL) { + hash_table_add_or_update(table, item.key, item.value); + } + } + + free(old_table); +} + +/** + * Get hash entry value if there is one otherwise returns NULL + * @param key key to lookup item + * @param table hash table + * @return the value of the hash entry given the key or NULL + */ +void* hash_table_get(HASH_TABLE* table, void* key) { + int candidate_index = hash_table_search(table, key); + + if (table->equalf(table->table[candidate_index].key, key)) { + return table->table[candidate_index].value; + } + + return NULL; +} + +/** + * Adds an entry to the hashtable if not exists or updates 
the value if entry + * exists + * @param key hash entry key + * @param value hash entry value + * @param table hash table + */ +void hash_table_add_or_update(HASH_TABLE* table, void* key, void* value) { + if (key == NULL) { + fprintf(stdout, "NULL key is not allowed in hashtable.\n"); + exit(1); + return; + } + + if (table->size + 1 > table->capacity * MAX_DENSITY) { + const int new_capacity = next_twin_prime(DOUBLE_SIZE(table->capacity)); + hash_table_resize(table, new_capacity); + } + + int candidate_index = hash_table_candidate_index(table, key); + + if (table->equalf(table->table[candidate_index].key, key)) { + table->table[candidate_index].value = value; + return; + } + + hash_table_insert_at(table, key, value, candidate_index); +} + +/** + * Initialize a new hashtable + * @param initial_capacity hashtable initial capacity + * @param hashf hash function to hash the key + * @param equalf equality function + */ +void hash_table_initialize(HASH_TABLE* table, + unsigned int initial_capacity, + Hash_Table_Hash hashf, + Hash_Table_Equal equalf) { + table->capacity = next_twin_prime(initial_capacity); + table->hashf = hashf; + table->equalf = equalf; + table->size = 0; + table->table = + (HASH_TABLE_ENTRY*)calloc(table->capacity, sizeof(HASH_TABLE_ENTRY)); +} + +/** + * Removes hash entry if there is one otherwise returns NULL + * @param key key to lookup item + * @param table hash table + * @return boolean indicating if updating the entry's value was successful or + * not + */ +bool hash_table_remove(HASH_TABLE* table, void* key) { + int candidate_index = hash_table_search(table, key); + + if (table->equalf(table->table[candidate_index].key, key)) { + table->table[candidate_index].key = NULL; + table->size--; + return true; + } + + return false; +} + +/** + * Clears the hashtable and removes all the elements + * @param key key to lookup item + * @param table hash table + */ +void hash_table_clear(HASH_TABLE* table) { + table->capacity = 0; + table->size = 0; + 
free(table->table); +} + +/** + * Test whether hash entry with value exists or not in the table + * @param key key to lookup item + * @param table hash table + * @return boolean indicating whether entry with the value exists or not + */ +bool hash_table_contains(HASH_TABLE* table, void* key) { + int candidate_index = hash_table_search(table, key); + + if (table->equalf(table->table[candidate_index].key, key)) { + return true; + } + + return false; +} + +/** + * Generic function that maps void* ptr to hash value + * @param v void* ptr + * @return address of void* ptr used as hash value + */ +long ptr_hashf(void* v) { + return (long)v; +} + +/** + * Generic function that creates void* equality + * @param v1 void* ptr1 + * @param v1 void* ptr2 + * @return boolean indicating whether two ptrs are equal + */ +bool ptr_equalf(void* v1, void* v2) { + return v1 == v2; +} diff --git a/utilities/hashtable.h b/utilities/hashtable.h new file mode 100644 index 00000000..8d701cd5 --- /dev/null +++ b/utilities/hashtable.h @@ -0,0 +1,95 @@ +#ifndef HASHTABLE_H +#define HASHTABLE_H + +#include +#include +#include + +#define INT2VOIDP(i) (void*)(uintptr_t)(i) +#define VOIDP2INT(i) (int)(uintptr_t)(i) + +typedef long (*Hash_Table_Hash)(void*); +typedef bool (*Hash_Table_Equal)(void*, void*); + +typedef struct hash_table_entry { + void* key; + void* value; +} HASH_TABLE_ENTRY; + +typedef struct hash_table { + Hash_Table_Hash hashf; + Hash_Table_Equal equalf; + int size; + int capacity; + HASH_TABLE_ENTRY* table; +} HASH_TABLE; + +/** + * Initialize a new hashtable + * @param initial_capacity hashtable initial capacity + * @param hashf hash function to hash the key + * @param equalf equality function + * @return new hashcons set that includes the item + */ +void hash_table_initialize(HASH_TABLE* table, + unsigned int initial_capacity, + Hash_Table_Hash hashf, + Hash_Table_Equal equalf); + +/** + * Adds an entry to the hashtable if not exists or updates the value if entry + * exists + * 
@param key hash entry key + * @param value hash entry value + * @param table hash table + */ +void hash_table_add_or_update(HASH_TABLE* table, void* key, void* value); + +/** + * Get hash entry value if there is one otherwise returns NULL + * @param key key to lookup item + * @param table hash table + * @return the value of the hash entry given the key or NULL + */ +void* hash_table_get(HASH_TABLE* table, void* key); + +/** + * Removes hash entry if there is one otherwise returns NULL + * @param key key to lookup item + * @param table hash table + * @return boolean indicating if updating the entry's value was successful or + * not + */ +bool hash_table_remove(HASH_TABLE* table, void* key); + +/** + * Clears the hashtable and removes all the elements + * @param key key to lookup item + * @param table hash table + */ +void hash_table_clear(HASH_TABLE* table); + +/** + * Test whether hash entry with value exists or not in the table + * @param key key to lookup item + * @param table hash table + * @return boolean indicating whether entry with the value exists or not + */ +bool hash_table_contains(HASH_TABLE* table, void* key); + +/** + * Generic function that maps void* ptr to hash value + * @param v void* ptr + * @return address of void* ptr used as hash value + */ +long ptr_hashf(void* v); + +/** + * Generic function that creates void* equality + * @param v1 void* ptr1 + * @param v1 void* ptr2 + * @return boolean indicating whether two ptrs are equal + */ +bool ptr_equalf(void* v1, void* v2); + +#endif diff --git a/utilities/prime.c b/utilities/prime.c new file mode 100644 index 00000000..48d12d31 --- /dev/null +++ b/utilities/prime.c @@ -0,0 +1,96 @@ +#include "prime.h" +#include +#include "stdlib.h" +#include "string.h" + +#define INITIAL_TABLE_SIZE 4973 +#define DOUBLE_SIZE(x) (((x) << 1) + 1) + +typedef struct +{ + bool *array; + unsigned int size; +} PRIMES; + +/** + * Holds the dynamically allocated array and its size + */ +static PRIMES primes = {NULL, 0}; + 
+/** + * Create a boolean array "prime[0..n]" and initialize + * all entries as true. A value in prime[i] will + * finally be false if i is not a prime, else true. + * @param n size of the lookup array + */ +static void sieve_of_eratosthenes(int n) +{ + primes.size = n; + + size_t bytes = n * sizeof(bool); + if (primes.array == NULL) + { + primes.array = malloc(bytes); + } + else + { + primes.array = realloc(primes.array, bytes); + } + + memset(primes.array, true, bytes); + + primes.array[0] = false; // 0 is not a prime + primes.array[1] = false; // 1 is not a prime + + int i, j; + for (i = 2; i * i < n; i++) + { + // If primes[p] is not changed, then it is a prime + if (primes.array[i] == true) + { + // Update all multiples of p + for (j = i * i; j < n; j += i) + { + primes.array[j] = false; + } + } + } +} + +/** + * Return the next prime number n greater than or equal to the argument + * such that n - 2 is also prime + * @param p lower bound prime number + * @return the larger member of the resulting twin prime pair + */ +int next_twin_prime(int p) +{ + // If array size is not enough then resize the array + if (p >= primes.size) + { + int new_size = DOUBLE_SIZE(primes.size + INITIAL_TABLE_SIZE); + + // Resized array is also not enough + if (new_size <= p) + { + new_size = DOUBLE_SIZE(p); + } + + sieve_of_eratosthenes(new_size); + } + + while (true) + { + int i; + for (i = p; i < primes.size; i++) + { + if (primes.array[i] && primes.array[i - 2]) + { + return i; + } + } + + // Resize the prime array and try again + sieve_of_eratosthenes(DOUBLE_SIZE(primes.size)); + } +} diff --git a/utilities/prime.h b/utilities/prime.h new file mode 100644 index 00000000..c6959e78 --- /dev/null +++ b/utilities/prime.h @@ -0,0 +1,12 @@ +#ifndef PRIME_H +#define PRIME_H + +/** + * Return the next prime number n greater than or equal to the argument + * such that n - 2 is also prime + * @param x lower bound prime number + * @return the larger member of the resulting twin prime pair + */ +int next_twin_prime(int x); + +#endif \ No
newline at end of file diff --git a/utilities/scc.c b/utilities/scc.c new file mode 100644 index 00000000..3a339f5c --- /dev/null +++ b/utilities/scc.c @@ -0,0 +1,386 @@ +/** + * Kosaraju's algorithm implementation which is a linear time algorithm + * to find the strongly connected components of a directed graph. + * https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm + */ + +#include "scc.h" +#include +#include +#include +#include +#include +#include +#include "hashtable.h" +#include "stack.h" + +/** + * Given a graph and a vertex, returns the internal index + * @param graph SCC graph + * @param v vertex to lookup + * @return int corresponding internal index of vertex + */ +static int get_vertex_index_from_ptr(SccGraph* graph, void* v) { + if (!hash_table_contains(graph->vertices_ptr_to_index_map, v)) { + fprintf(stderr, "Failed to find vertex ptr %d in the vertex map\n", + VOIDP2INT(v)); + exit(1); + return -1; + } + + int index = VOIDP2INT(hash_table_get(graph->vertices_ptr_to_index_map, v)); + + if (index < 0 || index >= graph->num_vertices) { + fprintf(stderr, "Unexpected index %d was retrieved from the vertex map\n", + index); + exit(1); + return -1; + } + + return index; +} + +/** + * @brief Internal utility function that checks whether edge exists + * @param graph pointer to graph + * @param source index of source + * @param sink index of sink + * @return boolean indicating the edge between source and sink + */ +static bool contains_edge(SccGraph* graph, int source, int sink) { + return graph->adjacency_matrix[source * graph->num_vertices + sink]; +} + +/** + * Populates the neighbors for each vertex + * @param graph SCC graph + * @return vertices array + */ +static Vertex** collect_neighbors(SccGraph* graph) { + int n = graph->num_vertices; + Vertex** vertices = (Vertex**)calloc(n, sizeof(Vertex*)); + + int i, j; + for (i = 0; i < n; i++) { + for (j = 0; j < n; j++) { + if (contains_edge(graph, i, j)) { + Vertex* new_vertex = (Vertex*)malloc(sizeof(Vertex)); 
+ new_vertex->value = j; + new_vertex->next = vertices[i]; + vertices[i] = new_vertex; + } + } + } + + return vertices; +} + +/** + * Give a graph and internal index it returns the vertex + * @param graph SCC graph + * @param index internal index + * @return int corresponding vertex + */ +static void* get_vertex_ptr_from_int(SccGraph* graph, int index) { + if (index < 0 || index >= graph->num_vertices) { + fprintf(stderr, + "Unexpected index %d was retrieved requested the vertex map\n", + index); + exit(1); + return NULL; + } + + void* ptr = graph->vertices_index_to_ptr_map[index]; + + return ptr; +} + +/** + * @brief Create graph given number of vertices implemented using adjacency + * @return pointer to allocated graph + */ +void scc_graph_initialize(SccGraph* graph, int num_vertices) { + graph->num_vertices = num_vertices; + graph->adjacency_matrix = + (bool*)calloc(num_vertices * num_vertices, sizeof(bool)); + + // Create a map to lookup from ptr to index + graph->vertices_ptr_to_index_map = (HASH_TABLE*)malloc(sizeof(HASH_TABLE)); + hash_table_initialize(graph->vertices_ptr_to_index_map, num_vertices, + ptr_hashf, ptr_equalf); + + // Create a map to lookup from index to ptr + graph->vertices_index_to_ptr_map = + (void**)malloc(sizeof(void*) * num_vertices); + + // Index of vertex starts from 0 + graph->next_vertex_index = 0; +} + +/** + * @brief Add edge method of graph to be used internally + * @param graph pointer to graph + * @param source index of source + * @param sink index of sink + */ +static void scc_graph_add_edge_internal(SccGraph* graph, int source, int sink) { + graph->adjacency_matrix[source * graph->num_vertices + sink] = true; +} + +/** + * @brief Add edge method of graph + * @param graph pointer to graph + * @param source source ptr + * @param sink sink ptr + */ +void scc_graph_add_edge(SccGraph* graph, void* source, void* sink) { + int source_index = get_vertex_index_from_ptr(graph, source); + int sink_index = get_vertex_index_from_ptr(graph, 
sink); + + scc_graph_add_edge_internal(graph, source_index, sink_index); +} + +/** + * @brief Deallocate graph vertex + * @param vertex vertex linked list node + */ +static void graph_destroy_vertex(Vertex* vertex) { + if (vertex == NULL) + return; + + if (vertex->next != NULL) + graph_destroy_vertex(vertex->next); + + free(vertex); +} + +/** + * @brief Deallocate graph + * @param graph pointer to graph + */ +void scc_graph_destroy(SccGraph* graph) { + int i; + for (i = 0; i < graph->num_vertices; i++) { + graph_destroy_vertex(graph->neighbors[i]); + } + + hash_table_clear(graph->vertices_ptr_to_index_map); + + free(graph->vertices_ptr_to_index_map); + free(graph->vertices_index_to_ptr_map); +} + +/** + * @brief DFS traversal of graph + * @param graph pointer to graph + * @param stack pointer to stack + * @param visited visited boolean array + * @param v vertex + */ +static void dfs(SccGraph* graph, LinkedStack** stack, bool* visited, int v) { + visited[v] = true; + Vertex* neighbors = graph->neighbors[v]; + while (neighbors != NULL) { + if (!visited[neighbors->value]) { + dfs(graph, stack, visited, neighbors->value); + } + neighbors = neighbors->next; + } + stack_push(stack, INT2VOIDP(v)); +} + +/** + * @brief Builds reverse of graph + * @param graph pointer to graph + * @return reversed graph + */ +static SccGraph* transpose_graph(SccGraph* graph, SccGraph* reversed_graph) { + scc_graph_initialize(reversed_graph, graph->num_vertices); + + int i; + for (i = 0; i < graph->num_vertices; i++) { + Vertex* neighbors = graph->neighbors[i]; + while (neighbors != NULL) { + scc_graph_add_edge_internal(reversed_graph, neighbors->value, i); + neighbors = neighbors->next; + } + } + + reversed_graph->neighbors = collect_neighbors(reversed_graph); + return reversed_graph; +} + +/** + * @brief Add vertex to the graph + * @param graph pointer to graph + * @param v pointer of vertex + */ +void scc_graph_add_vertex(SccGraph* graph, void* v) { + if (graph->next_vertex_index >= 
graph->num_vertices) { + fprintf(stderr, "Expected %d vertices to be added\n", graph->num_vertices); + exit(1); + return; + } + + if (hash_table_contains(graph->vertices_ptr_to_index_map, v)) { + fprintf(stderr, "Graph already contains the vertex\n"); + exit(1); + return; + } + + // Associate ptr -> index + hash_table_add_or_update(graph->vertices_ptr_to_index_map, v, + INT2VOIDP(graph->next_vertex_index)); + + // Associate index -> ptr + graph->vertices_index_to_ptr_map[graph->next_vertex_index] = v; + + graph->next_vertex_index++; +} + +/** + * @brief Use dfs to list a set of vertices dfs_and_print from a vertex v in + * reversed graph + * @param graph pointer to graph + * @param visited boolean array indicating whether index has been visited or not + * @param deleted boolean array indicating whether index has been popped or not + * @param v vertex + * @param result_array result int array + * @param result_count result counter + */ +static void dfs_collect_scc(SccGraph* graph, + bool* visited, + bool* deleted, + int v, + int* result_array, + int* result_count) { + result_array[(*result_count)++] = v; + visited[v] = true; + deleted[v] = true; + Vertex* arcs = graph->neighbors[v]; // the adjacent list of vertex v + while (arcs != NULL) { + int u = arcs->value; + if (!visited[u] && !deleted[u]) { + dfs_collect_scc(graph, visited, deleted, u, result_array, result_count); + } + arcs = arcs->next; + } +} + +/** + * @brief Kosaraju logic + * @param graph pointer to graph + */ +SCC_COMPONENTS* scc_graph_components(SccGraph* graph) { + if (graph == NULL || graph->num_vertices <= 0) { + fprintf(stderr, + "Graph parameter passed to Kosaraju method is not valid.\n"); + exit(1); + return NULL; + } + + int i, j, k; + int n = graph->num_vertices; + + // Run transitive closure of the graph + bool changed; + int count_transitive_edges_added = 0; + do { + changed = false; + for (i = 0; i < n; i++) { + for (j = 0; j < n; j++) { + for (k = 0; k < n; k++) { + if (contains_edge(graph, 
i, j) && contains_edge(graph, j, k) && + !contains_edge(graph, i, k)) { + scc_graph_add_edge_internal(graph, i, k); + changed = true; + count_transitive_edges_added++; + } + } + } + } + } while (changed); + + if (count_transitive_edges_added > 0) { + printf( + "Graph provided to SCC utility has not gone through " + "transitive closure (%d new transitive edges have been added)\n", + count_transitive_edges_added); + } + + graph->neighbors = collect_neighbors(graph); + + LinkedStack* stack; + stack_create(&stack); + + size_t visited_size = n * sizeof(bool); + bool* visited = (bool*)alloca(visited_size); + memset(visited, false, visited_size); + for (i = 0; i < n; i++) { + if (!visited[i]) { + dfs(graph, &stack, visited, i); + } + } + + SccGraph reversed_graph; + transpose_graph(graph, &reversed_graph); + + bool* deleted = (bool*)alloca(n * sizeof(bool)); + memset(deleted, false, n * sizeof(bool)); + + // Integer array to hold on to the size of each component indexed by component + // index + int* components_count = (int*)alloca(n * sizeof(int)); + memset(components_count, 0, n * sizeof(int)); + + // Integer pointer array to hold on to items in each component + int* components_array = (int*)malloc(n * n * sizeof(int*)); + // Number of all component + int num_components = 0; + + while (!stack_is_empty(&stack)) { + void* temp; + bool any = stack_pop(&stack, &temp); + int v = VOIDP2INT(temp); + if (any && !deleted[v]) { + memset(visited, false, + n * sizeof(bool)); // mark all vertices of reverse as not visited + + dfs_collect_scc(&reversed_graph, visited, deleted, v, + &components_array[num_components * n], + &components_count[num_components]); + + num_components++; + } + } + + // Collect components as vector of integers + SCC_COMPONENTS* result = (SCC_COMPONENTS*)malloc(sizeof(SCC_COMPONENTS)); + result->length = num_components; + result->array = + (SCC_COMPONENT**)malloc(num_components * sizeof(SCC_COMPONENT*)); + + for (i = 0; i < num_components; i++) { + 
SCC_COMPONENT* comp = (SCC_COMPONENT*)malloc(sizeof(SCC_COMPONENT)); + comp->length = components_count[i]; + comp->array = (void**)malloc(components_count[i] * sizeof(void*)); + + for (j = 0; j < components_count[i]; j++) { + comp->array[j] = + get_vertex_ptr_from_int(graph, components_array[i * n + j]); + } + + result->array[i] = comp; + } + + // De-allocate the temp array + free(components_array); + + // Free memory allocated via malloc + scc_graph_destroy(&reversed_graph); + + // De-allocate the stack + stack_destroy(&stack); + + return result; +} diff --git a/utilities/scc.h b/utilities/scc.h new file mode 100644 index 00000000..99923473 --- /dev/null +++ b/utilities/scc.h @@ -0,0 +1,72 @@ +#ifndef SCC_H +#define SCC_H + +#include +#include +#include "hashtable.h" + +typedef struct scc_component { + void** array; + int length; +} SCC_COMPONENT; + +typedef struct scc_components { + SCC_COMPONENT** array; + int length; +} SCC_COMPONENTS; + +// Vertex linked list node +struct vertex { + int value; + struct vertex* next; +}; + +typedef struct vertex Vertex; + +struct scc_graph { + int num_vertices; // Number of vertices in the graph + bool* adjacency_matrix; // adjacency list of the edges + + HASH_TABLE* vertices_ptr_to_index_map; // Map of void* to int index + void** vertices_index_to_ptr_map; // Map of int index to void* + + int next_vertex_index; // Index of the next vertex + Vertex** neighbors; // O(1) way of getting neighbors +}; + +typedef struct scc_graph SccGraph; + +/** + * @brief Create graph given number of vertices implemented using adjacency + * @return ptr to allocated graph + */ +void scc_graph_initialize(SccGraph* graph, int num_vertices); + +/** + * @brief Deallocate graph + * @param graph ptr to graph + */ +void scc_graph_destroy(SccGraph* graph); + +/** + * @brief Add vertex to the graph + * @param graph ptr to SCC graph + * @param v vertex to be added to the graph + */ +void scc_graph_add_vertex(SccGraph* graph, void* v); + +/** + * @brief Add 
edge method of graph + * @param graph ptr to SCC graph + * @param source source of the edge + * @param sink sink of the edge + */ +void scc_graph_add_edge(SccGraph* graph, void* source, void* sink); + +/** + * @brief Finds strongly connected components of a given graph + * @param graph ptr to SCC graph + */ +SCC_COMPONENTS* scc_graph_components(SccGraph* graph); + +#endif diff --git a/utilities/stack.c b/utilities/stack.c new file mode 100644 index 00000000..2afaf8f5 --- /dev/null +++ b/utilities/stack.c @@ -0,0 +1,62 @@ +#include "stack.h" +#include +#include +#include + +/** + * @brief Create stack using endogenous linked list + * @param stack pointer to a stack pointer + */ +void stack_create(LinkedStack** stack) { + *stack = NULL; +} + +/** + * @brief Push method of stack + * @param stack pointer to a stack pointer + * @param value value to push to stack + */ +void stack_push(LinkedStack** stack, void* value) { + LinkedStack* item = (LinkedStack*)malloc(sizeof(LinkedStack)); + item->value = value; + item->next = *stack; + *stack = item; +} + +/** + * @brief Pop method of stack + * @param stack pointer to a stack pointer + * @param value that has just been popped from the stack + * @return boolean indicating whether popping from the stack was successful or + * not + */ +bool stack_pop(LinkedStack** stack, void** v) { + LinkedStack* old = *stack; + if (old == NULL) + return false; + + *v = old->value; + *stack = old->next; + free(old); + return true; +} + +/** + * @brief Checks whether stack is empty or not + * @param stack pointer to a stack pointer + * @return boolean indicating whether stack is empty or not + */ +bool stack_is_empty(LinkedStack** stack) { + return *stack == NULL; +} + +/** + * @brief Frees the memory allocated for the stack and deallocates each + * individual element of stack + * @param stack pointer to a stack pointer + */ +void stack_destroy(LinkedStack** stack) { + void* v; + while (stack_pop(stack, &v)) + ; +} diff --git 
a/utilities/stack.h b/utilities/stack.h new file mode 100644 index 00000000..e13221e9 --- /dev/null +++ b/utilities/stack.h @@ -0,0 +1,51 @@ + +#ifndef LINKED_STACK_H +#define LINKED_STACK_H + +#include +#include + +struct linked_stack { + void* value; + struct linked_stack* next; +}; + +typedef struct linked_stack LinkedStack; + +/** + * @brief Create stack using endogenous linked list + * @param stack pointer to a stack pointer + */ +void stack_create(LinkedStack** stack); + +/** + * @brief Push method of stack + * @param stack pointer to a stack pointer + * @param value value to push to stack + */ +void stack_push(LinkedStack** stack, void* value); + +/** + * @brief Pop method of stack + * @param stack pointer to a stack pointer + * @param value that has just been popped from the stack + * @return boolean indicating whether popping from the stack was successful or + * not + */ +bool stack_pop(LinkedStack** stack, void** v); + +/** + * @brief Checks whether stack is empty or not + * @param stack pointer to a stack pointer + * @return boolean indicating whether stack is empty or not + */ +bool stack_is_empty(LinkedStack** stack); + +/** + * @brief Frees the memory allocated for the stack and deallocates each + * individual element of stack + * @param stack pointer to a stack pointer + */ +void stack_destroy(LinkedStack** stack); + +#endif diff --git a/utilities/tests/Makefile b/utilities/tests/Makefile new file mode 100644 index 00000000..75a2d6d0 --- /dev/null +++ b/utilities/tests/Makefile @@ -0,0 +1,23 @@ +CC=gcc +LINTER_FLAGS=-Wno-int-to-pointer-cast -Wno-pointer-to-int-cast +EXTERNAL_DEPENDENCIES=utilities.o +CFLAGS=-g ${LINTER_FLAGS} -I. -I.. 
+LINKFLAGS=-lm +TESTS=prime_test.o hashcons_test.o hashtable_test.o stack_test.o scc_test.o + +all: build test clean +build: ${TESTS} $(addprefix ../, ${EXTERNAL_DEPENDENCIES}) common.o driver.o + $(CC) $(CFLAGS) $^ -o test.out ${LINKFLAGS} + +%_test.o: %_test.h %_test.c + $(CC) -c $(CFLAGS) $*_test.c ${LINKFLAGS} + +%.o: ${TESTS} %.c + $(CC) -c $(CFLAGS) $*.c + +test: build + @./test.out + +realclean: clean +clean: + @rm -rf *.o *.out diff --git a/utilities/tests/common.c b/utilities/tests/common.c new file mode 100644 index 00000000..2df64996 --- /dev/null +++ b/utilities/tests/common.c @@ -0,0 +1,27 @@ +#include "common.h" +#include +#include +#include + +void run_tests(char* testname, TEST tests[], int n) { + printf("Found %d tests in %s:\n", n, testname); + + int i; + for (i = 0; i < n; i++) { + printf("\t>>> Starting to run `%s`\n", tests[i].name); + + time_t start, end; + double diff; + + time(&start); + + tests[i].signature(); + + time(&end); + diff = difftime(end, start); + printf("\t<<< Successfully finished running `%s` (%.2lf seconds)\n", tests[i].name, + diff); + } + + printf("\n"); +} diff --git a/utilities/tests/common.h b/utilities/tests/common.h new file mode 100644 index 00000000..be6ef6d1 --- /dev/null +++ b/utilities/tests/common.h @@ -0,0 +1,20 @@ +#ifndef COMMON_H +#define COMMON_H + +#include "stdbool.h" +#include "assert.h" + +#define TOTAL_COUNT ((int)1<<10) + +#define _ASSERT_EXPR(message, test) assert(((message), test)) + +typedef void (*Test_Signature)(); + +typedef struct test { + Test_Signature signature; + char* name; +} TEST; + +void run_tests(char* testname, TEST tests[], int n); + +#endif diff --git a/utilities/tests/driver.c b/utilities/tests/driver.c new file mode 100644 index 00000000..be0ba696 --- /dev/null +++ b/utilities/tests/driver.c @@ -0,0 +1,19 @@ +#include +#include "common.h" +#include "hashcons_test.h" +#include "hashtable_test.h" +#include "prime_test.h" +#include "scc_test.h" +#include "stack_test.h" + +int main() { 
+ printf("Test regression size: %d\n", TOTAL_COUNT); + + test_prime(); + test_hash_cons(); + test_hash_table(); + test_stack(); + test_scc(); + + printf("Finished running all tests successfully\n"); +} \ No newline at end of file diff --git a/utilities/tests/hashcons_test.c b/utilities/tests/hashcons_test.c new file mode 100644 index 00000000..a328288d --- /dev/null +++ b/utilities/tests/hashcons_test.c @@ -0,0 +1,180 @@ +#include "../hashcons.h" +#include "../hashtable.h" + +#include +#include +#include +#include +#include "common.h" +#include "assert.h" + +#define PRIME_MODULO 977 + +static struct hash_cons_table integer_table = {&ptr_hashf, &ptr_equalf, 0, 0}; + +static int* new_hash_cons_integer(int n) { + return hash_cons_get(&n, sizeof(int), &integer_table); +} + +/** + * Validating consistency of hashcons by storing and retrieving integers + */ +static void test_integer_table_consistency() { + char buffer[256]; + int i, j; + for (i = 0, j = 0; i < TOTAL_COUNT; ++i, j = (j + 1) % PRIME_MODULO) { + sprintf(buffer, "integer for %d", i); + _ASSERT_EXPR(buffer, *new_hash_cons_integer(j) == j); + } +} + +/** + * Validating empty hashcons set is unique and has a size of zero + */ +static void test_integer_set_empty_consistency() { + HASH_CONS_SET set = get_hash_cons_empty_set(); + + _ASSERT_EXPR("empty set should have size of 0", set->num_elements == 0); + _ASSERT_EXPR("empty set should be unique", set == get_hash_cons_empty_set()); +} + +/** + * Validating add_hash_cons_set, making sure size is getting incremented and + * values are unique + */ +static void test_integer_set_add_consistency() { + char buffer[256]; + HASH_CONS_SET set = get_hash_cons_empty_set(); + + long i, j; + for (i = 0, j = 0; i < TOTAL_COUNT; ++i, j = (j + 1) % PRIME_MODULO) { + set = add_hash_cons_set((long*)j, set); + + sprintf(buffer, "size should never go pass %d but it is %d", PRIME_MODULO, + set->num_elements); + _ASSERT_EXPR(buffer, set->num_elements <= PRIME_MODULO); + + 
sprintf(buffer, "size contain the element %ld at correct index", j); + _ASSERT_EXPR(buffer, (long)set->elements[j] == j); + } +} + +/** + * Validating union_hash_const_set, making sure size is correct and values are + * sorted without any duplicates + */ +static void test_integer_set_union_consistency() { + int size = 1000; + char buffer[256]; + HASH_CONS_SET set1 = get_hash_cons_empty_set(); + HASH_CONS_SET set2 = get_hash_cons_empty_set(); + + int i; + for (i = 0; i < (size >> 1); i++) { + set1 = add_hash_cons_set(INT2VOIDP(i), set1); + } + + for (i = 0; i < size; i++) { + set2 = add_hash_cons_set(INT2VOIDP(i), set2); + } + + HASH_CONS_SET result_set = union_hash_const_set(set1, set2); + + sprintf(buffer, "size contain the element %d at correct index", i); + _ASSERT_EXPR(buffer, result_set->num_elements == size); + + for (i = 0; i < PRIME_MODULO; i++) { + sprintf(buffer, "size contain the element %d at correct index", i); + _ASSERT_EXPR(buffer, VOIDP2INT(result_set->elements[i]) == i); + } +} + +/** + * Validating new_hash_cons_set, making sure size is correct and values are + * sorted + */ +static void test_integer_set_new_consistency() { + int size = 1000; + char buffer[256]; + HASH_CONS_SET set1 = get_hash_cons_empty_set(); + HASH_CONS_SET set2 = get_hash_cons_empty_set(); + + size_t struct_size = sizeof(struct hash_cons_set) + 2 * size * sizeof(void*); + HASH_CONS_SET set = (HASH_CONS_SET)alloca(struct_size); + set->num_elements = size; + + int i; + for (i = 0; i < size; i++) { + set->elements[i] = INT2VOIDP(size - i - 1); + } + + HASH_CONS_SET sorted_set = new_hash_cons_set(set); + _ASSERT_EXPR("size should be correct", sorted_set->num_elements == size); + + for (i = 0; i < size; i++) { + sprintf(buffer, "elements should be in sorted order %d != %d", + VOIDP2INT(sorted_set->elements[i]), i); + _ASSERT_EXPR(buffer, VOIDP2INT(sorted_set->elements[i]) == i); + } +} + +typedef struct dummy { + int key; + char text[]; +} * DUMMY; + +static long 
hashcons_dummy_hash(void* untyped) { + DUMMY item = (DUMMY)untyped; + return hash_mix(item->key, hash_string(item->text)); +} + +static bool hashcons_dummy_equals(void* untyped_a, void* untyped_b) { + if (untyped_a == NULL || untyped_b == NULL) { + return false; + } + + DUMMY item_a = (DUMMY)untyped_a; + DUMMY item_b = (DUMMY)untyped_b; + + return item_a->key == item_b->key && !strcmp(item_a->text, item_b->text); +} + +static struct hash_cons_table dummy_table = {&hashcons_dummy_hash, + &hashcons_dummy_equals, 0, 0}; + +static DUMMY new_hash_cons_dummy(int key, char text[]) { + size_t struct_size = sizeof(key) + (strlen(text) + 1) * sizeof(char); + DUMMY dummy = alloca(struct_size); + dummy->key = key; + strcpy(dummy->text, text); + + return hash_cons_get(dummy, struct_size, &dummy_table); +} + +/** + * Validating hashcons works with a struct + */ +static void test_dummy_table_consistency() { + char buffer[256]; + int i, j; + for (i = 0, j = 0; i < TOTAL_COUNT; ++i, j = (j + 1) % PRIME_MODULO) { + sprintf(buffer, "dummy for %d", i); + _ASSERT_EXPR(buffer, new_hash_cons_dummy(j, buffer) == + new_hash_cons_dummy(j, buffer)); + } +} + +/** + * Run tests in a sequence + */ +void test_hash_cons() { + TEST tests[] = { + {test_integer_set_new_consistency, "hashcons new"}, + {test_integer_table_consistency, "int hashcons table consistency"}, + {test_dummy_table_consistency, "dummy struct hashcons table consistency"}, + {test_integer_set_empty_consistency, "empty hashcons table consistency"}, + {test_integer_set_add_consistency, "hashcons add"}, + {test_integer_set_union_consistency, "hashcons union"}}; + + run_tests("hashcons", tests, 6); +} \ No newline at end of file diff --git a/utilities/tests/hashcons_test.h b/utilities/tests/hashcons_test.h new file mode 100644 index 00000000..f4cc5bf9 --- /dev/null +++ b/utilities/tests/hashcons_test.h @@ -0,0 +1,9 @@ +#ifndef HASHCONS_TEST_H +#define HASHCONS_TEST_H + +/** + * Hashcons unit tests + */ +void test_hash_cons(); + 
+#endif \ No newline at end of file diff --git a/utilities/tests/hashtable_test.c b/utilities/tests/hashtable_test.c new file mode 100644 index 00000000..fb19e498 --- /dev/null +++ b/utilities/tests/hashtable_test.c @@ -0,0 +1,109 @@ +#include "../hashtable.h" + +#include "common.h" +#include "assert.h" + +/** + * Validating consistency of hashtable by storing and retrieving integers + */ +void test_hash_table_consistency() { + HASH_TABLE table; + hash_table_initialize(&table, 10, ptr_hashf, ptr_equalf); + + // Ensure size is initially zero + _ASSERT_EXPR("size of table should initially be zero", table.size == 0); + + // Add items to the hashtable and ensure they exist + int i; + for (i = 1; i <= TOTAL_COUNT; i++) { + // Should be first time seeing this key + _ASSERT_EXPR("hashtable should not initially contain the number", + !hash_table_contains(&table, INT2VOIDP(i))); + + // Add entry (i,i+1) to the hash table + hash_table_add_or_update(&table, INT2VOIDP(i), INT2VOIDP(i + 1)); + + // Ensure hash table now holds the (i,i+1) + _ASSERT_EXPR("hashtable should contain the number after adding", + hash_table_contains(&table, INT2VOIDP(i))); + _ASSERT_EXPR("hashtable value should be as expected", + VOIDP2INT(hash_table_get(&table, INT2VOIDP(i))) == i + 1); + } + + // Size should be TOTAL_COUNT + _ASSERT_EXPR("should table size should be > 0 and expected number", + table.size == TOTAL_COUNT); + + // Update hash table value and make sure its updates + for (i = 1; i <= TOTAL_COUNT; i++) { + // Should not be first time seeing this key + _ASSERT_EXPR("hashtable should contain the number already added", + hash_table_contains(&table, INT2VOIDP(i))); + + // Update entry (i,i+2) in the hash table + hash_table_add_or_update(&table, INT2VOIDP(i), INT2VOIDP(i + 2)); + + // Ensure hash table now holds the (i,i+2) + _ASSERT_EXPR("after update entry given key should still be in table", + hash_table_contains(&table, INT2VOIDP(i))); + _ASSERT_EXPR("updated value should be in table", + 
VOIDP2INT(hash_table_get(&table, INT2VOIDP(i))) == i + 2); + } + + // Size should still be TOTAL_COUNT + _ASSERT_EXPR("hashtable size should still be unchanged after update", + table.size == TOTAL_COUNT); + + // Remove hash table entry and make sure its updates + for (i = 1; i <= TOTAL_COUNT; i++) { + // Should not be first time seeing this key + _ASSERT_EXPR( + "contains should return true for element expected to be in the table", + hash_table_contains(&table, INT2VOIDP(i))); + + // Remove entry with key i from the hash table + _ASSERT_EXPR( + "remove should return true because table should have contained this " + "element", + hash_table_remove(&table, INT2VOIDP(i))); + + // Remove -i key from the hash table + _ASSERT_EXPR( + "remove should return false because table should not have contained " + "this " + "element", + !hash_table_remove(&table, INT2VOIDP(-i))); + + // Ensure hash table now holds the (i, i+1) + _ASSERT_EXPR("hashtable should not contain element that was just removed", + !hash_table_contains(&table, INT2VOIDP(i))); + _ASSERT_EXPR("hashtable size should be correct after removing an element", + table.size == TOTAL_COUNT - i); + } + + // Size should be 0 + _ASSERT_EXPR( + "hashtable size should be zero after removing elements one by one", + table.size == 0); + + // Ensure other items do not exist in the hash table + for (i = 1; i <= TOTAL_COUNT; i++) { + // Ensure hash table doesn't hold -i key + _ASSERT_EXPR("hashtable should not contain value that has not been added", + !hash_table_contains(&table, INT2VOIDP(-i))); + } + + hash_table_clear(&table); + + // Ensure size is cleared to zero + _ASSERT_EXPR("hash table should be empty after clear", table.size == 0); +} + +/** + * Run tests in a sequence + */ +void test_hash_table() { + TEST tests[] = {{test_hash_table_consistency, "hashtable consistency"}}; + + run_tests("hashtable", tests, 1); +} diff --git a/utilities/tests/hashtable_test.h b/utilities/tests/hashtable_test.h new file mode 100644 index 
00000000..030ee620 --- /dev/null +++ b/utilities/tests/hashtable_test.h @@ -0,0 +1,9 @@ +#ifndef HASHTABLE_TEST_H +#define HASHTABLE_TEST_H + +/** + * Hashtable unit tests + */ +void test_hash_table(); + +#endif diff --git a/utilities/tests/prime_test.c b/utilities/tests/prime_test.c new file mode 100644 index 00000000..1f678181 --- /dev/null +++ b/utilities/tests/prime_test.c @@ -0,0 +1,25 @@ +#include "../prime.h" +#include "common.h" +#include "assert.h" + +static void test_few_twin_primes() { + _ASSERT_EXPR("next twin prime of 1 should be", next_twin_prime(1) == 5); + _ASSERT_EXPR("next twin prime of 2 should be", next_twin_prime(2) == 5); + _ASSERT_EXPR("next twin prime of 3 should be", next_twin_prime(3) == 5); + _ASSERT_EXPR("next twin prime of 4 should be", next_twin_prime(4) == 5); + _ASSERT_EXPR("next twin prime of 5 should be", next_twin_prime(5) == 5); + _ASSERT_EXPR("next twin prime of 6 should be", next_twin_prime(6) == 7); + _ASSERT_EXPR("next twin prime of 7 should be", next_twin_prime(7) == 7); + _ASSERT_EXPR("next twin prime of 8 should be", next_twin_prime(8) == 13); + _ASSERT_EXPR("next twin prime of 9 should be", next_twin_prime(9) == 13); + _ASSERT_EXPR("next twin prime of 10 should be", next_twin_prime(10) == 13); + _ASSERT_EXPR("next twin prime of 11 should be", next_twin_prime(11) == 13); + _ASSERT_EXPR("next twin prime of 12 should be", next_twin_prime(12) == 13); + _ASSERT_EXPR("next twin prime of 13 should be", next_twin_prime(13) == 13); +} + +void test_prime() { + TEST tests[] = {{test_few_twin_primes, "test few twin primes"}}; + + run_tests("prime", tests, 1); +} \ No newline at end of file diff --git a/utilities/tests/prime_test.h b/utilities/tests/prime_test.h new file mode 100644 index 00000000..7fc8a048 --- /dev/null +++ b/utilities/tests/prime_test.h @@ -0,0 +1,9 @@ +#ifndef PRIME_TEST_H +#define PRIME_TEST_H + +/** + * Prime unit tests + */ +void test_prime(); + +#endif diff --git a/utilities/tests/scc_test.c 
b/utilities/tests/scc_test.c new file mode 100644 index 00000000..926576a3 --- /dev/null +++ b/utilities/tests/scc_test.c @@ -0,0 +1,216 @@ +#include "../scc.h" + +#include +#include +#include "common.h" +#include "assert.h" + +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) + +static void test_all_disjoints() { + SccGraph graph; + scc_graph_initialize(&graph, TOTAL_COUNT); + + int i; + for (i = 1; i <= TOTAL_COUNT; i++) { + scc_graph_add_vertex(&graph, INT2VOIDP(i)); + } + + SCC_COMPONENTS* components = scc_graph_components(&graph); + + _ASSERT_EXPR("should have many disjoint components", + components->length == TOTAL_COUNT); + + for (i = 0; i < components->length; i++) { + SCC_COMPONENT* component = components->array[i]; + + _ASSERT_EXPR("component should have one element in it", component->length); + } + + scc_graph_destroy(&graph); +} + +static void test_all_disjoints2() { + SccGraph graph; + scc_graph_initialize(&graph, TOTAL_COUNT); + + int i; + for (i = 1; i <= TOTAL_COUNT; i++) { + scc_graph_add_vertex(&graph, INT2VOIDP(i)); + } + + for (i = 1; i <= TOTAL_COUNT - 1; i++) { + scc_graph_add_edge(&graph, INT2VOIDP(i), INT2VOIDP(i + 1)); + } + + SCC_COMPONENTS* components = scc_graph_components(&graph); + + _ASSERT_EXPR("should have many disjoint components", + components->length == TOTAL_COUNT); + + for (i = 0; i < components->length; i++) { + SCC_COMPONENT* component = components->array[i]; + + _ASSERT_EXPR("component should have one element in it", component->length); + } + + scc_graph_destroy(&graph); +} + +static void test_all_connected() { + SccGraph graph; + scc_graph_initialize(&graph, TOTAL_COUNT); + + int i; + for (i = 1; i <= TOTAL_COUNT; i++) { + scc_graph_add_vertex(&graph, INT2VOIDP(i)); + } + + for (i = 1; i <= TOTAL_COUNT - 1; i++) { + scc_graph_add_edge(&graph, INT2VOIDP(i), INT2VOIDP(i + 1)); + } + + scc_graph_add_edge(&graph, INT2VOIDP(TOTAL_COUNT), INT2VOIDP(1)); + + SCC_COMPONENTS* components = scc_graph_components(&graph); + + 
_ASSERT_EXPR("should have one large component", components->length == 1); + + SCC_COMPONENT* component = components->array[0]; + _ASSERT_EXPR("component should have all elements in it", + component->length == TOTAL_COUNT); + + scc_graph_destroy(&graph); +} + +static void test_two_connected_components() { + SccGraph graph; + scc_graph_initialize(&graph, TOTAL_COUNT); + int half_count = TOTAL_COUNT / 2; + + int i; + for (i = 1; i <= TOTAL_COUNT; i++) { + scc_graph_add_vertex(&graph, INT2VOIDP(i)); + } + + // Component 1 + for (i = 1; i <= half_count - 1; i++) { + scc_graph_add_edge(&graph, INT2VOIDP(i), INT2VOIDP(i + 1)); + } + + scc_graph_add_edge(&graph, INT2VOIDP(half_count), INT2VOIDP(1)); + + // Component 2 + for (i = half_count + 1; i <= TOTAL_COUNT - 1; i++) { + scc_graph_add_edge(&graph, INT2VOIDP(i), INT2VOIDP(i + 1)); + } + + scc_graph_add_edge(&graph, INT2VOIDP(TOTAL_COUNT), INT2VOIDP(half_count + 1)); + + SCC_COMPONENTS* components = scc_graph_components(&graph); + + _ASSERT_EXPR("should have one large component", components->length == 2); + + SCC_COMPONENT* component1 = components->array[0]; + _ASSERT_EXPR("component 1 should have half of elements in it", + component1->length == half_count); + + SCC_COMPONENT* component2 = components->array[2]; + _ASSERT_EXPR("component 2 should have other half of elements in it", + component1->length == (TOTAL_COUNT - half_count)); + + scc_graph_destroy(&graph); +} + +static void test_n_connected_components() { + SccGraph graph; + scc_graph_initialize(&graph, TOTAL_COUNT); + + int i, j; + for (i = 1; i <= TOTAL_COUNT; i++) { + scc_graph_add_vertex(&graph, INT2VOIDP(i)); + } + + int count_chunks = 7; + int chunk_size = TOTAL_COUNT / count_chunks; + + int start = 0; + int end; + j = 0; + do { + end = MIN(TOTAL_COUNT, start + chunk_size); + + // Component j + for (i = start; i <= end - 1; i++) { + scc_graph_add_edge(&graph, INT2VOIDP(i), INT2VOIDP(i + 1)); + } + + scc_graph_add_edge(&graph, INT2VOIDP(end), 
INT2VOIDP(start)); + + j++; + start = end + 1; + } while (end < TOTAL_COUNT); + + SCC_COMPONENTS* components = scc_graph_components(&graph); + + _ASSERT_EXPR("should have n large component", components->length == j); + + for (j = 0; j < count_chunks; j++) { + SCC_COMPONENT* component = components->array[j]; + _ASSERT_EXPR( + "component j should have at most CHUNK_SIZE number of elements in it", + component->length <= ceil((1.0 * TOTAL_COUNT / count_chunks))); + } + + scc_graph_destroy(&graph); +} + +static void test_small_logical() { + SccGraph graph; + scc_graph_initialize(&graph, 5); + + scc_graph_add_vertex(&graph, INT2VOIDP(1)); + scc_graph_add_vertex(&graph, INT2VOIDP(2)); + scc_graph_add_vertex(&graph, INT2VOIDP(3)); + scc_graph_add_vertex(&graph, INT2VOIDP(4)); + scc_graph_add_vertex(&graph, INT2VOIDP(5)); + + // Component 1 + scc_graph_add_edge(&graph, INT2VOIDP(1), INT2VOIDP(2)); + scc_graph_add_edge(&graph, INT2VOIDP(2), INT2VOIDP(1)); + + // Component 2 + scc_graph_add_edge(&graph, INT2VOIDP(3), INT2VOIDP(4)); + scc_graph_add_edge(&graph, INT2VOIDP(4), INT2VOIDP(3)); + + SCC_COMPONENTS* components = scc_graph_components(&graph); + + _ASSERT_EXPR("should have 3 component", components->length == 3); + + int i; + for (i = 0; i < components->length; i++) { + SCC_COMPONENT* component = components->array[i]; + + if (component->length > 1) { + _ASSERT_EXPR("Component should contain 2 vertices", + component->length == 2); + } else { + _ASSERT_EXPR("Component should have vertex 5", + VOIDP2INT(component->array[0]) == 5); + } + } + + scc_graph_destroy(&graph); +} + +void test_scc() { + TEST tests[] = { + {test_small_logical, "SCC with 2 connected components"}, + {test_all_disjoints, "SCC all disjoints"}, + {test_all_disjoints2, "SCC all disjoints with edges"}, + {test_all_connected, "SCC all connected"}, + {test_two_connected_components, "SCC two connected component"}, + {test_n_connected_components, "SCC n connected component"}}; + + run_tests("scc", tests, 6); 
+} \ No newline at end of file diff --git a/utilities/tests/scc_test.h b/utilities/tests/scc_test.h new file mode 100644 index 00000000..9a958bad --- /dev/null +++ b/utilities/tests/scc_test.h @@ -0,0 +1,9 @@ +#ifndef SCC_TEST_H +#define SCC_TEST_H + +/** + * SCC unit tests + */ +void test_scc(); + +#endif \ No newline at end of file diff --git a/utilities/tests/stack_test.c b/utilities/tests/stack_test.c new file mode 100644 index 00000000..4fad4c58 --- /dev/null +++ b/utilities/tests/stack_test.c @@ -0,0 +1,54 @@ +#include "../stack.h" +#include "../hashtable.h" +#include "common.h" +#include "assert.h" + +static void test_empty() { + LinkedStack* stack; + stack_create(&stack); + + void* temp; + _ASSERT_EXPR("Stack should be empty", !stack_pop(&stack, &temp)); +} + +static void test_push_pop() { + LinkedStack* stack; + stack_create(&stack); + + int n = TOTAL_COUNT; + int i; + for (i = 0; i < n; i++) { + void* temp; + stack_push(&stack, INT2VOIDP(n - i - 1)); + } + + for (i = 0; i < n; i++) { + void* temp; + _ASSERT_EXPR("Stack should be empty", stack_pop(&stack, &temp)); + _ASSERT_EXPR("Should contain the right value", VOIDP2INT(temp) == i); + } +} + +static void test_clear() { + LinkedStack* stack; + stack_create(&stack); + + int n = 1000; + int i; + for (i = 0; i < n; i++) { + void* temp; + stack_push(&stack, INT2VOIDP(n - i - 1)); + } + + stack_destroy(&stack); + void* temp; + _ASSERT_EXPR("Stack should be empty", !stack_pop(&stack, &temp)); +} + +void test_stack() { + TEST tests[] = {{test_empty, "empty stack consistency"}, + {test_push_pop, "push pop consistency"}, + {test_clear, "clear consistency"}}; + + run_tests("stack", tests, 3); +} diff --git a/utilities/tests/stack_test.h b/utilities/tests/stack_test.h new file mode 100644 index 00000000..0892b7ff --- /dev/null +++ b/utilities/tests/stack_test.h @@ -0,0 +1,9 @@ +#ifndef STACK_TEST_H +#define STACK_TEST_H + +/** + * Stack tests + */ +void test_stack(); + +#endif diff --git a/utilities/utilities.h 
b/utilities/utilities.h new file mode 100644 index 00000000..a705a54e --- /dev/null +++ b/utilities/utilities.h @@ -0,0 +1,5 @@ +#include "hashcons.h" +#include "hashtable.h" +#include "prime.h" +#include "scc.h" +#include "stack.h"