author     marha <marha@users.sourceforge.net>  2010-11-19 13:18:48 +0000
committer  marha <marha@users.sourceforge.net>  2010-11-19 13:18:48 +0000
commit     12f606ce06ef926f366a03079c5e3107c23f18af (patch)
tree       28d7be4328bca9c31c1ab0f7cb5924c196be23a0 /tools/bison++
parent     773752eab55047c33bad0d88006bb69f5c601502 (diff)
download   vcxsrv-12f606ce06ef926f366a03079c5e3107c23f18af.tar.gz
           vcxsrv-12f606ce06ef926f366a03079c5e3107c23f18af.tar.bz2
           vcxsrv-12f606ce06ef926f366a03079c5e3107c23f18af.zip
Added tool bison++-1.21.11
Diffstat (limited to 'tools/bison++')
-rw-r--r--  tools/bison++/COPYING | 339
-rw-r--r--  tools/bison++/ChangeLog | 1171
-rw-r--r--  tools/bison++/Example/FlexLexer.h | 186
-rw-r--r--  tools/bison++/Example/Makefile | 34
-rw-r--r--  tools/bison++/Example/MyCompiler.cc | 32
-rw-r--r--  tools/bison++/Example/MyParser.y | 55
-rw-r--r--  tools/bison++/Example/MyScanner.l | 31
-rw-r--r--  tools/bison++/Example/test.txt | 1
-rw-r--r--  tools/bison++/Example/test2.txt | 1
-rw-r--r--  tools/bison++/INSTALL | 118
-rw-r--r--  tools/bison++/Makefile | 627
-rw-r--r--  tools/bison++/Makefile.am | 49
-rw-r--r--  tools/bison++/Makefile.in | 627
-rw-r--r--  tools/bison++/README++ | 22
-rw-r--r--  tools/bison++/REFERENCES | 30
-rw-r--r--  tools/bison++/aclocal.m4 | 104
-rw-r--r--  tools/bison++/alloca.c | 480
-rw-r--r--  tools/bison++/allocate.cc | 61
-rw-r--r--  tools/bison++/bison | 0
-rw-r--r--  tools/bison++/bison++.1 | 436
-rw-r--r--  tools/bison++/bison++.1.dman | 247
-rw-r--r--  tools/bison++/bison++.yacc | 2
-rw-r--r--  tools/bison++/bison++.yacc.1 | 0
-rw-r--r--  tools/bison++/bison.1 | 279
-rw-r--r--  tools/bison++/bison.cc | 1040
-rw-r--r--  tools/bison++/bison.cld | 18
-rw-r--r--  tools/bison++/bison.h | 270
-rw-r--r--  tools/bison++/bison.hairy | 341
-rw-r--r--  tools/bison++/bison.info | 132
-rw-r--r--  tools/bison++/bison.info-1 | 1070
-rw-r--r--  tools/bison++/bison.info-2 | 1334
-rw-r--r--  tools/bison++/bison.info-3 | 1287
-rw-r--r--  tools/bison++/bison.info-4 | 1304
-rw-r--r--  tools/bison++/bison.info-5 | 238
-rw-r--r--  tools/bison++/bison.ps.gz | bin 0 -> 195280 bytes
-rw-r--r--  tools/bison++/bison.rnh | 167
-rw-r--r--  tools/bison++/bison.texinfo | 5452
-rw-r--r--  tools/bison++/bison_pp.mak | 369
-rw-r--r--  tools/bison++/build-stamp | 0
-rw-r--r--  tools/bison++/closure.cc | 347
-rw-r--r--  tools/bison++/config.log | 577
-rw-r--r--  tools/bison++/config.status | 695
-rw-r--r--  tools/bison++/configure | 5115
-rw-r--r--  tools/bison++/configure-stamp | 0
-rw-r--r--  tools/bison++/configure.bat | 28
-rw-r--r--  tools/bison++/configure.in | 32
-rw-r--r--  tools/bison++/conflict.cc | 767
-rw-r--r--  tools/bison++/derives.cc | 118
-rw-r--r--  tools/bison++/files.cc | 403
-rw-r--r--  tools/bison++/files.h | 66
-rw-r--r--  tools/bison++/getargs.cc | 161
-rw-r--r--  tools/bison++/getopt.cc | 744
-rw-r--r--  tools/bison++/getopt.h | 128
-rw-r--r--  tools/bison++/getopt1.cc | 175
-rw-r--r--  tools/bison++/gram.cc | 58
-rw-r--r--  tools/bison++/gram.h | 122
-rw-r--r--  tools/bison++/install-sh | 251
-rw-r--r--  tools/bison++/lalr.cc | 761
-rw-r--r--  tools/bison++/lex.cc | 516
-rw-r--r--  tools/bison++/lex.h | 52
-rw-r--r--  tools/bison++/lr0.cc | 702
-rw-r--r--  tools/bison++/machine.h | 39
-rw-r--r--  tools/bison++/main.cc | 184
-rw-r--r--  tools/bison++/mdate-sh | 92
-rw-r--r--  tools/bison++/missing | 190
-rw-r--r--  tools/bison++/mkinstalldirs | 35
-rw-r--r--  tools/bison++/new.h | 31
-rw-r--r--  tools/bison++/nullable.cc | 136
-rw-r--r--  tools/bison++/old.c | 6
-rw-r--r--  tools/bison++/output.cc | 1663
-rw-r--r--  tools/bison++/print.cc | 369
-rw-r--r--  tools/bison++/reader.cc | 1912
-rw-r--r--  tools/bison++/reduce.cc | 593
-rw-r--r--  tools/bison++/smart-install | 250
-rw-r--r--  tools/bison++/stamp-vti | 3
-rw-r--r--  tools/bison++/state.h | 137
-rw-r--r--  tools/bison++/symtab.cc | 147
-rw-r--r--  tools/bison++/symtab.h | 50
-rw-r--r--  tools/bison++/system.h | 34
-rw-r--r--  tools/bison++/texinfo.tex | 4041
-rw-r--r--  tools/bison++/types.h | 27
-rw-r--r--  tools/bison++/version.cc | 1
-rw-r--r--  tools/bison++/version.texi | 3
-rw-r--r--  tools/bison++/vmsgetargs.c | 160
-rw-r--r--  tools/bison++/vmshlp.mar | 42
-rw-r--r--  tools/bison++/warshall.cc | 115
86 files changed, 40032 insertions, 0 deletions
diff --git a/tools/bison++/COPYING b/tools/bison++/COPYING
new file mode 100644
index 000000000..a43ea2126
--- /dev/null
+++ b/tools/bison++/COPYING
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 675 Mass Ave, Cambridge, MA 02139, USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ Appendix: How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) 19yy <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) 19yy name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/tools/bison++/ChangeLog b/tools/bison++/ChangeLog
new file mode 100644
index 000000000..05eb0cb82
--- /dev/null
+++ b/tools/bison++/ChangeLog
@@ -0,0 +1,1171 @@
+Changes between 1.19-8 and 1.21-8+
+ - bison.cc: #pragma alloca, on AIX, must be put before any C declaration
+ - bison.cc,h: avoid redeclaring yylval twice
+ - bison.cc: changed __ALLOCA_return(num) to avoid unreached instruction warning.
+Changes between 1.19-6 and 1.21-8
+ - cosmetics...
+ - bison.cc, bison.h : generalize MSDOS to WINDOWS and the like...
+ - bison.cc, bison.h : WINDOWS: simulate alloca with malloc, via macro
+Changes between 1.19-6 and 1.21-7
+ - bison.cc, bison.h : use a macro to simulate goto in yyparse, for Sun C++.
+ - bison.cc : #if mismatch between YY_USE_CLASS and compatibility mode just
+ before the token #defines are included; added an #endif and an #ifndef YY_USE_CLASS
+ - generate an enum to eventually replace the static const
+ - output.c, files.c : emit #line directives in the generated code, so that the generated code is correctly referenced in the output file.
+Changes between 1.19-5 and 1.21-6
+ - bison.h : the parameter name was missing in
+ 'virtual void YY_@_ERROR(char *) YY_@_ERROR_BODY;', so an inline body cannot use
+ the parameter. Just added the name 'msg' as the parameter.
+Changes between 1.19-3 and 1.21-5
+ - sent out on the Internet as patch 5
+ - patch according to release 1.21 of bison :
+> Mon Sep 6 15:32:32 1993 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+>
+> * Version 1.22 released.
+>
+> * mkinstalldirs: New file.
+>
+> * Makefile.in (dist): Use .gz for extension, not .z.
+> (DISTFILES): New variable.
+> (dist): Use it instead of explicit file list.
+> Try to link each file separately, then copy file if ln fails.
+> (installdirs): Use mkinstalldirs script.
+>
+> Thu Jul 29 20:35:02 1993 David J. MacKenzie (djm@wookumz.gnu.ai.mit.edu)
+>
+> * Makefile.in (config.status): Run config.status --recheck, not
+> configure, to get the right args passed.
+>
+> Sat Jul 24 04:00:52 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * bison.simple (yyparse): Init yychar1 to avoid warning.
+>
+> Sun Jul 4 16:05:58 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * bison.simple (yyparse): Don't set yyval when yylen is 0.
+>
+> Sat Jun 26 15:54:04 1993 David J. MacKenzie (djm@wookumz.gnu.ai.mit.edu)
+>
+> * getargs.c (getargs): Exit after printing the version number.
+> Add --help and -h options.
+> (usage): New function.
+>
+> Fri Jun 25 15:11:25 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * getargs.c (longopts): Allow `output' as an alternative.
+>
+> Wed Jun 16 17:02:37 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * bison.simple (yyparse): Conditionalize the entire call to yyoverflow,
+> not just two arguments in it.
+>
+> Thu Jun 3 13:07:19 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * bison.simple [__hpux] (alloca): Don't specify arg types.
+>
+> Fri May 7 05:53:17 1993 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+>
+> * Makefile.in (install): Depend on `uninstall' and `installdirs'.
+> (installdirs): New target.
+>
+> Wed Apr 28 15:15:15 1993 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+>
+> * reader.c: Remove declaration of atoi.
+>
+> Fri Apr 23 12:29:20 1993 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+>
+> * new.h [!__STDC__] (FREE): Check x != 0.
+> Make expr to call `free' evaluate to 0.
+>
+> Tue Apr 20 01:43:44 1993 David J. MacKenzie (djm@kropotkin.gnu.ai.mit.edu)
+>
+> * files.c [MSDOS]: Use xmalloc, not malloc.
+> * allocate.c (xmalloc): Renamed from mallocate. Remove old wrapper.
+> * conflicts.c, symtab.c, files.c, LR0.c, new.h: Change callers.
+> * allocate.c (xrealloc): New function.
+> * new.h: Declare it.
+> * lex.c, reader.c: Use it.
+>
+> Sun Apr 18 00:45:56 1993 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+>
+> * Version 1.21 released.
+>
+> * reader.c : Don't declare `realloc'.
+>
+> * Makefile.in (bison.s1): use `rm -f' since it's quieter.
+> (dist): make gzipped tar file.
+>
+> Fri Apr 16 21:24:10 1993 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+>
+> * Makefile.in (Makefile, config.status, configure): New targets.
+>
+> Thu Apr 15 15:37:28 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * main.c: Don't declare `abort'.
+>
+> * files.c: Don't declare `exit'.
+>
+> Thu Apr 15 02:42:38 1993 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+>
+> * configure.in: Add AC_CONST.
+>
+> Wed Apr 14 00:51:17 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * Makefile.in (all): Depend on bison.s1.
+>
+> Tue Apr 13 14:52:32 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * Version 1.20 released.
+>
+> Wed Mar 24 21:45:47 1993 Richard Stallman (rms@wookumz.gnu.ai.mit.edu)
+>
+> * output.c (output_headers): Rename yynerrs if -p.
+>
+> Thu Mar 18 00:02:17 1993 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+>
+> * system.h: Don't try to include stdlib.h unless HAVE_STDLIB_H is
+> defined.
+>
+> * configure.in: Check for stdlib.h.
+>
+> Wed Mar 17 14:44:27 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * bison.simple [__hpux, not __GNUC__]: Declare alloca.
+> (yyparse): When printing the expected token types for an error,
+> Avoid negative indexes in yycheck and yytname.
+>
+> Sat Mar 13 23:31:25 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * Makefile.in (files.o, .c.o): Put CPPFLAGS and CFLAGS last.
+>
+> Mon Mar 1 17:49:08 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * bison.simple: Test __sgi like __sparc.
+>
+> Wed Feb 17 00:04:13 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * conflicts.c (resolve_sr_conflict): Add extra parens in alloca call.
+>
+> * bison.simple [__GNUC__] (yyparse): Declare with prototype.
+>
+> Fri Jan 15 13:15:17 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * conflicts.c (print_reduction): Near end, increment fp2 when mask
+> recycles.
+>
+> Wed Jan 13 04:15:03 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * Makefile.in (bison.s1): New target. Modifies bison.simple.
+> (install): Install bison.s1, without changing it.
+> (clean): Delete bison.s1.
+>
+> Mon Jan 4 20:35:58 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * reader.c (reader): Put Bison version in comment in output file.
+>
+> Tue Dec 22 19:00:58 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * files.c (openfiles): Use .output, not .out, for outfile,
+> regardless of spec_name_prefix.
+>
+> Tue Dec 15 19:22:11 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * output.c (output_gram): Include yyrhs in the same #if as yyprhs.
+>
+> Tue Dec 15 18:29:16 1992 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+>
+> * output.c (output): output directives checking for __cplusplus as
+> well as __STDC__ to determine when to define "const" as an empty
+> token. (Patch from Wolfgang Glunz <wogl@sun11a.zfe.siemens.de>)
+>
+> Tue Dec 8 21:51:23 1992 David J. MacKenzie (djm@kropotkin.gnu.ai.mit.edu)
+>
+> * system.h, conflicts.c: Replace USG with HAVE_STRING_H and
+> HAVE_MEMORY_H.
+>
+> Sat Nov 21 00:37:16 1992 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+>
+> * Makefile.in: Set and use $(MAKEINFO).
+>
+> Fri Nov 20 20:45:57 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * files.c (done) [MSDOS]: Delete the tmpdefsfile with the rest.
+>
+> Thu Oct 8 21:55:52 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * Makefile.in (dist): Put configure.bat in the distribution.
+>
+> Thu Oct 1 09:16:24 1992 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+>
+> * Makefile.in (install): cd to $(srcdir) before installing info files.
+>
+> Wed Sep 30 17:18:39 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+>
+> * Makefile.in (files.o): Supply $(DEFS), and $(CPPFLAGS).
+>
+
+
+Changes between 2.2 and 1.19-3
+ - version.c : change the version name to (bison version-bison++ release), i.e. bison++-1.19-3
+ - Notes.txt : creation of this file, with the mail transactions
+
+version 2.2 sent to compiler.ieec.com
+4 may 93 A Coetmeur (coetmeur@icdc.fr). patch 2.2
+ *system.h getopt.c bison.cc : define _MSDOS if the old-fashioned MSDOS macro is defined
+ *bison.cc: define _MSDOS if Turbo C's __MSDOS__ is defined
+ *bison.cc : include malloc.h when compiling with Turbo C
+ *file.c : use _tempname instead of mktemp on DOS
+ *file.c : delete the forgotten temp file on DOS and VMS
+3 may 93 A Coetmeur (coetmeur@icdc.fr). patch 2.1
+ *bison.h bison.cc: problem with YY_@_PARSE_PARAM, which was never defined by default
+ in C++/class mode, nor in standard C. Same change in bison.h and .cc.
+ *files.c: the -S and -H options were inoperative because a test was inverted; corrected.
+ *files.h reader.c output.c : backquote not quoted in filenames (DOS); created the function quoted_filename().
+ *reader.c : (int)0 passed to set_parser instead of NULL caused a bug on 16-bit DOS. Corrected in parse_define().
+ *bison.cc bison.h : %define LLOC not defined by default
+
+24 Feb 93 Alain Coetmeur (coetmeur@icdc.fr )
+ *modified everything to support C++ and to generate classes...
+ *Options -H and -S to specify the skeleton and the header skeleton
+ *use a header skeleton.
+ *cut the 2 skeletons into many sections separated by $
+ *moved much of the output code from reader.c to output.c
+ *prefix should not be used with classes, though it does work in C
+ *the semantic parser is not to be used; it is not supported
+ *version 2.0 alpha, proposed ???
+ *bison++.1 man page written; the modifications are described there.
+ * C++ comments supported
+ * #line automatically added in the skeleton
+ * consistent with the flex++ version proposed by the same author
+
+Fri Sep 25 18:06:28 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Version 1.19 released.
+
+ * reader.c (parse_union_decl): Fix ending of C++ comment;
+ don't lose the char after the newline.
+
+ * configure.bat: New file.
+
+Thu Sep 24 16:23:15 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * conflicts.c: Check for using alloca.h as getopt.c does.
+
+Sun Sep 6 08:01:53 1992 Karl Berry (karl@hayley)
+
+ * files.c (openfiles): open `fdefines' after we have assigned a name
+ to `tmpdefsfile', and only if `definesflag' is set.
+ (done): only create the real .tab.h file if `definesflag' is set.
+ * reader.c (packsymbols): don't close `fdefines' here.
+
+Sat Sep 5 15:02:11 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * files.c (openfiles): Open fdefines as temp file, like ftable.
+ (done): Copy temp defines file to real one, like main output file.
+
+Fri Aug 21 12:47:48 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Makefile.in (dist): Don't release mergedir.awk
+ (install): Use sed, not awk. Don't depend on mergedir.awk.
+ * mergedir.awk: File effectively deleted.
+
+Wed Jul 29 00:53:25 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * bison.simple: Test __sparc along with __sparc__.
+
+Sat Jul 11 14:08:33 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * lex.c (skip_white_space): Count \n just once at end of c++ comment.
+
+Fri Jun 26 00:00:30 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * bison.simple: Comment fix; #line command updated.
+
+Wed Jun 24 15:12:42 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Makefile.in (install): Specify full new file name for the executable.
+
+Mon Jun 22 16:38:24 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Makefile.in (dist): Include bison.rnh in distribution.
+
+Sun Jun 21 22:42:13 1992 Eric Youngdale (youngdale@v6550c.nrl.navy.mil)
+
+ Clean up rough edges in VMS port of bison, add support for remaining
+ command line options.
+
+ * bison.cld: Add /version, /yacc, /file_prefix, and /name_prefix
+ switches.
+
+ * build.com: General cleanup: add logic to automatically sense
+ which C compiler is present; add code to cwd to the directory
+ that contains bison sources; do not define XPFILE, XPFILE1
+ (correct defaults are applied in file.c).
+
+ * files.c: Append _tab, not .tab when using /file_prefix under VMS.
+
+ * system.h: Include string.h instead of strings.h (a la USG).
+
+ * vmsgetargs.c: Add support for all switches added to bison.cld.
+
+Sun Jun 21 15:53:26 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Makefile.in (install): Always specify new file name for install.
+ Redirect awk output to temp file and install that.
+
+Wed May 27 22:27:50 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * bison.simple (yyparse): Make yybackup and yyerrlab1 always be used.
+
+Fri May 22 14:58:42 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Makefile.in (dist): Depend on bison.info
+ (bison.info): Delete spurious <.
+
+Sun May 17 21:48:55 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Makefile.in (.c.o): New rule. Use $(DEFS) directly.
+ (CFLAGS): Use just -g by default.
+ (CDEBUG): Variable deleted.
+
+Thu May 7 00:03:37 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * reader.c (copy_guard): Fix typo skipping comment.
+
+Mon May 4 01:23:21 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Version 1.18.
+
+ * getargs.c (getargs): Change '0' to 0 in case for long options.
+
+Sun Apr 19 10:17:52 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * reader.c (packsymbols): Handle -p when declaring yylval.
+
+Sat Apr 18 18:18:48 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * output.c (output_gram): Output #endif properly at end of decl.
+
+Mon Mar 30 01:13:41 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Version 1.17.
+
+ * Makefile.in (clean): Don't delete configuration files or TAGS.
+ (distclean): New target; do delete those.
+
+Sat Mar 28 17:18:50 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * output.c (output_gram): Conditionalize yyprhs on YYDEBUG.
+
+ * LR0.c (augment_automaton): If copying sp->shifts to insert new
+ shift, handle case of inserting at end.
+
+Sat Mar 21 23:25:47 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * lex.c (skip_white_space): Handle C++ comments.
+ * reader.c (copy_definition, parse_union_decl, copy_guard):
+ (copy_action): Likewise.
+
+Sun Mar 8 01:22:21 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * bison.simple (YYPOPSTACK): Fix typo.
+
+Sat Feb 29 03:53:06 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Makefile.in (install): Install bison.info* files one by one.
+
+Fri Feb 28 19:55:30 1992 David J. MacKenzie (djm@wookumz.gnu.ai.mit.edu)
+
+ * bison.1: Document long options as starting with `--', not `+'.
+
+Sat Feb 1 00:08:09 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getargs.c (getargs): Accept value 0 from getopt_long.
+
+Thu Jan 30 23:39:15 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Makefile.in (mostlyclean): Renamed from `clean'.
+ (clean): Renamed from 'distclean'. Dep on mostlyclean, not realclean.
+ (realclean): Dep on clean.
+
+Mon Jan 27 21:59:19 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * bison.simple: Use malloc, not xmalloc, and handle failure explicitly.
+
+Sun Jan 26 22:40:04 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * conflicts.c (total_conflicts): Delete unused arg to fprintf.
+
+Tue Jan 21 23:17:44 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * Version 1.16.
+
+Mon Jan 6 16:50:11 1992 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * Makefile (distclean): Depend on clean, not realclean. Don't rm TAGS.
+ (realclean): rm TAGS here.
+
+ * symtab.c (free_symtab): Don't free the type names.
+
+Sun Dec 29 22:25:40 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * machine.h: MSDOS has 32-bit ints if __GO32__.
+
+Wed Dec 25 22:09:07 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * bison.simple [_AIX]: Indent `#pragma alloca', so old C compilers
+ don't choke on it.
+
+Mon Dec 23 02:10:16 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * getopt.c, getopt1.c, getopt.h: Link them to standard source location.
+ * alloca.c: Likewise.
+ * Makefile.in (dist): Copy those files from current dir.
+
+ * getargs.c: Update usage message.
+
+ * LR0.c (augment_automaton): Put new shift in proper order.
+
+Fri Dec 20 18:39:20 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * conflicts.c: Use memcpy if ANSI C library.
+
+ * closure.c (set_fderives): Delete redundant assignment to vrow.
+
+ * closure.c (print_firsts): Fix bounds and offset checking tags.
+
+ * closure.c (tags): Declare just once at start of file.
+
+ * LR0.c (allocate_itemsets): Eliminate unused var max.
+ (augment_automaton): Test sp is non-null.
+
+ * lalr.c (initialize_LA): Make the vectors at least 1 element long.
+
+ * reader.c (readgram): Remove separate YYSTYPE default for MSDOS.
+
+Wed Dec 18 02:40:32 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * print.c (print_grammar): Don't print disabled rules.
+
+Tue Dec 17 03:48:07 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * lex.c (lex): Parse hex escapes properly.
+ Handle \v when filling token_buffer.
+
+ * lex.c: Include new.h.
+ (token_buffer): Change to a pointer.
+ (init_lex): Allocate initial buffer.
+ (grow_token_buffer): New function.
+ (lex, parse_percent_token): Use that.
+
+ * reader.c (read_declarations): Call open_extra_files just once.
+ (parse_token_decl): Don't free previous typename value.
+ Don't increment nvars if symbol is already a nonterminal.
+ (parse_union_decl): Catch unmatched close-brace.
+ (parse_expect_decl): Null-terminate buffer.
+ (copy_guard): Set brace_flag for {, not for }.
+
+ * reader.c: Fix %% in calls to fatal.
+
+ * reader.c (token_buffer): Just one extern decl, at top level.
+ Declare as pointer.
+
+ * symtab.c (free_symtab): Free type_name fields. Free symtab itself.
+
+Mon Nov 25 23:04:31 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * bison.simple: Handle alloca for AIX.
+
+ * Makefile.in (mandir): Compute default using manext.
+
+Sat Nov 2 21:39:32 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * Update all files to GPL version 2.
+
+Fri Sep 6 01:51:36 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * bison.simple (__yy_bcopy): Use builtin if GCC version 2.
+
+Mon Aug 26 22:09:12 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * reader.c (parse_assoc_decl): Error if same symbol gets two precs.
+
+Mon Aug 26 16:42:09 1991 David J. MacKenzie (djm at pogo.gnu.ai.mit.edu)
+
+ * Makefile.in, configure: Only put $< in Makefile if using VPATH,
+ because older makes don't understand it.
+
+Fri Aug 23 00:05:54 1991 David J. MacKenzie (djm at apple-gunkies)
+
+ * conflicts.c [_AIX]: #pragma alloca.
+ * reduce.c: Don't define TRUE and FALSE if already defined.
+
+Mon Aug 12 22:49:58 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * Makefile.in: Add deps on system.h.
+ (install): Add some deps.
+
+Fri Aug 2 12:19:20 1991 David J. MacKenzie (djm at apple-gunkies)
+
+ * Makefile.in (dist): Include texinfo.tex.
+
+ * configure: Create config.status. Remove it and Makefile if
+ interrupted while creating them.
+
+Thu Aug 1 23:14:01 1991 David J. MacKenzie (djm at apple-gunkies)
+
+ * configure: Check for +srcdir etc. arg and look for
+ Makefile.in in that directory. Set VPATH if srcdir is not `.'.
+ * Makefile.in (prefix): Renamed from DESTDIR.
+
+Wed Jul 31 21:29:47 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * print.c (print_grammar): Make output prettier. Break lines.
+
+Tue Jul 30 22:38:01 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * print.c (print_grammar): New function.
+ (verbose): Call it instead of printing token names here.
+
+Mon Jul 22 16:39:54 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * vmsgetargs.c (spec_name_prefix, spec_file_prefix): Define variables.
+
+Wed Jul 10 01:38:25 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * configure, Makefile.in: $(INSTALLPROG) -> $(INSTALL),
+ $(INSTALLTEXT) -> $(INSTALLDATA).
+
+Tue Jul 9 00:53:58 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * bison.simple: Don't include malloc.h if __TURBOC__.
+
+Sat Jul 6 15:18:12 1991 David J. MacKenzie (djm at geech.gnu.ai.mit.edu)
+
+ * Replace Makefile with configure and Makefile.in.
+ Update README with current compilation instructions.
+
+Mon Jul 1 23:12:20 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * reader.c (reader): Make the output define YYBISON.
+
+Thu Jun 20 16:52:51 1991 David J. MacKenzie (djm at geech.gnu.ai.mit.edu)
+
+ * Makefile (MANDIR, MANEXT): Install man page in
+ /usr/local/man/man1/bison.1 by default, instead of
+ /usr/man/manl/bison.l, for consistency with other GNU programs.
+ * Makefile: Rename BINDIR et al. to lowercase to conform to
+ GNU coding standards.
+ (install): Make man page non-executable.
+
+Fri May 31 23:22:13 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * Makefile (bison.info): New target.
+ (realclean): New target.
+
+Thu May 2 16:36:19 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * bison.simple: Use YYPRINT to print a token, if it's defined.
+
+Mon Apr 29 12:22:55 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * lalr.c (transpose): Rename R to R_arg.
+ (initialize_LA): Avoid shadowing variable j.
+
+ * reader.c (packsymbols): Avoid shadowing variable i.
+
+ * files.c: Declare exit and perror.
+
+ * machine.h: Define MAXSHORT and MINSHORT for the eta-10.
+
+Tue Apr 2 20:49:12 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * allocate.c (mallocate): Always allocate at least one byte.
+
+Tue Mar 19 22:17:19 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * Makefile (dist): Put alloca.c into distribution.
+
+Wed Mar 6 17:45:42 1991 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * print.c (print_actions): Nicer output for final states.
+
+Thu Feb 21 20:39:53 1991 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * output.c (output_rule_data): Break lines in yytline based on hpos.
+
+Thu Feb 7 12:54:36 1991 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * bison.simple (yyparse): Move decl of yylsa before use.
+
+Tue Jan 15 23:41:33 1991 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * Version 1.14.
+
+ * output.c (output_rule_data): Handle NULL in tags[i].
+
+Fri Jan 11 17:27:24 1991 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * bison.simple: On MSDOS, include malloc.h.
+
+Sat Dec 29 19:59:55 1990 David J. MacKenzie (djm at wookumz.ai.mit.edu)
+
+ * files.c: Use `mallocate' instead of `xmalloc' so no extra decl is
+ needed.
+
+Wed Dec 19 18:31:21 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * reader.c (readgram): Alternate YYSTYPE defn for MSDOS.
+ * files.c [MSDOS]: Declare xmalloc.
+
+Thu Dec 13 12:45:54 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * output.c (output_rule_data): Put all symbols in yytname.
+
+ * bison.simple (yyparse): Delete extra fprintf arg
+ when printing a result of reduction.
+
+Mon Dec 10 13:55:15 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * reader.c (packsymbols): Don't declare yylval if pure_parser.
+
+Tue Oct 30 23:38:09 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * Version 1.12.
+
+ * LR0.c (augment_automaton): Fix bugs adding sp2 to chain of shifts.
+
+Tue Oct 23 17:41:49 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * bison.simple: Don't define alloca if already defined.
+
+Sun Oct 21 22:10:53 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * getopt.c: On VMS, use string.h.
+
+ * main.c (main): Return type int.
+
+Mon Sep 10 16:59:01 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * output.c (output_headers): Output macro defs for -p.
+
+ * reader.c (readgram): Handle consecutive actions.
+
+ * getargs.c (getargs): Rename -a to -p.
+ * files.c (openfiles): Change names used for -b.
+
+Mon Aug 27 00:30:15 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * reduce.c (reduce_grammar_tables): Don't map rlhs of disabled rule.
+
+Sun Aug 26 13:43:32 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * closure.c (print_firsts, print_fderives): Use BITISSET to test bits.
+
+Thu Aug 23 22:13:40 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * closure.c (print_firsts): vrowsize => varsetsize.
+ (print_fderives): rrowsize => rulesetsize.
+
+Fri Aug 10 15:32:11 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * bison.simple (alloca): Don't define if already defined.
+ (__yy_bcopy): Alternate definition for C++.
+
+Wed Jul 11 00:46:03 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * getargs.c (getargs): Mention +yacc in usage message.
+
+Tue Jul 10 17:29:08 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (parse_token_decl, copy_action):
+ Set value_components_used if appropriate.
+ (readgram): Inhibit output of YYSTYPE definition in that case.
+
+Sat Jun 30 13:47:57 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * output.c (output_parser): Define YYPURE if pure, and not otherwise.
+ Don't define YYIMPURE.
+ * bison.simple: Adjust conditionals accordingly.
+ * bison.simple (YYLEX): If locations not in use, don't pass &yylloc.
+
+Thu Jun 28 12:32:21 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * getargs.c (longopts): Add `yacc'.
+
+Thu Jun 28 00:40:21 1990 David J. MacKenzie (djm at apple-gunkies)
+
+ * getargs.c (getargs): Add long options.
+ * Makefile: Link with getopt1.o and add getopt1.c and getopt.h to
+ dist.
+
+ * Move version number and description back into version.c from
+ Makefile and getargs.c.
+ * Makefile (dist): Extract version number from version.c.
+
+Tue Jun 26 13:16:35 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * output.c (output): Always call output_gram.
+ * bison.simple (yyparse): Print rhs and lhs symbols of reduction rule.
+
+Thu Jun 21 00:15:40 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * main.c: New global var `program_name' to hold argv[0] for error
+ messages.
+ * allocate.c, files.c, getargs.c, reader.c: Use `program_name'
+ in messages instead of hardcoded "bison".
+
+Wed Jun 20 23:38:34 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * Makefile: Specify Bison version here. Add rule to pass it to
+ version.c. Encode it in distribution directory and tar file names.
+ * version.c: Use version number from Makefile.
+ * getargs.c (getargs): Print additional text that used to be part of
+ version_string in version.c. Use -V instead of -version to print
+ Bison version info. Print a usage message and exit if given an
+ invalid option.
+
+Tue Jun 19 01:15:18 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * bison.simple: Fix a #line.
+
+ * Makefile (INSTALL): New parameter.
+ (install): Use that.
+ (CFLAGS): Move definition to top.
+
+Sun Jun 17 17:10:21 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (parse_type_decl): Ignore semicolon.
+ Remove excess % from error messages.
+
+Sat Jun 16 19:15:48 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * Version 1.11.
+
+ * Makefile (install): Ensure installed files readable.
+
+Tue Jun 12 12:50:56 EDT 1990 Jay Fenlason (hack@ai.mit.edu)
+
+ * getargs.c: Declare spec_file_prefix
+
+ * lex.c (lex): \a is '\007' instead of '007'
+
+ * reader.c: include machine.h
+
+ * files.h: Declare extern spec_name_prefix.
+
+ Trivial patch from Thorsten Ohl (td12@ddagsi3.bitnet)
+
+Thu May 31 22:00:16 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * Version 1.10.
+
+ * bison.simple (YYBACKUP, YYRECOVERING): New macros.
+ (YYINITDEPTH): This is what used to be YYMAXDEPTH.
+ (YYMAXDEPTH): This is what used to be YYMAXLIMIT.
+ If the value is 0, use the default instead.
+ (yyparse): Return 2 on stack overflow.
+
+Wed May 30 21:09:07 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * bison.simple (YYERROR): Jump to new label; don't print error message.
+ (yyparse): Define label yyerrlab1.
+
+Wed May 16 13:23:58 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * files.c (openfiles): Support -b.
+ * getargs.c (getargs): Likewise.
+
+ * reader.c (readgram): Error if too many symbols.
+
+ * lex.c (lex): Handle \a. Make error msgs more reliable.
+ * reader.c (read_declarations): Make error msgs more reliable.
+
+Sun May 13 15:03:37 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * Version 1.09.
+
+ * reduce.c (reduce_grammar_tables): Fix backward test.
+
+Sat May 12 21:05:34 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * Makefile (bison-dist.*): Rename targets and files to bison.*.
+ (bison.tar): Make tar file to unpack into subdirectory named `bison'.
+
+Mon Apr 30 03:46:58 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reduce.c (reduce_grammar_tables): Set rlhs to -1 for useless rules.
+ * nullable.c (set_nullable): Ignore those rules.
+ * derives.c (set_derives): Likewise.
+
+Mon Apr 23 15:16:09 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * bison.simple (yyparse): Mention rule number as well as line number.
+
+Thu Mar 29 00:00:43 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * bison.simple (__yy_bcopy): New function.
+ (yyparse): Use that, not bcopy.
+
+Wed Mar 28 15:23:51 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * print.c (print_actions): Don't alter i and j spuriously when errp==0.
+
+Mon Mar 12 16:22:18 1990 Jim Kingdon (kingdon at pogo.ai.mit.edu)
+
+ * bison.simple [__GNUC__]: Use builtin_alloca.
+
+Wed Mar 7 21:11:36 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * Makefile (install): Use mergedir.awk to process bison.simple
+ for installation.
+
+ * bison.simple (yyparse): New feature to include possible valid
+ tokens in parse error message.
+
+Sat Mar 3 14:10:56 1990 Richard Stallman (rms at geech)
+
+ * Version 1.08.
+
+Mon Feb 26 16:32:21 1990 Jim Kingdon (kingdon at pogo.ai.mit.edu)
+
+ * print.c (print_actions)
+ conflicts.c (print_reductions): Change "shift %d" to
+ "shift, and go to state %d" and "reduce %d" to "reduce using rule %d"
+ and "goto %d" to "go to state %d".
+ print.c (print_core): Change "(%d)" to "(rule %d)".
+
+Tue Feb 20 14:22:47 EST 1990 Jay Fenlason (hack @ wookumz.ai.mit.edu)
+
+ * bison.simple: Comment out unused yyresume: label.
+
+Fri Feb 9 16:14:34 EST 1990 Jay Fenlason (hack @ wookumz.ai.mit.edu)
+
+ * bison.simple : surround all declarations and (remaining) uses of
+ yyls* and yylloc with #ifdef YYLSP_NEEDED This will significantly
+ cut down on stack usage, and gets rid of unused-variable msgs from
+ GCC.
+
+Wed Jan 31 13:06:08 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * files.c (done) [VMS]: Don't delete files that weren't used.
+ [VMS]: Let user override XPFILE and XPFILE1.
+
+Wed Jan 3 15:52:28 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * Version 1.07.
+
+Sat Dec 16 15:50:21 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * gram.c (dummy): New function.
+
+ * reader.c (readgram): Detect error if two consec actions.
+
+Wed Nov 15 02:06:08 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reduce.c (reduce_grammar_tables): Update rline like other tables.
+
+ * Makefile (install): Install the man page.
+
+Sat Nov 11 03:21:58 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * output.c (output_rule_data): Write #if YYDEBUG around yyrline.
+
+Wed Oct 18 13:07:55 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * Version 1.06.
+
+ * vmsgetargs.c (getargs): Downcase specified output file name.
+
+Fri Oct 13 17:48:14 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (readgram): Warn if there is no default to use for $$
+ and one is needed.
+
+Fri Sep 29 12:51:53 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * Version 1.05.
+
+ * vmsgetargs.h (getargs): Process outfile option.
+
+Fri Sep 8 03:05:14 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * Version 1.04.
+
+ * reader.c (parse_union_decl): Count newlines even in comments.
+
+Wed Sep 6 22:03:19 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * files.c (openfiles): short_base_length was always == base_length.
+
+Thu Aug 24 16:55:06 1989 Richard Stallman (rms at apple-gunkies.ai.mit.edu)
+
+ * Version 1.03.
+
+ * files.c (openfiles): Write output into same dir as input, by default.
+
+Wed Aug 23 15:03:07 1989 Jay Fenlason (hack at gnu)
+
+ * Makefile: Include system.h in bison-dist.tar
+
+Tue Aug 15 22:30:42 1989 Richard Stallman (rms at hobbes.ai.mit.edu)
+
+ * version 1.03.
+
+ * reader.c (reader): Output LTYPESTR to fdefines
+ only after reading the grammar.
+
+Sun Aug 6 16:55:23 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (read_declarations): Put space before comment
+ to avoid bug in Green Hills C compiler.
+
+Mon Jun 19 20:14:01 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * allocate.c (xmalloc): New function.
+
+Fri Jun 16 23:59:40 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * build.com: Compile and link reduce.c.
+
+Fri Jun 9 23:00:54 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reduce.c (reduce_grammar_tables): Adjust start_symbol when #s change.
+
+Sat May 27 17:57:29 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (copy_definition, copy_guard): Don't object to \-newline
+ inside strings.
+
+Mon May 22 12:30:59 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * files.c (openfiles): Alternate file names for MSDOS.
+ (open_extra_files): Likewise.
+ (done): On MSDOS, unlink temp files here, not in openfiles.
+
+ * machine.h (BITS_PER_WORD): 16 on MSDOS.
+ (MAXTABLE): Now defined in this file.
+
+ * system.h: New file includes system-dependent headers.
+ All relevant .c files include it.
+
+Thu Apr 27 17:00:47 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * version.c: Version 1.01.
+
+Tue Apr 18 12:46:05 1989 Randall Smith (randy at apple-gunkies.ai.mit.edu)
+
+ * conflicts.c (total_conflicts): Fixed typo in yacc style output;
+ mention conflicts if > 0.
+
+Sat Apr 15 17:36:18 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (packsymbols): Start new symbols after 256.
+
+Wed Apr 12 14:09:09 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (reader): Always assign code 256 to `error' token.
+ Always set `translations' to 1 so this code gets handled.
+ * bison.simple (YYERRCODE): Define it.
+
+Tue Apr 11 19:26:32 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * conflicts.c: If GNU C, use builtin alloca.
+
+ * Makefile (install): Delete parser files before copying them.
+
+Thu Mar 30 13:51:17 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * getargs.c (getargs): Turn off checking of name Bison was invoked by.
+
+ * Makefile (dist): Include ChangeLog in distrib.
+
+Thu Mar 23 15:19:41 1989 Jay Fenlason (hack at apple-gunkies.ai.mit.edu)
+
+ * LR0.c closure.c conflicts.c derives.c files.c getargs.c lalr.c
+ lex.c main.c nullable.c output.c print.c reader.c reduce.c
+ symtab.c warshall.c: A first pass at getting gcc -Wall to shut up.
+ Mostly declared functions as void, etc.
+
+ * reduce.c moved 'extern int fixed_outfiles;' into print_notices()
+ where it belongs.
+
+Wed Mar 1 12:33:28 1989 Randall Smith (randy at apple-gunkies.ai.mit.edu)
+
+ * types.h, symtab.h, state.h, new.h, machine.h, lex.h, gram.h,
+ files.h, closure.c, vmsgetargs.c, warshall.c, symtab.c, reduce.c,
+ reader.c, print.c, output.c, nullable.c, main.c, lex.c, lalr.c,
+ gram.c, getargs.c, files.c, derives.c, conflicts.c, allocate.c,
+ LR0.c, Makefile, bison.simple: Changed copyright notices to be in
+ accord with the new General Public License.
+ * COPYING: Made a link to the new copying file.
+
+Wed Feb 22 06:18:20 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * new.h (FREE): Alternate definition for __STDC__ avoids error
+ if `free' returns void.
+
+Tue Feb 21 15:03:34 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (read_declarations): Double a `%' in a format string.
+ (copy_definition, parse_start_decl, parse_token_decl): Likewise.
+ (parse_type_decl, parse_union_decl, copy_guard, readgram, get_type).
+ (copy_action): change a `fatal' to `fatals'.
+
+ * lalr.c (map_goto): Initial high-end of binary search was off by 1.
+
+Sat Feb 18 08:49:57 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * bison.simple [sparc]: Include alloca.h.
+
+Wed Feb 15 06:24:36 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (packsymbols): Write decl of yylval into .tab.h file.
+
+Sat Jan 28 18:19:05 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * bison.simple: Avoid comments on `#line' lines.
+
+ * reader.c (LTYPESTR): Rearrange to avoid whitespace after \-newline.
+
+Mon Jan 9 18:43:08 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * conflicts.c (total_conflicts): if -y, use output syntax POSIX wants.
+ * reduce.c (print_notices): likewise.
+
+ * lex.c (lex): Handle \v, and \x hex escapes.
+
+ * reader.c (reader): Merge output_ltype into here.
+ Don't output YYLTYPE definition to .tab.h file
+ unless the @ construct is used.
+
+ * bison.simple: Define YYERROR, YYABORT, YYACCEPT here.
+ * reader.c (output_ltype): Don't output them here.
+
+ * bison.simple: YYDEBUG now should be 0 or 1.
+ * output.c (output): For YYDEBUG, output conditional to define it
+ only if not previously defined.
+
+Mon Jan 2 11:29:55 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * bison.simple (yyparse) [YYPURE]: Add local yynerrs.
+ (yydebug): Declare global, but don't initialize, regardless of YYPURE.
+ (yyparse): Don't declare yydebug here.
+
+Thu Dec 22 22:01:22 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reduce.c (print_notices): Typo in message.
+
+Sun Dec 11 11:32:07 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * output.c (pack_table): Free only nonzero the elts of froms & tos.
+
+Thu Dec 8 16:26:46 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * gram.c (rprecsym): New vector indicates the %prec symbol for a rule.
+ * reader.c (packgram): Allocate it and fill it in.
+ * reduce.c (inaccessable_symbols): Use it to set V1.
+ * reduce.c (print_results): Don't complain about useless token
+ if it's in V1.
+
+Mon Dec 5 14:33:17 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * machine.h (RESETBIT, BITISSET): New macros.
+ (SETBIT, WORDSIZE): Change to use BITS_PER_WORD.
+
+ * reduce.c: New file, by David Bakin. Reduces the grammar.
+ * Makefile: Compile it, link it, put it in dist.
+
+ * main.c (main): Call reduce_grammar (in reduce.c).
+
+Thu Nov 17 18:33:04 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * conflicts.c: Don't declare alloca if including alloca.h.
+
+ * bison.cld: Define qualifiers `nolines', `debug'.
+ * vmsgetargs.c (getargs): Handle them.
+
+ * output.c (output_program): Notice `nolinesflag'.
+
+ * output.c (output_parser): Simplify logic for -l and #line.
+ Avoid writing EOF char into output.
+
+Wed Oct 12 18:00:03 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * Implement `-l' option.
+ * getopt.c: Set flag `nolinesflag'.
+ * reader.c (copy_definition, parse_union_decl, copy_guard, copy_action)
+ Obey that flag; don't generate #line.
+ * output.c (output_parser): Discard #line's when copying the parser.
+
+Mon Sep 12 16:33:17 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (copy_guard): Fix brace-counting for brace-surrounded guard.
+
+Thu Sep 8 20:09:53 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * bison.simple: Correct number in #line command.
+ (yyparse): Call YYABORT instead of YYERROR, due to last change in
+ output_ltype.
+
+Mon Sep 5 14:55:30 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * Makefile: New variable LIBS. Alternatives for USG.
+ * conflicts.c [USG]: Define bcopy.
+ * symtab.c [USG]: Include string.h instead of strings.h.
+
+ * conflicts.c [sparc]: Include alloca.h.
+
+Tue Aug 2 08:38:38 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (parse_token_decl): Ignore commas.
+
+Sat Jun 25 10:29:20 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * reader.c (output_ltype): Make YYERROR yacc-compatible (like YYFAIL).
+
+Fri Jun 24 11:25:11 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * getargs.c (getargs): -t sets debugflag.
+ Eliminate upper case duplicate options.
+ * output.c (output): If debugflag, output `#define YYDEBUG'.
+
+Thu May 26 06:04:21 1988 Richard Stallman (rms at frosted-flakes.ai.mit.edu)
+
+ * allocate.c (mallocate): New name for `allocate' (which loses in VMS).
+ Calls changed in LR0.c, conflicts.c, symtab.c, new.h.
+
+ * getargs.c (getargs): If argv[0] is "yacc", set fixed_outfiles.
+
+Tue May 17 12:15:30 1988 Richard Stallman (rms at frosted-flakes.ai.mit.edu)
+
+ * conflicts.c: Declare alloca.
+ * reader.c: Declare realloc.
+ * warshall.c (TC): Fix one arithmetic op that was omitted last time.
+
+Thu May 5 14:36:03 1988 Richard Stallman (rms at frosted-flakes.ai.mit.edu)
+
+ * bison.simple: Conditionalize most refs to yylsp on YYLSP_NEEDED.
+ * reader.c (copy_guard, copy_action): Notice if `@' is used.
+ (reader): If it was, output `#define YYLSP_NEEDED'.
+
+Mon Apr 18 04:54:32 1988 Richard Stallman (rms at rice-krispies.ai.mit.edu)
+
+ * bison.simple: New variable yynerr counts calls to yyerror.
+
+ * lex.c (lex, case '='): Update lineno when skipping a newline.
+
+ * reader.c (parse_expect_decl): ungetc the char that ends the number;
+ don't read any further. This handles multi-line comments right
+ and avoids incorrect lineno.
+
+ * reader.c: Delete duplicate decl of symval.
+
+ * warshall.c (RTC, TC): Cast ptrs to char *, not unsigned, for arith.
+
+Local Variables:
+mode: indented-text
+left-margin: 8
+fill-column: 76
+version-control: never
+End:
diff --git a/tools/bison++/Example/FlexLexer.h b/tools/bison++/Example/FlexLexer.h
new file mode 100644
index 000000000..431bfd397
--- /dev/null
+++ b/tools/bison++/Example/FlexLexer.h
@@ -0,0 +1,186 @@
+// $Header: /home/daffy/u0/vern/flex/RCS/FlexLexer.h,v 1.19 96/05/25 20:43:02 vern Exp $
+
+// FlexLexer.h -- define interfaces for lexical analyzer classes generated
+// by flex
+
+// Copyright (c) 1993 The Regents of the University of California.
+// All rights reserved.
+//
+// This code is derived from software contributed to Berkeley by
+// Kent Williams and Tom Epperly.
+//
+// Redistribution and use in source and binary forms with or without
+// modification are permitted provided that: (1) source distributions retain
+// this entire copyright notice and comment, and (2) distributions including
+// binaries display the following acknowledgement: ``This product includes
+// software developed by the University of California, Berkeley and its
+// contributors'' in the documentation or other materials provided with the
+// distribution and in all advertising materials mentioning features or use
+// of this software. Neither the name of the University nor the names of
+// its contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+
+// This file defines FlexLexer, an abstract class which specifies the
+// external interface provided to flex C++ lexer objects, and yyFlexLexer,
+// which defines a particular lexer class.
+//
+// If you want to create multiple lexer classes, you use the -P flag
+// to rename each yyFlexLexer to some other xxFlexLexer. You then
+// include <FlexLexer.h> in your other sources once per lexer class:
+//
+// #undef yyFlexLexer
+// #define yyFlexLexer xxFlexLexer
+// #include <FlexLexer.h>
+//
+// #undef yyFlexLexer
+// #define yyFlexLexer zzFlexLexer
+// #include <FlexLexer.h>
+// ...
+
+#ifndef __FLEX_LEXER_H
+// Never included before - need to define base class.
+#define __FLEX_LEXER_H
+#include <iostream.h>
+#include "MyParser.h"
+extern "C++" {
+
+struct yy_buffer_state;
+typedef int yy_state_type;
+
+class FlexLexer {
+public:
+ virtual ~FlexLexer() { }
+
+ const char* YYText() { return yytext; }
+ int YYLeng() { return yyleng; }
+
+ virtual void
+ yy_switch_to_buffer( struct yy_buffer_state* new_buffer ) = 0;
+ virtual struct yy_buffer_state*
+ yy_create_buffer( istream* s, int size ) = 0;
+ virtual void yy_delete_buffer( struct yy_buffer_state* b ) = 0;
+ virtual void yyrestart( istream* s ) = 0;
+
+ virtual int yylex(FLEXFIX) = 0;
+
+ // Call yylex with new input/output sources.
+ int yylex(FLEXFIX, istream* new_in, ostream* new_out = 0 )
+ {
+ switch_streams( new_in, new_out );
+ return yylex(FLEXFIX2);
+ }
+
+ // Switch to new input/output streams. A nil stream pointer
+ // indicates "keep the current one".
+ virtual void switch_streams( istream* new_in = 0,
+ ostream* new_out = 0 ) = 0;
+
+ int lineno() const { return yylineno; }
+
+ int debug() const { return yy_flex_debug; }
+ void set_debug( int flag ) { yy_flex_debug = flag; }
+
+protected:
+ char* yytext;
+ int yyleng;
+ int yylineno; // only maintained if you use %option yylineno
+ int yy_flex_debug; // only has effect with -d or "%option debug"
+};
+
+}
+#endif
+
+#if defined(yyFlexLexer) || ! defined(yyFlexLexerOnce)
+// Either this is the first time through (yyFlexLexerOnce not defined),
+// or this is a repeated include to define a different flavor of
+// yyFlexLexer, as discussed in the flex man page.
+#define yyFlexLexerOnce
+
+class yyFlexLexer : public FlexLexer {
+public:
+ // arg_yyin and arg_yyout default to the cin and cout, but we
+ // only make that assignment when initializing in yylex().
+ yyFlexLexer( istream* arg_yyin = 0, ostream* arg_yyout = 0 );
+
+ virtual ~yyFlexLexer();
+
+ void yy_switch_to_buffer( struct yy_buffer_state* new_buffer );
+ struct yy_buffer_state* yy_create_buffer( istream* s, int size );
+ void yy_delete_buffer( struct yy_buffer_state* b );
+ void yyrestart( istream* s );
+
+ virtual int yylex(FLEXFIX);
+ virtual void switch_streams( istream* new_in, ostream* new_out );
+
+protected:
+ virtual int LexerInput( char* buf, int max_size );
+ virtual void LexerOutput( const char* buf, int size );
+ virtual void LexerError( const char* msg );
+
+ void yyunput( int c, char* buf_ptr );
+ int yyinput();
+
+ void yy_load_buffer_state();
+ void yy_init_buffer( struct yy_buffer_state* b, istream* s );
+ void yy_flush_buffer( struct yy_buffer_state* b );
+
+ int yy_start_stack_ptr;
+ int yy_start_stack_depth;
+ int* yy_start_stack;
+
+ void yy_push_state( int new_state );
+ void yy_pop_state();
+ int yy_top_state();
+
+ yy_state_type yy_get_previous_state();
+ yy_state_type yy_try_NUL_trans( yy_state_type current_state );
+ int yy_get_next_buffer();
+
+ istream* yyin; // input source for default LexerInput
+ ostream* yyout; // output sink for default LexerOutput
+
+ struct yy_buffer_state* yy_current_buffer;
+
+ // yy_hold_char holds the character lost when yytext is formed.
+ char yy_hold_char;
+
+ // Number of characters read into yy_ch_buf.
+ int yy_n_chars;
+
+ // Points to current character in buffer.
+ char* yy_c_buf_p;
+
+ int yy_init; // whether we need to initialize
+ int yy_start; // start state number
+
+ // Flag which is used to allow yywrap()'s to do buffer switches
+ // instead of setting up a fresh yyin. A bit of a hack ...
+ int yy_did_buffer_switch_on_eof;
+
+ // The following are not always needed, but may be depending
+ // on use of certain flex features (like REJECT or yymore()).
+
+ yy_state_type yy_last_accepting_state;
+ char* yy_last_accepting_cpos;
+
+ yy_state_type* yy_state_buf;
+ yy_state_type* yy_state_ptr;
+
+ char* yy_full_match;
+ int* yy_full_state;
+ int yy_full_lp;
+
+ int yy_lp;
+ int yy_looking_for_trail_begin;
+
+ int yy_more_flag;
+ int yy_more_len;
+ int yy_more_offset;
+ int yy_prev_more_offset;
+};
+
+#endif
diff --git a/tools/bison++/Example/Makefile b/tools/bison++/Example/Makefile
new file mode 100644
index 000000000..006b58f49
--- /dev/null
+++ b/tools/bison++/Example/Makefile
@@ -0,0 +1,34 @@
+
+.SUFFIXES : .cc .y .l $(SUFFIXES)
+
+.cc.o :
+ g++ -g -I . -I$(CENTERCCLIBDIR)/incl -c $*.cc
+
+.y.cc :
+ bison++ -d -o $*.cc -h $*.h $*.y
+.l.cc :
+ flex++ -o$*.cc $*.l
+.y.h :
+ bison++ -d -o $*.cc -h $*.h $*.y
+.l.h :
+ flex++ -o$*.cc $*.l
+
+# COMPILER SAMPLE
+
+MyCompiler.o : MyCompiler.cc MyParser.h MyScanner.h
+
+MyParser.o : MyParser.cc MyParser.h
+
+MyScanner.o : MyScanner.cc MyScanner.h MyParser.h
+
+MyParser.cc : MyParser.y
+
+MyScanner.cc : MyScanner.l
+
+MyParser.h : MyParser.y
+
+MyScanner.h : MyScanner.l
+
+compiler : MyCompiler.o MyParser.o MyScanner.o
+ g++ -o $@ MyCompiler.o MyParser.o MyScanner.o
+
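[Editor's note] The suffix rules above chain bison++ and flex++ into the usual compile and link steps. Run by hand, the same build is roughly the following sketch (assuming bison++, flex++ and g++ are on the PATH; the $(CENTERCCLIBDIR) include from the .cc.o rule is left out):

    bison++ -d -o MyParser.cc -h MyParser.h MyParser.y
    flex++ -oMyScanner.cc MyScanner.l
    g++ -g -I. -c MyCompiler.cc MyParser.cc MyScanner.cc
    g++ -o compiler MyCompiler.o MyParser.o MyScanner.o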
diff --git a/tools/bison++/Example/MyCompiler.cc b/tools/bison++/Example/MyCompiler.cc
new file mode 100644
index 000000000..3c554bc40
--- /dev/null
+++ b/tools/bison++/Example/MyCompiler.cc
@@ -0,0 +1,32 @@
+#include "MyParser.h"
+#define YY_DECL int yyFlexLexer::yylex(YY_MyParser_STYPE *val)
+#include "FlexLexer.h"
+#include <stdio.h>
+
+class MyCompiler : public MyParser
+{
+private:
+ yyFlexLexer theScanner;
+ public:
+ virtual int yylex();
+ virtual void yyerror(char *m);
+ MyCompiler(){;}
+};
+
+int MyCompiler::yylex()
+{
+ return theScanner.yylex(&yylval);
+}
+
+void MyCompiler::yyerror(char *m)
+{ fprintf(stderr,"%d: %s at token '%s'\n",yylloc.first_line, m,yylloc.text);
+}
+
+int main(int argc,char **argv)
+{
+ MyCompiler aCompiler;
+ int result=aCompiler.yyparse();
+ printf("Resultat Parsing=%s\n",result?"Erreur":"OK");
+ return 0;
+}
+
diff --git a/tools/bison++/Example/MyParser.y b/tools/bison++/Example/MyParser.y
new file mode 100644
index 000000000..f8787eb34
--- /dev/null
+++ b/tools/bison++/Example/MyParser.y
@@ -0,0 +1,55 @@
+%{
+#define YY_MyParser_STYPE yy_MyParser_stype
+%}
+%name MyParser
+%define LSP_NEEDED
+%define ERROR_BODY =0
+%define LEX_BODY =0
+%header{
+#include <iostream>
+#include <string>
+ using namespace std;
+#define YY_DECL int yyFlexLexer::yylex(YY_MyParser_STYPE *val)
+#ifndef FLEXFIX
+#define FLEXFIX YY_MyParser_STYPE *val
+#define FLEXFIX2 val
+#endif
+%}
+
+%union {
+ int num;
+ bool statement;
+ }
+
+
+
+%token <num> PLUS INTEGER MINUS AND OR NOT LPARA RPARA
+%token <statement> BOOLEAN
+%type <num> exp result
+%type <statement> bexp
+%start result
+
+%left OR
+%left AND
+%left PLUS MINUS
+%left NOT
+%left LPARA RPARA
+
+%%
+
+result : exp {cout << "Result = " << $1 << endl;}
+ | bexp {cout << "Result = " << $1 << endl;}
+
+exp : exp PLUS exp {$$ = $1 + $3;}
+ | INTEGER {$$ = $1;}
+ | MINUS exp { $$ = -$2;}
+ | exp MINUS exp {$$ = $1 - $3;}
+
+bexp : BOOLEAN {$$ = $1;}
+ | bexp AND bexp { $$ = $1 && $3;}
+ | bexp OR bexp { $$ = $1 || $3;}
+ | NOT bexp {$$ = !$2;}
+	| LPARA bexp RPARA {$$ = $2;}
+%%
+/* -------------- body section -------------- */
+// feel free to add your own C/C++ code here
diff --git a/tools/bison++/Example/MyScanner.l b/tools/bison++/Example/MyScanner.l
new file mode 100644
index 000000000..fd9dde78f
--- /dev/null
+++ b/tools/bison++/Example/MyScanner.l
@@ -0,0 +1,31 @@
+%{
+#ifndef FLEXFIX
+#define FLEXFIX YY_MyParser_STYPE *val
+#define FLEXFIX2 val
+#endif
+#include "MyParser.h" // Make sure the flexer can communicate with bison++
+ //using return values
+%}
+
+digit [0-9]
+integer [1-9]{digit}*
+ws [ \t\n]+
+%%
+{ws} { /* no action */ }
+{integer} { val->num = atoi(yytext); return MyParser::INTEGER; }
+"AND" {return(MyParser::AND);}
+"OR" {return(MyParser::OR);}
+"NOT" {return(MyParser::NOT);}
+"TRUE" {val->statement=true; return MyParser::BOOLEAN; }
+"FALSE" {val->statement=false; return MyParser::BOOLEAN; }
+"-" {return(MyParser::MINUS);}
+"+" {return(MyParser::PLUS);}
+"(" {return(MyParser::LPARA);}
+")" {return(MyParser::RPARA);}
+
+<<EOF>> { yyterminate();}
+%%
+int yywrap()
+{
+ return(1);
+} \ No newline at end of file
diff --git a/tools/bison++/Example/test.txt b/tools/bison++/Example/test.txt
new file mode 100644
index 000000000..ef163e647
--- /dev/null
+++ b/tools/bison++/Example/test.txt
@@ -0,0 +1 @@
+1+2+3+4
diff --git a/tools/bison++/Example/test2.txt b/tools/bison++/Example/test2.txt
new file mode 100644
index 000000000..0ce961b61
--- /dev/null
+++ b/tools/bison++/Example/test2.txt
@@ -0,0 +1 @@
+(TRUE OR FALSE) AND FALSE \ No newline at end of file
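[Editor's note] Given the Makefile, grammar, scanner and the two test inputs above, a quick way to exercise the example is the following sketch (it assumes the build succeeds as described and that the scanner reads standard input, which is the flex default via cin):

    make compiler
    ./compiler < test.txt    # 1+2+3+4; should print "Result = 10" then "Resultat Parsing=OK"
    ./compiler < test2.txt   # (TRUE OR FALSE) AND FALSE; should print "Result = 0"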
diff --git a/tools/bison++/INSTALL b/tools/bison++/INSTALL
new file mode 100644
index 000000000..f44831799
--- /dev/null
+++ b/tools/bison++/INSTALL
@@ -0,0 +1,118 @@
+This is a generic INSTALL file for utilities distributions.
+If this package does not come with, e.g., installable documentation or
+data files, please ignore the references to them below.
+
+To compile this package:
+
+1. Configure the package for your system. In the directory that this
+file is in, type `./configure'. If you're using `csh' on an old
+version of System V, you might need to type `sh configure' instead to
+prevent `csh' from trying to execute `configure' itself.
+
+The `configure' shell script attempts to guess correct values for
+various system-dependent variables used during compilation, and
+creates the Makefile(s) (one in each subdirectory of the source
+directory). In some packages it creates a C header file containing
+system-dependent definitions. It also creates a file `config.status'
+that you can run in the future to recreate the current configuration.
+
+Running `configure' takes a minute or two. While it is running, it
+prints some messages that tell what it is doing. If you don't want to
+see the messages, run `configure' with its standard output redirected
+to `/dev/null'; for example, `./configure >/dev/null'.
+
+To compile the package in a different directory from the one
+containing the source code, you must use a version of `make' that
+supports the VPATH variable, such as GNU `make'. `cd' to the directory
+where you want the object files and executables to go and run
+`configure'. `configure' automatically checks for the source code in
+the directory that `configure' is in and in `..'. If for some reason
+`configure' is not in the source code directory that you are
+configuring, then it will report that it can't find the source code.
+In that case, run `configure' with the option `--srcdir=DIR', where
+DIR is the directory that contains the source code.
+
+By default, `make install' will install the package's files in
+/usr/local/bin, /usr/local/lib, /usr/local/man, etc. You can specify an
+installation prefix other than /usr/local by giving `configure' the option
+`--prefix=PATH'. Alternately, you can do so by consistently giving a value
+for the `prefix' variable when you run `make', e.g.,
+ make prefix=/usr/gnu
+ make prefix=/usr/gnu install
+
+You can specify separate installation prefixes for
+architecture-specific files and architecture-independent files. If
+you give `configure' the option `--exec-prefix=PATH' or set the
+`make' variable `exec_prefix' to PATH, the package will use PATH as
+the prefix for installing programs and libraries. Data files and
+documentation will still use the regular prefix. Normally, all files
+are installed using the regular prefix.
+
+Another `configure' option is useful mainly in `Makefile' rules for
+updating `config.status' and `Makefile'. The `--no-create' option
+figures out the configuration for your system and records it in
+`config.status', without actually configuring the package (creating
+`Makefile's and perhaps a configuration header file). Later, you can
+run `./config.status' to actually configure the package. You can also
+give `config.status' the `--recheck' option, which makes it re-run
+`configure' with the same arguments you used before. This option is
+useful if you change `configure'.
+
+Some packages pay attention to `--with-PACKAGE' options to `configure',
+where PACKAGE is something like `gnu-libc' or `x' (for the X Window System).
+The README should mention any --with- options that the package recognizes.
+
+`configure' ignores any other arguments that you give it.
+
+If your system requires unusual options for compilation or linking
+that `configure' doesn't know about, you can give `configure' initial
+values for some variables by setting them in the environment. In
+Bourne-compatible shells, you can do that on the command line like
+this:
+ CC='gcc -traditional' DEFS=-D_POSIX_SOURCE ./configure
+
+The `make' variables that you might want to override with environment
+variables when running `configure' are:
+
+(For these variables, any value given in the environment overrides the
+value that `configure' would choose:)
+CC C compiler program.
+ Default is `cc', or `gcc' if `gcc' is in your PATH.
+INSTALL Program to use to install files.
+ Default is `install' if you have it, `cp' otherwise.
+
+(For these variables, any value given in the environment is added to
+the value that `configure' chooses:)
+DEFS Configuration options, in the form `-Dfoo -Dbar ...'
+ Do not use this variable in packages that create a
+ configuration header file.
+LIBS Libraries to link with, in the form `-lfoo -lbar ...'
+
+If you need to do unusual things to compile the package, we encourage
+you to figure out how `configure' could check whether to do them, and
+mail diffs or instructions to the address given in the README so we
+can include them in the next release.
+
+2. Type `make' to compile the package. If you want, you can override
+the `make' variables CFLAGS and LDFLAGS like this:
+
+ make CFLAGS=-O2 LDFLAGS=-s
+
+3. If the package comes with self-tests and you want to run them,
+type `make check'. If you're not sure whether there are any, try it;
+if `make' responds with something like
+ make: *** No way to make target `check'. Stop.
+then the package does not come with self-tests.
+
+4. Type `make install' to install programs, data files, and
+documentation.
+
+5. You can remove the program binaries and object files from the
+source directory by typing `make clean'. To also remove the
+Makefile(s), the header file containing system-dependent definitions
+(if the package uses one), and `config.status' (all the files that
+`configure' created), type `make distclean'.
+
+The file `configure.in' is used as a template to create `configure' by
+a program called `autoconf'. You will only need it if you want to
+regenerate `configure' using a newer version of `autoconf'.
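[Editor's note] As a concrete instance of the out-of-tree (VPATH) build and prefix options described above, assuming GNU make and an illustrative build directory next to the source tree:

    mkdir ../bison++-build && cd ../bison++-build
    ../bison++/configure --prefix=/usr/local
    make
    make install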
diff --git a/tools/bison++/Makefile b/tools/bison++/Makefile
new file mode 100644
index 000000000..b7a87d09f
--- /dev/null
+++ b/tools/bison++/Makefile
@@ -0,0 +1,627 @@
+# Makefile.in generated automatically by automake 1.4-p6 from Makefile.am
+
+# Copyright (C) 1994, 1995-8, 1999, 2001 Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+
+SHELL = /bin/sh
+
+srcdir = .
+top_srcdir = .
+
+prefix = /usr
+exec_prefix = ${prefix}
+
+bindir = ${exec_prefix}/bin
+sbindir = ${exec_prefix}/sbin
+libexecdir = ${exec_prefix}/libexec
+datadir = ${prefix}/share
+sysconfdir = ${prefix}/etc
+sharedstatedir = ${prefix}/com
+localstatedir = ${prefix}/var
+libdir = ${exec_prefix}/lib
+infodir = ${prefix}/share/info
+mandir = ${prefix}/share/man
+includedir = ${prefix}/include
+oldincludedir = /usr/include
+
+DESTDIR =
+
+pkgdatadir = $(datadir)/bison++
+pkglibdir = $(libdir)/bison++
+pkgincludedir = $(includedir)/bison++
+
+top_builddir = .
+
+ACLOCAL = aclocal
+AUTOCONF = autoconf
+AUTOMAKE = automake
+AUTOHEADER = autoheader
+
+INSTALL = /usr/bin/install -c
+INSTALL_PROGRAM = ${INSTALL} $(AM_INSTALL_PROGRAM_FLAGS)
+INSTALL_DATA = ${INSTALL} -m 644
+INSTALL_SCRIPT = ${INSTALL}
+transform = s,x,x,
+
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+AWK = mawk
+CC = gcc
+LN_S = ln -s
+MAKEINFO = makeinfo
+PACKAGE = bison++
+VERSION = 2.21.5
+
+bin_PROGRAMS = bison++
+AUTOMAKE_OPTIONS = foreign
+
+bison___SOURCES = closure.cc derives.cc getargs.cc getopt1.cc lalr.cc lr0.cc nullable.cc print.cc reduce.cc version.cc warshall.cc allocate.cc conflict.cc files.cc getopt.cc gram.cc lex.cc main.cc output.cc reader.cc symtab.cc old.c
+
+
+info_TEXINFOS = bison.texinfo
+man_MANS = bison++.1 bison.1 bison++.yacc.1
+pkgdata_DATA = bison.cc bison.hairy bison.h Example
+CXX = g++
+PFILE = bison.cc
+PFILE1 = bison.hairy
+HFILE = bison.h
+INCLUDES = -DXPFILE=\"$(datadir)/bison++/$(PFILE)\" -DXHFILE=\"$(datadir)/bison++/$(HFILE)\" -DXPFILE1=\"$(datadir)/bison++/$(PFILE1)\"
+
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
+CONFIG_CLEAN_FILES =
+PROGRAMS = $(bin_PROGRAMS)
+
+
+DEFS = -DPACKAGE_NAME=\"FULL-PACKAGE-NAME\" -DPACKAGE_TARNAME=\"full-package-name\" -DPACKAGE_VERSION=\"VERSION\" -DPACKAGE_STRING=\"FULL-PACKAGE-NAME\ VERSION\" -DPACKAGE_BUGREPORT=\"BUG-REPORT-ADDRESS\" -DPACKAGE=\"bison++\" -DVERSION=\"2.21.5\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_ALLOCA_H=1 -DHAVE_MALLOC_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STDDEF_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_STRINGS_H=1 -DHAVE_ALLOCA_H=1 -DHAVE_ALLOCA=1 -DHAVE_STDLIB_H=1 -DHAVE_MALLOC=1 -I. -I$(srcdir)
+CPPFLAGS =
+LDFLAGS =
+LIBS =
+bison___OBJECTS = closure.o derives.o getargs.o getopt1.o lalr.o lr0.o \
+nullable.o print.o reduce.o version.o warshall.o allocate.o conflict.o \
+files.o getopt.o gram.o lex.o main.o output.o reader.o symtab.o old.o
+bison___LDADD = $(LDADD)
+bison___DEPENDENCIES =
+bison___LDFLAGS =
+CXXFLAGS = -g -O2
+CXXCOMPILE = $(CXX) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+CXXLD = $(CXX)
+CXXLINK = $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(LDFLAGS) -o $@
+CFLAGS = -g -O2
+COMPILE = $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(LDFLAGS) -o $@
+TEXI2DVI = texi2dvi
+INFO_DEPS = bison.info
+DVIS = bison.dvi
+TEXINFOS = bison.texinfo
+man1dir = $(mandir)/man1
+MANS = $(man_MANS)
+
+NROFF = nroff
+DATA = $(pkgdata_DATA)
+
+DIST_COMMON = COPYING ChangeLog INSTALL Makefile.am Makefile.in \
+aclocal.m4 configure configure.in install-sh mdate-sh missing \
+mkinstalldirs stamp-vti texinfo.tex version.texi
+
+
+DISTFILES = $(DIST_COMMON) $(SOURCES) $(HEADERS) $(TEXINFOS) $(EXTRA_DIST)
+
+TAR = tar
+GZIP_ENV = --best
+DEP_FILES = .deps/allocate.P .deps/closure.P .deps/conflict.P \
+.deps/derives.P .deps/files.P .deps/getargs.P .deps/getopt.P \
+.deps/getopt1.P .deps/gram.P .deps/lalr.P .deps/lex.P .deps/lr0.P \
+.deps/main.P .deps/nullable.P .deps/old.P .deps/output.P .deps/print.P \
+.deps/reader.P .deps/reduce.P .deps/symtab.P .deps/version.P \
+.deps/warshall.P
+SOURCES = $(bison___SOURCES)
+OBJECTS = $(bison___OBJECTS)
+
+all: all-redirect
+.SUFFIXES:
+.SUFFIXES: .S .c .cc .dvi .info .o .ps .s .texi .texinfo .txi
+$(srcdir)/Makefile.in: Makefile.am $(top_srcdir)/configure.in $(ACLOCAL_M4)
+ cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status $(BUILT_SOURCES)
+ cd $(top_builddir) \
+ && CONFIG_FILES=$@ CONFIG_HEADERS= $(SHELL) ./config.status
+
+$(ACLOCAL_M4): configure.in
+ cd $(srcdir) && $(ACLOCAL)
+
+config.status: $(srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ $(SHELL) ./config.status --recheck
+$(srcdir)/configure: $(srcdir)/configure.in $(ACLOCAL_M4) $(CONFIGURE_DEPENDENCIES)
+ cd $(srcdir) && $(AUTOCONF)
+
+mostlyclean-binPROGRAMS:
+
+clean-binPROGRAMS:
+ -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS)
+
+distclean-binPROGRAMS:
+
+maintainer-clean-binPROGRAMS:
+
+install-binPROGRAMS: $(bin_PROGRAMS)
+ @$(NORMAL_INSTALL)
+ $(mkinstalldirs) $(DESTDIR)$(bindir)
+ @list='$(bin_PROGRAMS)'; for p in $$list; do \
+ if test -f $$p; then \
+ echo " $(INSTALL_PROGRAM) $$p $(DESTDIR)$(bindir)/`echo $$p|sed 's/$(EXEEXT)$$//'|sed '$(transform)'|sed 's/$$/$(EXEEXT)/'`"; \
+ $(INSTALL_PROGRAM) $$p $(DESTDIR)$(bindir)/`echo $$p|sed 's/$(EXEEXT)$$//'|sed '$(transform)'|sed 's/$$/$(EXEEXT)/'`; \
+ else :; fi; \
+ done
+
+uninstall-binPROGRAMS:
+ @$(NORMAL_UNINSTALL)
+ list='$(bin_PROGRAMS)'; for p in $$list; do \
+ rm -f $(DESTDIR)$(bindir)/`echo $$p|sed 's/$(EXEEXT)$$//'|sed '$(transform)'|sed 's/$$/$(EXEEXT)/'`; \
+ done
+
+.s.o:
+ $(COMPILE) -c $<
+
+.S.o:
+ $(COMPILE) -c $<
+
+mostlyclean-compile:
+ -rm -f *.o core *.core
+
+clean-compile:
+
+distclean-compile:
+ -rm -f *.tab.c
+
+maintainer-clean-compile:
+
+bison++: $(bison___OBJECTS) $(bison___DEPENDENCIES)
+ @rm -f bison++
+ $(CXXLINK) $(bison___LDFLAGS) $(bison___OBJECTS) $(bison___LDADD) $(LIBS)
+.cc.o:
+ $(CXXCOMPILE) -c $<
+
+$(srcdir)/version.texi: stamp-vti
+ @:
+
+$(srcdir)/stamp-vti: bison.texinfo $(top_srcdir)/configure.in
+ @echo "@set UPDATED `$(SHELL) $(srcdir)/mdate-sh $(srcdir)/bison.texinfo`" > vti.tmp
+ @echo "@set EDITION $(VERSION)" >> vti.tmp
+ @echo "@set VERSION $(VERSION)" >> vti.tmp
+ @cmp -s vti.tmp $(srcdir)/version.texi \
+ || (echo "Updating $(srcdir)/version.texi"; \
+ cp vti.tmp $(srcdir)/version.texi)
+ -@rm -f vti.tmp
+ @cp $(srcdir)/version.texi $@
+
+mostlyclean-vti:
+ -rm -f vti.tmp
+
+clean-vti:
+
+distclean-vti:
+
+maintainer-clean-vti:
+ -rm -f $(srcdir)/stamp-vti $(srcdir)/version.texi
+
+bison.info: bison.texinfo version.texi
+bison.dvi: bison.texinfo version.texi
+
+
+DVIPS = dvips
+
+.texi.info:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+
+.texi.dvi:
+ TEXINPUTS=.:$$TEXINPUTS \
+ MAKEINFO='$(MAKEINFO) -I $(srcdir)' $(TEXI2DVI) $<
+
+.texi:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+
+.texinfo.info:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+
+.texinfo:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+
+.texinfo.dvi:
+ TEXINPUTS=.:$$TEXINPUTS \
+ MAKEINFO='$(MAKEINFO) -I $(srcdir)' $(TEXI2DVI) $<
+
+.txi.info:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+
+.txi.dvi:
+ TEXINPUTS=.:$$TEXINPUTS \
+ MAKEINFO='$(MAKEINFO) -I $(srcdir)' $(TEXI2DVI) $<
+
+.txi:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+.dvi.ps:
+ $(DVIPS) $< -o $@
+
+install-info-am: $(INFO_DEPS)
+ @$(NORMAL_INSTALL)
+ $(mkinstalldirs) $(DESTDIR)$(infodir)
+ @list='$(INFO_DEPS)'; \
+ for file in $$list; do \
+ d=$(srcdir); \
+ for ifile in `cd $$d && echo $$file $$file-[0-9] $$file-[0-9][0-9]`; do \
+ if test -f $$d/$$ifile; then \
+ echo " $(INSTALL_DATA) $$d/$$ifile $(DESTDIR)$(infodir)/$$ifile"; \
+ $(INSTALL_DATA) $$d/$$ifile $(DESTDIR)$(infodir)/$$ifile; \
+ else : ; fi; \
+ done; \
+ done
+ @$(POST_INSTALL)
+ @if $(SHELL) -c 'install-info --version | sed 1q | fgrep -s -v -i debian' >/dev/null 2>&1; then \
+ list='$(INFO_DEPS)'; \
+ for file in $$list; do \
+ echo " install-info --info-dir=$(DESTDIR)$(infodir) $(DESTDIR)$(infodir)/$$file";\
+ install-info --info-dir=$(DESTDIR)$(infodir) $(DESTDIR)$(infodir)/$$file || :;\
+ done; \
+ else : ; fi
+
+uninstall-info:
+ $(PRE_UNINSTALL)
+ @if $(SHELL) -c 'install-info --version | sed 1q | fgrep -s -v -i debian' >/dev/null 2>&1; then \
+ ii=yes; \
+ else ii=; fi; \
+ list='$(INFO_DEPS)'; \
+ for file in $$list; do \
+ test -z "$$ii" \
+ || install-info --info-dir=$(DESTDIR)$(infodir) --remove $$file; \
+ done
+ @$(NORMAL_UNINSTALL)
+ list='$(INFO_DEPS)'; \
+ for file in $$list; do \
+ (cd $(DESTDIR)$(infodir) && rm -f $$file $$file-[0-9] $$file-[0-9][0-9]); \
+ done
+
+dist-info: $(INFO_DEPS)
+ list='$(INFO_DEPS)'; \
+ for base in $$list; do \
+ d=$(srcdir); \
+ for file in `cd $$d && eval echo $$base*`; do \
+ test -f $(distdir)/$$file \
+ || ln $$d/$$file $(distdir)/$$file 2> /dev/null \
+ || cp -p $$d/$$file $(distdir)/$$file; \
+ done; \
+ done
+
+mostlyclean-aminfo:
+ -rm -f bison.aux bison.cp bison.cps bison.dvi bison.fn bison.fns \
+ bison.ky bison.kys bison.ps bison.log bison.pg bison.toc \
+ bison.tp bison.tps bison.vr bison.vrs bison.op bison.tr \
+ bison.cv bison.cn
+
+clean-aminfo:
+
+distclean-aminfo:
+
+maintainer-clean-aminfo:
+ cd $(srcdir) && for i in $(INFO_DEPS); do \
+ rm -f $$i; \
+ if test "`echo $$i-[0-9]*`" != "$$i-[0-9]*"; then \
+ rm -f $$i-[0-9]*; \
+ fi; \
+ done
+
+install-man1:
+ $(mkinstalldirs) $(DESTDIR)$(man1dir)
+ @list='$(man1_MANS)'; \
+ l2='$(man_MANS)'; for i in $$l2; do \
+ case "$$i" in \
+ *.1*) list="$$list $$i" ;; \
+ esac; \
+ done; \
+ for i in $$list; do \
+ if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
+ else file=$$i; fi; \
+ ext=`echo $$i | sed -e 's/^.*\\.//'`; \
+ inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
+ inst=`echo $$inst | sed '$(transform)'`.$$ext; \
+ echo " $(INSTALL_DATA) $$file $(DESTDIR)$(man1dir)/$$inst"; \
+ $(INSTALL_DATA) $$file $(DESTDIR)$(man1dir)/$$inst; \
+ done
+
+uninstall-man1:
+ @list='$(man1_MANS)'; \
+ l2='$(man_MANS)'; for i in $$l2; do \
+ case "$$i" in \
+ *.1*) list="$$list $$i" ;; \
+ esac; \
+ done; \
+ for i in $$list; do \
+ ext=`echo $$i | sed -e 's/^.*\\.//'`; \
+ inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
+ inst=`echo $$inst | sed '$(transform)'`.$$ext; \
+ echo " rm -f $(DESTDIR)$(man1dir)/$$inst"; \
+ rm -f $(DESTDIR)$(man1dir)/$$inst; \
+ done
+install-man: $(MANS)
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-man1
+uninstall-man:
+ @$(NORMAL_UNINSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) uninstall-man1
+
+install-pkgdataDATA: $(pkgdata_DATA)
+ @$(NORMAL_INSTALL)
+ $(mkinstalldirs) $(DESTDIR)$(pkgdatadir)
+ @list='$(pkgdata_DATA)'; for p in $$list; do \
+ if test -f $(srcdir)/$$p; then \
+ echo " $(INSTALL_DATA) $(srcdir)/$$p $(DESTDIR)$(pkgdatadir)/$$p"; \
+ $(INSTALL_DATA) $(srcdir)/$$p $(DESTDIR)$(pkgdatadir)/$$p; \
+ else if test -f $$p; then \
+ echo " $(INSTALL_DATA) $$p $(DESTDIR)$(pkgdatadir)/$$p"; \
+ $(INSTALL_DATA) $$p $(DESTDIR)$(pkgdatadir)/$$p; \
+ fi; fi; \
+ done
+
+uninstall-pkgdataDATA:
+ @$(NORMAL_UNINSTALL)
+ list='$(pkgdata_DATA)'; for p in $$list; do \
+ rm -f $(DESTDIR)$(pkgdatadir)/$$p; \
+ done
+
+tags: TAGS
+
+ID: $(HEADERS) $(SOURCES) $(LISP)
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ here=`pwd` && cd $(srcdir) \
+ && mkid -f$$here/ID $$unique $(LISP)
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ test -z "$(ETAGS_ARGS)$$unique$(LISP)$$tags" \
+ || (cd $(srcdir) && etags -o $$here/TAGS $(ETAGS_ARGS) $$tags $$unique $(LISP))
+
+mostlyclean-tags:
+
+clean-tags:
+
+distclean-tags:
+ -rm -f TAGS ID
+
+maintainer-clean-tags:
+
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+
+# This target untars the dist file and tries a VPATH configuration. Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
+distcheck: dist
+ -rm -rf $(distdir)
+ GZIP=$(GZIP_ENV) $(TAR) zxf $(distdir).tar.gz
+ mkdir $(distdir)/=build
+ mkdir $(distdir)/=inst
+ dc_install_base=`cd $(distdir)/=inst && pwd`; \
+ cd $(distdir)/=build \
+ && ../configure --srcdir=.. --prefix=$$dc_install_base \
+ && $(MAKE) $(AM_MAKEFLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) dvi \
+ && $(MAKE) $(AM_MAKEFLAGS) check \
+ && $(MAKE) $(AM_MAKEFLAGS) install \
+ && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+ && $(MAKE) $(AM_MAKEFLAGS) dist
+ -rm -rf $(distdir)
+ @banner="$(distdir).tar.gz is ready for distribution"; \
+ dashes=`echo "$$banner" | sed s/./=/g`; \
+ echo "$$dashes"; \
+ echo "$$banner"; \
+ echo "$$dashes"
+dist: distdir
+ -chmod -R a+r $(distdir)
+ GZIP=$(GZIP_ENV) $(TAR) chozf $(distdir).tar.gz $(distdir)
+ -rm -rf $(distdir)
+dist-all: distdir
+ -chmod -R a+r $(distdir)
+ GZIP=$(GZIP_ENV) $(TAR) chozf $(distdir).tar.gz $(distdir)
+ -rm -rf $(distdir)
+distdir: $(DISTFILES)
+ -rm -rf $(distdir)
+ mkdir $(distdir)
+ -chmod 777 $(distdir)
+ here=`cd $(top_builddir) && pwd`; \
+ top_distdir=`cd $(distdir) && pwd`; \
+ distdir=`cd $(distdir) && pwd`; \
+ cd $(top_srcdir) \
+ && $(AUTOMAKE) --include-deps --build-dir=$$here --srcdir-name=$(top_srcdir) --output-dir=$$top_distdir --foreign Makefile
+ @for file in $(DISTFILES); do \
+ d=$(srcdir); \
+ if test -d $$d/$$file; then \
+ cp -pr $$d/$$file $(distdir)/$$file; \
+ else \
+ test -f $(distdir)/$$file \
+ || ln $$d/$$file $(distdir)/$$file 2> /dev/null \
+ || cp -p $$d/$$file $(distdir)/$$file || :; \
+ fi; \
+ done
+ $(MAKE) $(AM_MAKEFLAGS) top_distdir="$(top_distdir)" distdir="$(distdir)" dist-info
+
+DEPS_MAGIC := $(shell mkdir .deps > /dev/null 2>&1 || :)
+
+-include $(DEP_FILES)
+
+mostlyclean-depend:
+
+clean-depend:
+
+distclean-depend:
+ -rm -rf .deps
+
+maintainer-clean-depend:
+
+%.o: %.c
+ @echo '$(COMPILE) -c $<'; \
+ $(COMPILE) -Wp,-MD,.deps/$(*F).pp -c $<
+ @-cp .deps/$(*F).pp .deps/$(*F).P; \
+ tr ' ' '\012' < .deps/$(*F).pp \
+ | sed -e 's/^\\$$//' -e '/^$$/ d' -e '/:$$/ d' -e 's/$$/ :/' \
+ >> .deps/$(*F).P; \
+ rm .deps/$(*F).pp
+
+%.lo: %.c
+ @echo '$(LTCOMPILE) -c $<'; \
+ $(LTCOMPILE) -Wp,-MD,.deps/$(*F).pp -c $<
+ @-sed -e 's/^\([^:]*\)\.o[ ]*:/\1.lo \1.o :/' \
+ < .deps/$(*F).pp > .deps/$(*F).P; \
+ tr ' ' '\012' < .deps/$(*F).pp \
+ | sed -e 's/^\\$$//' -e '/^$$/ d' -e '/:$$/ d' -e 's/$$/ :/' \
+ >> .deps/$(*F).P; \
+ rm -f .deps/$(*F).pp
+
+%.o: %.cc
+ @echo '$(CXXCOMPILE) -c $<'; \
+ $(CXXCOMPILE) -Wp,-MD,.deps/$(*F).pp -c $<
+ @-cp .deps/$(*F).pp .deps/$(*F).P; \
+ tr ' ' '\012' < .deps/$(*F).pp \
+ | sed -e 's/^\\$$//' -e '/^$$/ d' -e '/:$$/ d' -e 's/$$/ :/' \
+ >> .deps/$(*F).P; \
+ rm .deps/$(*F).pp
+
+%.lo: %.cc
+ @echo '$(LTCXXCOMPILE) -c $<'; \
+ $(LTCXXCOMPILE) -Wp,-MD,.deps/$(*F).pp -c $<
+ @-sed -e 's/^\([^:]*\)\.o[ ]*:/\1.lo \1.o :/' \
+ < .deps/$(*F).pp > .deps/$(*F).P; \
+ tr ' ' '\012' < .deps/$(*F).pp \
+ | sed -e 's/^\\$$//' -e '/^$$/ d' -e '/:$$/ d' -e 's/$$/ :/' \
+ >> .deps/$(*F).P; \
+ rm -f .deps/$(*F).pp
+info-am: $(INFO_DEPS)
+info: info-am
+dvi-am: $(DVIS)
+dvi: dvi-am
+check-am: all-am
+check: check-am
+installcheck-am:
+installcheck: installcheck-am
+install-exec-am: install-binPROGRAMS
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-exec-hook
+install-exec: install-exec-am
+
+install-data-am: install-info-am install-man install-pkgdataDATA
+install-data: install-data-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+install: install-am
+uninstall-am: uninstall-binPROGRAMS uninstall-info uninstall-man \
+ uninstall-pkgdataDATA
+uninstall: uninstall-am
+all-am: Makefile $(INFO_DEPS) $(PROGRAMS) $(MANS) $(DATA)
+all-redirect: all-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) AM_INSTALL_PROGRAM_FLAGS=-s install
+installdirs:
+ $(mkinstalldirs) $(DESTDIR)$(bindir) $(DESTDIR)$(infodir) \
+ $(DESTDIR)$(mandir)/man1 $(DESTDIR)$(pkgdatadir)
+
+
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -rm -f Makefile $(CONFIG_CLEAN_FILES)
+ -rm -f config.cache config.log stamp-h stamp-h[0-9]*
+
+maintainer-clean-generic:
+mostlyclean-am: mostlyclean-binPROGRAMS mostlyclean-compile \
+ mostlyclean-vti mostlyclean-aminfo mostlyclean-tags \
+ mostlyclean-depend mostlyclean-generic
+
+mostlyclean: mostlyclean-am
+
+clean-am: clean-binPROGRAMS clean-compile clean-vti clean-aminfo \
+ clean-tags clean-depend clean-generic mostlyclean-am
+
+clean: clean-am
+
+distclean-am: distclean-binPROGRAMS distclean-compile distclean-vti \
+ distclean-aminfo distclean-tags distclean-depend \
+ distclean-generic clean-am
+
+distclean: distclean-am
+ -rm -f config.status
+
+maintainer-clean-am: maintainer-clean-binPROGRAMS \
+ maintainer-clean-compile maintainer-clean-vti \
+ maintainer-clean-aminfo maintainer-clean-tags \
+ maintainer-clean-depend maintainer-clean-generic \
+ distclean-am
+ @echo "This command is intended for maintainers to use;"
+ @echo "it deletes files that may require special tools to rebuild."
+
+maintainer-clean: maintainer-clean-am
+ -rm -f config.status
+
+.PHONY: mostlyclean-binPROGRAMS distclean-binPROGRAMS clean-binPROGRAMS \
+maintainer-clean-binPROGRAMS uninstall-binPROGRAMS install-binPROGRAMS \
+mostlyclean-compile distclean-compile clean-compile \
+maintainer-clean-compile mostlyclean-vti distclean-vti clean-vti \
+maintainer-clean-vti install-info-am uninstall-info mostlyclean-aminfo \
+distclean-aminfo clean-aminfo maintainer-clean-aminfo install-man1 \
+uninstall-man1 install-man uninstall-man uninstall-pkgdataDATA \
+install-pkgdataDATA tags mostlyclean-tags distclean-tags clean-tags \
+maintainer-clean-tags distdir mostlyclean-depend distclean-depend \
+clean-depend maintainer-clean-depend info-am info dvi-am dvi check \
+check-am installcheck-am installcheck install-exec-am install-exec \
+install-data-am install-data install-am install uninstall-am uninstall \
+all-redirect all-am all installdirs mostlyclean-generic \
+distclean-generic clean-generic maintainer-clean-generic clean \
+mostlyclean distclean maintainer-clean
+
+
+install-exec-hook:
+ cp bison $(bindir)
+ cp bison++.yacc $(bindir)
+
+uninstall-hook:
+ rm $(bindir)/bison++.yacc
+ rm $(bindir)/bison
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
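[Editor's note] Beyond the standard automake targets, install-exec-hook above copies the bison and bison++.yacc wrappers with plain $(bindir), not $(DESTDIR)$(bindir), so a staged install only redirects the regular rules; the hook still writes to the real bindir. A minimal sketch of a staged install using the variables defined above (the staging path is illustrative):

    make
    make DESTDIR=/tmp/bison++-stage install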
diff --git a/tools/bison++/Makefile.am b/tools/bison++/Makefile.am
new file mode 100644
index 000000000..b25b23b2f
--- /dev/null
+++ b/tools/bison++/Makefile.am
@@ -0,0 +1,49 @@
+bin_PROGRAMS = bison++
+AUTOMAKE_OPTIONS = foreign
+
+bison___SOURCES = closure.cc \
+ derives.cc \
+ getargs.cc \
+ getopt1.cc \
+ lalr.cc \
+ lr0.cc \
+ nullable.cc \
+ print.cc \
+ reduce.cc \
+ version.cc \
+ warshall.cc \
+ allocate.cc \
+ conflict.cc \
+ files.cc \
+ getopt.cc \
+ gram.cc \
+ lex.cc \
+ main.cc \
+ output.cc \
+ reader.cc \
+ symtab.cc\
+ old.c
+
+info_TEXINFOS = bison.texinfo
+man_MANS = bison++.1 bison.1 bison++.yacc.1
+pkgdata_DATA = bison.cc bison.hairy bison.h Example
+CXX=g++
+PFILE = bison.cc
+PFILE1 = bison.hairy
+HFILE = bison.h
+INCLUDES = -DXPFILE=\"$(datadir)/bison++/$(PFILE)\" \
+ -DXHFILE=\"$(datadir)/bison++/$(HFILE)\" \
+ -DXPFILE1=\"$(datadir)/bison++/$(PFILE1)\"
+
+install-exec-hook:
+ cp bison $(bindir)
+ cp bison++.yacc $(bindir)
+
+uninstall-hook:
+ rm $(bindir)/bison++.yacc
+ rm $(bindir)/bison
+
+
+
+
+
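[Editor's note] Makefile.am above is the hand-maintained automake input; the rules in the generated Makefile rebuild the rest of the build system on demand. Regenerating it by hand amounts to the usual autotools chain, a sketch using the tool names the Makefile itself is configured with:

    aclocal
    automake --foreign
    autoconf
    ./configure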
diff --git a/tools/bison++/Makefile.in b/tools/bison++/Makefile.in
new file mode 100644
index 000000000..426e77a57
--- /dev/null
+++ b/tools/bison++/Makefile.in
@@ -0,0 +1,627 @@
+# Makefile.in generated automatically by automake 1.4-p6 from Makefile.am
+
+# Copyright (C) 1994, 1995-8, 1999, 2001 Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+
+SHELL = @SHELL@
+
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+
+bindir = @bindir@
+sbindir = @sbindir@
+libexecdir = @libexecdir@
+datadir = @datadir@
+sysconfdir = @sysconfdir@
+sharedstatedir = @sharedstatedir@
+localstatedir = @localstatedir@
+libdir = @libdir@
+infodir = @infodir@
+mandir = @mandir@
+includedir = @includedir@
+oldincludedir = /usr/include
+
+DESTDIR =
+
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+
+top_builddir = .
+
+ACLOCAL = @ACLOCAL@
+AUTOCONF = @AUTOCONF@
+AUTOMAKE = @AUTOMAKE@
+AUTOHEADER = @AUTOHEADER@
+
+INSTALL = @INSTALL@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@ $(AM_INSTALL_PROGRAM_FLAGS)
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+transform = @program_transform_name@
+
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+AWK = @AWK@
+CC = @CC@
+LN_S = @LN_S@
+MAKEINFO = @MAKEINFO@
+PACKAGE = @PACKAGE@
+VERSION = @VERSION@
+
+bin_PROGRAMS = bison++
+AUTOMAKE_OPTIONS = foreign
+
+bison___SOURCES = closure.cc derives.cc getargs.cc getopt1.cc lalr.cc lr0.cc nullable.cc print.cc reduce.cc version.cc warshall.cc allocate.cc conflict.cc files.cc getopt.cc gram.cc lex.cc main.cc output.cc reader.cc symtab.cc old.c
+
+
+info_TEXINFOS = bison.texinfo
+man_MANS = bison++.1 bison.1 bison++.yacc.1
+pkgdata_DATA = bison.cc bison.hairy bison.h Example
+CXX = g++
+PFILE = bison.cc
+PFILE1 = bison.hairy
+HFILE = bison.h
+INCLUDES = -DXPFILE=\"$(datadir)/bison++/$(PFILE)\" -DXHFILE=\"$(datadir)/bison++/$(HFILE)\" -DXPFILE1=\"$(datadir)/bison++/$(PFILE1)\"
+
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
+CONFIG_CLEAN_FILES =
+PROGRAMS = $(bin_PROGRAMS)
+
+
+DEFS = @DEFS@ -I. -I$(srcdir)
+CPPFLAGS = @CPPFLAGS@
+LDFLAGS = @LDFLAGS@
+LIBS = @LIBS@
+bison___OBJECTS = closure.o derives.o getargs.o getopt1.o lalr.o lr0.o \
+nullable.o print.o reduce.o version.o warshall.o allocate.o conflict.o \
+files.o getopt.o gram.o lex.o main.o output.o reader.o symtab.o old.o
+bison___LDADD = $(LDADD)
+bison___DEPENDENCIES =
+bison___LDFLAGS =
+CXXFLAGS = @CXXFLAGS@
+CXXCOMPILE = $(CXX) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+CXXLD = $(CXX)
+CXXLINK = $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(LDFLAGS) -o $@
+CFLAGS = @CFLAGS@
+COMPILE = $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(LDFLAGS) -o $@
+TEXI2DVI = texi2dvi
+INFO_DEPS = bison.info
+DVIS = bison.dvi
+TEXINFOS = bison.texinfo
+man1dir = $(mandir)/man1
+MANS = $(man_MANS)
+
+NROFF = nroff
+DATA = $(pkgdata_DATA)
+
+DIST_COMMON = COPYING ChangeLog INSTALL Makefile.am Makefile.in \
+aclocal.m4 configure configure.in install-sh mdate-sh missing \
+mkinstalldirs stamp-vti texinfo.tex version.texi
+
+
+DISTFILES = $(DIST_COMMON) $(SOURCES) $(HEADERS) $(TEXINFOS) $(EXTRA_DIST)
+
+TAR = tar
+GZIP_ENV = --best
+DEP_FILES = .deps/allocate.P .deps/closure.P .deps/conflict.P \
+.deps/derives.P .deps/files.P .deps/getargs.P .deps/getopt.P \
+.deps/getopt1.P .deps/gram.P .deps/lalr.P .deps/lex.P .deps/lr0.P \
+.deps/main.P .deps/nullable.P .deps/old.P .deps/output.P .deps/print.P \
+.deps/reader.P .deps/reduce.P .deps/symtab.P .deps/version.P \
+.deps/warshall.P
+SOURCES = $(bison___SOURCES)
+OBJECTS = $(bison___OBJECTS)
+
+all: all-redirect
+.SUFFIXES:
+.SUFFIXES: .S .c .cc .dvi .info .o .ps .s .texi .texinfo .txi
+$(srcdir)/Makefile.in: Makefile.am $(top_srcdir)/configure.in $(ACLOCAL_M4)
+ cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status $(BUILT_SOURCES)
+ cd $(top_builddir) \
+ && CONFIG_FILES=$@ CONFIG_HEADERS= $(SHELL) ./config.status
+
+$(ACLOCAL_M4): configure.in
+ cd $(srcdir) && $(ACLOCAL)
+
+config.status: $(srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ $(SHELL) ./config.status --recheck
+$(srcdir)/configure: $(srcdir)/configure.in $(ACLOCAL_M4) $(CONFIGURE_DEPENDENCIES)
+ cd $(srcdir) && $(AUTOCONF)
+
+mostlyclean-binPROGRAMS:
+
+clean-binPROGRAMS:
+ -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS)
+
+distclean-binPROGRAMS:
+
+maintainer-clean-binPROGRAMS:
+
+install-binPROGRAMS: $(bin_PROGRAMS)
+ @$(NORMAL_INSTALL)
+ $(mkinstalldirs) $(DESTDIR)$(bindir)
+ @list='$(bin_PROGRAMS)'; for p in $$list; do \
+ if test -f $$p; then \
+ echo " $(INSTALL_PROGRAM) $$p $(DESTDIR)$(bindir)/`echo $$p|sed 's/$(EXEEXT)$$//'|sed '$(transform)'|sed 's/$$/$(EXEEXT)/'`"; \
+ $(INSTALL_PROGRAM) $$p $(DESTDIR)$(bindir)/`echo $$p|sed 's/$(EXEEXT)$$//'|sed '$(transform)'|sed 's/$$/$(EXEEXT)/'`; \
+ else :; fi; \
+ done
+
+uninstall-binPROGRAMS:
+ @$(NORMAL_UNINSTALL)
+ list='$(bin_PROGRAMS)'; for p in $$list; do \
+ rm -f $(DESTDIR)$(bindir)/`echo $$p|sed 's/$(EXEEXT)$$//'|sed '$(transform)'|sed 's/$$/$(EXEEXT)/'`; \
+ done
+
+.s.o:
+ $(COMPILE) -c $<
+
+.S.o:
+ $(COMPILE) -c $<
+
+mostlyclean-compile:
+ -rm -f *.o core *.core
+
+clean-compile:
+
+distclean-compile:
+ -rm -f *.tab.c
+
+maintainer-clean-compile:
+
+bison++: $(bison___OBJECTS) $(bison___DEPENDENCIES)
+ @rm -f bison++
+ $(CXXLINK) $(bison___LDFLAGS) $(bison___OBJECTS) $(bison___LDADD) $(LIBS)
+.cc.o:
+ $(CXXCOMPILE) -c $<
+
+$(srcdir)/version.texi: stamp-vti
+ @:
+
+$(srcdir)/stamp-vti: bison.texinfo $(top_srcdir)/configure.in
+ @echo "@set UPDATED `$(SHELL) $(srcdir)/mdate-sh $(srcdir)/bison.texinfo`" > vti.tmp
+ @echo "@set EDITION $(VERSION)" >> vti.tmp
+ @echo "@set VERSION $(VERSION)" >> vti.tmp
+ @cmp -s vti.tmp $(srcdir)/version.texi \
+ || (echo "Updating $(srcdir)/version.texi"; \
+ cp vti.tmp $(srcdir)/version.texi)
+ -@rm -f vti.tmp
+ @cp $(srcdir)/version.texi $@
+
+mostlyclean-vti:
+ -rm -f vti.tmp
+
+clean-vti:
+
+distclean-vti:
+
+maintainer-clean-vti:
+ -rm -f $(srcdir)/stamp-vti $(srcdir)/version.texi
+
+bison.info: bison.texinfo version.texi
+bison.dvi: bison.texinfo version.texi
+
+
+DVIPS = dvips
+
+.texi.info:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+
+.texi.dvi:
+ TEXINPUTS=.:$$TEXINPUTS \
+ MAKEINFO='$(MAKEINFO) -I $(srcdir)' $(TEXI2DVI) $<
+
+.texi:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+
+.texinfo.info:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+
+.texinfo:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+
+.texinfo.dvi:
+ TEXINPUTS=.:$$TEXINPUTS \
+ MAKEINFO='$(MAKEINFO) -I $(srcdir)' $(TEXI2DVI) $<
+
+.txi.info:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+
+.txi.dvi:
+ TEXINPUTS=.:$$TEXINPUTS \
+ MAKEINFO='$(MAKEINFO) -I $(srcdir)' $(TEXI2DVI) $<
+
+.txi:
+ @cd $(srcdir) && rm -f $@ $@-[0-9] $@-[0-9][0-9]
+ cd $(srcdir) \
+ && $(MAKEINFO) `echo $< | sed 's,.*/,,'`
+.dvi.ps:
+ $(DVIPS) $< -o $@
+
+install-info-am: $(INFO_DEPS)
+ @$(NORMAL_INSTALL)
+ $(mkinstalldirs) $(DESTDIR)$(infodir)
+ @list='$(INFO_DEPS)'; \
+ for file in $$list; do \
+ d=$(srcdir); \
+ for ifile in `cd $$d && echo $$file $$file-[0-9] $$file-[0-9][0-9]`; do \
+ if test -f $$d/$$ifile; then \
+ echo " $(INSTALL_DATA) $$d/$$ifile $(DESTDIR)$(infodir)/$$ifile"; \
+ $(INSTALL_DATA) $$d/$$ifile $(DESTDIR)$(infodir)/$$ifile; \
+ else : ; fi; \
+ done; \
+ done
+ @$(POST_INSTALL)
+ @if $(SHELL) -c 'install-info --version | sed 1q | fgrep -s -v -i debian' >/dev/null 2>&1; then \
+ list='$(INFO_DEPS)'; \
+ for file in $$list; do \
+ echo " install-info --info-dir=$(DESTDIR)$(infodir) $(DESTDIR)$(infodir)/$$file";\
+ install-info --info-dir=$(DESTDIR)$(infodir) $(DESTDIR)$(infodir)/$$file || :;\
+ done; \
+ else : ; fi
+
+uninstall-info:
+ $(PRE_UNINSTALL)
+ @if $(SHELL) -c 'install-info --version | sed 1q | fgrep -s -v -i debian' >/dev/null 2>&1; then \
+ ii=yes; \
+ else ii=; fi; \
+ list='$(INFO_DEPS)'; \
+ for file in $$list; do \
+ test -z "$$ii" \
+ || install-info --info-dir=$(DESTDIR)$(infodir) --remove $$file; \
+ done
+ @$(NORMAL_UNINSTALL)
+ list='$(INFO_DEPS)'; \
+ for file in $$list; do \
+ (cd $(DESTDIR)$(infodir) && rm -f $$file $$file-[0-9] $$file-[0-9][0-9]); \
+ done
+
+dist-info: $(INFO_DEPS)
+ list='$(INFO_DEPS)'; \
+ for base in $$list; do \
+ d=$(srcdir); \
+ for file in `cd $$d && eval echo $$base*`; do \
+ test -f $(distdir)/$$file \
+ || ln $$d/$$file $(distdir)/$$file 2> /dev/null \
+ || cp -p $$d/$$file $(distdir)/$$file; \
+ done; \
+ done
+
+mostlyclean-aminfo:
+ -rm -f bison.aux bison.cp bison.cps bison.dvi bison.fn bison.fns \
+ bison.ky bison.kys bison.ps bison.log bison.pg bison.toc \
+ bison.tp bison.tps bison.vr bison.vrs bison.op bison.tr \
+ bison.cv bison.cn
+
+clean-aminfo:
+
+distclean-aminfo:
+
+maintainer-clean-aminfo:
+ cd $(srcdir) && for i in $(INFO_DEPS); do \
+ rm -f $$i; \
+ if test "`echo $$i-[0-9]*`" != "$$i-[0-9]*"; then \
+ rm -f $$i-[0-9]*; \
+ fi; \
+ done
+
+install-man1:
+ $(mkinstalldirs) $(DESTDIR)$(man1dir)
+ @list='$(man1_MANS)'; \
+ l2='$(man_MANS)'; for i in $$l2; do \
+ case "$$i" in \
+ *.1*) list="$$list $$i" ;; \
+ esac; \
+ done; \
+ for i in $$list; do \
+ if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
+ else file=$$i; fi; \
+ ext=`echo $$i | sed -e 's/^.*\\.//'`; \
+ inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
+ inst=`echo $$inst | sed '$(transform)'`.$$ext; \
+ echo " $(INSTALL_DATA) $$file $(DESTDIR)$(man1dir)/$$inst"; \
+ $(INSTALL_DATA) $$file $(DESTDIR)$(man1dir)/$$inst; \
+ done
+
+uninstall-man1:
+ @list='$(man1_MANS)'; \
+ l2='$(man_MANS)'; for i in $$l2; do \
+ case "$$i" in \
+ *.1*) list="$$list $$i" ;; \
+ esac; \
+ done; \
+ for i in $$list; do \
+ ext=`echo $$i | sed -e 's/^.*\\.//'`; \
+ inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
+ inst=`echo $$inst | sed '$(transform)'`.$$ext; \
+ echo " rm -f $(DESTDIR)$(man1dir)/$$inst"; \
+ rm -f $(DESTDIR)$(man1dir)/$$inst; \
+ done
+install-man: $(MANS)
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-man1
+uninstall-man:
+ @$(NORMAL_UNINSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) uninstall-man1
+
+install-pkgdataDATA: $(pkgdata_DATA)
+ @$(NORMAL_INSTALL)
+ $(mkinstalldirs) $(DESTDIR)$(pkgdatadir)
+ @list='$(pkgdata_DATA)'; for p in $$list; do \
+ if test -f $(srcdir)/$$p; then \
+ echo " $(INSTALL_DATA) $(srcdir)/$$p $(DESTDIR)$(pkgdatadir)/$$p"; \
+ $(INSTALL_DATA) $(srcdir)/$$p $(DESTDIR)$(pkgdatadir)/$$p; \
+ else if test -f $$p; then \
+ echo " $(INSTALL_DATA) $$p $(DESTDIR)$(pkgdatadir)/$$p"; \
+ $(INSTALL_DATA) $$p $(DESTDIR)$(pkgdatadir)/$$p; \
+ fi; fi; \
+ done
+
+uninstall-pkgdataDATA:
+ @$(NORMAL_UNINSTALL)
+ list='$(pkgdata_DATA)'; for p in $$list; do \
+ rm -f $(DESTDIR)$(pkgdatadir)/$$p; \
+ done
+
+tags: TAGS
+
+ID: $(HEADERS) $(SOURCES) $(LISP)
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ here=`pwd` && cd $(srcdir) \
+ && mkid -f$$here/ID $$unique $(LISP)
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ test -z "$(ETAGS_ARGS)$$unique$(LISP)$$tags" \
+ || (cd $(srcdir) && etags -o $$here/TAGS $(ETAGS_ARGS) $$tags $$unique $(LISP))
+
+mostlyclean-tags:
+
+clean-tags:
+
+distclean-tags:
+ -rm -f TAGS ID
+
+maintainer-clean-tags:
+
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+
+# This target untars the dist file and tries a VPATH configuration. Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
+distcheck: dist
+ -rm -rf $(distdir)
+ GZIP=$(GZIP_ENV) $(TAR) zxf $(distdir).tar.gz
+ mkdir $(distdir)/=build
+ mkdir $(distdir)/=inst
+ dc_install_base=`cd $(distdir)/=inst && pwd`; \
+ cd $(distdir)/=build \
+ && ../configure --srcdir=.. --prefix=$$dc_install_base \
+ && $(MAKE) $(AM_MAKEFLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) dvi \
+ && $(MAKE) $(AM_MAKEFLAGS) check \
+ && $(MAKE) $(AM_MAKEFLAGS) install \
+ && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+ && $(MAKE) $(AM_MAKEFLAGS) dist
+ -rm -rf $(distdir)
+ @banner="$(distdir).tar.gz is ready for distribution"; \
+ dashes=`echo "$$banner" | sed s/./=/g`; \
+ echo "$$dashes"; \
+ echo "$$banner"; \
+ echo "$$dashes"
+dist: distdir
+ -chmod -R a+r $(distdir)
+ GZIP=$(GZIP_ENV) $(TAR) chozf $(distdir).tar.gz $(distdir)
+ -rm -rf $(distdir)
+dist-all: distdir
+ -chmod -R a+r $(distdir)
+ GZIP=$(GZIP_ENV) $(TAR) chozf $(distdir).tar.gz $(distdir)
+ -rm -rf $(distdir)
+distdir: $(DISTFILES)
+ -rm -rf $(distdir)
+ mkdir $(distdir)
+ -chmod 777 $(distdir)
+ here=`cd $(top_builddir) && pwd`; \
+ top_distdir=`cd $(distdir) && pwd`; \
+ distdir=`cd $(distdir) && pwd`; \
+ cd $(top_srcdir) \
+ && $(AUTOMAKE) --include-deps --build-dir=$$here --srcdir-name=$(top_srcdir) --output-dir=$$top_distdir --foreign Makefile
+ @for file in $(DISTFILES); do \
+ d=$(srcdir); \
+ if test -d $$d/$$file; then \
+ cp -pr $$d/$$file $(distdir)/$$file; \
+ else \
+ test -f $(distdir)/$$file \
+ || ln $$d/$$file $(distdir)/$$file 2> /dev/null \
+ || cp -p $$d/$$file $(distdir)/$$file || :; \
+ fi; \
+ done
+ $(MAKE) $(AM_MAKEFLAGS) top_distdir="$(top_distdir)" distdir="$(distdir)" dist-info
+
+DEPS_MAGIC := $(shell mkdir .deps > /dev/null 2>&1 || :)
+
+-include $(DEP_FILES)
+
+mostlyclean-depend:
+
+clean-depend:
+
+distclean-depend:
+ -rm -rf .deps
+
+maintainer-clean-depend:
+
+%.o: %.c
+ @echo '$(COMPILE) -c $<'; \
+ $(COMPILE) -Wp,-MD,.deps/$(*F).pp -c $<
+ @-cp .deps/$(*F).pp .deps/$(*F).P; \
+ tr ' ' '\012' < .deps/$(*F).pp \
+ | sed -e 's/^\\$$//' -e '/^$$/ d' -e '/:$$/ d' -e 's/$$/ :/' \
+ >> .deps/$(*F).P; \
+ rm .deps/$(*F).pp
+
+%.lo: %.c
+ @echo '$(LTCOMPILE) -c $<'; \
+ $(LTCOMPILE) -Wp,-MD,.deps/$(*F).pp -c $<
+ @-sed -e 's/^\([^:]*\)\.o[ ]*:/\1.lo \1.o :/' \
+ < .deps/$(*F).pp > .deps/$(*F).P; \
+ tr ' ' '\012' < .deps/$(*F).pp \
+ | sed -e 's/^\\$$//' -e '/^$$/ d' -e '/:$$/ d' -e 's/$$/ :/' \
+ >> .deps/$(*F).P; \
+ rm -f .deps/$(*F).pp
+
+%.o: %.cc
+ @echo '$(CXXCOMPILE) -c $<'; \
+ $(CXXCOMPILE) -Wp,-MD,.deps/$(*F).pp -c $<
+ @-cp .deps/$(*F).pp .deps/$(*F).P; \
+ tr ' ' '\012' < .deps/$(*F).pp \
+ | sed -e 's/^\\$$//' -e '/^$$/ d' -e '/:$$/ d' -e 's/$$/ :/' \
+ >> .deps/$(*F).P; \
+ rm .deps/$(*F).pp
+
+%.lo: %.cc
+ @echo '$(LTCXXCOMPILE) -c $<'; \
+ $(LTCXXCOMPILE) -Wp,-MD,.deps/$(*F).pp -c $<
+ @-sed -e 's/^\([^:]*\)\.o[ ]*:/\1.lo \1.o :/' \
+ < .deps/$(*F).pp > .deps/$(*F).P; \
+ tr ' ' '\012' < .deps/$(*F).pp \
+ | sed -e 's/^\\$$//' -e '/^$$/ d' -e '/:$$/ d' -e 's/$$/ :/' \
+ >> .deps/$(*F).P; \
+ rm -f .deps/$(*F).pp
+info-am: $(INFO_DEPS)
+info: info-am
+dvi-am: $(DVIS)
+dvi: dvi-am
+check-am: all-am
+check: check-am
+installcheck-am:
+installcheck: installcheck-am
+install-exec-am: install-binPROGRAMS
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-exec-hook
+install-exec: install-exec-am
+
+install-data-am: install-info-am install-man install-pkgdataDATA
+install-data: install-data-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+install: install-am
+uninstall-am: uninstall-binPROGRAMS uninstall-info uninstall-man \
+ uninstall-pkgdataDATA
+uninstall: uninstall-am
+all-am: Makefile $(INFO_DEPS) $(PROGRAMS) $(MANS) $(DATA)
+all-redirect: all-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) AM_INSTALL_PROGRAM_FLAGS=-s install
+installdirs:
+ $(mkinstalldirs) $(DESTDIR)$(bindir) $(DESTDIR)$(infodir) \
+ $(DESTDIR)$(mandir)/man1 $(DESTDIR)$(pkgdatadir)
+
+
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -rm -f Makefile $(CONFIG_CLEAN_FILES)
+ -rm -f config.cache config.log stamp-h stamp-h[0-9]*
+
+maintainer-clean-generic:
+mostlyclean-am: mostlyclean-binPROGRAMS mostlyclean-compile \
+ mostlyclean-vti mostlyclean-aminfo mostlyclean-tags \
+ mostlyclean-depend mostlyclean-generic
+
+mostlyclean: mostlyclean-am
+
+clean-am: clean-binPROGRAMS clean-compile clean-vti clean-aminfo \
+ clean-tags clean-depend clean-generic mostlyclean-am
+
+clean: clean-am
+
+distclean-am: distclean-binPROGRAMS distclean-compile distclean-vti \
+ distclean-aminfo distclean-tags distclean-depend \
+ distclean-generic clean-am
+
+distclean: distclean-am
+ -rm -f config.status
+
+maintainer-clean-am: maintainer-clean-binPROGRAMS \
+ maintainer-clean-compile maintainer-clean-vti \
+ maintainer-clean-aminfo maintainer-clean-tags \
+ maintainer-clean-depend maintainer-clean-generic \
+ distclean-am
+ @echo "This command is intended for maintainers to use;"
+ @echo "it deletes files that may require special tools to rebuild."
+
+maintainer-clean: maintainer-clean-am
+ -rm -f config.status
+
+.PHONY: mostlyclean-binPROGRAMS distclean-binPROGRAMS clean-binPROGRAMS \
+maintainer-clean-binPROGRAMS uninstall-binPROGRAMS install-binPROGRAMS \
+mostlyclean-compile distclean-compile clean-compile \
+maintainer-clean-compile mostlyclean-vti distclean-vti clean-vti \
+maintainer-clean-vti install-info-am uninstall-info mostlyclean-aminfo \
+distclean-aminfo clean-aminfo maintainer-clean-aminfo install-man1 \
+uninstall-man1 install-man uninstall-man uninstall-pkgdataDATA \
+install-pkgdataDATA tags mostlyclean-tags distclean-tags clean-tags \
+maintainer-clean-tags distdir mostlyclean-depend distclean-depend \
+clean-depend maintainer-clean-depend info-am info dvi-am dvi check \
+check-am installcheck-am installcheck install-exec-am install-exec \
+install-data-am install-data install-am install uninstall-am uninstall \
+all-redirect all-am all installdirs mostlyclean-generic \
+distclean-generic clean-generic maintainer-clean-generic clean \
+mostlyclean distclean maintainer-clean
+
+
+install-exec-hook:
+ cp bison $(bindir)
+ cp bison++.yacc $(bindir)
+
+uninstall-hook:
+ rm $(bindir)/bison++.yacc
+ rm $(bindir)/bison
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/tools/bison++/README++ b/tools/bison++/README++
new file mode 100644
index 000000000..8ab127e9b
--- /dev/null
+++ b/tools/bison++/README++
@@ -0,0 +1,22 @@
+This is a modified version of bison.
+The author of the changes is coetmeur@icdc.fr.
+
+Everything is much like the standard bison installation, except:
+
+
+bison++.man is a man page that shows the differences between
+bison and bison++.
+
+bison++.dman is a very readable file from which the less
+readable troff/man file bison++.man is generated.
+
+On DOS, use bison_pp.mak to build the DOS EXE with MSC7.
+There is no big trick in it; it can be loaded directly into
+the PWB...
+If you want to use another compiler, just compile
+and link each source file normally, as on unix...
+Use the large memory model, and look at the -D options I have added
+in the MSC makefile... also check for compiler-specific problems...
+
+Otherwise you can get the DOS exe that normally comes alongside this archive...
+
diff --git a/tools/bison++/REFERENCES b/tools/bison++/REFERENCES
new file mode 100644
index 000000000..b02eb18c5
--- /dev/null
+++ b/tools/bison++/REFERENCES
@@ -0,0 +1,30 @@
+From phr Tue Jul 8 10:36:19 1986
+Date: Tue, 8 Jul 86 00:52:24 EDT
+From: phr (Paul Rubin)
+To: riferguson%watmath.waterloo.edu@CSNET-RELAY.ARPA, tower
+Subject: Re: Bison documentation?
+
+The main difference between Bison and Yacc that I know of is that
+Bison supports the @N construction, which gives you access to
+the starting and ending line number and character number associated
+with any of the symbols in the current rule.
+
+Also, Bison supports the command `%expect N' which says not to mention
+the conflicts if there are N shift/reduce conflicts and no reduce/reduce
+conflicts.
+
+The differences in the algorithms stem mainly from the horrible
+kludges that Johnson had to perpetrate to make Yacc fit in a PDP-11.
+
+Also, Bison uses a faster but less space-efficient encoding for the
+parse tables (see Corbett's PhD thesis from Berkeley, "Static
+Semantics in Compiler Error Recovery", June 1985, Report No. UCB/CSD
+85/251), and more modern technique for generating the lookahead sets.
+(See "Efficient Construction of LALR(1) Lookahead Sets" by F. DeRemer
+and A. Pennello, in ACM TOPLS Vol 4 No 4, October 1982. Their
+technique is the standard one now.)
+
+ paul rubin
+ free software foundation
+
+
diff --git a/tools/bison++/aclocal.m4 b/tools/bison++/aclocal.m4
new file mode 100644
index 000000000..58ddd1cd9
--- /dev/null
+++ b/tools/bison++/aclocal.m4
@@ -0,0 +1,104 @@
+dnl aclocal.m4 generated automatically by aclocal 1.4-p4
+
+dnl Copyright (C) 1994, 1995-8, 1999 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+
+dnl This program is distributed in the hope that it will be useful,
+dnl but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+dnl even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+dnl PARTICULAR PURPOSE.
+
+# Do all the work for Automake. This macro actually does too much --
+# some checks are only needed if your package does certain things.
+# But this isn't really a big deal.
+
+# serial 1
+
+dnl Usage:
+dnl AM_INIT_AUTOMAKE(package,version, [no-define])
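+dnl For example, a configure.in typically invokes it right after AC_INIT
+dnl (the package name and version below are illustrative only):
+dnl   AC_INIT(main.cc)
+dnl   AM_INIT_AUTOMAKE(bison++, 1.21)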
+
+AC_DEFUN(AM_INIT_AUTOMAKE,
+[AC_REQUIRE([AC_PROG_INSTALL])
+PACKAGE=[$1]
+AC_SUBST(PACKAGE)
+VERSION=[$2]
+AC_SUBST(VERSION)
+dnl test to see if srcdir already configured
+if test "`cd $srcdir && pwd`" != "`pwd`" && test -f $srcdir/config.status; then
+ AC_MSG_ERROR([source directory already configured; run "make distclean" there first])
+fi
+ifelse([$3],,
+AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package])
+AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package]))
+AC_REQUIRE([AM_SANITY_CHECK])
+AC_REQUIRE([AC_ARG_PROGRAM])
+dnl FIXME This is truly gross.
+missing_dir=`cd $ac_aux_dir && pwd`
+AM_MISSING_PROG(ACLOCAL, aclocal, $missing_dir)
+AM_MISSING_PROG(AUTOCONF, autoconf, $missing_dir)
+AM_MISSING_PROG(AUTOMAKE, automake, $missing_dir)
+AM_MISSING_PROG(AUTOHEADER, autoheader, $missing_dir)
+AM_MISSING_PROG(MAKEINFO, makeinfo, $missing_dir)
+AC_REQUIRE([AC_PROG_MAKE_SET])])
+
+#
+# Check to make sure that the build environment is sane.
+#
+
+AC_DEFUN(AM_SANITY_CHECK,
+[AC_MSG_CHECKING([whether build environment is sane])
+# Just in case
+sleep 1
+echo timestamp > conftestfile
+# Do `set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ set X `ls -Lt $srcdir/configure conftestfile 2> /dev/null`
+ if test "[$]*" = "X"; then
+ # -L didn't work.
+ set X `ls -t $srcdir/configure conftestfile`
+ fi
+ if test "[$]*" != "X $srcdir/configure conftestfile" \
+ && test "[$]*" != "X conftestfile $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
+alias in your environment])
+ fi
+
+ test "[$]2" = conftestfile
+ )
+then
+ # Ok.
+ :
+else
+ AC_MSG_ERROR([newly created file is older than distributed files!
+Check your system clock])
+fi
+rm -f conftest*
+AC_MSG_RESULT(yes)])
+
+dnl AM_MISSING_PROG(NAME, PROGRAM, DIRECTORY)
+dnl The program must properly implement --version.
+AC_DEFUN(AM_MISSING_PROG,
+[AC_MSG_CHECKING(for working $2)
+# Run test in a subshell; some versions of sh will print an error if
+# an executable is not found, even if stderr is redirected.
+# Redirect stdin to placate older versions of autoconf. Sigh.
+if ($2 --version) < /dev/null > /dev/null 2>&1; then
+ $1=$2
+ AC_MSG_RESULT(found)
+else
+ $1="$3/missing $2"
+ AC_MSG_RESULT(missing)
+fi
+AC_SUBST($1)])
+
diff --git a/tools/bison++/alloca.c b/tools/bison++/alloca.c
new file mode 100644
index 000000000..cab9645b7
--- /dev/null
+++ b/tools/bison++/alloca.c
@@ -0,0 +1,480 @@
+/* alloca.c -- allocate automatically reclaimed memory
+ (Mostly) portable public-domain implementation -- D A Gwyn
+
+ This implementation of the PWB library alloca function,
+ which is used to allocate space off the run-time stack so
+ that it is automatically reclaimed upon procedure exit,
+ was inspired by discussions with J. Q. Johnson of Cornell.
+ J.Otto Tennant <jot@cray.com> contributed the Cray support.
+
+ There are some preprocessor constants that can
+ be defined when compiling for your specific system, for
+ improved efficiency; however, the defaults should be okay.
+
+ The general concept of this implementation is to keep
+ track of all alloca-allocated blocks, and reclaim any
+ that are found to be deeper in the stack than the current
+ invocation. This heuristic does not reclaim storage as
+ soon as it becomes invalid, but it will do so eventually.
+
+ As a special case, alloca(0) reclaims storage without
+ allocating any. It is a good idea to use alloca(0) in
+ your main control loop, etc. to force garbage collection. */
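+
+/* As an illustrative sketch (get_request, handle_request and req are
+   hypothetical, not part of this file), a long-running program might
+   force collection once per iteration of its main loop:
+
+	while (get_request (&req))
+	  {
+	    handle_request (&req);	(these calls may alloca in deeper frames)
+	    alloca (0);			(reclaim what those frames left behind)
+	  }
+
+   Storage alloca'd directly in the loop's own frame is reclaimed later,
+   the next time alloca is called from a shallower frame.  */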
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+/* If compiling with GCC 2, this file's not needed. */
+#if !defined (__GNUC__) || __GNUC__ < 2
+
+/* If someone has defined alloca as a macro,
+ there must be some other way alloca is supposed to work. */
+#ifndef alloca
+
+#ifdef emacs
+#ifdef static
+/* actually, only want this if static is defined as ""
+ -- this is for usg, in which emacs must undefine static
+ in order to make unexec workable
+ */
+#ifndef STACK_DIRECTION
+you
+lose
+-- must know STACK_DIRECTION at compile-time
+#endif /* STACK_DIRECTION undefined */
+#endif /* static */
+#endif /* emacs */
+
+/* If your stack is a linked list of frames, you have to
+ provide an "address metric" ADDRESS_FUNCTION macro. */
+
+#if defined (CRAY) && defined (CRAY_STACKSEG_END)
+long i00afunc ();
+#define ADDRESS_FUNCTION(arg) (char *) i00afunc (&(arg))
+#else
+#define ADDRESS_FUNCTION(arg) &(arg)
+#endif
+
+#if __STDC__
+typedef void *pointer;
+#else
+typedef char *pointer;
+#endif
+
+#define NULL 0
+
+/* Different portions of Emacs need to call different versions of
+ malloc. The Emacs executable needs alloca to call xmalloc, because
+ ordinary malloc isn't protected from input signals. On the other
+ hand, the utilities in lib-src need alloca to call malloc; some of
+ them are very simple, and don't have an xmalloc routine.
+
+   Non-Emacs programs expect this to call xmalloc.
+
+ Callers below should use malloc. */
+
+#ifndef emacs
+#define malloc xmalloc
+#endif
+extern pointer malloc ();
+
+/* Define STACK_DIRECTION if you know the direction of stack
+ growth for your system; otherwise it will be automatically
+ deduced at run-time.
+
+ STACK_DIRECTION > 0 => grows toward higher addresses
+ STACK_DIRECTION < 0 => grows toward lower addresses
+ STACK_DIRECTION = 0 => direction of growth unknown */
+
+#ifndef STACK_DIRECTION
+#define STACK_DIRECTION 0 /* Direction unknown. */
+#endif
+
+#if STACK_DIRECTION != 0
+
+#define STACK_DIR STACK_DIRECTION /* Known at compile-time. */
+
+#else /* STACK_DIRECTION == 0; need run-time code. */
+
+static int stack_dir; /* 1 or -1 once known. */
+#define STACK_DIR stack_dir
+
+static void
+find_stack_direction ()
+{
+ static char *addr = NULL; /* Address of first `dummy', once known. */
+ auto char dummy; /* To get stack address. */
+
+ if (addr == NULL)
+ { /* Initial entry. */
+ addr = ADDRESS_FUNCTION (dummy);
+
+ find_stack_direction (); /* Recurse once. */
+ }
+ else
+ {
+ /* Second entry. */
+ if (ADDRESS_FUNCTION (dummy) > addr)
+ stack_dir = 1; /* Stack grew upward. */
+ else
+ stack_dir = -1; /* Stack grew downward. */
+ }
+}
+
+#endif /* STACK_DIRECTION == 0 */
+
+/* An "alloca header" is used to:
+ (a) chain together all alloca'ed blocks;
+ (b) keep track of stack depth.
+
+ It is very important that sizeof(header) agree with malloc
+ alignment chunk size. The following default should work okay. */
+
+#ifndef ALIGN_SIZE
+#define ALIGN_SIZE sizeof(double)
+#endif
+
+typedef union hdr
+{
+ char align[ALIGN_SIZE]; /* To force sizeof(header). */
+ struct
+ {
+ union hdr *next; /* For chaining headers. */
+ char *deep; /* For stack depth measure. */
+ } h;
+} header;
+
+static header *last_alloca_header = NULL; /* -> last alloca header. */
+
+/* Return a pointer to at least SIZE bytes of storage,
+ which will be automatically reclaimed upon exit from
+ the procedure that called alloca. Originally, this space
+ was supposed to be taken from the current stack frame of the
+ caller, but that method cannot be made to work for some
+ implementations of C, for example under Gould's UTX/32. */
+
+pointer
+alloca (size)
+ unsigned size;
+{
+ auto char probe; /* Probes stack depth: */
+ register char *depth = ADDRESS_FUNCTION (probe);
+
+#if STACK_DIRECTION == 0
+ if (STACK_DIR == 0) /* Unknown growth direction. */
+ find_stack_direction ();
+#endif
+
+ /* Reclaim garbage, defined as all alloca'd storage that
+ was allocated from deeper in the stack than currently. */
+
+ {
+ register header *hp; /* Traverses linked list. */
+
+ for (hp = last_alloca_header; hp != NULL;)
+ if ((STACK_DIR > 0 && hp->h.deep > depth)
+ || (STACK_DIR < 0 && hp->h.deep < depth))
+ {
+ register header *np = hp->h.next;
+
+ free ((pointer) hp); /* Collect garbage. */
+
+ hp = np; /* -> next header. */
+ }
+ else
+ break; /* Rest are not deeper. */
+
+ last_alloca_header = hp; /* -> last valid storage. */
+ }
+
+ if (size == 0)
+ return NULL; /* No allocation required. */
+
+ /* Allocate combined header + user data storage. */
+
+ {
+ register pointer new = malloc (sizeof (header) + size);
+ /* Address of header. */
+
+ ((header *) new)->h.next = last_alloca_header;
+ ((header *) new)->h.deep = depth;
+
+ last_alloca_header = (header *) new;
+
+ /* User storage begins just after header. */
+
+ return (pointer) ((char *) new + sizeof (header));
+ }
+}
+
+#if defined (CRAY) && defined (CRAY_STACKSEG_END)
+
+#ifdef DEBUG_I00AFUNC
+#include <stdio.h>
+#endif
+
+#ifndef CRAY_STACK
+#define CRAY_STACK
+#ifndef CRAY2
+/* Stack structures for CRAY-1, CRAY X-MP, and CRAY Y-MP */
+struct stack_control_header
+ {
+ long shgrow:32; /* Number of times stack has grown. */
+ long shaseg:32; /* Size of increments to stack. */
+ long shhwm:32; /* High water mark of stack. */
+ long shsize:32; /* Current size of stack (all segments). */
+ };
+
+/* The stack segment linkage control information occurs at
+ the high-address end of a stack segment. (The stack
+ grows from low addresses to high addresses.) The initial
+ part of the stack segment linkage control information is
+ 0200 (octal) words. This provides for register storage
+ for the routine which overflows the stack. */
+
+struct stack_segment_linkage
+ {
+ long ss[0200]; /* 0200 overflow words. */
+ long sssize:32; /* Number of words in this segment. */
+ long ssbase:32; /* Offset to stack base. */
+ long:32;
+ long sspseg:32; /* Offset to linkage control of previous
+ segment of stack. */
+ long:32;
+ long sstcpt:32; /* Pointer to task common address block. */
+ long sscsnm; /* Private control structure number for
+ microtasking. */
+ long ssusr1; /* Reserved for user. */
+ long ssusr2; /* Reserved for user. */
+ long sstpid; /* Process ID for pid based multi-tasking. */
+ long ssgvup; /* Pointer to multitasking thread giveup. */
+ long sscray[7]; /* Reserved for Cray Research. */
+ long ssa0;
+ long ssa1;
+ long ssa2;
+ long ssa3;
+ long ssa4;
+ long ssa5;
+ long ssa6;
+ long ssa7;
+ long sss0;
+ long sss1;
+ long sss2;
+ long sss3;
+ long sss4;
+ long sss5;
+ long sss6;
+ long sss7;
+ };
+
+#else /* CRAY2 */
+/* The following structure defines the vector of words
+ returned by the STKSTAT library routine. */
+struct stk_stat
+ {
+ long now; /* Current total stack size. */
+ long maxc; /* Amount of contiguous space which would
+ be required to satisfy the maximum
+ stack demand to date. */
+ long high_water; /* Stack high-water mark. */
+ long overflows; /* Number of stack overflow ($STKOFEN) calls. */
+ long hits; /* Number of internal buffer hits. */
+ long extends; /* Number of block extensions. */
+ long stko_mallocs; /* Block allocations by $STKOFEN. */
+ long underflows; /* Number of stack underflow calls ($STKRETN). */
+ long stko_free; /* Number of deallocations by $STKRETN. */
+ long stkm_free; /* Number of deallocations by $STKMRET. */
+ long segments; /* Current number of stack segments. */
+ long maxs; /* Maximum number of stack segments so far. */
+ long pad_size; /* Stack pad size. */
+ long current_address; /* Current stack segment address. */
+ long current_size; /* Current stack segment size. This
+ number is actually corrupted by STKSTAT to
+ include the fifteen word trailer area. */
+ long initial_address; /* Address of initial segment. */
+ long initial_size; /* Size of initial segment. */
+ };
+
+/* The following structure describes the data structure which trails
+ any stack segment. I think that the description in 'asdef' is
+ out of date. I only describe the parts that I am sure about. */
+
+struct stk_trailer
+ {
+ long this_address; /* Address of this block. */
+ long this_size; /* Size of this block (does not include
+ this trailer). */
+ long unknown2;
+ long unknown3;
+ long link; /* Address of trailer block of previous
+ segment. */
+ long unknown5;
+ long unknown6;
+ long unknown7;
+ long unknown8;
+ long unknown9;
+ long unknown10;
+ long unknown11;
+ long unknown12;
+ long unknown13;
+ long unknown14;
+ };
+
+#endif /* CRAY2 */
+#endif /* not CRAY_STACK */
+
+#ifdef CRAY2
+/* Determine a "stack measure" for an arbitrary ADDRESS.
+ I doubt that "lint" will like this much. */
+
+static long
+i00afunc (long *address)
+{
+ struct stk_stat status;
+ struct stk_trailer *trailer;
+ long *block, size;
+ long result = 0;
+
+ /* We want to iterate through all of the segments. The first
+ step is to get the stack status structure. We could do this
+ more quickly and more directly, perhaps, by referencing the
+ $LM00 common block, but I know that this works. */
+
+ STKSTAT (&status);
+
+ /* Set up the iteration. */
+
+ trailer = (struct stk_trailer *) (status.current_address
+ + status.current_size
+ - 15);
+
+ /* There must be at least one stack segment. Therefore it is
+ a fatal error if "trailer" is null. */
+
+ if (trailer == 0)
+ abort ();
+
+ /* Discard segments that do not contain our argument address. */
+
+ while (trailer != 0)
+ {
+ block = (long *) trailer->this_address;
+ size = trailer->this_size;
+ if (block == 0 || size == 0)
+ abort ();
+ trailer = (struct stk_trailer *) trailer->link;
+ if ((block <= address) && (address < (block + size)))
+ break;
+ }
+
+ /* Set the result to the offset in this segment and add the sizes
+ of all predecessor segments. */
+
+ result = address - block;
+
+ if (trailer == 0)
+ {
+ return result;
+ }
+
+ do
+ {
+ if (trailer->this_size <= 0)
+ abort ();
+ result += trailer->this_size;
+ trailer = (struct stk_trailer *) trailer->link;
+ }
+ while (trailer != 0);
+
+ /* We are done. Note that if you present a bogus address (one
+ not in any segment), you will get a different number back, formed
+ from subtracting the address of the first block. This is probably
+ not what you want. */
+
+ return (result);
+}
+
+#else /* not CRAY2 */
+/* Stack address function for a CRAY-1, CRAY X-MP, or CRAY Y-MP.
+ Determine the number of the cell within the stack,
+ given the address of the cell. The purpose of this
+ routine is to linearize, in some sense, stack addresses
+ for alloca. */
+
+static long
+i00afunc (long address)
+{
+ long stkl = 0;
+
+ long size, pseg, this_segment, stack;
+ long result = 0;
+
+ struct stack_segment_linkage *ssptr;
+
+ /* Register B67 contains the address of the end of the
+ current stack segment. If you (as a subprogram) store
+ your registers on the stack and find that you are past
+ the contents of B67, you have overflowed the segment.
+
+ B67 also points to the stack segment linkage control
+ area, which is what we are really interested in. */
+
+ stkl = CRAY_STACKSEG_END ();
+ ssptr = (struct stack_segment_linkage *) stkl;
+
+ /* If one subtracts 'size' from the end of the segment,
+ one has the address of the first word of the segment.
+
+ If this is not the first segment, 'pseg' will be
+ nonzero. */
+
+ pseg = ssptr->sspseg;
+ size = ssptr->sssize;
+
+ this_segment = stkl - size;
+
+ /* It is possible that calling this routine itself caused
+ a stack overflow. Discard stack segments which do not
+ contain the target address. */
+
+ while (!(this_segment <= address && address <= stkl))
+ {
+#ifdef DEBUG_I00AFUNC
+ fprintf (stderr, "%011o %011o %011o\n", this_segment, address, stkl);
+#endif
+ if (pseg == 0)
+ break;
+ stkl = stkl - pseg;
+ ssptr = (struct stack_segment_linkage *) stkl;
+ size = ssptr->sssize;
+ pseg = ssptr->sspseg;
+ this_segment = stkl - size;
+ }
+
+ result = address - this_segment;
+
+ /* If you subtract pseg from the current end of the stack,
+ you get the address of the previous stack segment's end.
+ This seems a little convoluted to me, but I'll bet you save
+ a cycle somewhere. */
+
+ while (pseg != 0)
+ {
+#ifdef DEBUG_I00AFUNC
+ fprintf (stderr, "%011o %011o\n", pseg, size);
+#endif
+ stkl = stkl - pseg;
+ ssptr = (struct stack_segment_linkage *) stkl;
+ size = ssptr->sssize;
+ pseg = ssptr->sspseg;
+ result += size;
+ }
+ return (result);
+}
+
+#endif /* not CRAY2 */
+#endif /* CRAY */
+
+#endif /* no alloca */
+#endif /* not GCC version 2 */
diff --git a/tools/bison++/allocate.cc b/tools/bison++/allocate.cc
new file mode 100644
index 000000000..773366eb6
--- /dev/null
+++ b/tools/bison++/allocate.cc
@@ -0,0 +1,61 @@
+/* Allocate and clear storage for bison,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include <stdio.h>
+
+extern "C" char *calloc (unsigned,unsigned);
+extern "C" char *realloc (char*,unsigned);
+extern void done (int);
+
+extern char *program_name;
+
+char *
+xmalloc (unsigned n)
+{
+ register char *block;
+
+ /* Avoid uncertainty about what an arg of 0 will do. */
+ if (n == 0)
+ n = 1;
+ block = calloc (n, 1);
+ if (block == NULL)
+ {
+ fprintf (stderr, "%s: memory exhausted\n", program_name);
+ done (1);
+ }
+
+ return (block);
+}
+
+char *
+xrealloc (char* block, unsigned n)
+{
+ /* Avoid uncertainty about what an arg of 0 will do. */
+ if (n == 0)
+ n = 1;
+ block = realloc (block, n);
+ if (block == NULL)
+ {
+ fprintf (stderr, "%s: memory exhausted\n", program_name);
+ done (1);
+ }
+
+ return (block);
+}
diff --git a/tools/bison++/bison b/tools/bison++/bison
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tools/bison++/bison
diff --git a/tools/bison++/bison++.1 b/tools/bison++/bison++.1
new file mode 100644
index 000000000..cd305277d
--- /dev/null
+++ b/tools/bison++/bison++.1
@@ -0,0 +1,436 @@
+.TH BISON++ 1 "3/3/93" "GNU and RDT" "COMMANDS"
+.SH "NAME"
+bison++ \- generate a parser in C or C++\.
+.SH "SYNOPSIS"
+\fBbison++\fP [\fB\-dltvyVu\fP] [\fB\-b\fP \fIfile\-prefix\fP] [\fB\-p\fP \fIname\-prefix\fP] [\fB\-o\fP \fIoutfile\fP] [\fB\-h\fP \fIheaderfile\fP] [\fB\-S\fP \fIskeleton\fP] [\fB\-H\fP \fIheader\-skeleton\fP] [\fB\-\-debug\fP] [\fB\-\-defines\fP] [\fB\-\-fixed\-output\-files\fP] [\fB\-\-no\-lines\fP] [\fB\-\-verbose\fP] [\fB\-\-version\fP] [\fB\-\-yacc\fP] [\fB\-\-usage\fP] [\fB\-\-help\fP] [\fB\-\-file\-prefix=\fP\fIprefix\fP] [\fB\-\-name\-prefix=\fP\fIprefix\fP] [\fB\-\-skeleton=\fP\fIskeletonfile\fP] [\fB\-\-headerskeleton=\fP\fIheaderskeletonfile\fP] [\fB\-\-output=\fP\fIoutfile\fP] [\fB\-\-header\-name=\fP\fIheader\fP] \fIgrammar\-file\fP
+.SH "DESCRIPTION"
+Generate a parser\. Based on \fBbison\fP version 1\.19\. See \fBbison\fP(1) for details of main functionality\. Only changes are reported here\.
+.PP
+You now get a C++ class if you are compiling with a C++ compiler\. The generated header is far richer than before, and is made from a skeleton header\. The code skeleton is also richer, and the generated code is smaller relative to the skeletons\. This lets you change many things simply by editing the two skeletons\.
+.PP
+In plain C, \fBbison++\fP is compatible with standard \fBbison\fP\.
+.SH "OPTIONS"
+.\"bloc1[
+.IP "\\fB\\-\\-name\\-prefix=\\fP\\fIprefix\\fP"
+.IP "\\fB\\-p\\fP \\fIprefix\\fP"
+Set the prefix of the names yylex and yyerror\. Kept for compatibility, but you should prefer \fB%define LEX \fP\fInewname\fP, and similar\.
+.IP "\\fB\\-\\-skeleton=\\fP\\fIskeleton\\fP"
+.IP "\\fB\\-S\\fP \\fIskeleton\\fP"
+Set filename of code skeleton\. Default is \fBbison\.cc\fP\.
+.IP "\\fB\\-\\-headerskeleton=\\fP\\fIheader\\-skeleton\\fP"
+.IP "\\fB\\-H\\fP \\fIheader\\-skeleton\\fP"
+Set filename of header skeleton\. Default is \fBbison\.h\fP\.
+.IP "\\fB\\-\\-header\\-name=\\fP\\fIheader\\fP"
+.IP "\\fB\\-h\\fP \\fIheader\\fP"
+Set the filename of the generated header\. Default is \fBy\.tab\.h\fP, or \fIprefix\fP\.h if option \fB\-b\fP is used, or \fIc_basename\fP\.h if \fB\-o\fP is used\. The \fB\.c\fP, \fB\.cc\fP, \fB\.C\fP, \fB\.cpp\fP, or \fB\.cxx\fP extension of the output file is replaced by \fB\.h\fP to form the header name\.
+.\"bloc1]
+.SH "DECLARATIONS"
+These are new declarations to put in the declaration section:
+.\"bloc1[
+.IP "\\fB%name\\fP \\fIparser_name\\fP"
+Declare the name of this parser\. Used for the C++ class name, and to make many names unique\. Default is \fBparse\fP\. Must be given before \fB%union\fP and \fB%define\fP, or not at all\.
+.IP "\\fB%define\\fP \\fIdefine_name\\fP \\fIcontent\\.\\.\\.\\fP"
+Declare a macro symbol in the header and the code\. The name of the symbol is \fBYY_\fP'\fIparser_name\fP'\fB_\fP'\fIdefine_name\fP'\. The content is given afterwards, as with #define\. Newlines can be escaped as with #define\. Many symbols are provided for customisation\.
+.IP "\\fB%union\\fP"
+As with bison, generates a union for the semantic type\. The difference is that the union is named \fByy_\fP'\fIparser_name\fP'\fB_stype\fP\.
+.IP "\\fB%pure_parser\\fP"
+As with bison in C\. In C++, generates a parser where yylval and yylloc (if needed) are passed as parameters to yylex, and where some instance variables are local to yyparse (like yydebug\.\.\.)\. Not very useful, since you can create multiple instances to reenter another parser\.
+.IP "\\fB%header{\\fP"
+Like \fB%{\fP, but includes this text both in the header and in the code\. End it with \fB%}\fP\. When put in the declaration section, the text is added before the definitions\. It can also be put in the last section, so that the text is added after all definitions in the header, and at the current position in the code of the last section\.
+.\"bloc1]
+.PP
+Note that the order of these declarations is important, since they are translated into preprocessor symbols, typedefs or code depending on their type\. For example, use \fB%name\fP before any \fB%define\fP, since the name is needed to compose the names of the define symbols\. The order of \fB%header\fP and \fB%union\fP is important, since a type may otherwise be undefined\. A short example follows\.
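+.PP
+As a minimal sketch (the parser name \fBMyParser\fP, the included header and the semantic types are illustrative, not required by \fBbison++\fP), the declaration section of a grammar file might begin with:
+.PP
+%name MyParser
+.PP
+%header{
+.PP
+#include <stdio\.h>
+.PP
+%}
+.PP
+%define LSP_NEEDED 1
+.PP
+%union { int num; char *str; }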
+.SH "DECLARATION DEFINE SYMBOLS"
+These are the symbols you can define with \fB%define\fP in the declaration section, or that are already defined\. Remember that they are turned into a preprocessor \fB#define YY_\fP'\fIparser_name\fP'\fB_\fP'\fIname\fP'\.
+.\"bloc1[
+.IP "\\fBBISON\\fP"
+Defined to \fB1\fP in the code\. Used for conditional code\. Don't redefine it\.
+.IP "\\fBh_included\\fP"
+Defined in the code and in the header\. Used as an include guard\. Don't redefine it\.
+.IP "\\fBCOMPATIBILITY\\fP"
+Indicates whether obsolete defines are to be used and produced\. If defined to 0, no compatibility is needed; if defined to non\-0, compatibility code is generated\. If it is undefined, the default is to be compatible when classes are not used\.
+.IP "\\fBUSE_GOTO\\fP"
+Indicates (if defined as 1) that \fBgoto\fP is to be used (for backward compatibility) in the parser function\. By default \fBgoto\fP is replaced with a \fBswitch\fP construction, to avoid problems with some compilers that don't support \fBgoto\fP and destructors in the same function block\. If \fBCOMPATIBILITY\fP is 1, and \fBUSE_GOTO\fP is not defined, then \fBUSE_GOTO\fP is defined to 1, to be compatible with older bison\.
+.IP "\\fBUSE_CONST_TOKEN\\fP"
+Indicates (if defined as 1) that \fBstatic const int\fP is to be used in C++ for token IDs\. By default an enum is used to define the token IDs instead of const\.
+.IP "\\fBENUM_TOKEN\\fP"
+When an \fBenum\fP is used instead of \fBstatic const int\fP for token IDs, this symbol defines the name of the enum type\. Defined to \fByy_\fP'\fIparser_name\fP'\fB_enum_token\fP by default\.
+.IP "\\fBPURE\\fP"
+Indicates that \fB%pure_parser\fP was requested\. Don't redefine it\.
+.IP "\\fBLSP_NEEDED\\fP"
+If defined, indicates that the @ construct is used, so the \fBLLOC\fP stack is needed\. Can be defined to force use of the location stack\.
+.IP "\\fBDEBUG\\fP"
+If defined to non\-0, activates debugging code\. See \fBYYDEBUG\fP in bison\.
+.IP "\\fBERROR_VERBOSE\\fP"
+If defined, the parser stack is dumped when an error occurs\.
+.IP "\\fBSTYPE\\fP"
+The type of the semantic value of tokens\. Defined by \fB%union\fP\. Default is \fBint\fP\. See \fBYYSTYPE\fP in bison\. Don't redefine it if you use a \fB%union\fP\.
+.IP "\\fBLTYPE\\fP"
+The token location type\. If needed, the default is \fByyltype\fP\. See \fBYYLTYPE\fP in bison\. The default \fByyltype\fP is a typedef'd struct defined as in old bison\.
+.IP "\\fBLLOC\\fP"
+The token location variable name\. If needed, default is \fByylloc\fP\. See \fByylloc\fP in bison\.
+.IP "\\fBLVAL\\fP"
+The token semantic value variable name\. Default \fByylval\fP\. See \fByylval\fP in bison\.
+.IP "\\fBCHAR\\fP"
+The lookahead token value variable name\. Default \fByychar\fP\. See \fByychar\fP in bison\.
+.IP "\\fBLEX\\fP"
+The scanner function name\. Default \fByylex\fP\. See \fByylex\fP in bison\.
+.IP "\\fBPARSE\\fP"
+The parser function name\. Default \fByyparse\fP\. See \fByyparse\fP in bison\.
+.IP "\\fBPARSE_PARAM\\fP"
+The declaration of the parser function parameters\. Default is \fBvoid\fP in C++ or ANSI C, nothing in old C\. In ANSI C and C++ it contains the prototype\. In old\-style C it contains just the list of parameter names\. Default values are not allowed\.
+.IP "\\fBPARSE_PARAM_DEF\\fP"
+The definition of the parser function parameters, for old\-style C\. Default is nothing\. For example, to use an \fBint\fP parameter called \fBx\fP, PARSE_PARAM is \fBx\fP, and PARSE_PARAM_DEF is \fBint x;\fP\. In ANSI C or C++ it is not needed and is ignored\.
+.IP "\\fBERROR\\fP"
+The error function name\. Default \fByyerror\fP\. See \fByyerror\fP in bison\.
+.IP "\\fBNERRS\\fP"
+The error count name\. Default \fByynerrs\fP\. See \fByynerrs\fP in bison\.
+.IP "\\fBDEBUG_FLAG\\fP"
+The runtime debug flag\. Default \fByydebug\fP\. See \fByydebug\fP in bison\.
+.\"bloc1]
+.PP
+These are only used if a class is generated\.
+.\"bloc1[
+.IP "\\fBCLASS\\fP"
+The class name\. Default is the parser name\.
+.IP "\\fBINHERIT\\fP"
+The inheritance list\. Don't forget the leading \fB:\fP if the list is not empty\.
+.IP "\\fBMEMBERS\\fP"
+List of members to add to the class definition, before closing it\.
+.IP "\\fBLEX_BODY\\fP"
+The scanner member function body\. May be defined to \fB=0\fP for a pure virtual function, or to an inline body\.
+.IP "\\fBERROR_BODY\\fP"
+The error member function body\. May be defined to \fB=0\fP for a pure virtual function, or to an inline body\.
+.IP "\\fBCONSTRUCTOR_PARAM\\fP"
+List of parameters of the constructor\. Default values are not allowed\.
+.IP "\\fBCONSTRUCTOR_INIT\\fP"
+List of initialisations before the constructor body\. If it is not empty, don't forget the leading \fB:\fP\.
+.IP "\\fBCONSTRUCTOR_CODE\\fP"
+Code added after the internal initialisation in the constructor\. A short example of these symbols follows\.
+.\"bloc1]
+.SH "OBSOLETED PREPROCESSOR SYMBOLS"
+If you use the new features, the following symbols should not be used, though they are still provided\. The symbol \fBCOMPATIBILITY\fP controls their availability\. Inconsistencies may arise if they are defined simultaneously with the new symbols\.
+.\"bloc1[
+.IP "\\fBYYLTYPE\\fP"
+prefer \fB%define LTYPE\fP\.
+.IP "\\fBYYSTYPE\\fP"
+prefer \fB%define STYPE\fP\.
+.IP "\\fBYYDEBUG\\fP"
+prefer \fB%define DEBUG\fP\.
+.IP "\\fBYYERROR_VERBOSE\\fP"
+prefer \fB%define ERROR_VERBOSE\fP\.
+.IP "\\fBYYLSP_NEEDED\\fP"
+prefer \fB%define LSP_NEEDED\fP\.
+.IP "\\fByystype\\fP"
+Now a preprocessor symbol instead of a typedef\. Prefer \fByy_\fP'\fIparser_name\fP'\fB_stype\fP\.
+.\"bloc1]
+.SH "CONSERVED PREPROCESSOR SYMBOLS"
+These symbols are kept and cannot be set through \fB%define\fP, since they control private parameters of the generated parser, or are actually unused\. You can \fB#define\fP them to the value you need, or indirectly to the name of a \fB%define\fP\-generated symbol if you want to be clean\.
+.\"bloc1[
+.IP "\\fBYYINITDEPTH\\fP"
+Initial stack depth\.
+.IP "\\fBYYMAXDEPTH\\fP"
+Maximum stack depth before overflow\.
+.IP "\\fByyoverflow\\fP"
+If defined, instead of expanding the stack with alloca, reallocate it manually or raise an error\.
+.\"bloc1]
+.SH "OTHER ADDED PREPROCESSOR SYMBOLS"
+.\"bloc1[
+.IP "\\fBYY_USE_CLASS\\fP"
+Indicates that a class will be produced\. Defined by default in C++\.
+.\"bloc1]
+.SH "C++ CLASS GENERATED"
+To simplify the notation, we write \fB%SYMBOLNAME\fP for the preprocessor symbol generated by a \fB%define\fP of that name\. See the description of \fB%define\fP for its real name\.
+.PP
+Note that some symbols differ only by an underscore \fB_\fP, like \fByywrap\fP and \fByy_wrap\fP, yet they are quite different\. In this case \fByy_wrap()\fP is a virtual member function, and \fByywrap()\fP is a macro\.
+.SS "General Class declaration"
+class %CLASS %INHERIT
+.PP
+{
+.PP
+public:
+.PP
+#if %USE_CONST_TOKEN != 0
+.PP
+static const TOKEN_NEXT;
+.PP
+static const AND_SO_ON;
+.PP
+// \.\.\.
+.PP
+#else
+.PP
+enum %ENUM_TOKEN { %NULL_TOKEN
+.\"bloc1[
+.IP
+,TOKEN_FIRST=256
+.IP
+,TOKEN_NEXT=257
+.IP
+,AND_SO_ON=258
+.\"bloc1]
+.PP
+} ;
+.PP
+// \.\.\.
+.PP
+#endif
+.PP
+public:
+.PP
+int %PARSE (%PARSE_PARAM);
+.PP
+virtual void %ERROR(char *msg) %ERROR_BODY;
+.PP
+#ifdef %PURE
+.\"bloc1[
+.IP
+// if %PURE , we must pass the value and (eventually) the location explicitely
+.IP
+#ifdef %LSP_NEEDED
+.IP
+// if and only if %LSP_NEEDED , we must pass the location explicitely
+.IP
+virtual int %LEX (%STYPE *%LVAL,%LTYPE *%LLOC) %LEX_BODY;
+.IP
+#else
+.IP
+virtual int %LEX (%STYPE *%LVAL) %LEX_BODY;
+.IP
+#endif
+.\"bloc1]
+.PP
+#else
+.\"bloc1[
+.IP
+// if not %PURE , we must declare member to store the value and (eventually) the location explicitely
+.IP
+// if not %PURE ,%NERRS and %CHAR are not local variable to %PARSE, so must be member
+.IP
+virtual int %LEX() %LEX_BODY;
+.IP
+%STYPE %LVAL;
+.IP
+#ifdef %LSP_NEEDED
+.IP
+%LTYPE %LLOC;
+.IP
+#endif
+.IP
+int %NERRS;
+.IP
+int %CHAR;
+.\"bloc1]
+.PP
+#endif
+.PP
+#if %DEBUG != 0
+.PP
+int %DEBUG_FLAG; /* nonzero means print parse trace */
+.PP
+#endif
+.PP
+public:
+.PP
+%CLASS(%CONSTRUCTOR_PARAM);
+.PP
+public:
+.PP
+%MEMBERS
+.PP
+};
+.PP
+// here are defined the token constants
+.PP
+// for example:
+.PP
+#if %USE_CONST_TOKEN != 0
+.\"bloc1[
+.IP
+const %CLASS::TOKEN_FIRST=1;
+.IP
+\.\.\.
+.\"bloc1]
+.PP
+#endif
+.PP
+// here is the construcor
+.PP
+%CLASS::%CLASS(%CONSTRUCTOR_PARAM) %CONSTRUCTOR_INIT
+.PP
+{
+.PP
+#if %DEBUG != 0
+.PP
+%DEBUG_FLAG=0;
+.PP
+#endif
+.PP
+%CONSTRUCTOR_CODE;
+.PP
+};
+.SS "Default Class declaration"
+// Here is the default declaration made in the header when you %define nothing
+.PP
+// typical yyltype
+.PP
+typedef struct yyltype
+.PP
+{
+.PP
+int timestamp;
+.PP
+int first_line;
+.PP
+int first_column;
+.PP
+int last_line;
+.PP
+int last_column;
+.PP
+char *text;
+.PP
+} yyltype;
+.PP
+// class definition
+.PP
+class parser
+.PP
+{
+.PP
+public:
+.PP
+enum yy_parser_enum_token { YY_parser_NULL_TOKEN
+.\"bloc1[
+.IP
+,TOKEN_FIRST=256
+.IP
+,TOKEN_NEXT=257
+.IP
+,AND_SO_ON=258
+.\"bloc1]
+.PP
+} ;
+.PP
+// \.\.\.
+.PP
+public:
+.PP
+int yyparse (yyparse_PARAM);
+.PP
+virtual void yyerror(char *msg) ;
+.PP
+#ifdef YY_parser_PURE
+.\"bloc1[
+.IP
+#ifdef YY_parser_LSP_NEEDED
+.IP
+virtual int yylex (int *yylval,yyltype *yylloc) ;
+.IP
+#else
+.IP
+virtual int yylex (int *yylval) ;
+.IP
+#endif
+.\"bloc1]
+.PP
+#else
+.\"bloc1[
+.IP
+virtual int yylex() %LEX_BODY;
+.IP
+int yylval;
+.IP
+#ifdef YY_parser_LSP_NEEDED
+.IP
+yyltype yylloc;
+.IP
+#endif
+.IP
+int yynerrs;
+.IP
+int yychar;
+.\"bloc1]
+.PP
+#endif
+.PP
+#if YY_parser_DEBUG != 0
+.PP
+int yydebug;
+.PP
+#endif
+.PP
+public:
+.PP
+parser();
+.PP
+public:
+.PP
+};
+.PP
+// here is the constructor code
+.PP
+parser::parser()
+.PP
+{
+.PP
+#if YY_parser_DEBUG != 0
+.PP
+yydebug=0;
+.PP
+#endif
+.PP
+};
+.SH "USAGE"
+\fBbison++\fP should replace \fBbison\fP, because it generates a far more customisable parser while still being compatible\.
+.PP
+You should always use the header facility\.
+.PP
+Use it with \fBflex++\fP (same author)\.
+.SH "EXEMPLES"
+This man page has been produced through a parser made in C++ with this version of \fBbison\fP and our version of \fBflex++\fP (same author)\.
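+.PP
+A minimal driver for the generated class might look like this (a sketch assuming \fB%name MyParser\fP, a header produced with \fB\-h MyParser\.h\fP, the default non\-pure mode, and \fByylex\fP and \fByyerror\fP bodies supplied elsewhere, for example through \fBflex++\fP or \fB%define\fP):
+.PP
+#include "MyParser\.h"
+.PP
+int main()
+.PP
+{
+.PP
+MyParser parser;
+.PP
+return parser\.yyparse();
+.PP
+}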
+.SH "FILES"
+.\"bloc1[
+.IP "\\fBbison\\.cc\\fP"
+main skeleton\.
+.IP "\\fBbison\\.h\\fP"
+header skeleton\.
+.IP "\\fBbison\\.hairy\\fP"
+Old main skeleton for the semantic parser\. Not adapted to this version\. Kept for future work\.
+.\"bloc1]
+.SH "ENVIRONNEMENT"
+.SH "DIAGNOSTICS"
+.SH "SEE ALSO"
+\fBbison\fP(1), \fBbison\.info\fP (use texinfo), \fBflex++\fP(1)\.
+.SH "DOCUMENTATION"
+.SH "BUGS"
+Tell us more !
+.PP
+The \fB%semantic_parser\fP is no longer supported\. If you want to use it, adapt the skeletons, and maybe the \fBbison++\fP generator itself\. The reason is that it seems unused, not useful, undocumented, and too complex for us to support\. Tell us if you use, need, or understand it\.
+.PP
+The header is not included in the parser code\. Changes made to the generated header are not seen by the parser code, even if you include it deliberately, since it is guarded against re\-inclusion\. So don't modify it\.
+.PP
+For the same reasons, if you modify the header skeleton or the code skeleton, replicate the changes in the other skeleton where applicable\. Otherwise, inconsistent declarations may lead to unpredictable results\.
+.PP
+Use of defines for \fBYYLTYPE\fP, \fBYYSTYPE\fP and \fBYYDEBUG\fP is supported for backward compatibility in C, but they should not be used with the new features, such as \fB%define\fP or C++ classes\. You can define them, and use them as with old \fBbison\fP, in C only\.
+.PP
+Parameters are richer than before, and nothing has been removed\. POSIX compliance can be enforced by not using the extensions\. If you want to forbid them outright, there is a good job waiting!
+.SH "FUTUR WORKS"
+tell us !
+.PP
+Support semantic parser\. Is it really used ?
+.PP
+POSIX compliance\. is'nt it good now ?
+.PP
+Use lex and yacc (flex/bison) to generate the scanner/parser\. It would be comfortable for futur works, though very complicated\. Who feel it good ?
+.PP
+\fBiostream\fP : this is a great demand\. this work will be done as soon as possible\. The virtual members permit such work still easily\.
+.SH "INSTALLATION"
+With this install the executable is named bison++\. rename it bison if you want, because it could replace \fBbison\fP\.
+.SH "TESTS"
+.SH "AUTHORS"
+Alain Coe\*:tmeur (coetmeur@icdc\.fr), R&D department (RDT) , Informatique\-CDC, France\.
+.SH "RESTRICTIONS"
+The words 'author' and 'us' mean the author and colleagues, not GNU\. We have not contacted GNU about this so far\. If you are with GNU, we are ready to propose it to you, and you may tell us what you think of it\.
+.PP
+Based on GNU version 1\.21 of bison\. Modified by the author\.
diff --git a/tools/bison++/bison++.1.dman b/tools/bison++/bison++.1.dman
new file mode 100644
index 000000000..50325bfba
--- /dev/null
+++ b/tools/bison++/bison++.1.dman
@@ -0,0 +1,247 @@
+% %Z% %M% %Y% %Q% %I% %E% %U% (%F%)
+%
+% Nom du Fichier : bison++.dman
+% Titre : bison++ man page of modifications
+% Auteur: coetmeur@icdc.fr
+% Date de creation : 3/3/93
+%
+% Description :
+% Document de reference : see bison.1
+% Objet : present difference from bison 1.19 (standard)
+% and bison++ from which it has been made
+%
+%
+% historique :
+% |>date<| |>auteur<| |>objet<|
+%
+% header NOM SECTION DATE AUTEUR DOMAINE
+:HEADER BISON++ 1 "3/3/93" "GNU and RDT" "COMMANDS"
+:SECTION "NAME"
+ bison++ - generate a parser in C or C++.
+:SECTION "SYNOPSIS"
+ "bison++" ["-dltvyVu"] ["-b" <file-prefix>] ["-p" <name-prefix>] ["-o" <outfile>] ["-h" <headerfile>] ["-S" <skeleton>] ["-H" <header-skeleton>] ["--debug"] ["--defines"] ["--fixed-output-files"] ["--no-lines"] ["--verbose"] ["--version"] ["--yacc"] ["--usage"] ["--help"] ["--file-prefix="<prefix>] ["--name-prefix="<prefix>] ["--skeleton="<skeletonfile>] ["--headerskeleton="<headerskeletonfile>] ["--output="<outfile>] ["--header-name="<header>] <grammar-file>
+
+:SECTION "DESCRIPTION"
+ Generate a parser. Based on "bison" version 1.19. See "bison"(1) for details of main functionality. Only changes are reported here.
+ You now get a C++ class if you are compiling with a C++ compiler. The generated header is far richer than before, and is made from a skeleton header. The code skeleton is also richer, and the generated code is smaller relative to the skeletons. This lets you change many things simply by editing the two skeletons.
+ In plain C, "bison++" is compatible with standard "bison".
+:SECTION "OPTIONS"
+ -{"--name-prefix="<prefix>}
+ -{"-p" <prefix>} Set prefix of names of yylex,yyerror. keeped for compatibility, but you should prefer "\%define LEX "<newname>, and similar.
+ -{"--skeleton="<skeleton>}
+ -{"-S" <skeleton>} Set filename of code skeleton. Default is "bison.cc".
+ -{"--headerskeleton="<header-skeleton>}
+ -{"-H" <header-skeleton>} Set filename of header skeleton. Default is "bison.h".
+ -{"--header-name="<header>}
+ -{"-h" <header>} Set filename of header skeleton. Default is "y.tab.h", or <prefix>.h if option "-b" is used or <c_basename>.h if "-o" is used. ".c", ".cc", ".C", ".cpp", ".cxx" options for output files are replaced by ".h" for header name.
+:SECTION "DECLARATIONS"
+ These are new declarations to put in the declaration section:
+ -{"\%name" <parser_name>} Declare the name of this parser. Used for the C++ class name, and to make many names unique. Default is "parse". Must be given before "\%union" and "\%define", or not at all.
+ -{"\%define" <define_name> <content...>} Declare a macro symbol in the header and the code. The name of the symbol is "YY_"'<parser_name>'"_"'<define_name>'. The content is given afterwards, as with #define. Newlines can be escaped as with #define. Many symbols are provided for customisation.
+ -{"\%union"} As with bison, generates a union for the semantic type. The difference is that the union is named "yy_"'<parser_name>'"_stype".
+ -{"\%pure_parser"} As with bison in C. In C++, generates a parser where yylval and yylloc (if needed) are passed as parameters to yylex, and where some instance variables are local to yyparse (like yydebug...). Not very useful, since you can create multiple instances to reenter another parser.
+ -{"\%header\{"} Like "\%\{", but includes this text both in the header and in the code. End it with "\%\}". When put in the declaration section, the text is added before the definitions. It can also be put in the last section, so that the text is added after all definitions in the header, and at the current position in the code of the last section.
+ Note that the order of these declarations is important, since they are translated into preprocessor symbols, typedefs or code depending on their type. For example, use "\%name" before any "\%define", since the name is needed to compose the names of the define symbols. The order of "\%header" and "\%union" is important, since a type may otherwise be undefined.
+:SECTION "DECLARATION DEFINE SYMBOLS"
+ These are the symbols you can define with "\%define" in the declaration section, or that are already defined. Remember that they are turned into a preprocessor "#define YY_"'<parser_name>'"_"'<name>'.
+ -{"BISON"} defined to "1" in the code. used for conditional code. Don't redefine it.
+ -{"h_included"} defined in the code, and in the header. used for include anti-reload. Don't redefine it.
+ -{"COMPATIBILITY"} Indicate if obsoleted defines are to be used and produced. If defined to 0, indicate no compatibility needed, else if defined to non-0, generate it. If it is undefined, default is to be compatible if classes are not used.
+ -{"USE_GOTO"} Indicates (if defined as 1) that "goto" are to be used (for backward compatibility) in the parser function. By default "goto" are replaced with a "switch" construction, to avoid problems with some compiler that don't support "goto" and destructor in the same function block. If "COMPATIBILITY" is 1, and "USE_GOTO" is not defined, then "USE_GOTO" is defined to 1, to be compatible with older bison.
+ -{"USE_CONST_TOKEN"} Indicate (if defined as 1) that "static const int" are to be used in C++, for token IDs. By default an enum is used to define the token IDs instead of const.
+ -{"ENUM_TOKEN"} When "enum" are used instead of "static const int" for token IDs, this symbol define the name of the enum type. Defined to "yy_"'<parser_name>'"_enum_token" by default.
+ -{"PURE"} Indicate that "\%pure_parser" is asked... Don't redefine it.
+ -{"LSP_NEEDED"} if defined indicate that @ construct is used, so "LLOC" stack is needed. Can be defined to force use of location stack.
+ -{"DEBUG"} if defined to non-0 activate debugging code. See" YYDEBUG" in bison.
+ -{"ERROR_VERBOSE"} if defined activate dump parser stack when error append.
+ -{"STYPE"} the type of the semantic value of token. defined by "\%union". default is "int". See "YYSTYPE" in bison. Don't redefine it, if you use a "\%union".
+ -{"LTYPE"} The token location type. If needed default is "yyltype". See "YYLTYPE" in bison. default "yyltype" is a typedef and struct defined as in old bison.
+ -{"LLOC"} The token location variable name. If needed, default is "yylloc". See "yylloc" in bison.
+ -{"LVAL"} The token semantic value variable name. Default "yylval". See "yylval" in bison.
+ -{"CHAR"} The lookahead token value variable name. Default "yychar". See "yychar" in bison.
+ -{"LEX"} The scanner function name. Default "yylex". See "yylex" in bison.
+ -{"PARSE"} The parser function name. Default "yyparse". See "yyparse" in bison.
+ -{"PARSE_PARAM"} The parser function parameters declaration. Default "void" in C++ or ANSIC, nothing if old C. In ANSIC and C++ contain the prototype. In old-C comtaim just the list of parameters name. Don't allows default value.
+ -{"PARSE_PARAM_DEF"} The parser function parameters definition, for old style C. Default nothing. For example to use an "int" parameter called "x", PARSE_PARAM is "x", and PARSE_PARAM_DEF is "int x;". In ANSIC or C++ it is unuseful and ignored.
+ -{"ERROR"} The error function name. Default "yyerror". See "yyerror" in bison.
+ -{"NERRS"} The error count name. Default "yynerrs". See "yynerrs" in bison.
+ -{"DEBUG_FLAG"} The runtime debug flag. Default "yydebug". See "yydebug" in bison.
+ These are only used if a class is generated.
+ -{"CLASS"} The class name. Default is the parser name.
+ -{"INHERIT"} The inheritance list. Don't forget the leading ":" if the list is not empty.
+ -{"MEMBERS"} List of members to add to the class definition, before closing it.
+ -{"LEX_BODY"} The scanner member function body. May be defined to "=0" for a pure virtual function, or to an inline body.
+ -{"ERROR_BODY"} The error member function body. May be defined to "=0" for a pure virtual function, or to an inline body.
+ -{"CONSTRUCTOR_PARAM"} List of parameters of the constructor. Default values are not allowed.
+ -{"CONSTRUCTOR_INIT"} List of initialisations before the constructor body. If it is not empty, don't forget the leading ":".
+ -{"CONSTRUCTOR_CODE"} Code added after the internal initialisation in the constructor.
+:SECTION "OBSOLETED PREPROCESSOR SYMBOLS"
+ If you use the new features, the following symbols should not be used, though they are still provided. The symbol "COMPATIBILITY" controls their availability. Inconsistencies may arise if they are defined simultaneously with the new symbols.
+ -{"YYLTYPE"} prefer "\%define LTYPE".
+ -{"YYSTYPE"} prefer "\%define STYPE".
+ -{"YYDEBUG"} prefer "\%define DEBUG".
+ -{"YYERROR_VERBOSE"} prefer "\%define ERROR_VERBOSE".
+ -{"YYLSP_NEEDED"} prefer "\%define LSP_NEEDED".
+ -{"yystype"} Now a preprocessor symbol instead of a typedef. prefer "yy_"'<parser_name>'"_stype".
+:SECTION "CONSERVED PREPROCESSOR SYMBOLS"
+ These symbols are kept, and cannot be set through "\%define", since they control private parameters of the generated parser, or are actually unused. You can "#define" them to the value you need, or indirectly to the name of a "\%define"-generated symbol if you want to be clean.
+ -{"YYINITDEPTH"} initial stack depth.
+ -{"YYMAXDEPTH"} stack overflow limit depth.
+ -{"yyoverflow"} instead of expand with alloca, realloc manualy or raise error.
+:SECTION "OTHER ADDED PREPROCESSOR SYMBOLS"
+ -{"YY_USE_CLASS"} indicate that class will be produced. Default if C++.
+:SECTION "C++ CLASS GENERATED"
+ To simplify the notation, we write "\%SYMBOLNAME" for the preprocessor symbol generated by a "\%define" of that name. See the description of "\%define" for its real name.
+ Note that some symbols differ only by an underscore "_", like "yywrap" and "yy_wrap", yet they are quite different. In this case "yy_wrap()" is a virtual member function, and "yywrap()" is a macro.
+ :SSECTION "General Class declaration"
+ class \%CLASS \%INHERIT
+ \{
+ public:
+ #if \%USE_CONST_TOKEN != 0
+ static const TOKEN_NEXT;
+ static const AND_SO_ON;
+ // ...
+ #else
+ enum \%ENUM_TOKEN \{ \%NULL_TOKEN
+ > ,TOKEN_FIRST=256
+ > ,TOKEN_NEXT=257
+ > ,AND_SO_ON=258
+ \} ;
+ // ...
+ #endif
+ public:
+ int \%PARSE (\%PARSE_PARAM);
+ virtual void \%ERROR(char *msg) \%ERROR_BODY;
+ #ifdef \%PURE
+ >// if \%PURE , we must pass the value and (eventually) the location explicitely
+ >#ifdef \%LSP_NEEDED
+ >// if and only if \%LSP_NEEDED , we must pass the location explicitely
+ >virtual int \%LEX (\%STYPE *\%LVAL,\%LTYPE *\%LLOC) \%LEX_BODY;
+ >#else
+ >virtual int \%LEX (\%STYPE *\%LVAL) \%LEX_BODY;
+ >#endif
+ #else
+ >// if not \%PURE , we must declare member to store the value and (eventually) the location explicitely
+ >// if not \%PURE ,\%NERRS and \%CHAR are not local variable to \%PARSE, so must be member
+ >virtual int \%LEX() \%LEX_BODY;
+ >\%STYPE \%LVAL;
+ >#ifdef \%LSP_NEEDED
+ >\%LTYPE \%LLOC;
+ >#endif
+ >int \%NERRS;
+ >int \%CHAR;
+ #endif
+ #if \%DEBUG != 0
+ int \%DEBUG_FLAG; /* nonzero means print parse trace */
+ #endif
+ public:
+ \%CLASS(\%CONSTRUCTOR_PARAM);
+ public:
+ \%MEMBERS
+ \};
+ // here are defined the token constants
+ // for example:
+ #if \%USE_CONST_TOKEN != 0
+ >const \%CLASS::TOKEN_FIRST=1;
+ >...
+ #endif
+ // here is the construcor
+ \%CLASS::\%CLASS(\%CONSTRUCTOR_PARAM) \%CONSTRUCTOR_INIT
+ \{
+ #if \%DEBUG != 0
+ \%DEBUG_FLAG=0;
+ #endif
+ \%CONSTRUCTOR_CODE;
+ \};
+ :SSECTION "Default Class declaration"
+ // Here is the default declaration made in the header when you \%define nothing
+ // typical yyltype
+ typedef struct yyltype
+ \{
+ int timestamp;
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+ char *text;
+ \} yyltype;
+ // class definition
+ class parser
+ \{
+ public:
+ enum yy_parser_enum_token \{ YY_parser_NULL_TOKEN
+ > ,TOKEN_FIRST=256
+ > ,TOKEN_NEXT=257
+ > ,AND_SO_ON=258
+ \} ;
+ // ...
+ public:
+ int yyparse (yyparse_PARAM);
+ virtual void yyerror(char *msg) ;
+ #ifdef YY_parser_PURE
+ >#ifdef YY_parser_LSP_NEEDED
+ >virtual int yylex (int *yylval,yyltype *yylloc) ;
+ >#else
+ >virtual int yylex (int *yylval) ;
+ >#endif
+ #else
+ >virtual int yylex() \%LEX_BODY;
+ >int yylval;
+ >#ifdef YY_parser_LSP_NEEDED
+ >yyltype yylloc;
+ >#endif
+ >int yynerrs;
+ >int yychar;
+ #endif
+ #if YY_parser_DEBUG != 0
+ int yydebug;
+ #endif
+ public:
+ parser();
+ public:
+ \};
+ // here is the constructor code
+ parser::parser()
+ \{
+ #if YY_parser_DEBUG != 0
+ yydebug=0;
+ #endif
+ \};
+
+:SECTION "USAGE"
+ Should replace "bison", because it generate a far more customisable parser, still beeing compatible.
+ You should always use the header facility.
+ Use it with "flex++" (same author).
+:SECTION "EXEMPLES"
+ This man page was produced by a parser written in C++ with this version of "bison" and our version of "flex++" (same author).
+:SECTION "FILES"
+ -{"bison.cc"} main skeleton.
+ -{"bison.h"} header skeleton.
+ -{"bison.hairy"} old main skeleton for semantic parser. Not adapted to this version. Kept for future works.
+:SECTION "ENVIRONNEMENT"
+:SECTION "DIAGNOSTICS"
+:SECTION "SEE ALSO"
+ "bison"(1), "bison.info" (use texinfo), "flex++"(1).
+:SECTION "DOCUMENTATION"
+:SECTION "BUGS"
+ Tell us more!
+ The "\%semantic_parser" is no more supported. If you want to use it, adapt the skeletons, and maybe "bison++" generator itself. The reason is that it seems unused, unuseful, not documented, and too complex for us to support. tell us if you use, need, or understand it.
+ Header is not included in the parser code. Change made in the generated header are not used in the parser code, even if you include it volontarily, since it is guarded against re-include. So don't modify it.
+ For the same reasons, if you modify the header skeleton, or the code skeleton, report the changes in the other skeleton if applicable. If not done, incoherent declarations may lead to unpredictable result.
+ Use of defines for "YYLTYPE", "YYSTYPE", "YYDEBUG" is supported for backward compatibility in C, but should not be used with new features, as "\%defines" or C++ classes. You can define them, and use them as with old "bison" in C only.
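+ For example, an old-style C grammar may still place definitions such as these in its C declarations section (a sketch; the values are illustrative):
+ >#define YYSTYPE double /* semantic values are doubles */
+ >#define YYDEBUG 1 /* compile in the parse tracing code */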
+ Parameters are richer than before, and nothing has been removed. POSIX compliance can be achieved by not using the extensions. If you want a way to forbid them, there is a good job waiting to be done!
+:SECTION "FUTUR WORKS"
+ Tell us!
+ Support the semantic parser. Is it really used?
+ POSIX compliance. Isn't it good enough now?
+ Use lex and yacc (flex/bison) to generate the scanner/parser. It would be convenient for future work, though very complicated. Who thinks this would be worthwhile?
+ "iostream" support: this is in great demand, and the work will be done as soon as possible. The virtual member functions should make it fairly easy.
+
+:SECTION "INSTALLATION"
+ With this installation the executable is named "bison++". Rename it to "bison" if you want, since it can replace "bison".
+:SECTION "TESTS"
+:SECTION "AUTHORS"
+ Alain Coëtmeur (coetmeur@icdc.fr), R&D department (RDT), Informatique-CDC, France.
+:SECTION "RESTRICTIONS"
+ The words 'author' and 'us' mean the author and colleagues, not GNU. We have not contacted GNU about this yet. If you are in GNU, we are ready to propose it to you, and you may tell us what you think about it.
+ Based on GNU version 1.21 of bison. Modified by the author.
diff --git a/tools/bison++/bison++.yacc b/tools/bison++/bison++.yacc
new file mode 100644
index 000000000..75102cb0d
--- /dev/null
+++ b/tools/bison++/bison++.yacc
@@ -0,0 +1,2 @@
+#!/bin/sh
+ exec bison -y "$@"
\ No newline at end of file
diff --git a/tools/bison++/bison++.yacc.1 b/tools/bison++/bison++.yacc.1
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tools/bison++/bison++.yacc.1
diff --git a/tools/bison++/bison.1 b/tools/bison++/bison.1
new file mode 100644
index 000000000..4a7132340
--- /dev/null
+++ b/tools/bison++/bison.1
@@ -0,0 +1,279 @@
+.TH BISON 1 local
+.SH NAME
+bison \- GNU Project parser generator (yacc replacement)
+.SH SYNOPSIS
+.B bison
+[
+.BI \-b " file-prefix"
+] [
+.BI \-\-file-prefix= file-prefix
+] [
+.B \-d
+] [
+.B \-\-defines
+] [
+.B \-l
+] [
+.B \-\-no-lines
+] [
+.BI \-o " outfile"
+] [
+.BI \-\-output-file= outfile
+] [
+.BI \-p " prefix"
+] [
+.BI \-\-name-prefix= prefix
+] [
+.B \-t
+] [
+.B \-\-debug
+] [
+.B \-v
+] [
+.B \-\-verbose
+] [
+.B \-V
+] [
+.B \-\-version
+] [
+.B \-y
+] [
+.B \-\-yacc
+] [
+.B \-\-fixed-output-files
+]
+file
+.SH DESCRIPTION
+.I Bison
+is a parser generator in the style of
+.IR yacc (1).
+It should be upwardly compatible with input files designed
+for
+.IR yacc .
+.PP
+Input files should follow the
+.I yacc
+convention of ending in
+.BR .y .
+Unlike
+.IR yacc ,
+the generated files do not have fixed names, but instead use the prefix
+of the input file.
+For instance, a grammar description file named
+.B parse.y
+would produce the generated parser in a file named
+.BR parse.tab.c ,
+instead of
+.IR yacc 's
+.BR y.tab.c .
+.PP
+This description of the options that can be given to
+.I bison
+is adapted from the node
+.B Invocation
+in the
+.B bison.texinfo
+manual, which should be taken as authoritative.
+.PP
+.I Bison
+supports both traditional single-letter options and mnemonic long
+option names. Long option names are indicated with
+.B \-\-
+instead of
+.BR \- .
+Abbreviations for option names are allowed as long as they
+are unique. When a long option takes an argument, like
+.BR \-\-file-prefix ,
+connect the option name and the argument with
+.BR = .
+.SS OPTIONS
+.TP
+.BI \-b " file-prefix"
+.br
+.ns
+.TP
+.BI \-\-file-prefix= file-prefix
+Specify a prefix to use for all
+.I bison
+output file names. The names are
+chosen as if the input file were named
+\fIfile-prefix\fB.c\fR.
+.TP
+.B \-d
+.br
+.ns
+.TP
+.B \-\-defines
+Write an extra output file containing macro definitions for the token
+type names defined in the grammar and the semantic value type
+.BR YYSTYPE ,
+as well as a few
+.B extern
+variable declarations.
+.sp
+If the parser output file is named
+\fIname\fB.c\fR
+then this file
+is named
+\fIname\fB.h\fR.
+.sp
+This output file is essential if you wish to put the definition of
+.B yylex
+in a separate source file, because
+.B yylex
+needs to be able to refer to token type codes and the variable
+.BR yylval .
+.TP
+.B \-l
+.br
+.ns
+.TP
+.B \-\-no-lines
+Don't put any
+.B #line
+preprocessor commands in the parser file.
+Ordinarily
+.I bison
+puts them in the parser file so that the C compiler
+and debuggers will associate errors with your source file, the
+grammar file. This option causes them to associate errors with the
+parser file, treating it as an independent source file in its own right.
+.TP
+.BI \-o " outfile"
+.br
+.ns
+.TP
+.BI \-\-output-file= outfile
+Specify the name
+.I outfile
+for the parser file.
+.sp
+The other output files' names are constructed from
+.I outfile
+as described under the
+.B \-v
+and
+.B \-d
+switches.
+.TP
+.BI \-p " prefix"
+.br
+.ns
+.TP
+.BI \-\-name-prefix= prefix
+Rename the external symbols used in the parser so that they start with
+.I prefix
+instead of
+.BR yy .
+The precise list of symbols renamed is
+.BR yyparse ,
+.BR yylex ,
+.BR yyerror ,
+.BR yylval ,
+.BR yychar ,
+and
+.BR yydebug .
+.sp
+For example, if you use
+.BR "\-p c" ,
+the names become
+.BR cparse ,
+.BR clex ,
+and so on.
+.TP
+.B \-t
+.br
+.ns
+.TP
+.B \-\-debug
+Output a definition of the macro
+.B YYDEBUG
+into the parser file,
+so that the debugging facilities are compiled.
+.TP
+.B \-v
+.br
+.ns
+.TP
+.B \-\-verbose
+Write an extra output file containing verbose descriptions of the
+parser states and what is done for each type of look-ahead token in
+that state.
+.sp
+This file also describes all the conflicts, both those resolved by
+operator precedence and the unresolved ones.
+.sp
+The file's name is made by removing
+.B .tab.c
+or
+.B .c
+from the parser output file name, and adding
+.B .output
+instead.
+.sp
+Therefore, if the input file is
+.BR foo.y ,
+then the parser file is called
+.B foo.tab.c
+by default. As a consequence, the verbose
+output file is called
+.BR foo.output .
+.TP
+.B \-V
+.br
+.ns
+.TP
+.B \-\-version
+Print the version number of
+.IR bison .
+.TP
+.B \-y
+.br
+.ns
+.TP
+.B \-\-yacc
+.br
+.ns
+.TP
+.B \-\-fixed-output-files
+Equivalent to
+.BR "\-o y.tab.c" ;
+the parser output file is called
+.BR y.tab.c ,
+and the other outputs are called
+.B y.output
+and
+.BR y.tab.h .
+The purpose of this switch is to imitate
+.IR yacc 's
+output file name conventions.
+Thus, the following shell script can substitute for
+.IR yacc :
+.sp
+.RS
+.ft B
+bison \-y $*
+.ft R
+.sp
+.RE
+.PP
+The long-named options can be introduced with `+' as well as `\-\-',
+for compatibility with previous releases. Eventually support for `+'
+will be removed, because it is incompatible with the POSIX.2 standard.
+.SH FILES
+/usr/local/lib/bison.simple simple parser
+.br
+/usr/local/lib/bison.hairy complicated parser
+.SH SEE ALSO
+.IR yacc (1)
+.br
+The
+.IR "Bison Reference Manual" ,
+included as the file
+.B bison.texinfo
+in the
+.I bison
+source distribution.
+.SH DIAGNOSTICS
+Self explanatory.
+
diff --git a/tools/bison++/bison.cc b/tools/bison++/bison.cc
new file mode 100644
index 000000000..7611087b4
--- /dev/null
+++ b/tools/bison++/bison.cc
@@ -0,0 +1,1040 @@
+/* -*-C-*- Note some compilers choke on comments on `#line' lines. */
+/* Skeleton output parser for bison,
+ Copyright (C) 1984, 1989, 1990 Bob Corbett and Richard Stallman
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 1, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ As a special exception, when this file is copied by Bison++ into a
+ Bison++ output file, you may use that output file without restriction.
+ This special exception was added by the Free Software Foundation
+ in version 1.24 of Bison, and has been in Bison++ since 1.21.9.
+
+*/
+
+/* HEADER SECTION */
+#if defined( _MSDOS ) || defined(MSDOS) || defined(__MSDOS__)
+ #define __MSDOS_AND_ALIKE
+#endif
+
+#if defined(_WINDOWS) && defined(_MSC_VER)
+ #define __HAVE_NO_ALLOCA
+ #define __MSDOS_AND_ALIKE
+#endif
+
+#ifndef alloca
+ #if defined( __GNUC__)
+ #define alloca __builtin_alloca
+
+ #elif (!defined (__STDC__) && defined (sparc)) || defined (__sparc__) || defined (__sparc) || defined (__sgi)
+ #include <alloca.h>
+
+ #elif defined (__MSDOS_AND_ALIKE)
+ #include <malloc.h>
+ #ifndef __TURBOC__
+ /* MS C runtime lib */
+ #define alloca _alloca
+ #endif
+
+ #elif defined(_AIX)
+ /* pragma must be put before any C/C++ instruction !! */
+ #pragma alloca
+ #include <malloc.h>
+
+ #elif defined(__hpux)
+ #ifdef __cplusplus
+ extern "C" {
+ void *alloca (unsigned int);
+ };
+ #else /* not __cplusplus */
+ void *alloca ();
+ #endif /* not __cplusplus */
+
+ #endif /* not _AIX not MSDOS, or __TURBOC__ or _AIX, not sparc. */
+#endif /* alloca not defined. */
+
+#ifdef c_plusplus
+ #ifndef __cplusplus
+ #define __cplusplus
+ #endif
+#endif
+
+#ifdef __cplusplus
+ #ifndef YY_USE_CLASS
+/*#warning "For C++ its recomended to use bison++, otherwise classes won't be generated"*/
+ #endif
+#else
+ #ifndef __STDC__
+ #define const
+ #endif
+ #ifdef YY_USE_CLASS
+ #error "This is a C++ header generated by bison++, please use a C++ compiler!"
+ #endif
+#endif
+
+#include <stdio.h>
+#define YYBISON 1
+$/* %{ and %header{ and %union, during decl */
+#define YY_@_BISON 1
+
+#ifndef YY_@_COMPATIBILITY
+ #ifndef YY_USE_CLASS
+ #define YY_@_COMPATIBILITY 1
+ #else
+ #define YY_@_COMPATIBILITY 0
+ #endif
+#endif
+
+#if YY_@_COMPATIBILITY != 0
+ /* backward compatibility */
+ #ifdef YYLTYPE
+ #ifndef YY_@_LTYPE
+ #define YY_@_LTYPE YYLTYPE
+ #endif
+ #endif
+/* Testing alternative bison solution
+ /#ifdef YYSTYPE*/
+#ifndef YY_@_STYPE
+ #define YY_@_STYPE YYSTYPE
+#endif
+/*#endif*/
+ #ifdef YYDEBUG
+ #ifndef YY_@_DEBUG
+ #define YY_@_DEBUG YYDEBUG
+ #endif
+ #endif
+
+ /* use goto to be compatible */
+ #ifndef YY_@_USE_GOTO
+ #define YY_@_USE_GOTO 1
+ #endif
+#endif
+
+/* use no goto to be clean in C++ */
+#ifndef YY_@_USE_GOTO
+ #define YY_@_USE_GOTO 0
+#endif
+
+#ifndef YY_@_PURE
+$/* YY_@_PURE */
+#endif
+
+/* section after reading the definitions, before reading the grammar (S2) */
+$/* prefix */
+#ifndef YY_@_DEBUG
+$/* YY_@_DEBUG */
+#endif
+
+
+#ifndef YY_@_LSP_NEEDED
+$ /* YY_@_LSP_NEEDED*/
+#endif
+
+
+
+/* DEFAULT LTYPE*/
+#ifdef YY_@_LSP_NEEDED
+#ifndef YY_@_LTYPE
+#ifndef BISON_YYLTYPE_ISDECLARED
+#define BISON_YYLTYPE_ISDECLARED
+typedef
+ struct yyltype
+ {
+ int timestamp;
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+ char *text;
+ }
+ yyltype;
+
+#endif
+#define YY_@_LTYPE yyltype
+#endif
+#endif
+/* DEFAULT STYPE*/
+ /* We used to use `unsigned long' as YY_@_STYPE on MSDOS,
+ but it seems better to be consistent.
+ Most programs should declare their own type anyway. */
+
+#ifndef YY_@_STYPE
+#define YY_@_STYPE int
+#endif
+/* DEFAULT MISCELLANEOUS */
+#ifndef YY_@_PARSE
+#define YY_@_PARSE yyparse
+#endif
+#ifndef YY_@_LEX
+#define YY_@_LEX yylex
+#endif
+#ifndef YY_@_LVAL
+#define YY_@_LVAL yylval
+#endif
+#ifndef YY_@_LLOC
+#define YY_@_LLOC yylloc
+#endif
+#ifndef YY_@_CHAR
+#define YY_@_CHAR yychar
+#endif
+#ifndef YY_@_NERRS
+#define YY_@_NERRS yynerrs
+#endif
+#ifndef YY_@_DEBUG_FLAG
+#define YY_@_DEBUG_FLAG yydebug
+#endif
+#ifndef YY_@_ERROR
+#define YY_@_ERROR yyerror
+#endif
+
+#ifndef YY_@_PARSE_PARAM
+ #ifndef YY_USE_CLASS
+ #ifdef YYPARSE_PARAM
+ #define YY_@_PARSE_PARAM void* YYPARSE_PARAM
+ #else
+ #ifndef __STDC__
+ #ifndef __cplusplus
+ #define YY_@_PARSE_PARAM
+ #endif
+ #endif
+ #endif
+ #endif
+ #ifndef YY_@_PARSE_PARAM
+ #define YY_@_PARSE_PARAM void
+ #endif
+#endif
+
+#if YY_@_COMPATIBILITY != 0
+/* backward compatibility */
+#ifdef YY_@_LTYPE
+#ifndef YYLTYPE
+#define YYLTYPE YY_@_LTYPE
+#else
+/* WARNING obsolete !!! user defined YYLTYPE not reported into generated header */
+#endif
+#endif
+
+/* Removed due to bison compatibility problems
+/#ifndef YYSTYPE
+/#define YYSTYPE YY_@_STYPE
+/#else*/
+/* WARNING obsolete !!! user defined YYSTYPE not reported into generated header */
+/*#endif*/
+
+#ifdef YY_@_PURE
+# ifndef YYPURE
+# define YYPURE YY_@_PURE
+# endif
+#endif
+
+#ifdef YY_@_DEBUG
+# ifndef YYDEBUG
+# define YYDEBUG YY_@_DEBUG
+# endif
+#endif
+
+#ifndef YY_@_ERROR_VERBOSE
+ #ifdef YYERROR_VERBOSE
+ #define YY_@_ERROR_VERBOSE YYERROR_VERBOSE
+ #endif
+#endif
+
+#ifndef YY_@_LSP_NEEDED
+# ifdef YYLSP_NEEDED
+# define YY_@_LSP_NEEDED YYLSP_NEEDED
+# endif
+#endif
+
+#endif
+
+#ifndef YY_USE_CLASS
+/* TOKEN C */
+$ /* #defines tokens */
+#else
+/* CLASS */
+#ifndef YY_@_CLASS
+#define YY_@_CLASS @
+#endif
+#ifndef YY_@_INHERIT
+#define YY_@_INHERIT
+#endif
+#ifndef YY_@_MEMBERS
+#define YY_@_MEMBERS
+#endif
+#ifndef YY_@_LEX_BODY
+#define YY_@_LEX_BODY
+#endif
+#ifndef YY_@_ERROR_BODY
+#define YY_@_ERROR_BODY
+#endif
+#ifndef YY_@_CONSTRUCTOR_PARAM
+#define YY_@_CONSTRUCTOR_PARAM
+#endif
+#ifndef YY_@_CONSTRUCTOR_CODE
+#define YY_@_CONSTRUCTOR_CODE
+#endif
+#ifndef YY_@_CONSTRUCTOR_INIT
+#define YY_@_CONSTRUCTOR_INIT
+#endif
+/* choose between enum and const */
+#ifndef YY_@_USE_CONST_TOKEN
+#define YY_@_USE_CONST_TOKEN 0
+/* yes enum is more compatible with flex, */
+/* so by default we use it */
+#endif
+#if YY_@_USE_CONST_TOKEN != 0
+#ifndef YY_@_ENUM_TOKEN
+#define YY_@_ENUM_TOKEN yy_@_enum_token
+#endif
+#endif
+
+class YY_@_CLASS YY_@_INHERIT
+{
+public:
+#if YY_@_USE_CONST_TOKEN != 0
+/* static const int token ... */
+$ /* decl const */
+#else
+enum YY_@_ENUM_TOKEN { YY_@_NULL_TOKEN=0
+$ /* enum token */
+ }; /* end of enum declaration */
+#endif
+public:
+ int YY_@_PARSE (YY_@_PARSE_PARAM);
+ virtual void YY_@_ERROR(char *msg) YY_@_ERROR_BODY;
+#ifdef YY_@_PURE
+#ifdef YY_@_LSP_NEEDED
+ virtual int YY_@_LEX (YY_@_STYPE *YY_@_LVAL,YY_@_LTYPE *YY_@_LLOC) YY_@_LEX_BODY;
+#else
+ virtual int YY_@_LEX (YY_@_STYPE *YY_@_LVAL) YY_@_LEX_BODY;
+#endif
+#else
+ virtual int YY_@_LEX() YY_@_LEX_BODY;
+ YY_@_STYPE YY_@_LVAL;
+#ifdef YY_@_LSP_NEEDED
+ YY_@_LTYPE YY_@_LLOC;
+#endif
+ int YY_@_NERRS;
+ int YY_@_CHAR;
+#endif
+#if YY_@_DEBUG != 0
+ int YY_@_DEBUG_FLAG; /* nonzero means print parse trace */
+#endif
+public:
+ YY_@_CLASS(YY_@_CONSTRUCTOR_PARAM);
+public:
+ YY_@_MEMBERS
+};
+/* other declarations follow */
+#if YY_@_USE_CONST_TOKEN != 0
+$ /* const YY_@_CLASS::token */
+#endif
+/* after the const definitions */
+YY_@_CLASS::YY_@_CLASS(YY_@_CONSTRUCTOR_PARAM) YY_@_CONSTRUCTOR_INIT
+{
+#if YY_@_DEBUG != 0
+YY_@_DEBUG_FLAG=0;
+#endif
+YY_@_CONSTRUCTOR_CODE;
+};
+#endif
+$ /* fattrs + tables */
+
+/* parser code follows */
+
+
+/* This is the parser code that is written into each bison parser
+ when the %semantic_parser declaration is not specified in the grammar.
+ It was written by Richard Stallman by simplifying the hairy parser
+ used when %semantic_parser is specified. */
+
+/* Note: dollar marks section change
+ the next is replaced by the list of actions, each action
+ as one case of the switch. */
+
+#if YY_@_USE_GOTO != 0
+/*
+ SUPPRESSION OF GOTO: on some C++ compilers (Sun C++)
+ goto is strictly forbidden if any constructor/destructor
+ is used anywhere in the function (a rather annoying restriction),
+ so the gotos are replaced with a 'while/switch/case' construct.
+ Here are the macros that keep some apparent compatibility.
+*/
+#define YYGOTO(lb) {yy_gotostate=lb;continue;}
+#define YYBEGINGOTO enum yy_labels yy_gotostate=yygotostart; \
+ for(;;) switch(yy_gotostate) { case yygotostart: {
+#define YYLABEL(lb) } case lb: {
+#define YYENDGOTO } }
+#define YYBEGINDECLARELABEL enum yy_labels {yygotostart
+#define YYDECLARELABEL(lb) ,lb
+#define YYENDDECLARELABEL };
+#else
+/* macro to keep goto */
+#define YYGOTO(lb) goto lb
+#define YYBEGINGOTO
+#define YYLABEL(lb) lb:
+#define YYENDGOTO
+#define YYBEGINDECLARELABEL
+#define YYDECLARELABEL(lb)
+#define YYENDDECLARELABEL
+#endif
+/* LABEL DECLARATION */
+YYBEGINDECLARELABEL
+ YYDECLARELABEL(yynewstate)
+ YYDECLARELABEL(yybackup)
+/* YYDECLARELABEL(yyresume) */
+ YYDECLARELABEL(yydefault)
+ YYDECLARELABEL(yyreduce)
+ YYDECLARELABEL(yyerrlab) /* here on detecting error */
+ YYDECLARELABEL(yyerrlab1) /* here on error raised explicitly by an action */
+ YYDECLARELABEL(yyerrdefault) /* current state does not do anything special for the error token. */
+ YYDECLARELABEL(yyerrpop) /* pop the current state because it cannot handle the error token */
+ YYDECLARELABEL(yyerrhandle)
+YYENDDECLARELABEL
+/* ALLOCA SIMULATION */
+/* __HAVE_NO_ALLOCA */
+#ifdef __HAVE_NO_ALLOCA
+int __alloca_free_ptr(char *ptr,char *ref)
+{if(ptr!=ref) free(ptr);
+ return 0;}
+
+#define __ALLOCA_alloca(size) malloc(size)
+#define __ALLOCA_free(ptr,ref) __alloca_free_ptr((char *)ptr,(char *)ref)
+
+#ifdef YY_@_LSP_NEEDED
+#define __ALLOCA_return(num) \
+ do { return( __ALLOCA_free(yyss,yyssa)+\
+ __ALLOCA_free(yyvs,yyvsa)+\
+ __ALLOCA_free(yyls,yylsa)+\
+ (num)); } while(0)
+#else
+#define __ALLOCA_return(num) \
+ do { return( __ALLOCA_free(yyss,yyssa)+\
+ __ALLOCA_free(yyvs,yyvsa)+\
+ (num)); } while(0)
+#endif
+#else
+#define __ALLOCA_return(num) do { return(num); } while(0)
+#define __ALLOCA_alloca(size) alloca(size)
+#define __ALLOCA_free(ptr,ref)
+#endif
+
+/* ENDALLOCA SIMULATION */
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (YY_@_CHAR = YYEMPTY)
+#define YYEMPTY -2
+#define YYEOF 0
+#define YYACCEPT __ALLOCA_return(0)
+#define YYABORT __ALLOCA_return(1)
+#define YYERROR YYGOTO(yyerrlab1)
+/* Like YYERROR except do call yyerror.
+ This remains here temporarily to ease the
+ transition to the new meaning of YYERROR, for GCC.
+ Once GCC version 2 has supplanted version 1, this can go. */
+#define YYFAIL YYGOTO(yyerrlab)
+#define YYRECOVERING() (!!yyerrstatus)
+#define YYBACKUP(token, value) \
+do \
+ if (YY_@_CHAR == YYEMPTY && yylen == 1) \
+ { YY_@_CHAR = (token), YY_@_LVAL = (value); \
+ yychar1 = YYTRANSLATE (YY_@_CHAR); \
+ YYPOPSTACK; \
+ YYGOTO(yybackup); \
+ } \
+ else \
+ { YY_@_ERROR ("syntax error: cannot back up"); YYERROR; } \
+while (0)
+
+#define YYTERROR 1
+#define YYERRCODE 256
+
+#ifndef YY_@_PURE
+/* UNPURE */
+#define YYLEX YY_@_LEX()
+#ifndef YY_USE_CLASS
+/* If nonreentrant and not a class, generate the variables here */
+int YY_@_CHAR; /* the lookahead symbol */
+YY_@_STYPE YY_@_LVAL; /* the semantic value of the */
+ /* lookahead symbol */
+int YY_@_NERRS; /* number of parse errors so far */
+#ifdef YY_@_LSP_NEEDED
+YY_@_LTYPE YY_@_LLOC; /* location data for the lookahead */
+ /* symbol */
+#endif
+#endif
+
+
+#else
+/* PURE */
+#ifdef YY_@_LSP_NEEDED
+#define YYLEX YY_@_LEX(&YY_@_LVAL, &YY_@_LLOC)
+#else
+#define YYLEX YY_@_LEX(&YY_@_LVAL)
+#endif
+#endif
+#ifndef YY_USE_CLASS
+#if YY_@_DEBUG != 0
+int YY_@_DEBUG_FLAG; /* nonzero means print parse trace */
+/* Since this is uninitialized, it does not stop multiple parsers
+ from coexisting. */
+#endif
+#endif
+
+
+
+/* YYINITDEPTH indicates the initial size of the parser's stacks */
+
+#ifndef YYINITDEPTH
+#define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH is the maximum size the stacks can grow to
+ (effective only if the built-in stack extension method is used). */
+
+#if YYMAXDEPTH == 0
+#undef YYMAXDEPTH
+#endif
+
+#ifndef YYMAXDEPTH
+#define YYMAXDEPTH 10000
+#endif
+
+
+#if __GNUC__ > 1 /* GNU C and GNU C++ define this. */
+#define __yy_bcopy(FROM,TO,COUNT) __builtin_memcpy(TO,FROM,COUNT)
+#else /* not GNU C or C++ */
+
+/* This is the most reliable way to avoid incompatibilities
+ in available built-in functions on various systems. */
+
+#ifdef __cplusplus
+static void __yy_bcopy (char *from, char *to, int count)
+#else
+#ifdef __STDC__
+static void __yy_bcopy (char *from, char *to, int count)
+#else
+static void __yy_bcopy (from, to, count)
+ char *from;
+ char *to;
+ int count;
+#endif
+#endif
+{
+ register char *f = from;
+ register char *t = to;
+ register int i = count;
+
+ while (i-- > 0)
+ *t++ = *f++;
+}
+#endif
+
+
+int
+#ifdef YY_USE_CLASS
+ YY_@_CLASS::
+#endif
+ YY_@_PARSE(YY_@_PARSE_PARAM)
+#ifndef __STDC__
+#ifndef __cplusplus
+#ifndef YY_USE_CLASS
+/* parameter definition without prototypes */
+YY_@_PARSE_PARAM_DEF
+#endif
+#endif
+#endif
+{
+ register int yystate;
+ register int yyn;
+ register short *yyssp;
+ register YY_@_STYPE *yyvsp;
+ int yyerrstatus; /* number of tokens to shift before error messages enabled */
+ int yychar1=0; /* lookahead token as an internal (translated) token number */
+
+ short yyssa[YYINITDEPTH]; /* the state stack */
+ YY_@_STYPE yyvsa[YYINITDEPTH]; /* the semantic value stack */
+
+ short *yyss = yyssa; /* refer to the stacks thru separate pointers */
+ YY_@_STYPE *yyvs = yyvsa; /* to allow yyoverflow to reallocate them elsewhere */
+
+#ifdef YY_@_LSP_NEEDED
+ YY_@_LTYPE yylsa[YYINITDEPTH]; /* the location stack */
+ YY_@_LTYPE *yyls = yylsa;
+ YY_@_LTYPE *yylsp;
+
+#define YYPOPSTACK (yyvsp--, yyssp--, yylsp--)
+#else
+#define YYPOPSTACK (yyvsp--, yyssp--)
+#endif
+
+ int yystacksize = YYINITDEPTH;
+
+#ifdef YY_@_PURE
+ int YY_@_CHAR;
+ YY_@_STYPE YY_@_LVAL;
+ int YY_@_NERRS;
+#ifdef YY_@_LSP_NEEDED
+ YY_@_LTYPE YY_@_LLOC;
+#endif
+#endif
+
+ YY_@_STYPE yyval; /* the variable used to return */
+ /* semantic values from the action */
+ /* routines */
+
+ int yylen;
+/* start loop, in which YYGOTO may be used. */
+YYBEGINGOTO
+
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ fprintf(stderr, "Starting parse\n");
+#endif
+ yystate = 0;
+ yyerrstatus = 0;
+ YY_@_NERRS = 0;
+ YY_@_CHAR = YYEMPTY; /* Cause a token to be read. */
+
+ /* Initialize stack pointers.
+ Waste one element of value and location stack
+ so that they stay on the same level as the state stack.
+ The wasted elements are never initialized. */
+
+ yyssp = yyss - 1;
+ yyvsp = yyvs;
+#ifdef YY_@_LSP_NEEDED
+ yylsp = yyls;
+#endif
+
+/* Push a new state, which is found in yystate. */
+/* In all cases, when you get here, the value and location stacks
+ have just been pushed. So pushing a state here evens the stacks. */
+YYLABEL(yynewstate)
+
+ *++yyssp = yystate;
+
+ if (yyssp >= yyss + yystacksize - 1)
+ {
+ /* Give user a chance to reallocate the stack */
+ /* Use copies of these so that the &'s don't force the real ones into memory. */
+ YY_@_STYPE *yyvs1 = yyvs;
+ short *yyss1 = yyss;
+#ifdef YY_@_LSP_NEEDED
+ YY_@_LTYPE *yyls1 = yyls;
+#endif
+
+ /* Get the current used size of the three stacks, in elements. */
+ int size = yyssp - yyss + 1;
+
+#ifdef yyoverflow
+ /* Each stack pointer address is followed by the size of
+ the data in use in that stack, in bytes. */
+#ifdef YY_@_LSP_NEEDED
+ /* This used to be a conditional around just the two extra args,
+ but that might be undefined if yyoverflow is a macro. */
+ yyoverflow("parser stack overflow",
+ &yyss1, size * sizeof (*yyssp),
+ &yyvs1, size * sizeof (*yyvsp),
+ &yyls1, size * sizeof (*yylsp),
+ &yystacksize);
+#else
+ yyoverflow("parser stack overflow",
+ &yyss1, size * sizeof (*yyssp),
+ &yyvs1, size * sizeof (*yyvsp),
+ &yystacksize);
+#endif
+
+ yyss = yyss1; yyvs = yyvs1;
+#ifdef YY_@_LSP_NEEDED
+ yyls = yyls1;
+#endif
+#else /* no yyoverflow */
+ /* Extend the stack our own way. */
+ if (yystacksize >= YYMAXDEPTH)
+ {
+ YY_@_ERROR("parser stack overflow");
+ __ALLOCA_return(2);
+ }
+ yystacksize *= 2;
+ if (yystacksize > YYMAXDEPTH)
+ yystacksize = YYMAXDEPTH;
+ yyss = (short *) __ALLOCA_alloca (yystacksize * sizeof (*yyssp));
+ __yy_bcopy ((char *)yyss1, (char *)yyss, size * sizeof (*yyssp));
+ __ALLOCA_free(yyss1,yyssa);
+ yyvs = (YY_@_STYPE *) __ALLOCA_alloca (yystacksize * sizeof (*yyvsp));
+ __yy_bcopy ((char *)yyvs1, (char *)yyvs, size * sizeof (*yyvsp));
+ __ALLOCA_free(yyvs1,yyvsa);
+#ifdef YY_@_LSP_NEEDED
+ yyls = (YY_@_LTYPE *) __ALLOCA_alloca (yystacksize * sizeof (*yylsp));
+ __yy_bcopy ((char *)yyls1, (char *)yyls, size * sizeof (*yylsp));
+ __ALLOCA_free(yyls1,yylsa);
+#endif
+#endif /* no yyoverflow */
+
+ yyssp = yyss + size - 1;
+ yyvsp = yyvs + size - 1;
+#ifdef YY_@_LSP_NEEDED
+ yylsp = yyls + size - 1;
+#endif
+
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ fprintf(stderr, "Stack size increased to %d\n", yystacksize);
+#endif
+
+ if (yyssp >= yyss + yystacksize - 1)
+ YYABORT;
+ }
+
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ fprintf(stderr, "Entering state %d\n", yystate);
+#endif
+
+ YYGOTO(yybackup);
+YYLABEL(yybackup)
+
+/* Do appropriate processing given the current state. */
+/* Read a lookahead token if we need one and don't already have one. */
+/* YYLABEL(yyresume) */
+
+ /* First try to decide what to do without reference to lookahead token. */
+
+ yyn = yypact[yystate];
+ if (yyn == YYFLAG)
+ YYGOTO(yydefault);
+
+ /* Not known => get a lookahead token if don't already have one. */
+
+ /* yychar is either YYEMPTY or YYEOF
+ or a valid token in external form. */
+
+ if (YY_@_CHAR == YYEMPTY)
+ {
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ fprintf(stderr, "Reading a token: ");
+#endif
+ YY_@_CHAR = YYLEX;
+ }
+
+ /* Convert token to internal form (in yychar1) for indexing tables with */
+
+ if (YY_@_CHAR <= 0) /* This means end of input. */
+ {
+ yychar1 = 0;
+ YY_@_CHAR = YYEOF; /* Don't call YYLEX any more */
+
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ fprintf(stderr, "Now at end of input.\n");
+#endif
+ }
+ else
+ {
+ yychar1 = YYTRANSLATE(YY_@_CHAR);
+
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ {
+ fprintf (stderr, "Next token is %d (%s", YY_@_CHAR, yytname[yychar1]);
+ /* Give the individual parser a way to print the precise meaning
+ of a token, for further debugging info. */
+#ifdef YYPRINT
+ YYPRINT (stderr, YY_@_CHAR, YY_@_LVAL);
+#endif
+ fprintf (stderr, ")\n");
+ }
+#endif
+ }
+
+ yyn += yychar1;
+ if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != yychar1)
+ YYGOTO(yydefault);
+
+ yyn = yytable[yyn];
+
+ /* yyn is what to do for this token type in this state.
+ Negative => reduce, -yyn is rule number.
+ Positive => shift, yyn is new state.
+ New state is final state => don't bother to shift,
+ just return success.
+ 0, or most negative number => error. */
+
+ if (yyn < 0)
+ {
+ if (yyn == YYFLAG)
+ YYGOTO(yyerrlab);
+ yyn = -yyn;
+ YYGOTO(yyreduce);
+ }
+ else if (yyn == 0)
+ YYGOTO(yyerrlab);
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+ /* Shift the lookahead token. */
+
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ fprintf(stderr, "Shifting token %d (%s), ", YY_@_CHAR, yytname[yychar1]);
+#endif
+
+ /* Discard the token being shifted unless it is eof. */
+ if (YY_@_CHAR != YYEOF)
+ YY_@_CHAR = YYEMPTY;
+
+ *++yyvsp = YY_@_LVAL;
+#ifdef YY_@_LSP_NEEDED
+ *++yylsp = YY_@_LLOC;
+#endif
+
+ /* count tokens shifted since error; after three, turn off error status. */
+ if (yyerrstatus) yyerrstatus--;
+
+ yystate = yyn;
+ YYGOTO(yynewstate);
+
+/* Do the default action for the current state. */
+YYLABEL(yydefault)
+
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ YYGOTO(yyerrlab);
+
+/* Do a reduction. yyn is the number of a rule to reduce with. */
+YYLABEL(yyreduce)
+ yylen = yyr2[yyn];
+ if (yylen > 0)
+ yyval = yyvsp[1-yylen]; /* implement default value of the action */
+
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ {
+ int i;
+
+ fprintf (stderr, "Reducing via rule %d (line %d), ",
+ yyn, yyrline[yyn]);
+
+ /* Print the symbols being reduced, and their result. */
+ for (i = yyprhs[yyn]; yyrhs[i] > 0; i++)
+ fprintf (stderr, "%s ", yytname[yyrhs[i]]);
+ fprintf (stderr, " -> %s\n", yytname[yyr1[yyn]]);
+ }
+#endif
+
+$ /* the action file gets copied in in place of this dollarsign */
+ yyvsp -= yylen;
+ yyssp -= yylen;
+#ifdef YY_@_LSP_NEEDED
+ yylsp -= yylen;
+#endif
+
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ {
+ short *ssp1 = yyss - 1;
+ fprintf (stderr, "state stack now");
+ while (ssp1 != yyssp)
+ fprintf (stderr, " %d", *++ssp1);
+ fprintf (stderr, "\n");
+ }
+#endif
+
+ *++yyvsp = yyval;
+
+#ifdef YY_@_LSP_NEEDED
+ yylsp++;
+ if (yylen == 0)
+ {
+ yylsp->first_line = YY_@_LLOC.first_line;
+ yylsp->first_column = YY_@_LLOC.first_column;
+ yylsp->last_line = (yylsp-1)->last_line;
+ yylsp->last_column = (yylsp-1)->last_column;
+ yylsp->text = 0;
+ }
+ else
+ {
+ yylsp->last_line = (yylsp+yylen-1)->last_line;
+ yylsp->last_column = (yylsp+yylen-1)->last_column;
+ }
+#endif
+
+ /* Now "shift" the result of the reduction.
+ Determine what state that goes to,
+ based on the state we popped back to
+ and the rule number reduced by. */
+
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTBASE] + *yyssp;
+ if (yystate >= 0 && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTBASE];
+
+ YYGOTO(yynewstate);
+
+YYLABEL(yyerrlab) /* here on detecting error */
+
+ if (! yyerrstatus)
+ /* If not already recovering from an error, report this error. */
+ {
+ ++YY_@_NERRS;
+
+#ifdef YY_@_ERROR_VERBOSE
+ yyn = yypact[yystate];
+
+ if (yyn > YYFLAG && yyn < YYLAST)
+ {
+ int size = 0;
+ char *msg;
+ int x, count;
+
+ count = 0;
+ /* Start X at -yyn if nec to avoid negative indexes in yycheck. */
+ for (x = (yyn < 0 ? -yyn : 0);
+ x < (sizeof(yytname) / sizeof(char *)); x++)
+ if (yycheck[x + yyn] == x)
+ size += strlen(yytname[x]) + 15, count++;
+ msg = (char *) malloc(size + 15);
+ if (msg != 0)
+ {
+ strcpy(msg, "parse error");
+
+ if (count < 5)
+ {
+ count = 0;
+ for (x = (yyn < 0 ? -yyn : 0);
+ x < (sizeof(yytname) / sizeof(char *)); x++)
+ if (yycheck[x + yyn] == x)
+ {
+ strcat(msg, count == 0 ? ", expecting `" : " or `");
+ strcat(msg, yytname[x]);
+ strcat(msg, "'");
+ count++;
+ }
+ }
+ YY_@_ERROR(msg);
+ free(msg);
+ }
+ else
+ YY_@_ERROR ("parse error; also virtual memory exceeded");
+ }
+ else
+#endif /* YY_@_ERROR_VERBOSE */
+ YY_@_ERROR("parse error");
+ }
+
+ YYGOTO(yyerrlab1);
+YYLABEL(yyerrlab1) /* here on error raised explicitly by an action */
+
+ if (yyerrstatus == 3)
+ {
+ /* if just tried and failed to reuse lookahead token after an error, discard it. */
+
+ /* return failure if at end of input */
+ if (YY_@_CHAR == YYEOF)
+ YYABORT;
+
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ fprintf(stderr, "Discarding token %d (%s).\n", YY_@_CHAR, yytname[yychar1]);
+#endif
+
+ YY_@_CHAR = YYEMPTY;
+ }
+
+ /* Else will try to reuse lookahead token
+ after shifting the error token. */
+
+ yyerrstatus = 3; /* Each real token shifted decrements this */
+
+ YYGOTO(yyerrhandle);
+
+YYLABEL(yyerrdefault) /* current state does not do anything special for the error token. */
+
+#if 0
+ /* This is wrong; only states that explicitly want error tokens
+ should shift them. */
+ yyn = yydefact[yystate]; /* If its default is to accept any token, ok. Otherwise pop it.*/
+ if (yyn) YYGOTO(yydefault);
+#endif
+
+YYLABEL(yyerrpop) /* pop the current state because it cannot handle the error token */
+
+ if (yyssp == yyss) YYABORT;
+ yyvsp--;
+ yystate = *--yyssp;
+#ifdef YY_@_LSP_NEEDED
+ yylsp--;
+#endif
+
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ {
+ short *ssp1 = yyss - 1;
+ fprintf (stderr, "Error: state stack now");
+ while (ssp1 != yyssp)
+ fprintf (stderr, " %d", *++ssp1);
+ fprintf (stderr, "\n");
+ }
+#endif
+
+YYLABEL(yyerrhandle)
+
+ yyn = yypact[yystate];
+ if (yyn == YYFLAG)
+ YYGOTO(yyerrdefault);
+
+ yyn += YYTERROR;
+ if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != YYTERROR)
+ YYGOTO(yyerrdefault);
+
+ yyn = yytable[yyn];
+ if (yyn < 0)
+ {
+ if (yyn == YYFLAG)
+ YYGOTO(yyerrpop);
+ yyn = -yyn;
+ YYGOTO(yyreduce);
+ }
+ else if (yyn == 0)
+ YYGOTO(yyerrpop);
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+#if YY_@_DEBUG != 0
+ if (YY_@_DEBUG_FLAG)
+ fprintf(stderr, "Shifting error token, ");
+#endif
+
+ *++yyvsp = YY_@_LVAL;
+#ifdef YY_@_LSP_NEEDED
+ *++yylsp = YY_@_LLOC;
+#endif
+
+ yystate = yyn;
+ YYGOTO(yynewstate);
+/* end loop, in which YYGOTO may be used. */
+ YYENDGOTO
+}
+
+/* END */
+$ /* section 3 */
+
+/* AFTER END , NEVER READ !!! */
diff --git a/tools/bison++/bison.cld b/tools/bison++/bison.cld
new file mode 100644
index 000000000..d57216f9c
--- /dev/null
+++ b/tools/bison++/bison.cld
@@ -0,0 +1,18 @@
+!
+! VMS BISON command definition file
+!
+DEFINE VERB BISON
+ IMAGE GNU_BISON:[000000]BISON
+
+ PARAMETER P1,Label=BISON$INFILE,Prompt="File"
+ value(required,type=$infile)
+ QUALIFIER VERBOSE,Label=BISON$VERBOSE
+ QUALIFIER DEFINES,Label=BISON$DEFINES
+ QUALIFIER FIXED_OUTFILES,Label=BISON$FIXED_OUTFILES
+ qualifier nolines,Label=BISON$NOLINES
+ qualifier debug,Label=BISON$DEBUG
+ qualifier output,value(type=$outfile),Label=BISON$OUTPUT
+ qualifier version,label=BISON$VERSION
+ qualifier yacc,label=BISON$YACC
+ qualifier file_prefix,value(type=$outfile),label=BISON$FILE_PREFIX
+ qualifier name_prefix,value(type=$outfile),LABEL=BISON$NAME_PREFIX
diff --git a/tools/bison++/bison.h b/tools/bison++/bison.h
new file mode 100644
index 000000000..da4423726
--- /dev/null
+++ b/tools/bison++/bison.h
@@ -0,0 +1,270 @@
+/* before anything */
+#ifdef c_plusplus
+ #ifndef __cplusplus
+ #define __cplusplus
+ #endif
+#endif
+
+$ /* If we generate a class #define YY_USE_CLASS goes here*/
+
+#ifndef __cplusplus
+ #ifdef YY_USE_CLASS
+ #error "This is a C++ header generated by bison++, use a C++ compiler!"
+ #endif
+#else
+ #ifndef YY_USE_CLASS
+/* #warning "For C++ its recomended to use bison++, otherwise classes won't be generated"*/
+ #endif
+#endif
+
+#include <stdio.h>
+$ /* %{ and %header{ and %union, during decl */
+#ifndef YY_@_COMPATIBILITY
+ #ifndef YY_USE_CLASS
+ #define YY_@_COMPATIBILITY 1
+ #else
+ #define YY_@_COMPATIBILITY 0
+ #endif
+#endif
+
+#if YY_@_COMPATIBILITY != 0
+/* backward compatibility */
+ #ifdef YYLTYPE
+ #ifndef YY_@_LTYPE
+ #define YY_@_LTYPE YYLTYPE
+/* WARNING obsolete !!! user defined YYLTYPE not reported into generated header */
+/* use %define LTYPE */
+ #endif
+ #endif
+/*#ifdef YYSTYPE*/
+ #ifndef YY_@_STYPE
+ #define YY_@_STYPE YYSTYPE
+ /* WARNING obsolete !!! user defined YYSTYPE not reported into generated header */
+ /* use %define STYPE */
+ #endif
+/*#endif*/
+ #ifdef YYDEBUG
+ #ifndef YY_@_DEBUG
+ #define YY_@_DEBUG YYDEBUG
+ /* WARNING obsolete !!! user defined YYDEBUG not reported into generated header */
+ /* use %define DEBUG */
+ #endif
+ #endif
+ /* use goto to be compatible */
+ #ifndef YY_@_USE_GOTO
+ #define YY_@_USE_GOTO 1
+ #endif
+#endif
+
+/* use no goto to be clean in C++ */
+#ifndef YY_@_USE_GOTO
+ #define YY_@_USE_GOTO 0
+#endif
+
+#ifndef YY_@_PURE
+$/* YY_@_PURE */
+#endif
+
+$/* prefix */
+
+#ifndef YY_@_DEBUG
+$/* YY_@_DEBUG */
+#endif
+
+#ifndef YY_@_LSP_NEEDED
+$ /* YY_@_LSP_NEEDED*/
+#endif
+
+/* DEFAULT LTYPE*/
+#ifdef YY_@_LSP_NEEDED
+ #ifndef YY_@_LTYPE
+ #ifndef BISON_YYLTYPE_ISDECLARED
+ #define BISON_YYLTYPE_ISDECLARED
+typedef
+ struct yyltype
+ {
+ int timestamp;
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+ char *text;
+ }
+ yyltype;
+ #endif
+
+ #define YY_@_LTYPE yyltype
+ #endif
+#endif
+
+/* DEFAULT STYPE*/
+#ifndef YY_@_STYPE
+ #define YY_@_STYPE int
+#endif
+
+/* DEFAULT MISCELLANEOUS */
+#ifndef YY_@_PARSE
+ #define YY_@_PARSE yyparse
+#endif
+
+#ifndef YY_@_LEX
+ #define YY_@_LEX yylex
+#endif
+
+#ifndef YY_@_LVAL
+ #define YY_@_LVAL yylval
+#endif
+
+#ifndef YY_@_LLOC
+ #define YY_@_LLOC yylloc
+#endif
+
+#ifndef YY_@_CHAR
+ #define YY_@_CHAR yychar
+#endif
+
+#ifndef YY_@_NERRS
+ #define YY_@_NERRS yynerrs
+#endif
+
+#ifndef YY_@_DEBUG_FLAG
+ #define YY_@_DEBUG_FLAG yydebug
+#endif
+
+#ifndef YY_@_ERROR
+ #define YY_@_ERROR yyerror
+#endif
+
+#ifndef YY_@_PARSE_PARAM
+ #ifndef __STDC__
+ #ifndef __cplusplus
+ #ifndef YY_USE_CLASS
+ #define YY_@_PARSE_PARAM
+ #ifndef YY_@_PARSE_PARAM_DEF
+ #define YY_@_PARSE_PARAM_DEF
+ #endif
+ #endif
+ #endif
+ #endif
+ #ifndef YY_@_PARSE_PARAM
+ #define YY_@_PARSE_PARAM void
+ #endif
+#endif
+
+/* TOKEN C */
+#ifndef YY_USE_CLASS
+
+ #ifndef YY_@_PURE
+ #ifndef yylval
+ extern YY_@_STYPE YY_@_LVAL;
+ #else
+ #if yylval != YY_@_LVAL
+ extern YY_@_STYPE YY_@_LVAL;
+ #else
+ #warning "Namespace conflict, disabling some functionality (bison++ only)"
+ #endif
+ #endif
+ #endif
+
+$ /* #defines token */
+/* after #define tokens, before const tokens S5*/
+#else
+ #ifndef YY_@_CLASS
+ #define YY_@_CLASS @
+ #endif
+
+ #ifndef YY_@_INHERIT
+ #define YY_@_INHERIT
+ #endif
+
+ #ifndef YY_@_MEMBERS
+ #define YY_@_MEMBERS
+ #endif
+
+ #ifndef YY_@_LEX_BODY
+ #define YY_@_LEX_BODY
+ #endif
+
+ #ifndef YY_@_ERROR_BODY
+ #define YY_@_ERROR_BODY
+ #endif
+
+ #ifndef YY_@_CONSTRUCTOR_PARAM
+ #define YY_@_CONSTRUCTOR_PARAM
+ #endif
+ /* choose between enum and const */
+ #ifndef YY_@_USE_CONST_TOKEN
+ #define YY_@_USE_CONST_TOKEN 0
+ /* yes enum is more compatible with flex, */
+ /* so by default we use it */
+ #endif
+ #if YY_@_USE_CONST_TOKEN != 0
+ #ifndef YY_@_ENUM_TOKEN
+ #define YY_@_ENUM_TOKEN yy_@_enum_token
+ #endif
+ #endif
+
+class YY_@_CLASS YY_@_INHERIT
+{
+public:
+ #if YY_@_USE_CONST_TOKEN != 0
+ /* static const int token ... */
+ $ /* decl const */
+ #else
+ enum YY_@_ENUM_TOKEN { YY_@_NULL_TOKEN=0
+ $ /* enum token */
+ }; /* end of enum declaration */
+ #endif
+public:
+ int YY_@_PARSE(YY_@_PARSE_PARAM);
+ virtual void YY_@_ERROR(char *msg) YY_@_ERROR_BODY;
+ #ifdef YY_@_PURE
+ #ifdef YY_@_LSP_NEEDED
+ virtual int YY_@_LEX(YY_@_STYPE *YY_@_LVAL,YY_@_LTYPE *YY_@_LLOC) YY_@_LEX_BODY;
+ #else
+ virtual int YY_@_LEX(YY_@_STYPE *YY_@_LVAL) YY_@_LEX_BODY;
+ #endif
+ #else
+ virtual int YY_@_LEX() YY_@_LEX_BODY;
+ YY_@_STYPE YY_@_LVAL;
+ #ifdef YY_@_LSP_NEEDED
+ YY_@_LTYPE YY_@_LLOC;
+ #endif
+ int YY_@_NERRS;
+ int YY_@_CHAR;
+ #endif
+ #if YY_@_DEBUG != 0
+ public:
+ int YY_@_DEBUG_FLAG; /* nonzero means print parse trace */
+ #endif
+public:
+ YY_@_CLASS(YY_@_CONSTRUCTOR_PARAM);
+public:
+ YY_@_MEMBERS
+};
+/* other declarations follow */
+#endif
+
+
+#if YY_@_COMPATIBILITY != 0
+ /* backward compatibility */
+ /* Removed due to bison problems
+ /#ifndef YYSTYPE
+ / #define YYSTYPE YY_@_STYPE
+ /#endif*/
+
+ #ifndef YYLTYPE
+ #define YYLTYPE YY_@_LTYPE
+ #endif
+ #ifndef YYDEBUG
+ #ifdef YY_@_DEBUG
+ #define YYDEBUG YY_@_DEBUG
+ #endif
+ #endif
+
+#endif
+/* END */
+$ /* section 3 %header{ */
+ /* AFTER END , NEVER READ !!! */
+
+
diff --git a/tools/bison++/bison.hairy b/tools/bison++/bison.hairy
new file mode 100644
index 000000000..8ec07c210
--- /dev/null
+++ b/tools/bison++/bison.hairy
@@ -0,0 +1,341 @@
+/* header section */
+#include <stdio.h>
+#ifndef __STDC__
+#define const
+#endif
+
+
+$
+extern int timeclock;
+
+
+int yyerror; /* Yyerror and yycost are set by guards. */
+int yycost; /* If yyerror is set to a nonzero value by a */
+ /* guard, the reduction with which the guard */
+ /* is associated is not performed, and the */
+ /* error recovery mechanism is invoked. */
+ /* Yycost indicates the cost of performing */
+ /* the reduction given the attributes of the */
+ /* symbols. */
+
+
+/* YYMAXDEPTH indicates the size of the parser's state and value */
+/* stacks. */
+
+#ifndef YYMAXDEPTH
+#define YYMAXDEPTH 500
+#endif
+
+/* YYMAXRULES must be at least as large as the number of rules that */
+/* could be placed in the rule queue. That number could be determined */
+/* from the grammar and the size of the stack, but, as yet, it is not. */
+
+#ifndef YYMAXRULES
+#define YYMAXRULES 100
+#endif
+
+#ifndef YYMAXBACKUP
+#define YYMAXBACKUP 100
+#endif
+
+
+short yyss[YYMAXDEPTH]; /* the state stack */
+YYSTYPE yyvs[YYMAXDEPTH]; /* the semantic value stack */
+YYLTYPE yyls[YYMAXDEPTH]; /* the location stack */
+short yyrq[YYMAXRULES]; /* the rule queue */
+int yychar; /* the lookahead symbol */
+
+YYSTYPE yylval; /* the semantic value of the */
+ /* lookahead symbol */
+
+YYSTYPE yytval; /* the semantic value for the state */
+ /* at the top of the state stack. */
+
+YYSTYPE yyval; /* the variable used to return */
+ /* semantic values from the action */
+ /* routines */
+
+YYLTYPE yylloc; /* location data for the lookahead */
+ /* symbol */
+
+YYLTYPE yytloc; /* location data for the state at the */
+ /* top of the state stack */
+
+
+int yynunlexed;
+short yyunchar[YYMAXBACKUP];
+YYSTYPE yyunval[YYMAXBACKUP];
+YYLTYPE yyunloc[YYMAXBACKUP];
+
+short *yygssp; /* a pointer to the top of the state */
+ /* stack; only set during error */
+ /* recovery. */
+
+YYSTYPE *yygvsp; /* a pointer to the top of the value */
+ /* stack; only set during error */
+ /* recovery. */
+
+YYLTYPE *yyglsp; /* a pointer to the top of the */
+ /* location stack; only set during */
+ /* error recovery. */
+
+
+/* Yyget is an interface between the parser and the lexical analyzer. */
+/* It is costly to provide such an interface, but it avoids requiring */
+/* the lexical analyzer to be able to back up the scan. */
+
+yyget()
+{
+ if (yynunlexed > 0)
+ {
+ yynunlexed--;
+ yychar = yyunchar[yynunlexed];
+ yylval = yyunval[yynunlexed];
+ yylloc = yyunloc[yynunlexed];
+ }
+ else if (yychar <= 0)
+ yychar = 0;
+ else
+ {
+ yychar = yylex();
+ if (yychar < 0)
+ yychar = 0;
+ else yychar = YYTRANSLATE(yychar);
+ }
+}
+
+
+
+yyunlex(chr, val, loc)
+int chr;
+YYSTYPE val;
+YYLTYPE loc;
+{
+ yyunchar[yynunlexed] = chr;
+ yyunval[yynunlexed] = val;
+ yyunloc[yynunlexed] = loc;
+ yynunlexed++;
+}
+
+
+
+yyrestore(first, last)
+register short *first;
+register short *last;
+{
+ register short *ssp;
+ register short *rp;
+ register int symbol;
+ register int state;
+ register int tvalsaved;
+
+ ssp = yygssp;
+ yyunlex(yychar, yylval, yylloc);
+
+ tvalsaved = 0;
+ while (first != last)
+ {
+ symbol = yystos[*ssp];
+ if (symbol < YYNTBASE)
+ {
+ yyunlex(symbol, yytval, yytloc);
+ tvalsaved = 1;
+ ssp--;
+ }
+
+ ssp--;
+
+ if (first == yyrq)
+ first = yyrq + YYMAXRULES;
+
+ first--;
+
+ for (rp = yyrhs + yyprhs[*first]; symbol = *rp; rp++)
+ {
+ if (symbol < YYNTBASE)
+ state = yytable[yypact[*ssp] + symbol];
+ else
+ {
+ state = yypgoto[symbol - YYNTBASE] + *ssp;
+
+ if (state >= 0 && state <= YYLAST && yycheck[state] == *ssp)
+ state = yytable[state];
+ else
+ state = yydefgoto[symbol - YYNTBASE];
+ }
+
+ *++ssp = state;
+ }
+ }
+
+ if ( ! tvalsaved && ssp > yyss)
+ {
+ yyunlex(yystos[*ssp], yytval, yytloc);
+ ssp--;
+ }
+
+ yygssp = ssp;
+}
+
+
+
+int
+yyparse()
+{
+ register int yystate;
+ register int yyn;
+ register short *yyssp;
+ register short *yyrq0;
+ register short *yyptr;
+ register YYSTYPE *yyvsp;
+
+ int yylen;
+ YYLTYPE *yylsp;
+ short *yyrq1;
+ short *yyrq2;
+
+ yystate = 0;
+ yyssp = yyss - 1;
+ yyvsp = yyvs - 1;
+ yylsp = yyls - 1;
+ yyrq0 = yyrq;
+ yyrq1 = yyrq0;
+ yyrq2 = yyrq0;
+
+ yychar = yylex();
+ if (yychar < 0)
+ yychar = 0;
+ else yychar = YYTRANSLATE(yychar);
+
+yynewstate:
+
+ if (yyssp >= yyss + YYMAXDEPTH - 1)
+ {
+ yyabort("Parser Stack Overflow");
+ YYABORT;
+ }
+
+ *++yyssp = yystate;
+
+yyresume:
+
+ yyn = yypact[yystate];
+ if (yyn == YYFLAG)
+ goto yydefault;
+
+ yyn += yychar;
+ if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != yychar)
+ goto yydefault;
+
+ yyn = yytable[yyn];
+ if (yyn < 0)
+ {
+ yyn = -yyn;
+ goto yyreduce;
+ }
+ else if (yyn == 0)
+ goto yyerrlab;
+
+ yystate = yyn;
+
+ yyptr = yyrq2;
+ while (yyptr != yyrq1)
+ {
+ yyn = *yyptr++;
+ yylen = yyr2[yyn];
+ yyvsp -= yylen;
+ yylsp -= yylen;
+
+ yyguard(yyn, yyvsp, yylsp);
+ if (yyerror)
+ goto yysemerr;
+
+ yyaction(yyn, yyvsp, yylsp);
+ *++yyvsp = yyval;
+
+ yylsp++;
+ if (yylen == 0)
+ {
+ yylsp->timestamp = timeclock;
+ yylsp->first_line = yytloc.first_line;
+ yylsp->first_column = yytloc.first_column;
+ yylsp->last_line = (yylsp-1)->last_line;
+ yylsp->last_column = (yylsp-1)->last_column;
+ yylsp->text = 0;
+ }
+ else
+ {
+ yylsp->last_line = (yylsp+yylen-1)->last_line;
+ yylsp->last_column = (yylsp+yylen-1)->last_column;
+ }
+
+ if (yyptr == yyrq + YYMAXRULES)
+ yyptr = yyrq;
+ }
+
+ if (yystate == YYFINAL)
+ YYACCEPT;
+
+ yyrq2 = yyptr;
+ yyrq1 = yyrq0;
+
+ *++yyvsp = yytval;
+ *++yylsp = yytloc;
+ yytval = yylval;
+ yytloc = yylloc;
+ yyget();
+
+ goto yynewstate;
+
+yydefault:
+
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+
+yyreduce:
+
+ *yyrq0++ = yyn;
+
+ if (yyrq0 == yyrq + YYMAXRULES)
+ yyrq0 = yyrq;
+
+ if (yyrq0 == yyrq2)
+ {
+ yyabort("Parser Rule Queue Overflow");
+ YYABORT;
+ }
+
+ yyssp -= yyr2[yyn];
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTBASE] + *yyssp;
+ if (yystate >= 0 && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTBASE];
+
+ goto yynewstate;
+
+yysemerr:
+ *--yyptr = yyn;
+ yyrq2 = yyptr;
+ yyvsp += yyr2[yyn];
+
+yyerrlab:
+
+ yygssp = yyssp;
+ yygvsp = yyvsp;
+ yyglsp = yylsp;
+ yyrestore(yyrq0, yyrq2);
+ yyrecover();
+ yystate = *yygssp;
+ yyssp = yygssp;
+ yyvsp = yygvsp;
+ yyrq0 = yyrq;
+ yyrq1 = yyrq0;
+ yyrq2 = yyrq0;
+ goto yyresume;
+}
+
+$
diff --git a/tools/bison++/bison.info b/tools/bison++/bison.info
new file mode 100644
index 000000000..f34865a5c
--- /dev/null
+++ b/tools/bison++/bison.info
@@ -0,0 +1,132 @@
+This is bison.info, produced by makeinfo version 4.1 from bison.texinfo.
+
+START-INFO-DIR-ENTRY
+* bison: (bison). GNU Project parser generator (yacc replacement).
+END-INFO-DIR-ENTRY
+
+ This file documents the Bison parser generator.
+
+ Copyright (C) 1988, 89, 90, 91, 92, 93, 95, 98, 1999 Free Software
+Foundation, Inc.
+
+ Permission is granted to make and distribute verbatim copies of this
+manual provided the copyright notice and this permission notice are
+preserved on all copies.
+
+ Permission is granted to copy and distribute modified versions of
+this manual under the conditions for verbatim copying, provided also
+that the sections entitled "GNU General Public License" and "Conditions
+for Using Bison" are included exactly as in the original, and provided
+that the entire resulting derived work is distributed under the terms
+of a permission notice identical to this one.
+
+ Permission is granted to copy and distribute translations of this
+manual into another language, under the above conditions for modified
+versions, except that the sections entitled "GNU General Public
+License", "Conditions for Using Bison" and this permission notice may be
+included in translations approved by the Free Software Foundation
+instead of in the original English.
+
+
+Indirect:
+bison.info-1: 1265
+bison.info-2: 50236
+bison.info-3: 98017
+bison.info-4: 146975
+bison.info-5: 195679
+
+Tag Table:
+(Indirect)
+Node: Top1265
+Node: Introduction8502
+Node: Conditions9777
+Node: Copying11243
+Node: Concepts30433
+Node: Language and Grammar31466
+Node: Grammar in Bison36482
+Node: Semantic Values38406
+Node: Semantic Actions40507
+Node: Bison Parser41690
+Node: Stages44000
+Node: Grammar Layout45283
+Node: Examples46540
+Node: RPN Calc47675
+Node: Rpcalc Decls48649
+Node: Rpcalc Rules50236
+Node: Rpcalc Input52036
+Node: Rpcalc Line53497
+Node: Rpcalc Expr54612
+Node: Rpcalc Lexer56557
+Node: Rpcalc Main59116
+Node: Rpcalc Error59494
+Node: Rpcalc Gen60498
+Node: Rpcalc Compile61646
+Node: Infix Calc62521
+Node: Simple Error Recovery65228
+Node: Multi-function Calc67114
+Node: Mfcalc Decl68681
+Node: Mfcalc Rules70704
+Node: Mfcalc Symtab72084
+Node: Exercises78326
+Node: Grammar File78832
+Node: Grammar Outline79600
+Node: C Declarations80334
+Node: Bison Declarations80914
+Node: Grammar Rules81326
+Node: C Code81786
+Node: Symbols82716
+Node: Rules87795
+Node: Recursion89433
+Node: Semantics91144
+Node: Value Type92241
+Node: Multiple Types92913
+Node: Actions93929
+Node: Action Types96714
+Node: Mid-Rule Actions98017
+Node: Declarations103586
+Node: Token Decl104905
+Node: Precedence Decl106901
+Node: Union Decl108452
+Node: Type Decl109296
+Node: Expect Decl110202
+Node: Start Decl111748
+Node: Pure Decl112126
+Node: Decl Summary113801
+Node: Multiple Parsers117315
+Node: Interface118809
+Node: Parser Function119681
+Node: Lexical120516
+Node: Calling Convention121922
+Node: Token Values124681
+Node: Token Positions125829
+Node: Pure Calling126721
+Node: Error Reporting129678
+Node: Action Features131802
+Node: Algorithm135463
+Node: Look-Ahead137756
+Node: Shift/Reduce139888
+Node: Precedence142800
+Node: Why Precedence143451
+Node: Using Precedence145306
+Node: Precedence Examples146274
+Node: How Precedence146975
+Node: Contextual Precedence148124
+Node: Parser States149915
+Node: Reduce/Reduce151158
+Node: Mystery Conflicts154719
+Node: Stack Overflow158105
+Node: Error Recovery159478
+Node: Context Dependency164614
+Node: Semantic Tokens165462
+Node: Lexical Tie-ins168479
+Node: Tie-in Recovery170027
+Node: Debugging172199
+Node: Invocation175549
+Node: Bison Options176212
+Node: Option Cross Key180325
+Node: VMS Invocation181207
+Node: Table of Symbols181991
+Node: Glossary189388
+Node: Index195679
+
+End Tag Table
diff --git a/tools/bison++/bison.info-1 b/tools/bison++/bison.info-1
new file mode 100644
index 000000000..3b458c6ed
--- /dev/null
+++ b/tools/bison++/bison.info-1
@@ -0,0 +1,1070 @@
+This is bison.info, produced by makeinfo version 4.1 from bison.texinfo.
+
+START-INFO-DIR-ENTRY
+* bison: (bison). GNU Project parser generator (yacc replacement).
+END-INFO-DIR-ENTRY
+
+ This file documents the Bison parser generator.
+
+ Copyright (C) 1988, 89, 90, 91, 92, 93, 95, 98, 1999 Free Software
+Foundation, Inc.
+
+ Permission is granted to make and distribute verbatim copies of this
+manual provided the copyright notice and this permission notice are
+preserved on all copies.
+
+ Permission is granted to copy and distribute modified versions of
+this manual under the conditions for verbatim copying, provided also
+that the sections entitled "GNU General Public License" and "Conditions
+for Using Bison" are included exactly as in the original, and provided
+that the entire resulting derived work is distributed under the terms
+of a permission notice identical to this one.
+
+ Permission is granted to copy and distribute translations of this
+manual into another language, under the above conditions for modified
+versions, except that the sections entitled "GNU General Public
+License", "Conditions for Using Bison" and this permission notice may be
+included in translations approved by the Free Software Foundation
+instead of in the original English.
+
+
+File: bison.info, Node: Top, Next: Introduction, Prev: (dir), Up: (dir)
+
+ This manual documents version 2.21.5 of Bison.
+
+* Menu:
+
+* Introduction::
+* Conditions::
+* Copying:: The GNU General Public License says
+ how you can copy and share Bison
+
+Tutorial sections:
+* Concepts:: Basic concepts for understanding Bison.
+* Examples:: Three simple explained examples of using Bison.
+
+Reference sections:
+* Grammar File:: Writing Bison declarations and rules.
+* Interface:: C-language interface to the parser function `yyparse'.
+* Algorithm:: How the Bison parser works at run-time.
+* Error Recovery:: Writing rules for error recovery.
+* Context Dependency:: What to do if your language syntax is too
+ messy for Bison to handle straightforwardly.
+* Debugging:: Debugging Bison parsers that parse wrong.
+* Invocation:: How to run Bison (to produce the parser source file).
+* Table of Symbols:: All the keywords of the Bison language are explained.
+* Glossary:: Basic concepts are explained.
+* Index:: Cross-references to the text.
+
+ --- The Detailed Node Listing ---
+
+The Concepts of Bison
+
+* Language and Grammar:: Languages and context-free grammars,
+ as mathematical ideas.
+* Grammar in Bison:: How we represent grammars for Bison's sake.
+* Semantic Values:: Each token or syntactic grouping can have
+ a semantic value (the value of an integer,
+ the name of an identifier, etc.).
+* Semantic Actions:: Each rule can have an action containing C code.
+* Bison Parser:: What are Bison's input and output,
+ how is the output used?
+* Stages:: Stages in writing and running Bison grammars.
+* Grammar Layout:: Overall structure of a Bison grammar file.
+
+Examples
+
+* RPN Calc:: Reverse polish notation calculator;
+ a first example with no operator precedence.
+* Infix Calc:: Infix (algebraic) notation calculator.
+ Operator precedence is introduced.
+* Simple Error Recovery:: Continuing after syntax errors.
+* Multi-function Calc:: Calculator with memory and trig functions.
+ It uses multiple data-types for semantic values.
+* Exercises:: Ideas for improving the multi-function calculator.
+
+Reverse Polish Notation Calculator
+
+* Decls: Rpcalc Decls. Bison and C declarations for rpcalc.
+* Rules: Rpcalc Rules. Grammar Rules for rpcalc, with explanation.
+* Lexer: Rpcalc Lexer. The lexical analyzer.
+* Main: Rpcalc Main. The controlling function.
+* Error: Rpcalc Error. The error reporting function.
+* Gen: Rpcalc Gen. Running Bison on the grammar file.
+* Comp: Rpcalc Compile. Run the C compiler on the output code.
+
+Grammar Rules for `rpcalc'
+
+* Rpcalc Input::
+* Rpcalc Line::
+* Rpcalc Expr::
+
+Multi-Function Calculator: `mfcalc'
+
+* Decl: Mfcalc Decl. Bison declarations for multi-function calculator.
+* Rules: Mfcalc Rules. Grammar rules for the calculator.
+* Symtab: Mfcalc Symtab. Symbol table management subroutines.
+
+Bison Grammar Files
+
+* Grammar Outline:: Overall layout of the grammar file.
+* Symbols:: Terminal and nonterminal symbols.
+* Rules:: How to write grammar rules.
+* Recursion:: Writing recursive rules.
+* Semantics:: Semantic values and actions.
+* Declarations:: All kinds of Bison declarations are described here.
+* Multiple Parsers:: Putting more than one Bison parser in one program.
+
+Outline of a Bison Grammar
+
+* C Declarations:: Syntax and usage of the C declarations section.
+* Bison Declarations:: Syntax and usage of the Bison declarations section.
+* Grammar Rules:: Syntax and usage of the grammar rules section.
+* C Code:: Syntax and usage of the additional C code section.
+
+Defining Language Semantics
+
+* Value Type:: Specifying one data type for all semantic values.
+* Multiple Types:: Specifying several alternative data types.
+* Actions:: An action is the semantic definition of a grammar rule.
+* Action Types:: Specifying data types for actions to operate on.
+* Mid-Rule Actions:: Most actions go at the end of a rule.
+ This says when, why and how to use the exceptional
+ action in the middle of a rule.
+
+Bison Declarations
+
+* Token Decl:: Declaring terminal symbols.
+* Precedence Decl:: Declaring terminals with precedence and associativity.
+* Union Decl:: Declaring the set of all semantic value types.
+* Type Decl:: Declaring the choice of type for a nonterminal symbol.
+* Expect Decl:: Suppressing warnings about shift/reduce conflicts.
+* Start Decl:: Specifying the start symbol.
+* Pure Decl:: Requesting a reentrant parser.
+* Decl Summary:: Table of all Bison declarations.
+
+Parser C-Language Interface
+
+* Parser Function:: How to call `yyparse' and what it returns.
+* Lexical:: You must supply a function `yylex'
+ which reads tokens.
+* Error Reporting:: You must supply a function `yyerror'.
+* Action Features:: Special features for use in actions.
+
+The Lexical Analyzer Function `yylex'
+
+* Calling Convention:: How `yyparse' calls `yylex'.
+* Token Values:: How `yylex' must return the semantic value
+ of the token it has read.
+* Token Positions:: How `yylex' must return the text position
+ (line number, etc.) of the token, if the
+ actions want that.
+* Pure Calling:: How the calling convention differs
+ in a pure parser (*note A Pure (Reentrant) Parser: Pure Decl.).
+
+The Bison Parser Algorithm
+
+* Look-Ahead:: Parser looks one token ahead when deciding what to do.
+* Shift/Reduce:: Conflicts: when either shifting or reduction is valid.
+* Precedence:: Operator precedence works by resolving conflicts.
+* Contextual Precedence:: When an operator's precedence depends on context.
+* Parser States:: The parser is a finite-state-machine with stack.
+* Reduce/Reduce:: When two rules are applicable in the same situation.
+* Mystery Conflicts:: Reduce/reduce conflicts that look unjustified.
+* Stack Overflow:: What happens when stack gets full. How to avoid it.
+
+Operator Precedence
+
+* Why Precedence:: An example showing why precedence is needed.
+* Using Precedence:: How to specify precedence in Bison grammars.
+* Precedence Examples:: How these features are used in the previous example.
+* How Precedence:: How they work.
+
+Handling Context Dependencies
+
+* Semantic Tokens:: Token parsing can depend on the semantic context.
+* Lexical Tie-ins:: Token parsing can depend on the syntactic context.
+* Tie-in Recovery:: Lexical tie-ins have implications for how
+ error recovery rules must be written.
+
+Invoking Bison
+
+* Bison Options:: All the options described in detail,
+ in alphabetical order by short options.
+* Option Cross Key:: Alphabetical list of long options.
+* VMS Invocation:: Bison command syntax on VMS.
+
+
+File: bison.info, Node: Introduction, Next: Conditions, Prev: Top, Up: Top
+
+Introduction
+************
+
+ "Bison" is a general-purpose parser generator that converts a
+grammar description for an LALR(1) context-free grammar into a C
+program to parse that grammar. Once you are proficient with Bison, you
+may use it to develop a wide range of language parsers, from those used
+in simple desk calculators to complex programming languages.
+
+ Bison is upward compatible with Yacc: all properly-written Yacc
+grammars ought to work with Bison with no change. Anyone familiar with
+Yacc should be able to use Bison with little trouble. You need to be
+fluent in C programming in order to use Bison or to understand this
+manual.
+
+ We begin with tutorial chapters that explain the basic concepts of
+using Bison and show three explained examples, each building on the
+last. If you don't know Bison or Yacc, start by reading these
+chapters. Reference chapters follow which describe specific aspects of
+Bison in detail.
+
+ Bison was written primarily by Robert Corbett; Richard Stallman made
+it Yacc-compatible. Wilfred Hansen of Carnegie Mellon University added
+multicharacter string literals and other features.
+
+ This edition corresponds to version 2.21.5 of Bison.
+
+
+File: bison.info, Node: Conditions, Next: Copying, Prev: Introduction, Up: Top
+
+Conditions for Using Bison
+**************************
+
+ As of Bison version 1.24, we have changed the distribution terms for
+`yyparse' to permit using Bison's output in non-free programs.
+Formerly, Bison parsers could be used only in programs that were free
+software.
+
+ The other GNU programming tools, such as the GNU C compiler, have
+never had such a requirement. They could always be used for non-free
+software. The reason Bison was different was not due to a special
+policy decision; it resulted from applying the usual General Public
+License to all of the Bison source code.
+
+ The output of the Bison utility--the Bison parser file--contains a
+verbatim copy of a sizable piece of Bison, which is the code for the
+`yyparse' function. (The actions from your grammar are inserted into
+this function at one point, but the rest of the function is not
+changed.) When we applied the GPL terms to the code for `yyparse', the
+effect was to restrict the use of Bison output to free software.
+
+ We didn't change the terms because of sympathy for people who want to
+make software proprietary. *Software should be free.* But we
+concluded that limiting Bison's use to free software was doing little to
+encourage people to make other software free. So we decided to make the
+practical conditions for using Bison match the practical conditions for
+using the other GNU tools.
+
+
+File: bison.info, Node: Copying, Next: Concepts, Prev: Conditions, Up: Top
+
+GNU GENERAL PUBLIC LICENSE
+**************************
+
+ Version 2, June 1991
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+Preamble
+========
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it in
+new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software,
+and (2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+ 0. This License applies to any program or other work which contains a
+ notice placed by the copyright holder saying it may be distributed
+ under the terms of this General Public License. The "Program",
+ below, refers to any such program or work, and a "work based on
+ the Program" means either the Program or any derivative work under
+ copyright law: that is to say, a work containing the Program or a
+ portion of it, either verbatim or with modifications and/or
+ translated into another language. (Hereinafter, translation is
+ included without limitation in the term "modification".) Each
+ licensee is addressed as "you".
+
+ Activities other than copying, distribution and modification are
+ not covered by this License; they are outside its scope. The act
+ of running the Program is not restricted, and the output from the
+ Program is covered only if its contents constitute a work based on
+ the Program (independent of having been made by running the
+ Program). Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+ source code as you receive it, in any medium, provided that you
+ conspicuously and appropriately publish on each copy an appropriate
+ copyright notice and disclaimer of warranty; keep intact all the
+ notices that refer to this License and to the absence of any
+ warranty; and give any other recipients of the Program a copy of
+ this License along with the Program.
+
+ You may charge a fee for the physical act of transferring a copy,
+ and you may at your option offer warranty protection in exchange
+ for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+ of it, thus forming a work based on the Program, and copy and
+ distribute such modifications or work under the terms of Section 1
+ above, provided that you also meet all of these conditions:
+
+ a. You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b. You must cause any work that you distribute or publish, that
+ in whole or in part contains or is derived from the Program
+ or any part thereof, to be licensed as a whole at no charge
+ to all third parties under the terms of this License.
+
+ c. If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display
+ an announcement including an appropriate copyright notice and
+ a notice that there is no warranty (or else, saying that you
+ provide a warranty) and that users may redistribute the
+ program under these conditions, and telling the user how to
+ view a copy of this License. (Exception: if the Program
+ itself is interactive but does not normally print such an
+ announcement, your work based on the Program is not required
+ to print an announcement.)
+
+ These requirements apply to the modified work as a whole. If
+ identifiable sections of that work are not derived from the
+ Program, and can be reasonably considered independent and separate
+ works in themselves, then this License, and its terms, do not
+ apply to those sections when you distribute them as separate
+ works. But when you distribute the same sections as part of a
+ whole which is a work based on the Program, the distribution of
+ the whole must be on the terms of this License, whose permissions
+ for other licensees extend to the entire whole, and thus to each
+ and every part regardless of who wrote it.
+
+ Thus, it is not the intent of this section to claim rights or
+ contest your rights to work written entirely by you; rather, the
+ intent is to exercise the right to control the distribution of
+ derivative or collective works based on the Program.
+
+ In addition, mere aggregation of another work not based on the
+ Program with the Program (or with a work based on the Program) on
+ a volume of a storage or distribution medium does not bring the
+ other work under the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+ under Section 2) in object code or executable form under the terms
+ of Sections 1 and 2 above provided that you also do one of the
+ following:
+
+ a. Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of
+ Sections 1 and 2 above on a medium customarily used for
+ software interchange; or,
+
+ b. Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a
+ medium customarily used for software interchange; or,
+
+ c. Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with
+ such an offer, in accord with Subsection b above.)
+
+ The source code for a work means the preferred form of the work for
+ making modifications to it. For an executable work, complete
+ source code means all the source code for all modules it contains,
+ plus any associated interface definition files, plus the scripts
+ used to control compilation and installation of the executable.
+ However, as a special exception, the source code distributed need
+ not include anything that is normally distributed (in either
+ source or binary form) with the major components (compiler,
+ kernel, and so on) of the operating system on which the executable
+ runs, unless that component itself accompanies the executable.
+
+ If distribution of executable or object code is made by offering
+ access to copy from a designated place, then offering equivalent
+ access to copy the source code from the same place counts as
+ distribution of the source code, even though third parties are not
+ compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+ except as expressly provided under this License. Any attempt
+ otherwise to copy, modify, sublicense or distribute the Program is
+ void, and will automatically terminate your rights under this
+ License. However, parties who have received copies, or rights,
+ from you under this License will not have their licenses
+ terminated so long as such parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+ signed it. However, nothing else grants you permission to modify
+ or distribute the Program or its derivative works. These actions
+ are prohibited by law if you do not accept this License.
+ Therefore, by modifying or distributing the Program (or any work
+ based on the Program), you indicate your acceptance of this
+ License to do so, and all its terms and conditions for copying,
+ distributing or modifying the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+ Program), the recipient automatically receives a license from the
+ original licensor to copy, distribute or modify the Program
+ subject to these terms and conditions. You may not impose any
+ further restrictions on the recipients' exercise of the rights
+ granted herein. You are not responsible for enforcing compliance
+ by third parties to this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+ infringement or for any other reason (not limited to patent
+ issues), conditions are imposed on you (whether by court order,
+ agreement or otherwise) that contradict the conditions of this
+ License, they do not excuse you from the conditions of this
+ License. If you cannot distribute so as to satisfy simultaneously
+ your obligations under this License and any other pertinent
+ obligations, then as a consequence you may not distribute the
+ Program at all. For example, if a patent license would not permit
+ royalty-free redistribution of the Program by all those who
+ receive copies directly or indirectly through you, then the only
+ way you could satisfy both it and this License would be to refrain
+ entirely from distribution of the Program.
+
+ If any portion of this section is held invalid or unenforceable
+ under any particular circumstance, the balance of the section is
+ intended to apply and the section as a whole is intended to apply
+ in other circumstances.
+
+ It is not the purpose of this section to induce you to infringe any
+ patents or other property right claims or to contest validity of
+ any such claims; this section has the sole purpose of protecting
+ the integrity of the free software distribution system, which is
+ implemented by public license practices. Many people have made
+ generous contributions to the wide range of software distributed
+ through that system in reliance on consistent application of that
+ system; it is up to the author/donor to decide if he or she is
+ willing to distribute software through any other system and a
+ licensee cannot impose that choice.
+
+ This section is intended to make thoroughly clear what is believed
+ to be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+ certain countries either by patents or by copyrighted interfaces,
+ the original copyright holder who places the Program under this
+ License may add an explicit geographical distribution limitation
+ excluding those countries, so that distribution is permitted only
+ in or among countries not thus excluded. In such case, this
+ License incorporates the limitation as if written in the body of
+ this License.
+
+ 9. The Free Software Foundation may publish revised and/or new
+ versions of the General Public License from time to time. Such
+ new versions will be similar in spirit to the present version, but
+ may differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+ Program specifies a version number of this License which applies
+ to it and "any later version", you have the option of following
+ the terms and conditions either of that version or of any later
+ version published by the Free Software Foundation. If the Program
+ does not specify a version number of this License, you may choose
+ any version ever published by the Free Software Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+ programs whose distribution conditions are different, write to the
+ author to ask for permission. For software which is copyrighted
+ by the Free Software Foundation, write to the Free Software
+ Foundation; we sometimes make exceptions for this. Our decision
+ will be guided by the two goals of preserving the free status of
+ all derivatives of our free software and of promoting the sharing
+ and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO
+ WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE
+ LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT
+ WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT
+ NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE
+ QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+ PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY
+ SERVICING, REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+ WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY
+ MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE
+ LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
+ INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR
+ INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU
+ OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY
+ OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+=============================================
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these
+terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ ONE LINE TO GIVE THE PROGRAM'S NAME AND A BRIEF IDEA OF WHAT IT DOES.
+ Copyright (C) 19YY NAME OF AUTHOR
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA.
+
+ Also add information on how to contact you by electronic and paper
+mail.
+
+ If the program is interactive, make it output a short notice like
+this when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) 19YY NAME OF AUTHOR
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details
+ type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+ The hypothetical commands `show w' and `show c' should show the
+appropriate parts of the General Public License. Of course, the
+commands you use may be called something other than `show w' and `show
+c'; they could even be mouse-clicks or menu items--whatever suits your
+program.
+
+ You should also get your employer (if you work as a programmer) or
+your school, if any, to sign a "copyright disclaimer" for the program,
+if necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ SIGNATURE OF TY COON, 1 April 1989
+ Ty Coon, President of Vice
+
+ This General Public License does not permit incorporating your
+program into proprietary programs. If your program is a subroutine
+library, you may consider it more useful to permit linking proprietary
+applications with the library. If this is what you want to do, use the
+GNU Library General Public License instead of this License.
+
+
+File: bison.info, Node: Concepts, Next: Examples, Prev: Copying, Up: Top
+
+The Concepts of Bison
+*********************
+
+ This chapter introduces many of the basic concepts without which the
+details of Bison will not make sense. If you do not already know how to
+use Bison or Yacc, we suggest you start by reading this chapter
+carefully.
+
+* Menu:
+
+* Language and Grammar:: Languages and context-free grammars,
+ as mathematical ideas.
+* Grammar in Bison:: How we represent grammars for Bison's sake.
+* Semantic Values:: Each token or syntactic grouping can have
+ a semantic value (the value of an integer,
+ the name of an identifier, etc.).
+* Semantic Actions:: Each rule can have an action containing C code.
+* Bison Parser:: What are Bison's input and output,
+ how is the output used?
+* Stages:: Stages in writing and running Bison grammars.
+* Grammar Layout:: Overall structure of a Bison grammar file.
+
+
+File: bison.info, Node: Language and Grammar, Next: Grammar in Bison, Up: Concepts
+
+Languages and Context-Free Grammars
+===================================
+
+ In order for Bison to parse a language, it must be described by a
+"context-free grammar". This means that you specify one or more
+"syntactic groupings" and give rules for constructing them from their
+parts. For example, in the C language, one kind of grouping is called
+an `expression'. One rule for making an expression might be, "An
+expression can be made of a minus sign and another expression".
+Another would be, "An expression can be an integer". As you can see,
+rules are often recursive, but there must be at least one rule which
+leads out of the recursion.
+
+ The most common formal system for presenting such rules for humans
+to read is "Backus-Naur Form" or "BNF", which was developed in order to
+specify the language Algol 60. Any grammar expressed in BNF is a
+context-free grammar. The input to Bison is essentially
+machine-readable BNF.
+
+ Not all context-free languages can be handled by Bison, only those
+that are LALR(1). In brief, this means that it must be possible to
+tell how to parse any portion of an input string with just a single
+token of look-ahead. Strictly speaking, that is a description of an
+LR(1) grammar, and LALR(1) involves additional restrictions that are
+hard to explain simply; but it is rare in actual practice to find an
+LR(1) grammar that fails to be LALR(1). *Note Mysterious Reduce/Reduce
+Conflicts: Mystery Conflicts, for more information on this.
+
+ In the formal grammatical rules for a language, each kind of
+syntactic unit or grouping is named by a "symbol". Those which are
+built by grouping smaller constructs according to grammatical rules are
+called "nonterminal symbols"; those which can't be subdivided are called
+"terminal symbols" or "token types". We call a piece of input
+corresponding to a single terminal symbol a "token", and a piece
+corresponding to a single nonterminal symbol a "grouping".
+
+ We can use the C language as an example of what symbols, terminal and
+nonterminal, mean. The tokens of C are identifiers, constants (numeric
+and string), and the various keywords, arithmetic operators and
+punctuation marks. So the terminal symbols of a grammar for C include
+`identifier', `number', `string', plus one symbol for each keyword,
+operator or punctuation mark: `if', `return', `const', `static', `int',
+`char', `plus-sign', `open-brace', `close-brace', `comma' and many
+more. (These tokens can be subdivided into characters, but that is a
+matter of lexicography, not grammar.)
+
+ Here is a simple C function subdivided into tokens:
+
+ int /* keyword `int' */
+ square (x) /* identifier, open-paren, */
+ /* identifier, close-paren */
+ int x; /* keyword `int', identifier, semicolon */
+ { /* open-brace */
+ return x * x; /* keyword `return', identifier, */
+ /* asterisk, identifier, semicolon */
+ } /* close-brace */
+
+ The syntactic groupings of C include the expression, the statement,
+the declaration, and the function definition. These are represented in
+the grammar of C by nonterminal symbols `expression', `statement',
+`declaration' and `function definition'. The full grammar uses dozens
+of additional language constructs, each with its own nonterminal
+symbol, in order to express the meanings of these four. The example
+above is a function definition; it contains one declaration, and one
+statement. In the statement, each `x' is an expression and so is `x *
+x'.
+
+ Each nonterminal symbol must have grammatical rules showing how it
+is made out of simpler constructs. For example, one kind of C
+statement is the `return' statement; this would be described with a
+grammar rule which reads informally as follows:
+
+ A `statement' can be made of a `return' keyword, an `expression'
+ and a `semicolon'.
+
+There would be many other rules for `statement', one for each kind of
+statement in C.
+
+ One nonterminal symbol must be distinguished as the special one which
+defines a complete utterance in the language. It is called the "start
+symbol". In a compiler, this means a complete input program. In the C
+language, the nonterminal symbol `sequence of definitions and
+declarations' plays this role.
+
+ For example, `1 + 2' is a valid C expression--a valid part of a C
+program--but it is not valid as an _entire_ C program. In the
+context-free grammar of C, this follows from the fact that `expression'
+is not the start symbol.
+
+ The Bison parser reads a sequence of tokens as its input, and groups
+the tokens using the grammar rules. If the input is valid, the end
+result is that the entire token sequence reduces to a single grouping
+whose symbol is the grammar's start symbol. If we use a grammar for C,
+the entire input must be a `sequence of definitions and declarations'.
+If not, the parser reports a syntax error.
+
+
+File: bison.info, Node: Grammar in Bison, Next: Semantic Values, Prev: Language and Grammar, Up: Concepts
+
+From Formal Rules to Bison Input
+================================
+
+ A formal grammar is a mathematical construct. To define the language
+for Bison, you must write a file expressing the grammar in Bison syntax:
+a "Bison grammar" file. *Note Bison Grammar Files: Grammar File.
+
+ A nonterminal symbol in the formal grammar is represented in Bison
+input as an identifier, like an identifier in C. By convention, it
+should be in lower case, such as `expr', `stmt' or `declaration'.
+
+ The Bison representation for a terminal symbol is also called a
+"token type". Token types as well can be represented as C-like
+identifiers. By convention, these identifiers should be upper case to
+distinguish them from nonterminals: for example, `INTEGER',
+`IDENTIFIER', `IF' or `RETURN'. A terminal symbol that stands for a
+particular keyword in the language should be named after that keyword
+converted to upper case. The terminal symbol `error' is reserved for
+error recovery. *Note Symbols::.
+
+ A terminal symbol can also be represented as a character literal,
+just like a C character constant. You should do this whenever a token
+is just a single character (parenthesis, plus-sign, etc.): use that
+same character in a literal as the terminal symbol for that token.
+
+ A third way to represent a terminal symbol is with a C string
+constant containing several characters. *Note Symbols::, for more
+information.
+
+ The grammar rules also have an expression in Bison syntax. For
+example, here is the Bison rule for a C `return' statement. The
+semicolon in quotes is a literal character token, representing part of
+the C syntax for the statement; the naked semicolon, and the colon, are
+Bison punctuation used in every rule.
+
+ stmt: RETURN expr ';'
+ ;
+
+*Note Syntax of Grammar Rules: Rules.
+
+
+File: bison.info, Node: Semantic Values, Next: Semantic Actions, Prev: Grammar in Bison, Up: Concepts
+
+Semantic Values
+===============
+
+ A formal grammar selects tokens only by their classifications: for
+example, if a rule mentions the terminal symbol `integer constant', it
+means that _any_ integer constant is grammatically valid in that
+position. The precise value of the constant is irrelevant to how to
+parse the input: if `x+4' is grammatical then `x+1' or `x+3989' is
+equally grammatical.
+
+ But the precise value is very important for what the input means
+once it is parsed. A compiler is useless if it fails to distinguish
+between 4, 1 and 3989 as constants in the program! Therefore, each
+token in a Bison grammar has both a token type and a "semantic value".
+*Note Defining Language Semantics: Semantics, for details.
+
+ The token type is a terminal symbol defined in the grammar, such as
+`INTEGER', `IDENTIFIER' or `',''. It tells everything you need to know
+to decide where the token may validly appear and how to group it with
+other tokens. The grammar rules know nothing about tokens except their
+types.
+
+ The semantic value has all the rest of the information about the
+meaning of the token, such as the value of an integer, or the name of an
+identifier. (A token such as `','' which is just punctuation doesn't
+need to have any semantic value.)
+
+ For example, an input token might be classified as token type
+`INTEGER' and have the semantic value 4. Another input token might
+have the same token type `INTEGER' but value 3989. When a grammar rule
+says that `INTEGER' is allowed, either of these tokens is acceptable
+because each is an `INTEGER'. When the parser accepts the token, it
+keeps track of the token's semantic value.
+
+ Each grouping can also have a semantic value as well as its
+nonterminal symbol. For example, in a calculator, an expression
+typically has a semantic value that is a number. In a compiler for a
+programming language, an expression typically has a semantic value that
+is a tree structure describing the meaning of the expression.
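+
+   As an illustrative sketch (the `%union' and typed `%token'
+declarations used here are described in *note Multiple Types:: and
+*note Union Decl::, and are not part of the examples developed later
+in this manual), a grammar might give integer constants an `int'
+value and identifiers a string value:
+
+     %union {              /* all the possible semantic value types */
+       int   ival;         /* value of an INTEGER token */
+       char *name;         /* name of an IDENTIFIER token */
+     }
+     %token <ival> INTEGER
+     %token <name> IDENTIFIER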
+
+
+File: bison.info, Node: Semantic Actions, Next: Bison Parser, Prev: Semantic Values, Up: Concepts
+
+Semantic Actions
+================
+
+ In order to be useful, a program must do more than parse input; it
+must also produce some output based on the input. In a Bison grammar,
+a grammar rule can have an "action" made up of C statements. Each time
+the parser recognizes a match for that rule, the action is executed.
+*Note Actions::.
+
+ Most of the time, the purpose of an action is to compute the
+semantic value of the whole construct from the semantic values of its
+parts. For example, suppose we have a rule which says an expression
+can be the sum of two expressions. When the parser recognizes such a
+sum, each of the subexpressions has a semantic value which describes
+how it was built up. The action for this rule should create a similar
+sort of value for the newly recognized larger expression.
+
+ For example, here is a rule that says an expression can be the sum of
+two subexpressions:
+
+ expr: expr '+' expr { $$ = $1 + $3; }
+ ;
+
+The action says how to produce the semantic value of the sum expression
+from the values of the two subexpressions.
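+
+   In a compiler, the action for such a rule would more likely build a
+tree node than add two numbers.  A sketch (the function `make_sum' is
+hypothetical, something your own program would have to supply):
+
+     expr: expr '+' expr   { $$ = make_sum ($1, $3); }
+             ;
+
+Here `$$', `$1' and `$3' would all hold pointers to tree nodes rather
+than numbers.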
+
+
+File: bison.info, Node: Bison Parser, Next: Stages, Prev: Semantic Actions, Up: Concepts
+
+Bison Output: the Parser File
+=============================
+
+ When you run Bison, you give it a Bison grammar file as input. The
+output is a C source file that parses the language described by the
+grammar. This file is called a "Bison parser". Keep in mind that the
+Bison utility and the Bison parser are two distinct programs: the Bison
+utility is a program whose output is the Bison parser that becomes part
+of your program.
+
+ The job of the Bison parser is to group tokens into groupings
+according to the grammar rules--for example, to build identifiers and
+operators into expressions. As it does this, it runs the actions for
+the grammar rules it uses.
+
+ The tokens come from a function called the "lexical analyzer" that
+you must supply in some fashion (such as by writing it in C). The
+Bison parser calls the lexical analyzer each time it wants a new token.
+It doesn't know what is "inside" the tokens (though their semantic
+values may reflect this). Typically the lexical analyzer makes the
+tokens by parsing characters of text, but Bison does not depend on
+this. *Note The Lexical Analyzer Function `yylex': Lexical.
+
+ The Bison parser file is C code which defines a function named
+`yyparse' which implements that grammar. This function does not make a
+complete C program: you must supply some additional functions. One is
+the lexical analyzer. Another is an error-reporting function which the
+parser calls to report an error. In addition, a complete C program must
+start with a function called `main'; you have to provide this, and
+arrange for it to call `yyparse' or the parser will never run. *Note
+Parser C-Language Interface: Interface.
+
+ Aside from the token type names and the symbols in the actions you
+write, all variable and function names used in the Bison parser file
+begin with `yy' or `YY'. This includes interface functions such as the
+lexical analyzer function `yylex', the error reporting function
+`yyerror' and the parser function `yyparse' itself. This also includes
+numerous identifiers used for internal purposes. Therefore, you should
+avoid using C identifiers starting with `yy' or `YY' in the Bison
+grammar file except for the ones defined in this manual.
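+
+   Putting these requirements together, the user-supplied part of a
+program built around `yyparse' can be as small as the following
+sketch.  It is a bare skeleton, not a useful program; realistic
+versions of all three functions appear in the calculator examples of
+the next chapter.
+
+     #include <stdio.h>
+
+     yylex ()              /* lexical analyzer; 0 means end of input */
+     {
+       return 0;
+     }
+
+     yyerror (s)           /* called by yyparse on a syntax error */
+          char *s;
+     {
+       fprintf (stderr, "%s\n", s);
+     }
+
+     main ()
+     {
+       return yyparse ();
+     }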
+
+
+File: bison.info, Node: Stages, Next: Grammar Layout, Prev: Bison Parser, Up: Concepts
+
+Stages in Using Bison
+=====================
+
+ The actual language-design process using Bison, from grammar
+specification to a working compiler or interpreter, has these parts:
+
+ 1. Formally specify the grammar in a form recognized by Bison (*note
+ Bison Grammar Files: Grammar File.). For each grammatical rule in
+ the language, describe the action that is to be taken when an
+ instance of that rule is recognized. The action is described by a
+ sequence of C statements.
+
+ 2. Write a lexical analyzer to process input and pass tokens to the
+ parser. The lexical analyzer may be written by hand in C (*note
+ The Lexical Analyzer Function `yylex': Lexical.). It could also
+ be produced using Lex, but the use of Lex is not discussed in this
+ manual.
+
+ 3. Write a controlling function that calls the Bison-produced parser.
+
+ 4. Write error-reporting routines.
+
+ To turn this source code as written into a runnable program, you
+must follow these steps:
+
+ 1. Run Bison on the grammar to produce the parser.
+
+ 2. Compile the code output by Bison, as well as any other source
+ files.
+
+ 3. Link the object files to produce the finished product.
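+
+   For the `rpcalc' example developed in the next chapter, those three
+steps could be carried out like this (a sketch; the same commands are
+shown and explained in *note Rpcalc Gen:: and *note Rpcalc Compile::):
+
+     bison rpcalc.y                  # produces `rpcalc.tab.c'
+     cc -c rpcalc.tab.c              # compile the generated parser
+     cc rpcalc.tab.o -lm -o rpcalc   # link; `-lm' supplies `pow'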
+
+
+File: bison.info, Node: Grammar Layout, Prev: Stages, Up: Concepts
+
+The Overall Layout of a Bison Grammar
+=====================================
+
+ The input file for the Bison utility is a "Bison grammar file". The
+general form of a Bison grammar file is as follows:
+
+ %{
+ C DECLARATIONS
+ %}
+
+ BISON DECLARATIONS
+
+ %%
+ GRAMMAR RULES
+ %%
+ ADDITIONAL C CODE
+
+The `%%', `%{' and `%}' are punctuation that appears in every Bison
+grammar file to separate the sections.
+
+ The C declarations may define types and variables used in the
+actions. You can also use preprocessor commands to define macros used
+there, and use `#include' to include header files that do any of these
+things.
+
+ The Bison declarations declare the names of the terminal and
+nonterminal symbols, and may also describe operator precedence and the
+data types of semantic values of various symbols.
+
+ The grammar rules define how to construct each nonterminal symbol
+from its parts.
+
+ The additional C code can contain any C code you want to use. Often
+the definition of the lexical analyzer `yylex' goes here, plus
+subroutines called by the actions in the grammar rules. In a simple
+program, all the rest of the program can go here.
+
+
+File: bison.info, Node: Examples, Next: Grammar File, Prev: Concepts, Up: Top
+
+Examples
+********
+
+ Now we show and explain three sample programs written using Bison: a
+reverse polish notation calculator, an algebraic (infix) notation
+calculator, and a multi-function calculator. All three have been tested
+under BSD Unix 4.3; each produces a usable, though limited, interactive
+desk-top calculator.
+
+ These examples are simple, but Bison grammars for real programming
+languages are written the same way. You can copy these examples out of
+the Info file and into a source file to try them.
+
+* Menu:
+
+* RPN Calc:: Reverse polish notation calculator;
+ a first example with no operator precedence.
+* Infix Calc:: Infix (algebraic) notation calculator.
+ Operator precedence is introduced.
+* Simple Error Recovery:: Continuing after syntax errors.
+* Multi-function Calc:: Calculator with memory and trig functions.
+ It uses multiple data-types for semantic values.
+* Exercises:: Ideas for improving the multi-function calculator.
+
+
+File: bison.info, Node: RPN Calc, Next: Infix Calc, Up: Examples
+
+Reverse Polish Notation Calculator
+==================================
+
+ The first example is that of a simple double-precision "reverse
+polish notation" calculator (a calculator using postfix operators).
+This example provides a good starting point, since operator precedence
+is not an issue. The second example will illustrate how operator
+precedence is handled.
+
+ The source code for this calculator is named `rpcalc.y'. The `.y'
+extension is a convention used for Bison input files.
+
+* Menu:
+
+* Decls: Rpcalc Decls. Bison and C declarations for rpcalc.
+* Rules: Rpcalc Rules. Grammar Rules for rpcalc, with explanation.
+* Lexer: Rpcalc Lexer. The lexical analyzer.
+* Main: Rpcalc Main. The controlling function.
+* Error: Rpcalc Error. The error reporting function.
+* Gen: Rpcalc Gen. Running Bison on the grammar file.
+* Comp: Rpcalc Compile. Run the C compiler on the output code.
+
+
+File: bison.info, Node: Rpcalc Decls, Next: Rpcalc Rules, Up: RPN Calc
+
+Declarations for `rpcalc'
+-------------------------
+
+ Here are the C and Bison declarations for the reverse polish notation
+calculator. As in C, comments are placed between `/*...*/'.
+
+ /* Reverse polish notation calculator. */
+
+ %{
+ #define YYSTYPE double
+ #include <math.h>
+ %}
+
+ %token NUM
+
+ %% /* Grammar rules and actions follow */
+
+ The C declarations section (*note The C Declarations Section: C
+Declarations.) contains two preprocessor directives.
+
+ The `#define' directive defines the macro `YYSTYPE', thus specifying
+the C data type for semantic values of both tokens and groupings (*note
+Data Types of Semantic Values: Value Type.). The Bison parser will use
+whatever type `YYSTYPE' is defined as; if you don't define it, `int' is
+the default. Because we specify `double', each token and each
+expression has an associated value, which is a floating point number.
+
+ The `#include' directive is used to declare the exponentiation
+function `pow'.
+
+ The second section, Bison declarations, provides information to
+Bison about the token types (*note The Bison Declarations Section:
+Bison Declarations.). Each terminal symbol that is not a
+single-character literal must be declared here. (Single-character
+literals normally don't need to be declared.) In this example, all the
+arithmetic operators are designated by single-character literals, so the
+only terminal symbol that needs to be declared is `NUM', the token type
+for numeric constants.
+
diff --git a/tools/bison++/bison.info-2 b/tools/bison++/bison.info-2
new file mode 100644
index 000000000..2e392509a
--- /dev/null
+++ b/tools/bison++/bison.info-2
@@ -0,0 +1,1334 @@
+This is bison.info, produced by makeinfo version 4.1 from bison.texinfo.
+
+START-INFO-DIR-ENTRY
+* bison: (bison). GNU Project parser generator (yacc replacement).
+END-INFO-DIR-ENTRY
+
+ This file documents the Bison parser generator.
+
+ Copyright (C) 1988, 89, 90, 91, 92, 93, 95, 98, 1999 Free Software
+Foundation, Inc.
+
+ Permission is granted to make and distribute verbatim copies of this
+manual provided the copyright notice and this permission notice are
+preserved on all copies.
+
+ Permission is granted to copy and distribute modified versions of
+this manual under the conditions for verbatim copying, provided also
+that the sections entitled "GNU General Public License" and "Conditions
+for Using Bison" are included exactly as in the original, and provided
+that the entire resulting derived work is distributed under the terms
+of a permission notice identical to this one.
+
+ Permission is granted to copy and distribute translations of this
+manual into another language, under the above conditions for modified
+versions, except that the sections entitled "GNU General Public
+License", "Conditions for Using Bison" and this permission notice may be
+included in translations approved by the Free Software Foundation
+instead of in the original English.
+
+
+File: bison.info, Node: Rpcalc Rules, Next: Rpcalc Lexer, Prev: Rpcalc Decls, Up: RPN Calc
+
+Grammar Rules for `rpcalc'
+--------------------------
+
+ Here are the grammar rules for the reverse polish notation
+calculator.
+
+ input: /* empty */
+ | input line
+ ;
+
+ line: '\n'
+ | exp '\n' { printf ("\t%.10g\n", $1); }
+ ;
+
+ exp: NUM { $$ = $1; }
+ | exp exp '+' { $$ = $1 + $2; }
+ | exp exp '-' { $$ = $1 - $2; }
+ | exp exp '*' { $$ = $1 * $2; }
+ | exp exp '/' { $$ = $1 / $2; }
+ /* Exponentiation */
+ | exp exp '^' { $$ = pow ($1, $2); }
+ /* Unary minus */
+ | exp 'n' { $$ = -$1; }
+ ;
+ %%
+
+ The groupings of the rpcalc "language" defined here are the
+expression (given the name `exp'), the line of input (`line'), and the
+complete input transcript (`input'). Each of these nonterminal symbols
+has several alternate rules, joined by the `|' punctuator which is read
+as "or". The following sections explain what these rules mean.
+
+ The semantics of the language is determined by the actions taken
+when a grouping is recognized. The actions are the C code that appears
+inside braces. *Note Actions::.
+
+ You must specify these actions in C, but Bison provides the means for
+passing semantic values between the rules. In each action, the
+pseudo-variable `$$' stands for the semantic value for the grouping
+that the rule is going to construct. Assigning a value to `$$' is the
+main job of most actions. The semantic values of the components of the
+rule are referred to as `$1', `$2', and so on.
+
+* Menu:
+
+* Rpcalc Input::
+* Rpcalc Line::
+* Rpcalc Expr::
+
+
+File: bison.info, Node: Rpcalc Input, Next: Rpcalc Line, Up: Rpcalc Rules
+
+Explanation of `input'
+......................
+
+ Consider the definition of `input':
+
+ input: /* empty */
+ | input line
+ ;
+
+ This definition reads as follows: "A complete input is either an
+empty string, or a complete input followed by an input line". Notice
+that "complete input" is defined in terms of itself. This definition
+is said to be "left recursive" since `input' appears always as the
+leftmost symbol in the sequence. *Note Recursive Rules: Recursion.
+
+ The first alternative is empty because there are no symbols between
+the colon and the first `|'; this means that `input' can match an empty
+string of input (no tokens). We write the rules this way because it is
+legitimate to type `Ctrl-d' right after you start the calculator. It's
+conventional to put an empty alternative first and write the comment
+`/* empty */' in it.
+
+ The second alternate rule (`input line') handles all nontrivial
+input. It means, "After reading any number of lines, read one more
+line if possible." The left recursion makes this rule into a loop.
+Since the first alternative matches empty input, the loop can be
+executed zero or more times.
+
+ The parser function `yyparse' continues to process input until a
+grammatical error is seen or the lexical analyzer says there are no more
+input tokens; we will arrange for the latter to happen at end of file.
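+
+   For contrast, a right recursive version of the same rule (a sketch;
+it is not used in this example) would read:
+
+     input:    /* empty */
+             | line input
+             ;
+
+Bison accepts this form as well, but left recursion is preferable
+because it can parse any number of lines with bounded stack space.
+*Note Recursive Rules: Recursion.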
+
+
+File: bison.info, Node: Rpcalc Line, Next: Rpcalc Expr, Prev: Rpcalc Input, Up: Rpcalc Rules
+
+Explanation of `line'
+.....................
+
+ Now consider the definition of `line':
+
+ line: '\n'
+ | exp '\n' { printf ("\t%.10g\n", $1); }
+ ;
+
+ The first alternative is a token which is a newline character; this
+means that rpcalc accepts a blank line (and ignores it, since there is
+no action). The second alternative is an expression followed by a
+newline. This is the alternative that makes rpcalc useful. The
+semantic value of the `exp' grouping is the value of `$1' because the
+`exp' in question is the first symbol in the alternative. The action
+prints this value, which is the result of the computation the user
+asked for.
+
+ This action is unusual because it does not assign a value to `$$'.
+As a consequence, the semantic value associated with the `line' is
+uninitialized (its value will be unpredictable). This would be a bug if
+that value were ever used, but we don't use it: once rpcalc has printed
+the value of the user's input line, that value is no longer needed.
+
+
+File: bison.info, Node: Rpcalc Expr, Prev: Rpcalc Line, Up: Rpcalc Rules
+
+Explanation of `expr'
+.....................
+
+ The `exp' grouping has several rules, one for each kind of
+expression. The first rule handles the simplest expressions: those
+that are just numbers. The second handles an addition-expression,
+which looks like two expressions followed by a plus-sign. The third
+handles subtraction, and so on.
+
+ exp: NUM
+ | exp exp '+' { $$ = $1 + $2; }
+ | exp exp '-' { $$ = $1 - $2; }
+ ...
+ ;
+
+ We have used `|' to join all the rules for `exp', but we could
+equally well have written them separately:
+
+ exp: NUM ;
+ exp: exp exp '+' { $$ = $1 + $2; } ;
+ exp: exp exp '-' { $$ = $1 - $2; } ;
+ ...
+
+ Most of the rules have actions that compute the value of the
+expression in terms of the value of its parts. For example, in the
+rule for addition, `$1' refers to the first component `exp' and `$2'
+refers to the second one. The third component, `'+'', has no meaningful
+associated semantic value, but if it had one you could refer to it as
+`$3'. When `yyparse' recognizes a sum expression using this rule, the
+sum of the two subexpressions' values is produced as the value of the
+entire expression. *Note Actions::.
+
+ You don't have to give an action for every rule. When a rule has no
+action, Bison by default copies the value of `$1' into `$$'. This is
+what happens in the first rule (the one that uses `NUM').
+
+ The formatting shown here is the recommended convention, but Bison
+does not require it. You can add or change whitespace as much as you
+wish. For example, this:
+
+ exp : NUM | exp exp '+' {$$ = $1 + $2; } | ...
+
+means the same thing as this:
+
+ exp: NUM
+ | exp exp '+' { $$ = $1 + $2; }
+ | ...
+
+The latter, however, is much more readable.
+
+
+File: bison.info, Node: Rpcalc Lexer, Next: Rpcalc Main, Prev: Rpcalc Rules, Up: RPN Calc
+
+The `rpcalc' Lexical Analyzer
+-----------------------------
+
+ The lexical analyzer's job is low-level parsing: converting
+characters or sequences of characters into tokens. The Bison parser
+gets its tokens by calling the lexical analyzer. *Note The Lexical
+Analyzer Function `yylex': Lexical.
+
+ Only a simple lexical analyzer is needed for the RPN calculator.
+This lexical analyzer skips blanks and tabs, then reads in numbers as
+`double' and returns them as `NUM' tokens. Any other character that
+isn't part of a number is a separate token. Note that the token-code
+for such a single-character token is the character itself.
+
+ The return value of the lexical analyzer function is a numeric code
+which represents a token type. The same text used in Bison rules to
+stand for this token type is also a C expression for the numeric code
+for the type. This works in two ways. If the token type is a
+character literal, then its numeric code is the ASCII code for that
+character; you can use the same character literal in the lexical
+analyzer to express the number. If the token type is an identifier,
+that identifier is defined by Bison as a C macro whose definition is
+the appropriate number. In this example, therefore, `NUM' becomes a
+macro for `yylex' to use.
+
+ The semantic value of the token (if it has one) is stored into the
+global variable `yylval', which is where the Bison parser will look for
+it. (The C data type of `yylval' is `YYSTYPE', which was defined at
+the beginning of the grammar; *note Declarations for `rpcalc': Rpcalc
+Decls..)
+
+ A token type code of zero is returned if the end-of-file is
+encountered. (Bison recognizes any nonpositive value as indicating the
+end of the input.)
+
+ Here is the code for the lexical analyzer:
+
+ /* Lexical analyzer returns a double floating point
+ number on the stack and the token NUM, or the ASCII
+ character read if not a number. Skips all blanks
+ and tabs, returns 0 for EOF. */
+
+ #include <ctype.h>
+
+ yylex ()
+ {
+ int c;
+
+ /* skip white space */
+ while ((c = getchar ()) == ' ' || c == '\t')
+ ;
+ /* process numbers */
+ if (c == '.' || isdigit (c))
+ {
+ ungetc (c, stdin);
+ scanf ("%lf", &yylval);
+ return NUM;
+ }
+ /* return end-of-file */
+ if (c == EOF)
+ return 0;
+ /* return single chars */
+ return c;
+ }
+
+
+File: bison.info, Node: Rpcalc Main, Next: Rpcalc Error, Prev: Rpcalc Lexer, Up: RPN Calc
+
+The Controlling Function
+------------------------
+
+ In keeping with the spirit of this example, the controlling function
+is kept to the bare minimum. The only requirement is that it call
+`yyparse' to start the process of parsing.
+
+ main ()
+ {
+ yyparse ();
+ }
+
+
+File: bison.info, Node: Rpcalc Error, Next: Rpcalc Gen, Prev: Rpcalc Main, Up: RPN Calc
+
+The Error Reporting Routine
+---------------------------
+
+ When `yyparse' detects a syntax error, it calls the error reporting
+function `yyerror' to print an error message (usually but not always
+`"parse error"'). It is up to the programmer to supply `yyerror'
+(*note Parser C-Language Interface: Interface.), so here is the
+definition we will use:
+
+ #include <stdio.h>
+
+ yyerror (s) /* Called by yyparse on error */
+ char *s;
+ {
+ printf ("%s\n", s);
+ }
+
+ After `yyerror' returns, the Bison parser may recover from the error
+and continue parsing if the grammar contains a suitable error rule
+(*note Error Recovery::). Otherwise, `yyparse' returns nonzero. We
+have not written any error rules in this example, so any invalid input
+will cause the calculator program to exit. This is not clean behavior
+for a real calculator, but it is adequate in the first example.
+
+
+File: bison.info, Node: Rpcalc Gen, Next: Rpcalc Compile, Prev: Rpcalc Error, Up: RPN Calc
+
+Running Bison to Make the Parser
+--------------------------------
+
+ Before running Bison to produce a parser, we need to decide how to
+arrange all the source code in one or more source files. For such a
+simple example, the easiest thing is to put everything in one file.
+The definitions of `yylex', `yyerror' and `main' go at the end, in the
+"additional C code" section of the file (*note The Overall Layout of a
+Bison Grammar: Grammar Layout.).
+
+ For a large project, you would probably have several source files,
+and use `make' to arrange to recompile them.
+
+ With all the source in a single file, you use the following command
+to convert it into a parser file:
+
+ bison FILE_NAME.y
+
+In this example the file was called `rpcalc.y' (for "Reverse Polish
+CALCulator"). Bison produces a file named `FILE_NAME.tab.c', removing
+the `.y' from the original file name. The file output by Bison contains
+the source code for `yyparse'. The additional functions in the input
+file (`yylex', `yyerror' and `main') are copied verbatim to the output.
+
+
+File: bison.info, Node: Rpcalc Compile, Prev: Rpcalc Gen, Up: RPN Calc
+
+Compiling the Parser File
+-------------------------
+
+ Here is how to compile and run the parser file:
+
+ # List files in current directory.
+ % ls
+ rpcalc.tab.c rpcalc.y
+
+ # Compile the Bison parser.
+ # `-lm' tells compiler to search math library for `pow'.
+ % cc rpcalc.tab.c -lm -o rpcalc
+
+ # List files again.
+ % ls
+ rpcalc rpcalc.tab.c rpcalc.y
+
+ The file `rpcalc' now contains the executable code. Here is an
+example session using `rpcalc'.
+
+ % rpcalc
+ 4 9 +
+ 13
+ 3 7 + 3 4 5 *+-
+ -13
+ 3 7 + 3 4 5 * + - n Note the unary minus, `n'
+ 13
+ 5 6 / 4 n +
+ -3.166666667
+ 3 4 ^ Exponentiation
+ 81
+ ^D End-of-file indicator
+ %
+
+
+File: bison.info, Node: Infix Calc, Next: Simple Error Recovery, Prev: RPN Calc, Up: Examples
+
+Infix Notation Calculator: `calc'
+=================================
+
+ We now modify rpcalc to handle infix operators instead of postfix.
+Infix notation involves the concept of operator precedence and the need
+for parentheses nested to arbitrary depth. Here is the Bison code for
+`calc.y', an infix desk-top calculator.
+
+ /* Infix notation calculator--calc */
+
+ %{
+ #define YYSTYPE double
+ #include <math.h>
+ %}
+
+ /* BISON Declarations */
+ %token NUM
+ %left '-' '+'
+ %left '*' '/'
+ %left NEG /* negation--unary minus */
+ %right '^' /* exponentiation */
+
+ /* Grammar follows */
+ %%
+ input: /* empty string */
+ | input line
+ ;
+
+ line: '\n'
+ | exp '\n' { printf ("\t%.10g\n", $1); }
+ ;
+
+ exp: NUM { $$ = $1; }
+ | exp '+' exp { $$ = $1 + $3; }
+ | exp '-' exp { $$ = $1 - $3; }
+ | exp '*' exp { $$ = $1 * $3; }
+ | exp '/' exp { $$ = $1 / $3; }
+ | '-' exp %prec NEG { $$ = -$2; }
+ | exp '^' exp { $$ = pow ($1, $3); }
+ | '(' exp ')' { $$ = $2; }
+ ;
+ %%
+
+The functions `yylex', `yyerror' and `main' can be the same as before.
+
+ There are two important new features shown in this code.
+
+ In the second section (Bison declarations), `%left' declares token
+types and says they are left-associative operators. The declarations
+`%left' and `%right' (right associativity) take the place of `%token'
+which is used to declare a token type name without associativity.
+(These tokens are single-character literals, which ordinarily don't
+need to be declared. We declare them here to specify the
+associativity.)
+
+ Operator precedence is determined by the line ordering of the
+declarations; the higher the line number of the declaration (lower on
+the page or screen), the higher the precedence. Hence, exponentiation
+has the highest precedence, unary minus (`NEG') is next, followed by
+`*' and `/', and so on. *Note Operator Precedence: Precedence.
+
+ The other important new feature is the `%prec' in the grammar section
+for the unary minus operator. The `%prec' simply instructs Bison that
+the rule `| '-' exp' has the same precedence as `NEG'--in this case the
+next-to-highest. *Note Context-Dependent Precedence: Contextual
+Precedence.
+
+ Here is a sample run of `calc.y':
+
+ % calc
+ 4 + 4.5 - (34/(8*3+-3))
+ 6.880952381
+ -56 + 2
+ -54
+ 3 ^ 2
+ 9
+
+
+File: bison.info, Node: Simple Error Recovery, Next: Multi-function Calc, Prev: Infix Calc, Up: Examples
+
+Simple Error Recovery
+=====================
+
+ Up to this point, this manual has not addressed the issue of "error
+recovery"--how to continue parsing after the parser detects a syntax
+error. All we have handled is error reporting with `yyerror'. Recall
+that by default `yyparse' returns after calling `yyerror'. This means
+that an erroneous input line causes the calculator program to exit.
+Now we show how to rectify this deficiency.
+
+ The Bison language itself includes the reserved word `error', which
+may be included in the grammar rules. In the example below it has been
+added to one of the alternatives for `line':
+
+ line: '\n'
+ | exp '\n' { printf ("\t%.10g\n", $1); }
+ | error '\n' { yyerrok; }
+ ;
+
+ This addition to the grammar allows for simple error recovery in the
+event of a parse error. If an expression that cannot be evaluated is
+read, the error will be recognized by the third rule for `line', and
+parsing will continue. (The `yyerror' function is still called upon to
+print its message as well.) The action executes the statement
+`yyerrok', a macro defined automatically by Bison; its meaning is that
+error recovery is complete (*note Error Recovery::). Note the
+difference between `yyerrok' and `yyerror'; neither one is a misprint.
+
+ This form of error recovery deals with syntax errors. There are
+other kinds of errors; for example, division by zero, which raises an
+exception signal that is normally fatal. A real calculator program
+must handle this signal and use `longjmp' to return to `main' and
+resume parsing input lines; it would also have to discard the rest of
+the current line of input. We won't discuss this issue further because
+it is not specific to Bison programs.
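+
+ Here is a rough sketch of that approach, not taken from the calculator
+examples; the handler and buffer names, and the choice of `SIGFPE', are
+only assumptions:
+
+     #include <setjmp.h>
+     #include <signal.h>
+     #include <stdio.h>
+
+     jmp_buf calc_env;                /* hypothetical name */
+     int yyparse ();
+
+     void
+     fpe_handler (int sig)            /* hypothetical name */
+     {
+       signal (SIGFPE, fpe_handler);  /* re-arm the handler */
+       printf ("arithmetic exception\n");
+       longjmp (calc_env, 1);         /* jump back into main */
+     }
+
+     int
+     main ()
+     {
+       signal (SIGFPE, fpe_handler);
+       if (setjmp (calc_env))
+         {
+           /* We arrive here after a longjmp; a real program would
+              also discard the rest of the offending input line
+              before resuming.  */
+         }
+       yyparse ();
+       return 0;
+     }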
+
+
+File: bison.info, Node: Multi-function Calc, Next: Exercises, Prev: Simple Error Recovery, Up: Examples
+
+Multi-Function Calculator: `mfcalc'
+===================================
+
+ Now that the basics of Bison have been discussed, it is time to move
+on to a more advanced problem. The above calculators provided only five
+functions, `+', `-', `*', `/' and `^'. It would be nice to have a
+calculator that provides other mathematical functions such as `sin',
+`cos', etc.
+
+ It is easy to add new operators to the infix calculator as long as
+they are only single-character literals. The lexical analyzer `yylex'
+passes back all non-number characters as tokens, so new grammar rules
+suffice for adding a new operator. But we want something more
+flexible: built-in functions whose syntax has this form:
+
+ FUNCTION_NAME (ARGUMENT)
+
+At the same time, we will add memory to the calculator, by allowing you
+to create named variables, store values in them, and use them later.
+Here is a sample session with the multi-function calculator:
+
+ % mfcalc
+ pi = 3.141592653589
+ 3.1415926536
+ sin(pi)
+ 0.0000000000
+ alpha = beta1 = 2.3
+ 2.3000000000
+ alpha
+ 2.3000000000
+ ln(alpha)
+ 0.8329091229
+ exp(ln(beta1))
+ 2.3000000000
+ %
+
+ Note that multiple assignment and nested function calls are
+permitted.
+
+* Menu:
+
+* Decl: Mfcalc Decl. Bison declarations for multi-function calculator.
+* Rules: Mfcalc Rules. Grammar rules for the calculator.
+* Symtab: Mfcalc Symtab. Symbol table management subroutines.
+
+
+File: bison.info, Node: Mfcalc Decl, Next: Mfcalc Rules, Up: Multi-function Calc
+
+Declarations for `mfcalc'
+-------------------------
+
+ Here are the C and Bison declarations for the multi-function
+calculator.
+
+ %{
+ #include <math.h> /* For math functions, cos(), sin(), etc. */
+ #include "calc.h" /* Contains definition of `symrec' */
+ %}
+ %union {
+ double val; /* For returning numbers. */
+ symrec *tptr; /* For returning symbol-table pointers */
+ }
+
+ %token <val> NUM /* Simple double precision number */
+ %token <tptr> VAR FNCT /* Variable and Function */
+ %type <val> exp
+
+ %right '='
+ %left '-' '+'
+ %left '*' '/'
+ %left NEG /* Negation--unary minus */
+ %right '^' /* Exponentiation */
+
+ /* Grammar follows */
+
+ %%
+
+ The above grammar introduces only two new features of the Bison
+language. These features allow semantic values to have various data
+types (*note More Than One Value Type: Multiple Types.).
+
+ The `%union' declaration specifies the entire list of possible types;
+this is instead of defining `YYSTYPE'. The allowable types are now
+double-floats (for `exp' and `NUM') and pointers to entries in the
+symbol table. *Note The Collection of Value Types: Union Decl.
+
+ Since values can now have various types, it is necessary to
+associate a type with each grammar symbol whose semantic value is used.
+These symbols are `NUM', `VAR', `FNCT', and `exp'. Their declarations
+are augmented with information about their data type (placed between
+angle brackets).
+
+ The Bison construct `%type' is used for declaring nonterminal
+symbols, just as `%token' is used for declaring token types. We have
+not used `%type' before because nonterminal symbols are normally
+declared implicitly by the rules that define them. But `exp' must be
+declared explicitly so we can specify its value type. *Note
+Nonterminal Symbols: Type Decl.
+
+
+File: bison.info, Node: Mfcalc Rules, Next: Mfcalc Symtab, Prev: Mfcalc Decl, Up: Multi-function Calc
+
+Grammar Rules for `mfcalc'
+--------------------------
+
+ Here are the grammar rules for the multi-function calculator. Most
+of them are copied directly from `calc'; three rules, those which
+mention `VAR' or `FNCT', are new.
+
+ input: /* empty */
+ | input line
+ ;
+
+ line:
+ '\n'
+ | exp '\n' { printf ("\t%.10g\n", $1); }
+ | error '\n' { yyerrok; }
+ ;
+
+ exp: NUM { $$ = $1; }
+ | VAR { $$ = $1->value.var; }
+ | VAR '=' exp { $$ = $3; $1->value.var = $3; }
+ | FNCT '(' exp ')' { $$ = (*($1->value.fnctptr))($3); }
+ | exp '+' exp { $$ = $1 + $3; }
+ | exp '-' exp { $$ = $1 - $3; }
+ | exp '*' exp { $$ = $1 * $3; }
+ | exp '/' exp { $$ = $1 / $3; }
+ | '-' exp %prec NEG { $$ = -$2; }
+ | exp '^' exp { $$ = pow ($1, $3); }
+ | '(' exp ')' { $$ = $2; }
+ ;
+ /* End of grammar */
+ %%
+
+
+File: bison.info, Node: Mfcalc Symtab, Prev: Mfcalc Rules, Up: Multi-function Calc
+
+The `mfcalc' Symbol Table
+-------------------------
+
+ The multi-function calculator requires a symbol table to keep track
+of the names and meanings of variables and functions. This doesn't
+affect the grammar rules (except for the actions) or the Bison
+declarations, but it requires some additional C functions for support.
+
+ The symbol table itself consists of a linked list of records. Its
+definition, which is kept in the header `calc.h', is as follows. It
+provides for either functions or variables to be placed in the table.
+
+ /* Data type for links in the chain of symbols. */
+ struct symrec
+ {
+ char *name; /* name of symbol */
+ int type; /* type of symbol: either VAR or FNCT */
+ union {
+ double var; /* value of a VAR */
+ double (*fnctptr)(); /* value of a FNCT */
+ } value;
+ struct symrec *next; /* link field */
+ };
+
+ typedef struct symrec symrec;
+
+ /* The symbol table: a chain of `struct symrec'. */
+ extern symrec *sym_table;
+
+ symrec *putsym ();
+ symrec *getsym ();
+
+ The new version of `main' includes a call to `init_table', a
+function that initializes the symbol table. Here it is, and
+`init_table' as well:
+
+ #include <stdio.h>
+
+ main ()
+ {
+ init_table ();
+ yyparse ();
+ }
+
+ yyerror (s) /* Called by yyparse on error */
+ char *s;
+ {
+ printf ("%s\n", s);
+ }
+
+ struct init
+ {
+ char *fname;
+ double (*fnct)();
+ };
+
+ struct init arith_fncts[]
+ = {
+ "sin", sin,
+ "cos", cos,
+ "atan", atan,
+ "ln", log,
+ "exp", exp,
+ "sqrt", sqrt,
+ 0, 0
+ };
+
+ /* The symbol table: a chain of `struct symrec'. */
+ symrec *sym_table = (symrec *)0;
+
+ init_table () /* puts arithmetic functions in table. */
+ {
+ int i;
+ symrec *ptr;
+ for (i = 0; arith_fncts[i].fname != 0; i++)
+ {
+ ptr = putsym (arith_fncts[i].fname, FNCT);
+ ptr->value.fnctptr = arith_fncts[i].fnct;
+ }
+ }
+
+ By simply editing the initialization list and adding the necessary
+include files, you can add additional functions to the calculator.
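+
+ For instance, here is one possible version of the list with `tan'
+added; `tan' is already declared in `math.h', so a single new entry
+before the terminating zeros is all that is needed:
+
+     struct init arith_fncts[]
+       = {
+           "sin", sin,
+           "cos", cos,
+           "tan", tan,          /* the new entry */
+           "atan", atan,
+           "ln", log,
+           "exp", exp,
+           "sqrt", sqrt,
+           0, 0
+         };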
+
+ Two important functions allow look-up and installation of symbols in
+the symbol table. The function `putsym' is passed a name and the type
+(`VAR' or `FNCT') of the object to be installed. The object is linked
+to the front of the list, and a pointer to the object is returned. The
+function `getsym' is passed the name of the symbol to look up. If
+found, a pointer to that symbol is returned; otherwise zero is returned.
+
+ symrec *
+ putsym (sym_name,sym_type)
+ char *sym_name;
+ int sym_type;
+ {
+ symrec *ptr;
+ ptr = (symrec *) malloc (sizeof (symrec));
+ ptr->name = (char *) malloc (strlen (sym_name) + 1);
+ strcpy (ptr->name,sym_name);
+ ptr->type = sym_type;
+ ptr->value.var = 0; /* set value to 0 even if fctn. */
+ ptr->next = (struct symrec *)sym_table;
+ sym_table = ptr;
+ return ptr;
+ }
+
+ symrec *
+ getsym (sym_name)
+ char *sym_name;
+ {
+ symrec *ptr;
+ for (ptr = sym_table; ptr != (symrec *) 0;
+ ptr = (symrec *)ptr->next)
+ if (strcmp (ptr->name,sym_name) == 0)
+ return ptr;
+ return 0;
+ }
+
+ The function `yylex' must now recognize variables, numeric values,
+and the single-character arithmetic operators. Strings of alphanumeric
+characters with a leading nondigit are recognized as either variables or
+functions depending on what the symbol table says about them.
+
+ The string is passed to `getsym' for look up in the symbol table. If
+the name appears in the table, a pointer to its location and its type
+(`VAR' or `FNCT') is returned to `yyparse'. If it is not already in
+the table, then it is installed as a `VAR' using `putsym'. Again, a
+pointer and its type (which must be `VAR') is returned to `yyparse'.
+
+ No change is needed in the handling of numeric values and arithmetic
+operators in `yylex'.
+
+ #include <ctype.h>
+ yylex ()
+ {
+ int c;
+
+ /* Ignore whitespace, get first nonwhite character. */
+ while ((c = getchar ()) == ' ' || c == '\t');
+
+ if (c == EOF)
+ return 0;
+
+ /* Char starts a number => parse the number. */
+ if (c == '.' || isdigit (c))
+ {
+ ungetc (c, stdin);
+ scanf ("%lf", &yylval.val);
+ return NUM;
+ }
+
+ /* Char starts an identifier => read the name. */
+ if (isalpha (c))
+ {
+ symrec *s;
+ static char *symbuf = 0;
+ static int length = 0;
+ int i;
+
+ /* Initially make the buffer long enough
+ for a 40-character symbol name. */
+ if (length == 0)
+ length = 40, symbuf = (char *)malloc (length + 1);
+
+ i = 0;
+ do
+ {
+ /* If buffer is full, make it bigger. */
+ if (i == length)
+ {
+ length *= 2;
+ symbuf = (char *)realloc (symbuf, length + 1);
+ }
+ /* Add this character to the buffer. */
+ symbuf[i++] = c;
+ /* Get another character. */
+ c = getchar ();
+ }
+ while (c != EOF && isalnum (c));
+
+ ungetc (c, stdin);
+ symbuf[i] = '\0';
+
+ s = getsym (symbuf);
+ if (s == 0)
+ s = putsym (symbuf, VAR);
+ yylval.tptr = s;
+ return s->type;
+ }
+
+ /* Any other character is a token by itself. */
+ return c;
+ }
+
+ This program is both powerful and flexible. You may easily add new
+functions, and it is a simple job to modify this code to install
+predefined variables such as `pi' or `e' as well.
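+
+ One possible sketch of that modification (the numeric values below are
+ordinary approximations supplied here, not taken from the code above)
+is to have `init_table' install the variables with `putsym':
+
+     init_table ()     /* puts functions and constants in table. */
+     {
+       int i;
+       symrec *ptr;
+       for (i = 0; arith_fncts[i].fname != 0; i++)
+         {
+           ptr = putsym (arith_fncts[i].fname, FNCT);
+           ptr->value.fnctptr = arith_fncts[i].fnct;
+         }
+       /* Predefined variables get type VAR, so the existing
+          grammar rules handle them with no further changes.  */
+       ptr = putsym ("pi", VAR);
+       ptr->value.var = 3.14159265358979;
+       ptr = putsym ("e", VAR);
+       ptr->value.var = 2.71828182845905;
+     }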
+
+
+File: bison.info, Node: Exercises, Prev: Multi-function Calc, Up: Examples
+
+Exercises
+=========
+
+ 1. Add some new functions from `math.h' to the initialization list.
+
+ 2. Add another array that contains constants and their values. Then
+ modify `init_table' to add these constants to the symbol table.
+ It will be easiest to give the constants type `VAR'.
+
+ 3. Make the program report an error if the user refers to an
+ uninitialized variable in any way except to store a value in it.
+
+
+File: bison.info, Node: Grammar File, Next: Interface, Prev: Examples, Up: Top
+
+Bison Grammar Files
+*******************
+
+ Bison takes as input a context-free grammar specification and
+produces a C-language function that recognizes correct instances of the
+grammar.
+
+ The Bison grammar input file conventionally has a name ending in
+`.y'.
+
+* Menu:
+
+* Grammar Outline:: Overall layout of the grammar file.
+* Symbols:: Terminal and nonterminal symbols.
+* Rules:: How to write grammar rules.
+* Recursion:: Writing recursive rules.
+* Semantics:: Semantic values and actions.
+* Declarations:: All kinds of Bison declarations are described here.
+* Multiple Parsers:: Putting more than one Bison parser in one program.
+
+
+File: bison.info, Node: Grammar Outline, Next: Symbols, Up: Grammar File
+
+Outline of a Bison Grammar
+==========================
+
+ A Bison grammar file has four main sections, shown here with the
+appropriate delimiters:
+
+ %{
+ C DECLARATIONS
+ %}
+
+ BISON DECLARATIONS
+
+ %%
+ GRAMMAR RULES
+ %%
+
+ ADDITIONAL C CODE
+
+ Comments enclosed in `/* ... */' may appear in any of the sections.
+
+* Menu:
+
+* C Declarations:: Syntax and usage of the C declarations section.
+* Bison Declarations:: Syntax and usage of the Bison declarations section.
+* Grammar Rules:: Syntax and usage of the grammar rules section.
+* C Code:: Syntax and usage of the additional C code section.
+
+
+File: bison.info, Node: C Declarations, Next: Bison Declarations, Up: Grammar Outline
+
+The C Declarations Section
+--------------------------
+
+ The C DECLARATIONS section contains macro definitions and
+declarations of functions and variables that are used in the actions in
+the grammar rules. These are copied to the beginning of the parser
+file so that they precede the definition of `yyparse'. You can use
+`#include' to get the declarations from a header file. If you don't
+need any C declarations, you may omit the `%{' and `%}' delimiters that
+bracket this section.
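+
+ For illustration only (a sketch based on the infix calculator example
+earlier in this manual), a C declarations section might look like this:
+
+     %{
+     #define YYSTYPE double   /* data type of semantic values */
+     #include <math.h>        /* for functions used in the actions */
+     #include <stdio.h>       /* for printf in the actions */
+     %}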
+
+
+File: bison.info, Node: Bison Declarations, Next: Grammar Rules, Prev: C Declarations, Up: Grammar Outline
+
+The Bison Declarations Section
+------------------------------
+
+ The BISON DECLARATIONS section contains declarations that define
+terminal and nonterminal symbols, specify precedence, and so on. In
+some simple grammars you may not need any declarations. *Note Bison
+Declarations: Declarations.
+
+
+File: bison.info, Node: Grammar Rules, Next: C Code, Prev: Bison Declarations, Up: Grammar Outline
+
+The Grammar Rules Section
+-------------------------
+
+ The "grammar rules" section contains one or more Bison grammar
+rules, and nothing else. *Note Syntax of Grammar Rules: Rules.
+
+ There must always be at least one grammar rule, and the first `%%'
+(which precedes the grammar rules) may never be omitted even if it is
+the first thing in the file.
+
+
+File: bison.info, Node: C Code, Prev: Grammar Rules, Up: Grammar Outline
+
+The Additional C Code Section
+-----------------------------
+
+ The ADDITIONAL C CODE section is copied verbatim to the end of the
+parser file, just as the C DECLARATIONS section is copied to the
+beginning. This is the most convenient place to put anything that you
+want to have in the parser file but which need not come before the
+definition of `yyparse'. For example, the definitions of `yylex' and
+`yyerror' often go here. *Note Parser C-Language Interface: Interface.
+
+ If the last section is empty, you may omit the `%%' that separates it
+from the grammar rules.
+
+ The Bison parser itself contains many static variables whose names
+start with `yy' and many macros whose names start with `YY'. It is a
+good idea to avoid using any such names (except those documented in this
+manual) in the additional C code section of the grammar file.
+
+
+File: bison.info, Node: Symbols, Next: Rules, Prev: Grammar Outline, Up: Grammar File
+
+Symbols, Terminal and Nonterminal
+=================================
+
+ "Symbols" in Bison grammars represent the grammatical classifications
+of the language.
+
+ A "terminal symbol" (also known as a "token type") represents a
+class of syntactically equivalent tokens. You use the symbol in grammar
+rules to mean that a token in that class is allowed. The symbol is
+represented in the Bison parser by a numeric code, and the `yylex'
+function returns a token type code to indicate what kind of token has
+been read. You don't need to know what the code value is; you can use
+the symbol to stand for it.
+
+ A "nonterminal symbol" stands for a class of syntactically equivalent
+groupings. The symbol name is used in writing grammar rules. By
+convention, it should be all lower case.
+
+ Symbol names can contain letters, digits (not at the beginning),
+underscores and periods. Periods make sense only in nonterminals.
+
+ There are three ways of writing terminal symbols in the grammar:
+
+ * A "named token type" is written with an identifier, like an
+ identifier in C. By convention, it should be all upper case. Each
+ such name must be defined with a Bison declaration such as
+ `%token'. *Note Token Type Names: Token Decl.
+
+ * A "character token type" (or "literal character token") is written
+ in the grammar using the same syntax used in C for character
+ constants; for example, `'+'' is a character token type. A
+ character token type doesn't need to be declared unless you need to
+ specify its semantic value data type (*note Data Types of Semantic
+ Values: Value Type.), associativity, or precedence (*note Operator
+ Precedence: Precedence.).
+
+ By convention, a character token type is used only to represent a
+ token that consists of that particular character. Thus, the token
+ type `'+'' is used to represent the character `+' as a token.
+ Nothing enforces this convention, but if you depart from it, your
+ program will confuse other readers.
+
+ All the usual escape sequences used in character literals in C can
+ be used in Bison as well, but you must not use the null character
+ as a character literal because its ASCII code, zero, is the code
+ `yylex' returns for end-of-input (*note Calling Convention for
+ `yylex': Calling Convention.).
+
+ * A "literal string token" is written like a C string constant; for
+ example, `"<="' is a literal string token. A literal string token
+ doesn't need to be declared unless you need to specify its semantic
+ value data type (*note Value Type::), associativity, or precedence
+ (*note Precedence::).
+
+ You can associate the literal string token with a symbolic name as
+ an alias, using the `%token' declaration (*note Token
+ Declarations: Token Decl.). If you don't do that, the lexical
+ analyzer has to retrieve the token number for the literal string
+ token from the `yytname' table (*note Calling Convention::).
+
+ *WARNING*: literal string tokens do not work in Yacc.
+
+ By convention, a literal string token is used only to represent a
+ token that consists of that particular string. Thus, you should
+ use the token type `"<="' to represent the string `<=' as a token.
+ Bison does not enforce this convention, but if you depart from
+ it, people who read your program will be confused.
+
+ All the escape sequences used in string literals in C can be used
+ in Bison as well. A literal string token must contain two or more
+ characters; for a token containing just one character, use a
+ character token (see above).
+
+ How you choose to write a terminal symbol has no effect on its
+grammatical meaning. That depends only on where it appears in rules and
+on when the parser function returns that symbol.
+
+ The value returned by `yylex' is always one of the terminal symbols
+(or 0 for end-of-input). Whichever way you write the token type in the
+grammar rules, you write it the same way in the definition of `yylex'.
+The numeric code for a character token type is simply the ASCII code for
+the character, so `yylex' can use the identical character constant to
+generate the requisite code. Each named token type becomes a C macro in
+the parser file, so `yylex' can use the name to stand for the code.
+(This is why periods don't make sense in terminal symbols.) *Note
+Calling Convention for `yylex': Calling Convention.
+
+ If `yylex' is defined in a separate file, you need to arrange for the
+token-type macro definitions to be available there. Use the `-d'
+option when you run Bison, so that it will write these macro definitions
+into a separate header file `NAME.tab.h' which you can include in the
+other source files that need it. *Note Invoking Bison: Invocation.
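+
+ As a rough sketch, suppose the grammar file is `calc.y', that it
+declares `%token NUM', that it was run with `bison -d calc.y', and that
+semantic values have the default type `int'; a separate scanner file
+could then begin like this:
+
+     /* scanner.c -- hypothetical stand-alone lexical analyzer */
+     #include <stdio.h>
+     #include <ctype.h>
+     #include "calc.tab.h"    /* token-type macros such as NUM */
+
+     extern int yylval;       /* default `int' semantic values */
+
+     int
+     yylex ()
+     {
+       int c;
+
+       /* Skip blanks and tabs.  */
+       while ((c = getchar ()) == ' ' || c == '\t')
+         ;
+       if (c == EOF)
+         return 0;            /* end-of-input */
+       if (isdigit (c))
+         {
+           ungetc (c, stdin);
+           scanf ("%d", &yylval);
+           return NUM;        /* macro defined in calc.tab.h */
+         }
+       return c;              /* any other character is a token */
+     }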
+
+ The symbol `error' is a terminal symbol reserved for error recovery
+(*note Error Recovery::); you shouldn't use it for any other purpose.
+In particular, `yylex' should never return this value.
+
+
+File: bison.info, Node: Rules, Next: Recursion, Prev: Symbols, Up: Grammar File
+
+Syntax of Grammar Rules
+=======================
+
+ A Bison grammar rule has the following general form:
+
+ RESULT: COMPONENTS...
+ ;
+
+where RESULT is the nonterminal symbol that this rule describes and
+COMPONENTS are various terminal and nonterminal symbols that are put
+together by this rule (*note Symbols::).
+
+ For example,
+
+ exp: exp '+' exp
+ ;
+
+says that two groupings of type `exp', with a `+' token in between, can
+be combined into a larger grouping of type `exp'.
+
+ Whitespace in rules is significant only to separate symbols. You
+can add extra whitespace as you wish.
+
+ Scattered among the components can be ACTIONS that determine the
+semantics of the rule. An action looks like this:
+
+ {C STATEMENTS}
+
+Usually there is only one action and it follows the components. *Note
+Actions::.
+
+ Multiple rules for the same RESULT can be written separately or can
+be joined with the vertical-bar character `|' as follows:
+
+ RESULT: RULE1-COMPONENTS...
+ | RULE2-COMPONENTS...
+ ...
+ ;
+
+They are still considered distinct rules even when joined in this way.
+
+ If COMPONENTS in a rule is empty, it means that RESULT can match the
+empty string. For example, here is how to define a comma-separated
+sequence of zero or more `exp' groupings:
+
+ expseq: /* empty */
+ | expseq1
+ ;
+
+ expseq1: exp
+ | expseq1 ',' exp
+ ;
+
+It is customary to write a comment `/* empty */' in each rule with no
+components.
+
+
+File: bison.info, Node: Recursion, Next: Semantics, Prev: Rules, Up: Grammar File
+
+Recursive Rules
+===============
+
+ A rule is called "recursive" when its RESULT nonterminal appears
+also on its right hand side. Nearly all Bison grammars need to use
+recursion, because that is the only way to define a sequence of any
+number of somethings. Consider this recursive definition of a
+comma-separated sequence of one or more expressions:
+
+ expseq1: exp
+ | expseq1 ',' exp
+ ;
+
+Since the recursive use of `expseq1' is the leftmost symbol in the
+right hand side, we call this "left recursion". By contrast, here the
+same construct is defined using "right recursion":
+
+ expseq1: exp
+ | exp ',' expseq1
+ ;
+
+Any kind of sequence can be defined using either left recursion or
+right recursion, but you should always use left recursion, because it
+can parse a sequence of any number of elements with bounded stack
+space. Right recursion uses up space on the Bison stack in proportion
+to the number of elements in the sequence, because all the elements
+must be shifted onto the stack before the rule can be applied even
+once. *Note The Bison Parser Algorithm: Algorithm, for further
+explanation of this.
+
+ "Indirect" or "mutual" recursion occurs when the result of the rule
+does not appear directly on its right hand side, but does appear in
+rules for other nonterminals which do appear on its right hand side.
+
+ For example:
+
+ expr: primary
+ | primary '+' primary
+ ;
+
+ primary: constant
+ | '(' expr ')'
+ ;
+
+defines two mutually-recursive nonterminals, since each refers to the
+other.
+
+
+File: bison.info, Node: Semantics, Next: Declarations, Prev: Recursion, Up: Grammar File
+
+Defining Language Semantics
+===========================
+
+ The grammar rules for a language determine only the syntax. The
+semantics are determined by the semantic values associated with various
+tokens and groupings, and by the actions taken when various groupings
+are recognized.
+
+ For example, the calculator calculates properly because the value
+associated with each expression is the proper number; it adds properly
+because the action for the grouping `X + Y' is to add the numbers
+associated with X and Y.
+
+* Menu:
+
+* Value Type:: Specifying one data type for all semantic values.
+* Multiple Types:: Specifying several alternative data types.
+* Actions:: An action is the semantic definition of a grammar rule.
+* Action Types:: Specifying data types for actions to operate on.
+* Mid-Rule Actions:: Most actions go at the end of a rule.
+ This says when, why and how to use the exceptional
+ action in the middle of a rule.
+
+
+File: bison.info, Node: Value Type, Next: Multiple Types, Up: Semantics
+
+Data Types of Semantic Values
+-----------------------------
+
+ In a simple program it may be sufficient to use the same data type
+for the semantic values of all language constructs. This was true in
+the RPN and infix calculator examples (*note Reverse Polish Notation
+Calculator: RPN Calc.).
+
+ Bison's default is to use type `int' for all semantic values. To
+specify some other type, define `YYSTYPE' as a macro, like this:
+
+ #define YYSTYPE double
+
+This macro definition must go in the C declarations section of the
+grammar file (*note Outline of a Bison Grammar: Grammar Outline.).
+
+
+File: bison.info, Node: Multiple Types, Next: Actions, Prev: Value Type, Up: Semantics
+
+More Than One Value Type
+------------------------
+
+ In most programs, you will need different data types for different
+kinds of tokens and groupings. For example, a numeric constant may
+need type `int' or `long', while a string constant needs type `char *',
+and an identifier might need a pointer to an entry in the symbol table.
+
+ To use more than one data type for semantic values in one parser,
+Bison requires you to do two things:
+
+ * Specify the entire collection of possible data types, with the
+ `%union' Bison declaration (*note The Collection of Value Types:
+ Union Decl.).
+
+ * Choose one of those types for each symbol (terminal or nonterminal)
+ for which semantic values are used. This is done for tokens with
+ the `%token' Bison declaration (*note Token Type Names: Token
+ Decl.) and for groupings with the `%type' Bison declaration (*note
+ Nonterminal Symbols: Type Decl.).
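+
+ Putting the two together, a grammar might contain declarations like
+these (the type and symbol names here are illustrative only; the
+multi-function calculator above shows the same pattern):
+
+     %union {
+       int   intval;         /* for integer-valued tokens */
+       char *strval;         /* for string-valued tokens */
+     }
+     %token <intval> INT     /* token whose value is an int */
+     %token <strval> STRING  /* token whose value is a char * */
+     %type  <intval> expr    /* nonterminal whose value is an int */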
+
+
+File: bison.info, Node: Actions, Next: Action Types, Prev: Multiple Types, Up: Semantics
+
+Actions
+-------
+
+ An action accompanies a syntactic rule and contains C code to be
+executed each time an instance of that rule is recognized. The task of
+most actions is to compute a semantic value for the grouping built by
+the rule from the semantic values associated with tokens or smaller
+groupings.
+
+ An action consists of C statements surrounded by braces, much like a
+compound statement in C. It can be placed at any position in the rule;
+it is executed at that position. Most rules have just one action at
+the end of the rule, following all the components. Actions in the
+middle of a rule are tricky and used only for special purposes (*note
+Actions in Mid-Rule: Mid-Rule Actions.).
+
+ The C code in an action can refer to the semantic values of the
+components matched by the rule with the construct `$N', which stands for
+the value of the Nth component. The semantic value for the grouping
+being constructed is `$$'. (Bison translates both of these constructs
+into array element references when it copies the actions into the parser
+file.)
+
+ Here is a typical example:
+
+ exp: ...
+ | exp '+' exp
+ { $$ = $1 + $3; }
+
+This rule constructs an `exp' from two smaller `exp' groupings
+connected by a plus-sign token. In the action, `$1' and `$3' refer to
+the semantic values of the two component `exp' groupings, which are the
+first and third symbols on the right hand side of the rule. The sum is
+stored into `$$' so that it becomes the semantic value of the
+addition-expression just recognized by the rule. If there were a
+useful semantic value associated with the `+' token, it could be
+referred to as `$2'.
+
+ If you don't specify an action for a rule, Bison supplies a default:
+`$$ = $1'. Thus, the value of the first symbol in the rule becomes the
+value of the whole rule. Of course, the default rule is valid only if
+the two data types match. There is no meaningful default action for an
+empty rule; every empty rule must have an explicit action unless the
+rule's value does not matter.
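+
+ For instance (an illustrative rule, not drawn from the calculator
+examples), an empty alternative whose value matters needs an action of
+its own:
+
+     optional_semi: /* empty */   { $$ = 0; }
+             | ';'                { $$ = 1; }
+             ;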
+
+ `$N' with N zero or negative is allowed for reference to tokens and
+groupings on the stack _before_ those that match the current rule.
+This is a very risky practice, and to use it reliably you must be
+certain of the context in which the rule is applied. Here is a case in
+which you can use this reliably:
+
+ foo: expr bar '+' expr { ... }
+ | expr bar '-' expr { ... }
+ ;
+
+ bar: /* empty */
+ { previous_expr = $0; }
+ ;
+
+ As long as `bar' is used only in the fashion shown here, `$0' always
+refers to the `expr' which precedes `bar' in the definition of `foo'.
+
+
+File: bison.info, Node: Action Types, Next: Mid-Rule Actions, Prev: Actions, Up: Semantics
+
+Data Types of Values in Actions
+-------------------------------
+
+ If you have chosen a single data type for semantic values, the `$$'
+and `$N' constructs always have that data type.
+
+ If you have used `%union' to specify a variety of data types, then
+you must declare a choice among these types for each terminal or
+nonterminal symbol that can have a semantic value. Then each time you
+use `$$' or `$N', its data type is determined by which symbol it refers
+to in the rule. In this example,
+
+ exp: ...
+ | exp '+' exp
+ { $$ = $1 + $3; }
+
+`$1' and `$3' refer to instances of `exp', so they all have the data
+type declared for the nonterminal symbol `exp'. If `$2' were used, it
+would have the data type declared for the terminal symbol `'+'',
+whatever that might be.
+
+ Alternatively, you can specify the data type when you refer to the
+value, by inserting `<TYPE>' after the `$' at the beginning of the
+reference. For example, if you have defined types as shown here:
+
+ %union {
+ int itype;
+ double dtype;
+ }
+
+then you can write `$<itype>1' to refer to the first subunit of the
+rule as an integer, or `$<dtype>1' to refer to it as a double.
+
diff --git a/tools/bison++/bison.info-3 b/tools/bison++/bison.info-3
new file mode 100644
index 000000000..bf519e312
--- /dev/null
+++ b/tools/bison++/bison.info-3
@@ -0,0 +1,1287 @@
+This is bison.info, produced by makeinfo version 4.1 from bison.texinfo.
+
+START-INFO-DIR-ENTRY
+* bison: (bison). GNU Project parser generator (yacc replacement).
+END-INFO-DIR-ENTRY
+
+ This file documents the Bison parser generator.
+
+ Copyright (C) 1988, 89, 90, 91, 92, 93, 95, 98, 1999 Free Software
+Foundation, Inc.
+
+ Permission is granted to make and distribute verbatim copies of this
+manual provided the copyright notice and this permission notice are
+preserved on all copies.
+
+ Permission is granted to copy and distribute modified versions of
+this manual under the conditions for verbatim copying, provided also
+that the sections entitled "GNU General Public License" and "Conditions
+for Using Bison" are included exactly as in the original, and provided
+that the entire resulting derived work is distributed under the terms
+of a permission notice identical to this one.
+
+ Permission is granted to copy and distribute translations of this
+manual into another language, under the above conditions for modified
+versions, except that the sections entitled "GNU General Public
+License", "Conditions for Using Bison" and this permission notice may be
+included in translations approved by the Free Software Foundation
+instead of in the original English.
+
+
+File: bison.info, Node: Mid-Rule Actions, Prev: Action Types, Up: Semantics
+
+Actions in Mid-Rule
+-------------------
+
+ Occasionally it is useful to put an action in the middle of a rule.
+These actions are written just like usual end-of-rule actions, but they
+are executed before the parser even recognizes the following components.
+
+ A mid-rule action may refer to the components preceding it using
+`$N', but it may not refer to subsequent components because it is run
+before they are parsed.
+
+ The mid-rule action itself counts as one of the components of the
+rule. This makes a difference when there is another action later in
+the same rule (and usually there is another at the end): you have to
+count the actions along with the symbols when working out which number
+N to use in `$N'.
+
+ The mid-rule action can also have a semantic value. The action can
+set its value with an assignment to `$$', and actions later in the rule
+can refer to the value using `$N'. Since there is no symbol to name
+the action, there is no way to declare a data type for the value in
+advance, so you must use the `$<...>' construct to specify a data type
+each time you refer to this value.
+
+ There is no way to set the value of the entire rule with a mid-rule
+action, because assignments to `$$' do not have that effect. The only
+way to set the value for the entire rule is with an ordinary action at
+the end of the rule.
+
+ Here is an example from a hypothetical compiler, handling a `let'
+statement that looks like `let (VARIABLE) STATEMENT' and serves to
+create a variable named VARIABLE temporarily for the duration of
+STATEMENT. To parse this construct, we must put VARIABLE into the
+symbol table while STATEMENT is parsed, then remove it afterward. Here
+is how it is done:
+
+ stmt: LET '(' var ')'
+ { $<context>$ = push_context ();
+ declare_variable ($3); }
+ stmt { $$ = $6;
+ pop_context ($<context>5); }
+
+As soon as `let (VARIABLE)' has been recognized, the first action is
+run. It saves a copy of the current semantic context (the list of
+accessible variables) as its semantic value, using alternative
+`context' in the data-type union. Then it calls `declare_variable' to
+add the new variable to that list. Once the first action is finished,
+the embedded statement `stmt' can be parsed. Note that the mid-rule
+action is component number 5, so the `stmt' is component number 6.
+
+ After the embedded statement is parsed, its semantic value becomes
+the value of the entire `let'-statement. Then the semantic value from
+the earlier action is used to restore the prior list of variables. This
+removes the temporary `let'-variable from the list so that it won't
+appear to exist while the rest of the program is parsed.
+
+ Taking action before a rule is completely recognized often leads to
+conflicts since the parser must commit to a parse in order to execute
+the action. For example, the following two rules, without mid-rule
+actions, can coexist in a working parser because the parser can shift
+the open-brace token and look at what follows before deciding whether
+there is a declaration or not:
+
+ compound: '{' declarations statements '}'
+ | '{' statements '}'
+ ;
+
+But when we add a mid-rule action as follows, the rules become
+nonfunctional:
+
+ compound: { prepare_for_local_variables (); }
+ '{' declarations statements '}'
+ | '{' statements '}'
+ ;
+
+Now the parser is forced to decide whether to run the mid-rule action
+when it has read no farther than the open-brace. In other words, it
+must commit to using one rule or the other, without sufficient
+information to do it correctly. (The open-brace token is what is called
+the "look-ahead" token at this time, since the parser is still deciding
+what to do about it. *Note Look-Ahead Tokens: Look-Ahead.)
+
+ You might think that you could correct the problem by putting
+identical actions into the two rules, like this:
+
+ compound: { prepare_for_local_variables (); }
+ '{' declarations statements '}'
+ | { prepare_for_local_variables (); }
+ '{' statements '}'
+ ;
+
+But this does not help, because Bison does not realize that the two
+actions are identical. (Bison never tries to understand the C code in
+an action.)
+
+ If the grammar is such that a declaration can be distinguished from a
+statement by the first token (which is true in C), then one solution
+which does work is to put the action after the open-brace, like this:
+
+ compound: '{' { prepare_for_local_variables (); }
+ declarations statements '}'
+ | '{' statements '}'
+ ;
+
+Now the first token of the following declaration or statement, which
+would in any case tell Bison which rule to use, can still do so.
+
+ Another solution is to bury the action inside a nonterminal symbol
+which serves as a subroutine:
+
+ subroutine: /* empty */
+ { prepare_for_local_variables (); }
+ ;
+
+ compound: subroutine
+ '{' declarations statements '}'
+ | subroutine
+ '{' statements '}'
+ ;
+
+Now Bison can execute the action in the rule for `subroutine' without
+deciding which rule for `compound' it will eventually use. Note that
+the action is now at the end of its rule. Any mid-rule action can be
+converted to an end-of-rule action in this way, and this is what Bison
+actually does to implement mid-rule actions.
+
+
+File: bison.info, Node: Declarations, Next: Multiple Parsers, Prev: Semantics, Up: Grammar File
+
+Bison Declarations
+==================
+
+ The "Bison declarations" section of a Bison grammar defines the
+symbols used in formulating the grammar and the data types of semantic
+values. *Note Symbols::.
+
+ All token type names (but not single-character literal tokens such as
+`'+'' and `'*'') must be declared. Nonterminal symbols must be
+declared if you need to specify which data type to use for the semantic
+value (*note More Than One Value Type: Multiple Types.).
+
+ The first rule in the file also specifies the start symbol, by
+default. If you want some other symbol to be the start symbol, you
+must declare it explicitly (*note Languages and Context-Free Grammars:
+Language and Grammar.).
+
+* Menu:
+
+* Token Decl:: Declaring terminal symbols.
+* Precedence Decl:: Declaring terminals with precedence and associativity.
+* Union Decl:: Declaring the set of all semantic value types.
+* Type Decl:: Declaring the choice of type for a nonterminal symbol.
+* Expect Decl:: Suppressing warnings about shift/reduce conflicts.
+* Start Decl:: Specifying the start symbol.
+* Pure Decl:: Requesting a reentrant parser.
+* Decl Summary:: Table of all Bison declarations.
+
+
+File: bison.info, Node: Token Decl, Next: Precedence Decl, Up: Declarations
+
+Token Type Names
+----------------
+
+ The basic way to declare a token type name (terminal symbol) is as
+follows:
+
+ %token NAME
+
+ Bison will convert this into a `#define' directive in the parser, so
+that the function `yylex' (if it is in this file) can use the name NAME
+to stand for this token type's code.
+
+ Alternatively, you can use `%left', `%right', or `%nonassoc' instead
+of `%token', if you wish to specify precedence. *Note Operator
+Precedence: Precedence Decl.
+
+ You can explicitly specify the numeric code for a token type by
+appending an integer value in the field immediately following the token
+name:
+
+ %token NUM 300
+
+It is generally best, however, to let Bison choose the numeric codes for
+all token types. Bison will automatically select codes that don't
+conflict with each other or with ASCII characters.
+
+ In the event that the stack type is a union, you must augment the
+`%token' or other token declaration to include the data type
+alternative delimited by angle-brackets (*note More Than One Value
+Type: Multiple Types.).
+
+ For example:
+
+ %union { /* define stack type */
+ double val;
+ symrec *tptr;
+ }
+ %token <val> NUM /* define token NUM and its type */
+
+ You can associate a literal string token with a token type name by
+writing the literal string at the end of a `%token' declaration which
+declares the name. For example:
+
+ %token arrow "=>"
+
+As another example, a grammar for the C language might specify these names with
+equivalent literal string tokens:
+
+ %token <operator> OR "||"
+ %token <operator> LE 134 "<="
+ %left OR "<="
+
+Once you equate the literal string and the token name, you can use them
+interchangeably in further declarations or the grammar rules. The
+`yylex' function can use the token name or the literal string to obtain
+the token type code number (*note Calling Convention::).
+
+
+File: bison.info, Node: Precedence Decl, Next: Union Decl, Prev: Token Decl, Up: Declarations
+
+Operator Precedence
+-------------------
+
+ Use the `%left', `%right' or `%nonassoc' declaration to declare a
+token and specify its precedence and associativity, all at once. These
+are called "precedence declarations". *Note Operator Precedence:
+Precedence, for general information on operator precedence.
+
+ The syntax of a precedence declaration is the same as that of
+`%token': either
+
+ %left SYMBOLS...
+
+or
+
+ %left <TYPE> SYMBOLS...
+
+ And indeed any of these declarations serves the purposes of `%token'.
+But in addition, they specify the associativity and relative precedence
+for all the SYMBOLS:
+
+ * The associativity of an operator OP determines how repeated uses
+ of the operator nest: whether `X OP Y OP Z' is parsed by grouping
+ X with Y first or by grouping Y with Z first. `%left' specifies
+ left-associativity (grouping X with Y first) and `%right'
+ specifies right-associativity (grouping Y with Z first).
+ `%nonassoc' specifies no associativity, which means that `X OP Y
+ OP Z' is considered a syntax error.
+
+ * The precedence of an operator determines how it nests with other
+ operators. All the tokens declared in a single precedence
+ declaration have equal precedence and nest together according to
+ their associativity. When two tokens declared in different
+ precedence declarations associate, the one declared later has the
+ higher precedence and is grouped first.
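+
+ As an illustration (a sketch; these particular tokens are not tied to
+any one example in this manual), the following declarations make `<'
+nonassociative and give `*' and `/' higher precedence than `+' and `-':
+
+     %nonassoc '<'     /* so `a < b < c' is a syntax error */
+     %left '+' '-'
+     %left '*' '/'     /* declared later, hence higher precedence */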
+
+
+File: bison.info, Node: Union Decl, Next: Type Decl, Prev: Precedence Decl, Up: Declarations
+
+The Collection of Value Types
+-----------------------------
+
+ The `%union' declaration specifies the entire collection of possible
+data types for semantic values. The keyword `%union' is followed by a
+pair of braces containing the same thing that goes inside a `union' in
+C.
+
+ For example:
+
+ %union {
+ double val;
+ symrec *tptr;
+ }
+
+This says that the two alternative types are `double' and `symrec *'.
+They are given names `val' and `tptr'; these names are used in the
+`%token' and `%type' declarations to pick one of the types for a
+terminal or nonterminal symbol (*note Nonterminal Symbols: Type Decl.).
+
+ Note that, unlike making a `union' declaration in C, you do not write
+a semicolon after the closing brace.
+
+
+File: bison.info, Node: Type Decl, Next: Expect Decl, Prev: Union Decl, Up: Declarations
+
+Nonterminal Symbols
+-------------------
+
+When you use `%union' to specify multiple value types, you must declare
+the value type of each nonterminal symbol for which values are used.
+This is done with a `%type' declaration, like this:
+
+ %type <TYPE> NONTERMINAL...
+
+Here NONTERMINAL is the name of a nonterminal symbol, and TYPE is the
+name given in the `%union' to the alternative that you want (*note The
+Collection of Value Types: Union Decl.). You can give any number of
+nonterminal symbols in the same `%type' declaration, if they have the
+same value type. Use spaces to separate the symbol names.
+
+ You can also declare the value type of a terminal symbol. To do
+this, use the same `<TYPE>' construction in a declaration for the
+terminal symbol. All kinds of token declarations allow `<TYPE>'.
+
+
+File: bison.info, Node: Expect Decl, Next: Start Decl, Prev: Type Decl, Up: Declarations
+
+Suppressing Conflict Warnings
+-----------------------------
+
+ Bison normally warns if there are any conflicts in the grammar
+(*note Shift/Reduce Conflicts: Shift/Reduce.), but most real grammars
+have harmless shift/reduce conflicts which are resolved in a
+predictable way and would be difficult to eliminate. It is desirable
+to suppress the warning about these conflicts unless the number of
+conflicts changes. You can do this with the `%expect' declaration.
+
+ The declaration looks like this:
+
+ %expect N
+
+ Here N is a decimal integer. The declaration says there should be no
+warning if there are N shift/reduce conflicts and no reduce/reduce
+conflicts. The usual warning is given if there are either more or fewer
+conflicts, or if there are any reduce/reduce conflicts.
+
+ In general, using `%expect' involves these steps:
+
+ * Compile your grammar without `%expect'. Use the `-v' option to
+ get a verbose list of where the conflicts occur. Bison will also
+ print the number of conflicts.
+
+ * Check each of the conflicts to make sure that Bison's default
+ resolution is what you really want. If not, rewrite the grammar
+ and go back to the beginning.
+
+ * Add an `%expect' declaration, copying the number N from the number
+ which Bison printed.
+
+ Now Bison will stop annoying you about the conflicts you have
+checked, but it will warn you again if changes in the grammar result in
+additional conflicts.
+
+
+File: bison.info, Node: Start Decl, Next: Pure Decl, Prev: Expect Decl, Up: Declarations
+
+The Start-Symbol
+----------------
+
+ Bison assumes by default that the start symbol for the grammar is
+the first nonterminal specified in the grammar specification section.
+The programmer may override this restriction with the `%start'
+declaration as follows:
+
+ %start SYMBOL
+
+
+File: bison.info, Node: Pure Decl, Next: Decl Summary, Prev: Start Decl, Up: Declarations
+
+A Pure (Reentrant) Parser
+-------------------------
+
+ A "reentrant" program is one which does not alter in the course of
+execution; in other words, it consists entirely of "pure" (read-only)
+code. Reentrancy is important whenever asynchronous execution is
+possible; for example, a nonreentrant program may not be safe to call
+from a signal handler. In systems with multiple threads of control, a
+nonreentrant program must be called only within interlocks.
+
+ Normally, Bison generates a parser which is not reentrant. This is
+suitable for most uses, and it permits compatibility with YACC. (The
+standard YACC interfaces are inherently nonreentrant, because they use
+statically allocated variables for communication with `yylex',
+including `yylval' and `yylloc'.)
+
+ Alternatively, you can generate a pure, reentrant parser. The Bison
+declaration `%pure_parser' says that you want the parser to be
+reentrant. It looks like this:
+
+ %pure_parser
+
+ The result is that the communication variables `yylval' and `yylloc'
+become local variables in `yyparse', and a different calling convention
+is used for the lexical analyzer function `yylex'. *Note Calling
+Conventions for Pure Parsers: Pure Calling, for the details of this.
+The variable `yynerrs' also becomes local in `yyparse' (*note The Error
+Reporting Function `yyerror': Error Reporting.). The convention for
+calling `yyparse' itself is unchanged.
+
+ Whether the parser is pure has nothing to do with the grammar rules.
+You can generate either a pure parser or a nonreentrant parser from any
+valid grammar.
+
+
+File: bison.info, Node: Decl Summary, Prev: Pure Decl, Up: Declarations
+
+Bison Declaration Summary
+-------------------------
+
+ Here is a summary of all Bison declarations:
+
+`%union'
+ Declare the collection of data types that semantic values may have
+ (*note The Collection of Value Types: Union Decl.).
+
+`%token'
+ Declare a terminal symbol (token type name) with no precedence or
+ associativity specified (*note Token Type Names: Token Decl.).
+
+`%right'
+ Declare a terminal symbol (token type name) that is
+ right-associative (*note Operator Precedence: Precedence Decl.).
+
+`%left'
+ Declare a terminal symbol (token type name) that is
+ left-associative (*note Operator Precedence: Precedence Decl.).
+
+`%nonassoc'
+ Declare a terminal symbol (token type name) that is nonassociative
+ (using it in a way that would be associative is a syntax error)
+ (*note Operator Precedence: Precedence Decl.).
+
+`%type'
+ Declare the type of semantic values for a nonterminal symbol
+ (*note Nonterminal Symbols: Type Decl.).
+
+`%start'
+ Specify the grammar's start symbol (*note The Start-Symbol: Start
+ Decl.).
+
+`%expect'
+ Declare the expected number of shift-reduce conflicts (*note
+ Suppressing Conflict Warnings: Expect Decl.).
+
+`%pure_parser'
+ Request a pure (reentrant) parser program (*note A Pure
+ (Reentrant) Parser: Pure Decl.).
+
+`%no_lines'
+ Don't generate any `#line' preprocessor commands in the parser
+ file. Ordinarily Bison writes these commands in the parser file
+ so that the C compiler and debuggers will associate errors and
+ object code with your source file (the grammar file). This
+ directive causes them to associate errors with the parser file,
+ treating it as an independent source file in its own right.
+
+`%raw'
+ The output file `NAME.h' normally defines the tokens with
+ Yacc-compatible token numbers. If this option is specified, the
+ internal Bison numbers are used instead. (Yacc-compatible numbers
+ start at 257 except for single character tokens; Bison assigns
+ token numbers sequentially for all tokens starting at 3.)
+
+`%token_table'
+ Generate an array of token names in the parser file. The name of
+ the array is `yytname'; `yytname[I]' is the name of the token
+ whose internal Bison token code number is I. The first three
+ elements of `yytname' are always `"$"', `"error"', and
+ `"$illegal"'; after these come the symbols defined in the grammar
+ file.
+
+ For single-character literal tokens and literal string tokens, the
+ name in the table includes the single-quote or double-quote
+ characters: for example, `"'+'"' is a single-character literal and
+ `"\"<=\""' is a literal string token. All the characters of the
+ literal string token appear verbatim in the string found in the
+ table; even double-quote characters are not escaped. For example,
+ if the token consists of three characters `*"*', its string in
+ `yytname' contains `"*"*"'. (In C, that would be written as
+ `"\"*\"*\""').
+
+ When you specify `%token_table', Bison also generates macro
+ definitions for macros `YYNTOKENS', `YYNNTS', and `YYNRULES', and
+ `YYNSTATES':
+
+ `YYNTOKENS'
+ The highest token number, plus one.
+
+ `YYNNTS'
+ The number of non-terminal symbols.
+
+ `YYNRULES'
+ The number of grammar rules.
+
+ `YYNSTATES'
+ The number of parser states (*note Parser States::).
+
+
+File: bison.info, Node: Multiple Parsers, Prev: Declarations, Up: Grammar File
+
+Multiple Parsers in the Same Program
+====================================
+
+ Most programs that use Bison parse only one language and therefore
+contain only one Bison parser. But what if you want to parse more than
+one language with the same program? Then you need to avoid a name
+conflict between different definitions of `yyparse', `yylval', and so
+on.
+
+ The easy way to do this is to use the option `-p PREFIX' (*note
+Invoking Bison: Invocation.). This renames the interface functions and
+variables of the Bison parser to start with PREFIX instead of `yy'.
+You can use this to give each parser distinct names that do not
+conflict.
+
+ The precise list of symbols renamed is `yyparse', `yylex',
+`yyerror', `yynerrs', `yylval', `yychar' and `yydebug'. For example,
+if you use `-p c', the names become `cparse', `clex', and so on.
+
+ *None of the other variables and macros associated with Bison are
+renamed.* These others are not global; there is no conflict if the same
+name is used in different parsers. For example, `YYSTYPE' is not
+renamed, but defining this in different ways in different parsers causes
+no trouble (*note Data Types of Semantic Values: Value Type.).
+
+ The `-p' option works by adding macro definitions to the beginning
+of the parser source file, defining `yyparse' as `PREFIXparse', and so
+on. This effectively substitutes one name for the other in the entire
+parser file.
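+
+ A minimal sketch of combining two such parsers (the prefixes `c' and
+`m' and the grammar file names are hypothetical):
+
+     /* calc.y was processed with `bison -p c' and mini.y with
+        `bison -p m', so each parser has its own entry point.  */
+     int cparse ();
+     int mparse ();
+
+     int
+     main ()
+     {
+       if (cparse () != 0)    /* parse the first language */
+         return 1;
+       return mparse ();      /* then parse the second */
+     }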
+
+
+File: bison.info, Node: Interface, Next: Algorithm, Prev: Grammar File, Up: Top
+
+Parser C-Language Interface
+***************************
+
+ The Bison parser is actually a C function named `yyparse'. Here we
+describe the interface conventions of `yyparse' and the other functions
+that it needs to use.
+
+ Keep in mind that the parser uses many C identifiers starting with
+`yy' and `YY' for internal purposes. If you use such an identifier
+(aside from those in this manual) in an action or in additional C code
+in the grammar file, you are likely to run into trouble.
+
+* Menu:
+
+* Parser Function:: How to call `yyparse' and what it returns.
+* Lexical:: You must supply a function `yylex'
+ which reads tokens.
+* Error Reporting:: You must supply a function `yyerror'.
+* Action Features:: Special features for use in actions.
+
+
+File: bison.info, Node: Parser Function, Next: Lexical, Up: Interface
+
+The Parser Function `yyparse'
+=============================
+
+ You call the function `yyparse' to cause parsing to occur. This
+function reads tokens, executes actions, and ultimately returns when it
+encounters end-of-input or an unrecoverable syntax error. You can also
+write an action which directs `yyparse' to return immediately without
+reading further.
+
+ The value returned by `yyparse' is 0 if parsing was successful
+(return is due to end-of-input).
+
+ The value is 1 if parsing failed (return is due to a syntax error).
+
+ In an action, you can cause immediate return from `yyparse' by using
+these macros:
+
+`YYACCEPT'
+ Return immediately with value 0 (to report success).
+
+`YYABORT'
+ Return immediately with value 1 (to report failure).
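+
+ For example, a `main' function might report the outcome this way (the
+messages themselves are only illustrative):
+
+     #include <stdio.h>
+
+     int yyparse ();
+
+     int
+     main ()
+     {
+       if (yyparse () == 0)
+         printf ("parse succeeded\n");  /* end-of-input reached */
+       else
+         printf ("parse failed\n");     /* syntax error */
+       return 0;
+     }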
+
+
+File: bison.info, Node: Lexical, Next: Error Reporting, Prev: Parser Function, Up: Interface
+
+The Lexical Analyzer Function `yylex'
+=====================================
+
+ The "lexical analyzer" function, `yylex', recognizes tokens from the
+input stream and returns them to the parser. Bison does not create
+this function automatically; you must write it so that `yyparse' can
+call it. The function is sometimes referred to as a lexical scanner.
+
+ In simple programs, `yylex' is often defined at the end of the Bison
+grammar file. If `yylex' is defined in a separate source file, you
+need to arrange for the token-type macro definitions to be available
+there. To do this, use the `-d' option when you run Bison, so that it
+will write these macro definitions into a separate header file
+`NAME.tab.h' which you can include in the other source files that need
+it. *Note Invoking Bison: Invocation.
+
+* Menu:
+
+* Calling Convention:: How `yyparse' calls `yylex'.
+* Token Values:: How `yylex' must return the semantic value
+ of the token it has read.
+* Token Positions:: How `yylex' must return the text position
+ (line number, etc.) of the token, if the
+ actions want that.
+* Pure Calling:: How the calling convention differs
+ in a pure parser (*note A Pure (Reentrant) Parser: Pure Decl.).
+
+
+File: bison.info, Node: Calling Convention, Next: Token Values, Up: Lexical
+
+Calling Convention for `yylex'
+------------------------------
+
+ The value that `yylex' returns must be the numeric code for the type
+of token it has just found, or 0 for end-of-input.
+
+ When a token is referred to in the grammar rules by a name, that name
+in the parser file becomes a C macro whose definition is the proper
+numeric code for that token type. So `yylex' can use the name to
+indicate that type. *Note Symbols::.
+
+ When a token is referred to in the grammar rules by a character
+literal, the numeric code for that character is also the code for the
+token type. So `yylex' can simply return that character code. The
+null character must not be used this way, because its code is zero and
+that is what signifies end-of-input.
+
+ Here is an example showing these things:
+
+ yylex ()
+ {
+ ...
+ if (c == EOF) /* Detect end of file. */
+ return 0;
+ ...
+ if (c == '+' || c == '-')
+ return c; /* Assume token type for `+' is '+'. */
+ ...
+ return INT; /* Return the type of the token. */
+ ...
+ }
+
+This interface has been designed so that the output from the `lex'
+utility can be used without change as the definition of `yylex'.
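+
+   For example, a `lex' specification whose rules behave like the
+`yylex' sketched above might contain the following (assuming the token
+type macro `INT' is visible, for instance through the header made by
+the `-d' option):
+
+     %%
+     [0-9]+     { yylval = atoi (yytext); return INT; }
+     [-+]       { return yytext[0]; }
+     [ \t\n]    ;    /* discard white space */
+
+At end of input, `lex' returns 0, which is exactly what `yyparse'
+expects.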
+
+ If the grammar uses literal string tokens, there are two ways that
+`yylex' can determine the token type codes for them:
+
+ * If the grammar defines symbolic token names as aliases for the
+ literal string tokens, `yylex' can use these symbolic names like
+ all others. In this case, the use of the literal string tokens in
+ the grammar file has no effect on `yylex'.
+
+ * `yylex' can find the multi-character token in the `yytname' table.
+ The index of the token in the table is the token type's code.
+ The name of a multi-character token is recorded in `yytname' with a
+ double-quote, the token's characters, and another double-quote.
+ The token's characters are not escaped in any way; they appear
+ verbatim in the contents of the string in the table.
+
+ Here's code for looking up a token in `yytname', assuming that the
+ characters of the token are stored in `token_buffer'.
+
+ for (i = 0; i < YYNTOKENS; i++)
+ {
+ if (yytname[i] != 0
+ && yytname[i][0] == '"'
+            && ! strncmp (yytname[i] + 1, token_buffer,
+                          strlen (token_buffer))
+ && yytname[i][strlen (token_buffer) + 1] == '"'
+ && yytname[i][strlen (token_buffer) + 2] == 0)
+ break;
+ }
+
+ The `yytname' table is generated only if you use the
+ `%token_table' declaration. *Note Decl Summary::.
+
+
+File: bison.info, Node: Token Values, Next: Token Positions, Prev: Calling Convention, Up: Lexical
+
+Semantic Values of Tokens
+-------------------------
+
+ In an ordinary (nonreentrant) parser, the semantic value of the
+token must be stored into the global variable `yylval'. When you are
+using just one data type for semantic values, `yylval' has that type.
+Thus, if the type is `int' (the default), you might write this in
+`yylex':
+
+ ...
+ yylval = value; /* Put value onto Bison stack. */
+ return INT; /* Return the type of the token. */
+ ...
+
+ When you are using multiple data types, `yylval''s type is a union
+made from the `%union' declaration (*note The Collection of Value
+Types: Union Decl.). So when you store a token's value, you must use
+the proper member of the union. If the `%union' declaration looks like
+this:
+
+ %union {
+ int intval;
+ double val;
+ symrec *tptr;
+ }
+
+then the code in `yylex' might look like this:
+
+ ...
+ yylval.intval = value; /* Put value onto Bison stack. */
+ return INT; /* Return the type of the token. */
+ ...
+
+
+File: bison.info, Node: Token Positions, Next: Pure Calling, Prev: Token Values, Up: Lexical
+
+Textual Positions of Tokens
+---------------------------
+
+ If you are using the `@N'-feature (*note Special Features for Use in
+Actions: Action Features.) in actions to keep track of the textual
+locations of tokens and groupings, then you must provide this
+information in `yylex'. The function `yyparse' expects to find the
+textual location of a token just parsed in the global variable
+`yylloc'. So `yylex' must store the proper data in that variable. The
+value of `yylloc' is a structure and you need only initialize the
+members that are going to be used by the actions. The four members are
+called `first_line', `first_column', `last_line' and `last_column'.
+Note that the use of this feature makes the parser noticeably slower.
+
+ The data type of `yylloc' has the name `YYLTYPE'.
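+
+   For example, `yylex' might fill in the members like this (here
+`lineno', `column' and `token_length' stand for bookkeeping that the
+scanner itself must maintain; they are not provided by Bison):
+
+     ...
+     yylloc.first_line = lineno;
+     yylloc.last_line = lineno;
+     yylloc.first_column = column;
+     yylloc.last_column = column + token_length - 1;
+     yylval = value;         /* Put value onto Bison stack.  */
+     return INT;             /* Return the type of the token.  */
+     ...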
+
+
+File: bison.info, Node: Pure Calling, Prev: Token Positions, Up: Lexical
+
+Calling Conventions for Pure Parsers
+------------------------------------
+
+ When you use the Bison declaration `%pure_parser' to request a pure,
+reentrant parser, the global communication variables `yylval' and
+`yylloc' cannot be used. (*Note A Pure (Reentrant) Parser: Pure Decl.)
+In such parsers the two global variables are replaced by pointers
+passed as arguments to `yylex'. You must declare them as shown here,
+and pass the information back by storing it through those pointers.
+
+ yylex (lvalp, llocp)
+ YYSTYPE *lvalp;
+ YYLTYPE *llocp;
+ {
+ ...
+ *lvalp = value; /* Put value onto Bison stack. */
+ return INT; /* Return the type of the token. */
+ ...
+ }
+
+ If the grammar file does not use the `@' constructs to refer to
+textual positions, then the type `YYLTYPE' will not be defined. In
+this case, omit the second argument; `yylex' will be called with only
+one argument.
+
+ If you use a reentrant parser, you can optionally pass additional
+parameter information to it in a reentrant way. To do so, define the
+macro `YYPARSE_PARAM' as a variable name. This modifies the `yyparse'
+function to accept one argument, of type `void *', with that name.
+
+ When you call `yyparse', pass the address of an object, casting the
+address to `void *'. The grammar actions can refer to the contents of
+the object by casting the pointer value back to its proper type and
+then dereferencing it. Here's an example. Write this in the parser:
+
+ %{
+ struct parser_control
+ {
+ int nastiness;
+ int randomness;
+ };
+
+ #define YYPARSE_PARAM parm
+ %}
+
+Then call the parser like this:
+
+ struct parser_control
+ {
+ int nastiness;
+ int randomness;
+ };
+
+ ...
+
+ {
+ struct parser_control foo;
+ ... /* Store proper data in `foo'. */
+ value = yyparse ((void *) &foo);
+ ...
+ }
+
+In the grammar actions, use expressions like this to refer to the data:
+
+ ((struct parser_control *) parm)->randomness
+
+ If you wish to pass the additional parameter data to `yylex', define
+the macro `YYLEX_PARAM' just like `YYPARSE_PARAM', as shown here:
+
+ %{
+ struct parser_control
+ {
+ int nastiness;
+ int randomness;
+ };
+
+ #define YYPARSE_PARAM parm
+ #define YYLEX_PARAM parm
+ %}
+
+ You should then define `yylex' to accept one additional
+argument--the value of `parm'. (This makes either two or three
+arguments in total, depending on whether an argument of type `YYLTYPE'
+is passed.) You can declare the argument as a pointer to the proper
+object type, or you can declare it as `void *' and access the contents
+as shown above.
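+
+   For instance, continuing the `parser_control' example, and assuming
+the grammar does not use the `@' constructs (so that no `YYLTYPE'
+argument is passed), a matching definition of `yylex' might begin like
+this (the local variable `ctl' is merely illustrative):
+
+     yylex (lvalp, parm)
+          YYSTYPE *lvalp;
+          void *parm;
+     {
+       struct parser_control *ctl = (struct parser_control *) parm;
+       ...
+       *lvalp = value;       /* Put value onto Bison stack.  */
+       return INT;           /* Return the type of the token.  */
+       ...
+     }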
+
+ You can use `%pure_parser' to request a reentrant parser without
+also using `YYPARSE_PARAM'. Then you should call `yyparse' with no
+arguments, as usual.
+
+
+File: bison.info, Node: Error Reporting, Next: Action Features, Prev: Lexical, Up: Interface
+
+The Error Reporting Function `yyerror'
+======================================
+
+ The Bison parser detects a "parse error" or "syntax error" whenever
+it reads a token which cannot satisfy any syntax rule.  An action in the
+grammar can also explicitly proclaim an error, using the macro
+`YYERROR' (*note Special Features for Use in Actions: Action Features.).
+
+ The Bison parser expects to report the error by calling an error
+reporting function named `yyerror', which you must supply. It is
+called by `yyparse' whenever a syntax error is found, and it receives
+one argument, which is a string describing the error.  For a parse
+error, the string is normally `"parse error"'.
+
+ If you define the macro `YYERROR_VERBOSE' in the Bison declarations
+section (*note The Bison Declarations Section: Bison Declarations.),
+then Bison provides a more verbose and specific error message string
+instead of just plain `"parse error"'. It doesn't matter what
+definition you use for `YYERROR_VERBOSE', just whether you define it.
+
+ The parser can detect one other kind of error: stack overflow. This
+happens when the input contains constructions that are very deeply
+nested. It isn't likely you will encounter this, since the Bison
+parser extends its stack automatically up to a very large limit. But
+if overflow happens, `yyparse' calls `yyerror' in the usual fashion,
+except that the argument string is `"parser stack overflow"'.
+
+ The following definition suffices in simple programs:
+
+ yyerror (s)
+ char *s;
+ {
+ fprintf (stderr, "%s\n", s);
+ }
+
+ After `yyerror' returns to `yyparse', the latter will attempt error
+recovery if you have written suitable error recovery grammar rules
+(*note Error Recovery::). If recovery is impossible, `yyparse' will
+immediately return 1.
+
+ The variable `yynerrs' contains the number of syntax errors
+encountered so far. Normally this variable is global; but if you
+request a pure parser (*note A Pure (Reentrant) Parser: Pure Decl.)
+then it is a local variable which only the actions can access.
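+
+   For example, code in the additional C code section of the grammar
+file (where `yynerrs' is visible) might report the total like this:
+
+     if (yyparse () == 0 && yynerrs > 0)
+       fprintf (stderr, "input accepted, %d error(s) recovered\n",
+                yynerrs);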
+
+
+File: bison.info, Node: Action Features, Prev: Error Reporting, Up: Interface
+
+Special Features for Use in Actions
+===================================
+
+ Here is a table of Bison constructs, variables and macros that are
+useful in actions.
+
+`$$'
+ Acts like a variable that contains the semantic value for the
+ grouping made by the current rule. *Note Actions::.
+
+`$N'
+ Acts like a variable that contains the semantic value for the Nth
+ component of the current rule. *Note Actions::.
+
+`$<TYPEALT>$'
+ Like `$$' but specifies alternative TYPEALT in the union specified
+ by the `%union' declaration. *Note Data Types of Values in
+ Actions: Action Types.
+
+`$<TYPEALT>N'
+ Like `$N' but specifies alternative TYPEALT in the union specified
+ by the `%union' declaration. *Note Data Types of Values in
+ Actions: Action Types.
+
+`YYABORT;'
+ Return immediately from `yyparse', indicating failure. *Note The
+ Parser Function `yyparse': Parser Function.
+
+`YYACCEPT;'
+ Return immediately from `yyparse', indicating success. *Note The
+ Parser Function `yyparse': Parser Function.
+
+`YYBACKUP (TOKEN, VALUE);'
+ Unshift a token. This macro is allowed only for rules that reduce
+ a single value, and only when there is no look-ahead token. It
+ installs a look-ahead token with token type TOKEN and semantic
+ value VALUE; then it discards the value that was going to be
+ reduced by this rule.
+
+ If the macro is used when it is not valid, such as when there is a
+ look-ahead token already, then it reports a syntax error with a
+ message `cannot back up' and performs ordinary error recovery.
+
+ In either case, the rest of the action is not executed.
+
+`YYEMPTY'
+ Value stored in `yychar' when there is no look-ahead token.
+
+`YYERROR;'
+ Cause an immediate syntax error. This statement initiates error
+ recovery just as if the parser itself had detected an error;
+ however, it does not call `yyerror', and does not print any
+ message. If you want to print an error message, call `yyerror'
+ explicitly before the `YYERROR;' statement. *Note Error
+ Recovery::.
+
+`YYRECOVERING'
+ This macro stands for an expression that has the value 1 when the
+ parser is recovering from a syntax error, and 0 the rest of the
+ time. *Note Error Recovery::.
+
+`yychar'
+ Variable containing the current look-ahead token. (In a pure
+ parser, this is actually a local variable within `yyparse'.) When
+ there is no look-ahead token, the value `YYEMPTY' is stored in the
+ variable. *Note Look-Ahead Tokens: Look-Ahead.
+
+`yyclearin;'
+ Discard the current look-ahead token. This is useful primarily in
+ error rules. *Note Error Recovery::.
+
+`yyerrok;'
+ Resume generating error messages immediately for subsequent syntax
+ errors. This is useful primarily in error rules. *Note Error
+ Recovery::.
+
+`@N'
+ Acts like a structure variable containing information on the line
+ numbers and column numbers of the Nth component of the current
+ rule. The structure has four members, like this:
+
+ struct {
+ int first_line, last_line;
+ int first_column, last_column;
+ };
+
+ Thus, to get the starting line number of the third component, you
+ would use `@3.first_line'.
+
+ In order for the members of this structure to contain valid
+ information, you must make `yylex' supply this information about
+ each token. If you need only certain members, then `yylex' need
+ only fill in those members.
+
+ The use of this feature makes the parser noticeably slower.
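+
+   For example, an action might combine several of these features to
+report a problem at the position of one of its components.  The
+following fragment is only illustrative; it assumes the default
+integer semantic values and a `yylex' that fills in the textual
+positions:
+
+     exp: exp '/' exp
+            { if ($3 == 0)
+                {
+                  printf ("Division by zero, line %d.\n",
+                          @3.first_line);
+                  YYERROR;   /* Initiate error recovery.  */
+                }
+              else
+                $$ = $1 / $3; }
+        ;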
+
+
+File: bison.info, Node: Algorithm, Next: Error Recovery, Prev: Interface, Up: Top
+
+The Bison Parser Algorithm
+**************************
+
+ As Bison reads tokens, it pushes them onto a stack along with their
+semantic values. The stack is called the "parser stack". Pushing a
+token is traditionally called "shifting".
+
+ For example, suppose the infix calculator has read `1 + 5 *', with a
+`3' to come. The stack will have four elements, one for each token
+that was shifted.
+
+ But the stack does not always have an element for each token read.
+When the last N tokens and groupings shifted match the components of a
+grammar rule, they can be combined according to that rule. This is
+called "reduction". Those tokens and groupings are replaced on the
+stack by a single grouping whose symbol is the result (left hand side)
+of that rule. Running the rule's action is part of the process of
+reduction, because this is what computes the semantic value of the
+resulting grouping.
+
+ For example, if the infix calculator's parser stack contains this:
+
+ 1 + 5 * 3
+
+and the next input token is a newline character, then the last three
+elements can be reduced to 15 via the rule:
+
+ expr: expr '*' expr;
+
+Then the stack contains just these three elements:
+
+ 1 + 15
+
+At this point, another reduction can be made, resulting in the single
+value 16. Then the newline token can be shifted.
+
+ The parser tries, by shifts and reductions, to reduce the entire
+input down to a single grouping whose symbol is the grammar's
+start-symbol (*note Languages and Context-Free Grammars: Language and
+Grammar.).
+
+ This kind of parser is known in the literature as a bottom-up parser.
+
+* Menu:
+
+* Look-Ahead:: Parser looks one token ahead when deciding what to do.
+* Shift/Reduce:: Conflicts: when either shifting or reduction is valid.
+* Precedence:: Operator precedence works by resolving conflicts.
+* Contextual Precedence:: When an operator's precedence depends on context.
+* Parser States:: The parser is a finite-state-machine with stack.
+* Reduce/Reduce:: When two rules are applicable in the same situation.
+* Mystery Conflicts:: Reduce/reduce conflicts that look unjustified.
+* Stack Overflow:: What happens when stack gets full. How to avoid it.
+
+
+File: bison.info, Node: Look-Ahead, Next: Shift/Reduce, Up: Algorithm
+
+Look-Ahead Tokens
+=================
+
+ The Bison parser does _not_ always reduce immediately as soon as the
+last N tokens and groupings match a rule. This is because such a
+simple strategy is inadequate to handle most languages. Instead, when a
+reduction is possible, the parser sometimes "looks ahead" at the next
+token in order to decide what to do.
+
+ When a token is read, it is not immediately shifted; first it
+becomes the "look-ahead token", which is not on the stack. Now the
+parser can perform one or more reductions of tokens and groupings on
+the stack, while the look-ahead token remains off to the side. When no
+more reductions should take place, the look-ahead token is shifted onto
+the stack. This does not mean that all possible reductions have been
+done; depending on the token type of the look-ahead token, some rules
+may choose to delay their application.
+
+ Here is a simple case where look-ahead is needed. These three rules
+define expressions which contain binary addition operators and postfix
+unary factorial operators (`!'), and allow parentheses for grouping.
+
+ expr: term '+' expr
+ | term
+ ;
+
+ term: '(' expr ')'
+ | term '!'
+ | NUMBER
+ ;
+
+ Suppose that the tokens `1 + 2' have been read and shifted; what
+should be done? If the following token is `)', then the first three
+tokens must be reduced to form an `expr'. This is the only valid
+course, because shifting the `)' would produce a sequence of symbols
+`term ')'', and no rule allows this.
+
+ If the following token is `!', then it must be shifted immediately so
+that `2 !' can be reduced to make a `term'. If instead the parser were
+to reduce before shifting, `1 + 2' would become an `expr'. It would
+then be impossible to shift the `!' because doing so would produce on
+the stack the sequence of symbols `expr '!''. No rule allows that
+sequence.
+
+ The current look-ahead token is stored in the variable `yychar'.
+*Note Special Features for Use in Actions: Action Features.
+
+
+File: bison.info, Node: Shift/Reduce, Next: Precedence, Prev: Look-Ahead, Up: Algorithm
+
+Shift/Reduce Conflicts
+======================
+
+ Suppose we are parsing a language which has if-then and if-then-else
+statements, with a pair of rules like this:
+
+ if_stmt:
+ IF expr THEN stmt
+ | IF expr THEN stmt ELSE stmt
+ ;
+
+Here we assume that `IF', `THEN' and `ELSE' are terminal symbols for
+specific keyword tokens.
+
+ When the `ELSE' token is read and becomes the look-ahead token, the
+contents of the stack (assuming the input is valid) are just right for
+reduction by the first rule. But it is also legitimate to shift the
+`ELSE', because that would lead to eventual reduction by the second
+rule.
+
+ This situation, where either a shift or a reduction would be valid,
+is called a "shift/reduce conflict". Bison is designed to resolve
+these conflicts by choosing to shift, unless otherwise directed by
+operator precedence declarations. To see the reason for this, let's
+contrast it with the other alternative.
+
+ Since the parser prefers to shift the `ELSE', the result is to attach
+the else-clause to the innermost if-statement, making these two inputs
+equivalent:
+
+ if x then if y then win (); else lose;
+
+ if x then do; if y then win (); else lose; end;
+
+ But if the parser chose to reduce when possible rather than shift,
+the result would be to attach the else-clause to the outermost
+if-statement, making these two inputs equivalent:
+
+ if x then if y then win (); else lose;
+
+ if x then do; if y then win (); end; else lose;
+
+ The conflict exists because the grammar as written is ambiguous:
+either parsing of the simple nested if-statement is legitimate. The
+established convention is that these ambiguities are resolved by
+attaching the else-clause to the innermost if-statement; this is what
+Bison accomplishes by choosing to shift rather than reduce. (It would
+ideally be cleaner to write an unambiguous grammar, but that is very
+hard to do in this case.) This particular ambiguity was first
+encountered in the specifications of Algol 60 and is called the
+"dangling `else'" ambiguity.
+
+ To avoid warnings from Bison about predictable, legitimate
+shift/reduce conflicts, use the `%expect N' declaration. There will be
+no warning as long as the number of shift/reduce conflicts is exactly N.
+*Note Suppressing Conflict Warnings: Expect Decl.
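+
+   For example, the `if_stmt' grammar above produces exactly one
+shift/reduce conflict, so this declaration suppresses the warning:
+
+     %expect 1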
+
+ The definition of `if_stmt' above is solely to blame for the
+conflict, but the conflict does not actually appear without additional
+rules. Here is a complete Bison input file that actually manifests the
+conflict:
+
+ %token IF THEN ELSE variable
+ %%
+ stmt: expr
+ | if_stmt
+ ;
+
+ if_stmt:
+ IF expr THEN stmt
+ | IF expr THEN stmt ELSE stmt
+ ;
+
+ expr: variable
+ ;
+
+
+File: bison.info, Node: Precedence, Next: Contextual Precedence, Prev: Shift/Reduce, Up: Algorithm
+
+Operator Precedence
+===================
+
+ Another situation where shift/reduce conflicts appear is in
+arithmetic expressions. Here shifting is not always the preferred
+resolution; the Bison declarations for operator precedence allow you to
+specify when to shift and when to reduce.
+
+* Menu:
+
+* Why Precedence:: An example showing why precedence is needed.
+* Using Precedence:: How to specify precedence in Bison grammars.
+* Precedence Examples:: How these features are used in the previous example.
+* How Precedence:: How they work.
+
+
+File: bison.info, Node: Why Precedence, Next: Using Precedence, Up: Precedence
+
+When Precedence is Needed
+-------------------------
+
+ Consider the following ambiguous grammar fragment (ambiguous because
+the input `1 - 2 * 3' can be parsed in two different ways):
+
+ expr: expr '-' expr
+ | expr '*' expr
+ | expr '<' expr
+ | '(' expr ')'
+ ...
+ ;
+
+Suppose the parser has seen the tokens `1', `-' and `2'; should it
+reduce them via the rule for the subtraction operator?  It depends on
+the next token.  Of course, if the next token is `)', we must reduce;
+shifting is invalid because no single rule can reduce the token
+sequence `- 2 )' or anything starting with that. But if the next token
+is `*' or `<', we have a choice: either shifting or reduction would
+allow the parse to complete, but with different results.
+
+ To decide which one Bison should do, we must consider the results.
+If the next operator token OP is shifted, then it must be reduced first
+in order to permit another opportunity to reduce the difference.  The result
+is (in effect) `1 - (2 OP 3)'. On the other hand, if the subtraction
+is reduced before shifting OP, the result is `(1 - 2) OP 3'. Clearly,
+then, the choice of shift or reduce should depend on the relative
+precedence of the operators `-' and OP: `*' should be shifted first,
+but not `<'.
+
+ What about input such as `1 - 2 - 5'; should this be `(1 - 2) - 5'
+or should it be `1 - (2 - 5)'? For most operators we prefer the
+former, which is called "left association". The latter alternative,
+"right association", is desirable for assignment operators. The choice
+of left or right association is a matter of whether the parser chooses
+to shift or reduce when the stack contains `1 - 2' and the look-ahead
+token is `-': shifting produces right association, while reducing
+produces left association.
+
+
+File: bison.info, Node: Using Precedence, Next: Precedence Examples, Prev: Why Precedence, Up: Precedence
+
+Specifying Operator Precedence
+------------------------------
+
+ Bison allows you to specify these choices with the operator
+precedence declarations `%left' and `%right'. Each such declaration
+contains a list of tokens, which are operators whose precedence and
+associativity is being declared. The `%left' declaration makes all
+those operators left-associative and the `%right' declaration makes
+them right-associative. A third alternative is `%nonassoc', which
+declares that it is a syntax error to find the same operator twice "in a
+row".
+
+ The relative precedence of different operators is controlled by the
+order in which they are declared. The first `%left' or `%right'
+declaration in the file declares the operators whose precedence is
+lowest, the next such declaration declares the operators whose
+precedence is a little higher, and so on.
+
+
+File: bison.info, Node: Precedence Examples, Next: How Precedence, Prev: Using Precedence, Up: Precedence
+
+Precedence Examples
+-------------------
+
+ In our example, we would want the following declarations:
+
+ %left '<'
+ %left '-'
+ %left '*'
+
+ In a more complete example, which supports other operators as well,
+we would declare them in groups of equal precedence. For example,
+`'+'' is declared with `'-'':
+
+ %left '<' '>' '=' NE LE GE
+ %left '+' '-'
+ %left '*' '/'
+
+(Here `NE' and so on stand for the operators for "not equal" and so on.
+We assume that these tokens are more than one character long and
+therefore are represented by names, not character literals.)
+
diff --git a/tools/bison++/bison.info-4 b/tools/bison++/bison.info-4
new file mode 100644
index 000000000..5c2be51f1
--- /dev/null
+++ b/tools/bison++/bison.info-4
@@ -0,0 +1,1304 @@
+This is bison.info, produced by makeinfo version 4.1 from bison.texinfo.
+
+START-INFO-DIR-ENTRY
+* bison: (bison). GNU Project parser generator (yacc replacement).
+END-INFO-DIR-ENTRY
+
+ This file documents the Bison parser generator.
+
+ Copyright (C) 1988, 89, 90, 91, 92, 93, 95, 98, 1999 Free Software
+Foundation, Inc.
+
+ Permission is granted to make and distribute verbatim copies of this
+manual provided the copyright notice and this permission notice are
+preserved on all copies.
+
+ Permission is granted to copy and distribute modified versions of
+this manual under the conditions for verbatim copying, provided also
+that the sections entitled "GNU General Public License" and "Conditions
+for Using Bison" are included exactly as in the original, and provided
+that the entire resulting derived work is distributed under the terms
+of a permission notice identical to this one.
+
+ Permission is granted to copy and distribute translations of this
+manual into another language, under the above conditions for modified
+versions, except that the sections entitled "GNU General Public
+License", "Conditions for Using Bison" and this permission notice may be
+included in translations approved by the Free Software Foundation
+instead of in the original English.
+
+
+File: bison.info, Node: How Precedence, Prev: Precedence Examples, Up: Precedence
+
+How Precedence Works
+--------------------
+
+ The first effect of the precedence declarations is to assign
+precedence levels to the terminal symbols declared. The second effect
+is to assign precedence levels to certain rules: each rule gets its
+precedence from the last terminal symbol mentioned in the components.
+(You can also specify explicitly the precedence of a rule. *Note
+Context-Dependent Precedence: Contextual Precedence.)
+
+ Finally, the resolution of conflicts works by comparing the
+precedence of the rule being considered with that of the look-ahead
+token. If the token's precedence is higher, the choice is to shift.
+If the rule's precedence is higher, the choice is to reduce. If they
+have equal precedence, the choice is made based on the associativity of
+that precedence level. The verbose output file made by `-v' (*note
+Invoking Bison: Invocation.) says how each conflict was resolved.
+
+ Not all rules and not all tokens have precedence. If either the
+rule or the look-ahead token has no precedence, then the default is to
+shift.
+
+
+File: bison.info, Node: Contextual Precedence, Next: Parser States, Prev: Precedence, Up: Algorithm
+
+Context-Dependent Precedence
+============================
+
+ Often the precedence of an operator depends on the context. This
+sounds outlandish at first, but it is really very common. For example,
+a minus sign typically has a very high precedence as a unary operator,
+and a somewhat lower precedence (lower than multiplication) as a binary
+operator.
+
+ The Bison precedence declarations, `%left', `%right' and
+`%nonassoc', can only be used once for a given token; so a token has
+only one precedence declared in this way. For context-dependent
+precedence, you need to use an additional mechanism: the `%prec'
+modifier for rules.
+
+ The `%prec' modifier declares the precedence of a particular rule by
+specifying a terminal symbol whose precedence should be used for that
+rule. It's not necessary for that symbol to appear otherwise in the
+rule. The modifier's syntax is:
+
+ %prec TERMINAL-SYMBOL
+
+and it is written after the components of the rule. Its effect is to
+assign the rule the precedence of TERMINAL-SYMBOL, overriding the
+precedence that would be deduced for it in the ordinary way. The
+altered rule precedence then affects how conflicts involving that rule
+are resolved (*note Operator Precedence: Precedence.).
+
+ Here is how `%prec' solves the problem of unary minus. First,
+declare a precedence for a fictitious terminal symbol named `UMINUS'.
+There are no tokens of this type, but the symbol serves to stand for its
+precedence:
+
+ ...
+ %left '+' '-'
+ %left '*'
+ %left UMINUS
+
+ Now the precedence of `UMINUS' can be used in specific rules:
+
+ exp: ...
+ | exp '-' exp
+ ...
+ | '-' exp %prec UMINUS
+
+
+File: bison.info, Node: Parser States, Next: Reduce/Reduce, Prev: Contextual Precedence, Up: Algorithm
+
+Parser States
+=============
+
+ The function `yyparse' is implemented using a finite-state machine.
+The values pushed on the parser stack are not simply token type codes;
+they represent the entire sequence of terminal and nonterminal symbols
+at or near the top of the stack. The current state collects all the
+information about previous input which is relevant to deciding what to
+do next.
+
+ Each time a look-ahead token is read, the current parser state
+together with the type of look-ahead token are looked up in a table.
+This table entry can say, "Shift the look-ahead token." In this case,
+it also specifies the new parser state, which is pushed onto the top of
+the parser stack. Or it can say, "Reduce using rule number N." This
+means that a certain number of tokens or groupings are taken off the
+top of the stack, and replaced by one grouping. In other words, that
+number of states are popped from the stack, and one new state is pushed.
+
+ There is one other alternative: the table can say that the
+look-ahead token is erroneous in the current state. This causes error
+processing to begin (*note Error Recovery::).
+
+
+File: bison.info, Node: Reduce/Reduce, Next: Mystery Conflicts, Prev: Parser States, Up: Algorithm
+
+Reduce/Reduce Conflicts
+=======================
+
+ A reduce/reduce conflict occurs if there are two or more rules that
+apply to the same sequence of input. This usually indicates a serious
+error in the grammar.
+
+ For example, here is an erroneous attempt to define a sequence of
+zero or more `word' groupings.
+
+ sequence: /* empty */
+ { printf ("empty sequence\n"); }
+ | maybeword
+ | sequence word
+ { printf ("added word %s\n", $2); }
+ ;
+
+ maybeword: /* empty */
+ { printf ("empty maybeword\n"); }
+ | word
+ { printf ("single word %s\n", $1); }
+ ;
+
+The error is an ambiguity: there is more than one way to parse a single
+`word' into a `sequence'. It could be reduced to a `maybeword' and
+then into a `sequence' via the second rule. Alternatively,
+nothing-at-all could be reduced into a `sequence' via the first rule,
+and this could be combined with the `word' using the third rule for
+`sequence'.
+
+ There is also more than one way to reduce nothing-at-all into a
+`sequence'. This can be done directly via the first rule, or
+indirectly via `maybeword' and then the second rule.
+
+ You might think that this is a distinction without a difference,
+because it does not change whether any particular input is valid or
+not. But it does affect which actions are run. One parsing order runs
+the second rule's action; the other runs the first rule's action and
+the third rule's action. In this example, the output of the program
+changes.
+
+ Bison resolves a reduce/reduce conflict by choosing to use the rule
+that appears first in the grammar, but it is very risky to rely on
+this. Every reduce/reduce conflict must be studied and usually
+eliminated. Here is the proper way to define `sequence':
+
+ sequence: /* empty */
+ { printf ("empty sequence\n"); }
+ | sequence word
+ { printf ("added word %s\n", $2); }
+ ;
+
+ Here is another common error that yields a reduce/reduce conflict:
+
+ sequence: /* empty */
+ | sequence words
+ | sequence redirects
+ ;
+
+ words: /* empty */
+ | words word
+ ;
+
+ redirects:/* empty */
+ | redirects redirect
+ ;
+
+The intention here is to define a sequence which can contain either
+`word' or `redirect' groupings. The individual definitions of
+`sequence', `words' and `redirects' are error-free, but the three
+together make a subtle ambiguity: even an empty input can be parsed in
+infinitely many ways!
+
+ Consider: nothing-at-all could be a `words'. Or it could be two
+`words' in a row, or three, or any number. It could equally well be a
+`redirects', or two, or any number. Or it could be a `words' followed
+by three `redirects' and another `words'. And so on.
+
+ Here are two ways to correct these rules. First, to make it a
+single level of sequence:
+
+ sequence: /* empty */
+ | sequence word
+ | sequence redirect
+ ;
+
+ Second, to prevent either a `words' or a `redirects' from being
+empty:
+
+ sequence: /* empty */
+ | sequence words
+ | sequence redirects
+ ;
+
+ words: word
+ | words word
+ ;
+
+ redirects:redirect
+ | redirects redirect
+ ;
+
+
+File: bison.info, Node: Mystery Conflicts, Next: Stack Overflow, Prev: Reduce/Reduce, Up: Algorithm
+
+Mysterious Reduce/Reduce Conflicts
+==================================
+
+ Sometimes reduce/reduce conflicts can occur that don't look
+warranted. Here is an example:
+
+ %token ID
+
+ %%
+ def: param_spec return_spec ','
+ ;
+ param_spec:
+ type
+ | name_list ':' type
+ ;
+ return_spec:
+ type
+ | name ':' type
+ ;
+ type: ID
+ ;
+ name: ID
+ ;
+ name_list:
+ name
+ | name ',' name_list
+ ;
+
+ It would seem that this grammar can be parsed with only a single
+token of look-ahead: when a `param_spec' is being read, an `ID' is a
+`name' if a comma or colon follows, or a `type' if another `ID'
+follows. In other words, this grammar is LR(1).
+
+ However, Bison, like most parser generators, cannot actually handle
+all LR(1) grammars. In this grammar, two contexts, that after an `ID'
+at the beginning of a `param_spec' and likewise at the beginning of a
+`return_spec', are similar enough that Bison assumes they are the same.
+They appear similar because the same set of rules would be active--the
+rule for reducing to a `name' and that for reducing to a `type'. Bison
+is unable to determine at that stage of processing that the rules would
+require different look-ahead tokens in the two contexts, so it makes a
+single parser state for them both. Combining the two contexts causes a
+conflict later. In parser terminology, this occurrence means that the
+grammar is not LALR(1).
+
+ In general, it is better to fix deficiencies than to document them.
+But this particular deficiency is intrinsically hard to fix; parser
+generators that can handle LR(1) grammars are hard to write and tend to
+produce parsers that are very large. In practice, Bison is more useful
+as it is now.
+
+ When the problem arises, you can often fix it by identifying the two
+parser states that are being confused, and adding something to make them
+look distinct. In the above example, adding one rule to `return_spec'
+as follows makes the problem go away:
+
+ %token BOGUS
+ ...
+ %%
+ ...
+ return_spec:
+ type
+ | name ':' type
+ /* This rule is never used. */
+ | ID BOGUS
+ ;
+
+ This corrects the problem because it introduces the possibility of an
+additional active rule in the context after the `ID' at the beginning of
+`return_spec'. This rule is not active in the corresponding context in
+a `param_spec', so the two contexts receive distinct parser states. As
+long as the token `BOGUS' is never generated by `yylex', the added rule
+cannot alter the way actual input is parsed.
+
+ In this particular example, there is another way to solve the
+problem: rewrite the rule for `return_spec' to use `ID' directly
+instead of via `name'. This also causes the two confusing contexts to
+have different sets of active rules, because the one for `return_spec'
+activates the altered rule for `return_spec' rather than the one for
+`name'.
+
+ param_spec:
+ type
+ | name_list ':' type
+ ;
+ return_spec:
+ type
+ | ID ':' type
+ ;
+
+
+File: bison.info, Node: Stack Overflow, Prev: Mystery Conflicts, Up: Algorithm
+
+Stack Overflow, and How to Avoid It
+===================================
+
+ The Bison parser stack can overflow if too many tokens are shifted
+and not reduced. When this happens, the parser function `yyparse'
+returns a nonzero value, pausing only to call `yyerror' to report the
+overflow.
+
+ By defining the macro `YYMAXDEPTH', you can control how deep the
+parser stack can become before a stack overflow occurs. Define the
+macro with a value that is an integer. This value is the maximum number
+of tokens that can be shifted (and not reduced) before overflow. It
+must be a constant expression whose value is known at compile time.
+
+ The stack space allowed is not necessarily allocated. If you
+specify a large value for `YYMAXDEPTH', the parser actually allocates a
+small stack at first, and then makes it bigger by stages as needed.
+This increasing allocation happens automatically and silently.
+Therefore, you do not need to make `YYMAXDEPTH' painfully small merely
+to save space for ordinary inputs that do not need much stack.
+
+ The default value of `YYMAXDEPTH', if you do not define it, is 10000.
+
+ You can control how much stack is allocated initially by defining the
+macro `YYINITDEPTH'. This value too must be a compile-time constant
+integer. The default is 200.
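+
+   For example, definitions like these in the C declarations section
+of the grammar file (the particular values are arbitrary) raise both
+limits:
+
+     %{
+     #define YYINITDEPTH 1000     /* initial stack size */
+     #define YYMAXDEPTH  100000   /* maximum stack size */
+     %}
+
+Defining the macros with compiler options such as `-DYYMAXDEPTH=100000'
+is another possibility.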
+
+
+File: bison.info, Node: Error Recovery, Next: Context Dependency, Prev: Algorithm, Up: Top
+
+Error Recovery
+**************
+
+ It is not usually acceptable to have a program terminate on a parse
+error. For example, a compiler should recover sufficiently to parse the
+rest of the input file and check it for errors; a calculator should
+accept another expression.
+
+ In a simple interactive command parser where each input is one line,
+it may be sufficient to allow `yyparse' to return 1 on error and have
+the caller ignore the rest of the input line when that happens (and
+then call `yyparse' again). But this is inadequate for a compiler,
+because it forgets all the syntactic context leading up to the error.
+A syntax error deep within a function in the compiler input should not
+cause the compiler to treat the following line like the beginning of a
+source file.
+
+ You can define how to recover from a syntax error by writing rules to
+recognize the special token `error'. This is a terminal symbol that is
+always defined (you need not declare it) and reserved for error
+handling. The Bison parser generates an `error' token whenever a
+syntax error happens; if you have provided a rule to recognize this
+token in the current context, the parse can continue.
+
+ For example:
+
+ stmnts: /* empty string */
+ | stmnts '\n'
+ | stmnts exp '\n'
+ | stmnts error '\n'
+
+ The fourth rule in this example says that an error followed by a
+newline makes a valid addition to any `stmnts'.
+
+ What happens if a syntax error occurs in the middle of an `exp'? The
+error recovery rule, interpreted strictly, applies to the precise
+sequence of a `stmnts', an `error' and a newline. If an error occurs in
+the middle of an `exp', there will probably be some additional tokens
+and subexpressions on the stack after the last `stmnts', and there will
+be tokens to read before the next newline. So the rule is not
+applicable in the ordinary way.
+
+ But Bison can force the situation to fit the rule, by discarding
+part of the semantic context and part of the input. First it discards
+states and objects from the stack until it gets back to a state in
+which the `error' token is acceptable. (This means that the
+subexpressions already parsed are discarded, back to the last complete
+`stmnts'.) At this point the `error' token can be shifted. Then, if
+the old look-ahead token is not acceptable to be shifted next, the
+parser reads tokens and discards them until it finds a token which is
+acceptable. In this example, Bison reads and discards input until the
+next newline so that the fourth rule can apply.
+
+ The choice of error rules in the grammar is a choice of strategies
+for error recovery. A simple and useful strategy is simply to skip the
+rest of the current input line or current statement if an error is
+detected:
+
+ stmnt: error ';' /* on error, skip until ';' is read */
+
+ It is also useful to recover to the matching close-delimiter of an
+opening-delimiter that has already been parsed. Otherwise the
+close-delimiter will probably appear to be unmatched, and generate
+another, spurious error message:
+
+ primary: '(' expr ')'
+ | '(' error ')'
+ ...
+ ;
+
+ Error recovery strategies are necessarily guesses. When they guess
+wrong, one syntax error often leads to another. In the above example,
+the error recovery rule guesses that an error is due to bad input
+within one `stmnt'. Suppose that instead a spurious semicolon is
+inserted in the middle of a valid `stmnt'. After the error recovery
+rule recovers from the first error, another syntax error will be found
+straightaway, since the text following the spurious semicolon is also
+an invalid `stmnt'.
+
+ To prevent an outpouring of error messages, the parser will output
+no error message for another syntax error that happens shortly after
+the first; only after three consecutive input tokens have been
+successfully shifted will error messages resume.
+
+ Note that rules which accept the `error' token may have actions, just
+as any other rules can.
+
+ You can make error messages resume immediately by using the macro
+`yyerrok' in an action. If you do this in the error rule's action, no
+error messages will be suppressed. This macro requires no arguments;
+`yyerrok;' is a valid C statement.
+
+ The previous look-ahead token is reanalyzed immediately after an
+error. If this is unacceptable, then the macro `yyclearin' may be used
+to clear this token. Write the statement `yyclearin;' in the error
+rule's action.
+
+ For example, suppose that on a parse error, an error handling
+routine is called that advances the input stream to some point where
+parsing should once again commence. The next symbol returned by the
+lexical scanner is probably correct. The previous look-ahead token
+ought to be discarded with `yyclearin;'.
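+
+   A hypothetical error rule for that situation might look like this
+(`resynch' stands for whatever routine your application uses to skip
+ahead in the input):
+
+     stmnt: error
+              { resynch ();
+                yyclearin; }   /* Discard the stale look-ahead token.  */
+          ;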
+
+ The macro `YYRECOVERING' stands for an expression that has the value
+1 when the parser is recovering from a syntax error, and 0 the rest of
+the time. A value of 1 indicates that error messages are currently
+suppressed for new syntax errors.
+
+
+File: bison.info, Node: Context Dependency, Next: Debugging, Prev: Error Recovery, Up: Top
+
+Handling Context Dependencies
+*****************************
+
+ The Bison paradigm is to parse tokens first, then group them into
+larger syntactic units. In many languages, the meaning of a token is
+affected by its context. Although this violates the Bison paradigm,
+certain techniques (known as "kludges") may enable you to write Bison
+parsers for such languages.
+
+* Menu:
+
+* Semantic Tokens:: Token parsing can depend on the semantic context.
+* Lexical Tie-ins:: Token parsing can depend on the syntactic context.
+* Tie-in Recovery:: Lexical tie-ins have implications for how
+ error recovery rules must be written.
+
+ (Actually, "kludge" means any technique that gets its job done but is
+neither clean nor robust.)
+
+
+File: bison.info, Node: Semantic Tokens, Next: Lexical Tie-ins, Up: Context Dependency
+
+Semantic Info in Token Types
+============================
+
+ The C language has a context dependency: the way an identifier is
+used depends on what its current meaning is. For example, consider
+this:
+
+ foo (x);
+
+ This looks like a function call statement, but if `foo' is a typedef
+name, then this is actually a declaration of `x'. How can a Bison
+parser for C decide how to parse this input?
+
+ The method used in GNU C is to have two different token types,
+`IDENTIFIER' and `TYPENAME'. When `yylex' finds an identifier, it
+looks up the current declaration of the identifier in order to decide
+which token type to return: `TYPENAME' if the identifier is declared as
+a typedef, `IDENTIFIER' otherwise.
+
+ The grammar rules can then express the context dependency by the
+choice of token type to recognize. `IDENTIFIER' is accepted as an
+expression, but `TYPENAME' is not. `TYPENAME' can start a declaration,
+but `IDENTIFIER' cannot. In contexts where the meaning of the
+identifier is _not_ significant, such as in declarations that can
+shadow a typedef name, either `TYPENAME' or `IDENTIFIER' is
+accepted--there is one rule for each of the two token types.
+
+ This technique is simple to use if the decision of which kinds of
+identifiers to allow is made at a place close to where the identifier is
+parsed. But in C this is not always so: C allows a declaration to
+redeclare a typedef name provided an explicit type has been specified
+earlier:
+
+ typedef int foo, bar, lose;
+ static foo (bar); /* redeclare `bar' as static variable */
+ static int foo (lose); /* redeclare `foo' as function */
+
+ Unfortunately, the name being declared is separated from the
+declaration construct itself by a complicated syntactic structure--the
+"declarator".
+
+   As a result, part of the Bison parser for C needs to be duplicated,
+with all the nonterminal names changed: once for parsing a declaration
+in which a typedef name can be redefined, and once for parsing a
+declaration in which that can't be done. Here is a part of the
+duplication, with actions omitted for brevity:
+
+ initdcl:
+ declarator maybeasm '='
+ init
+ | declarator maybeasm
+ ;
+
+ notype_initdcl:
+ notype_declarator maybeasm '='
+ init
+ | notype_declarator maybeasm
+ ;
+
+Here `initdcl' can redeclare a typedef name, but `notype_initdcl'
+cannot. The distinction between `declarator' and `notype_declarator'
+is the same sort of thing.
+
+ There is some similarity between this technique and a lexical tie-in
+(described next), in that information which alters the lexical analysis
+is changed during parsing by other parts of the program. The
+difference is that here the information is global, and is used for other
+purposes in the program. A true lexical tie-in has a special-purpose
+flag controlled by the syntactic context.
+
+
+File: bison.info, Node: Lexical Tie-ins, Next: Tie-in Recovery, Prev: Semantic Tokens, Up: Context Dependency
+
+Lexical Tie-ins
+===============
+
+ One way to handle context-dependency is the "lexical tie-in": a flag
+which is set by Bison actions, whose purpose is to alter the way tokens
+are parsed.
+
+ For example, suppose we have a language vaguely like C, but with a
+special construct `hex (HEX-EXPR)'. After the keyword `hex' comes an
+expression in parentheses in which all integers are hexadecimal. In
+particular, the token `a1b' must be treated as an integer rather than
+as an identifier if it appears in that context. Here is how you can do
+it:
+
+ %{
+ int hexflag;
+ %}
+ %%
+ ...
+ expr: IDENTIFIER
+ | constant
+ | HEX '('
+ { hexflag = 1; }
+ expr ')'
+ { hexflag = 0;
+ $$ = $4; }
+ | expr '+' expr
+ { $$ = make_sum ($1, $3); }
+ ...
+ ;
+
+ constant:
+ INTEGER
+ | STRING
+ ;
+
+Here we assume that `yylex' looks at the value of `hexflag'; when it is
+nonzero, all integers are parsed in hexadecimal, and tokens starting
+with letters are parsed as integers if possible.
+
+ The declaration of `hexflag' shown in the C declarations section of
+the parser file is needed to make it accessible to the actions (*note
+The C Declarations Section: C Declarations.). You must also write the
+code in `yylex' to obey the flag.
+
+
+File: bison.info, Node: Tie-in Recovery, Prev: Lexical Tie-ins, Up: Context Dependency
+
+Lexical Tie-ins and Error Recovery
+==================================
+
+ Lexical tie-ins make strict demands on any error recovery rules you
+have. *Note Error Recovery::.
+
+ The reason for this is that the purpose of an error recovery rule is
+to abort the parsing of one construct and resume in some larger
+construct. For example, in C-like languages, a typical error recovery
+rule is to skip tokens until the next semicolon, and then start a new
+statement, like this:
+
+ stmt: expr ';'
+ | IF '(' expr ')' stmt { ... }
+ ...
+ error ';'
+ { hexflag = 0; }
+ ;
+
+ If there is a syntax error in the middle of a `hex (EXPR)'
+construct, this error rule will apply, and then the action for the
+completed `hex (EXPR)' will never run. So `hexflag' would remain set
+for the entire rest of the input, or until the next `hex' keyword,
+causing identifiers to be misinterpreted as integers.
+
+ To avoid this problem the error recovery rule itself clears
+`hexflag'.
+
+ There may also be an error recovery rule that works within
+expressions. For example, there could be a rule which applies within
+parentheses and skips to the close-parenthesis:
+
+ expr: ...
+ | '(' expr ')'
+ { $$ = $2; }
+ | '(' error ')'
+ ...
+
+ If this rule acts within the `hex' construct, it is not going to
+abort that construct (since it applies to an inner level of parentheses
+within the construct). Therefore, it should not clear the flag: the
+rest of the `hex' construct should be parsed with the flag still in
+effect.
+
+ What if there is an error recovery rule which might abort out of the
+`hex' construct or might not, depending on circumstances? There is no
+way you can write the action to determine whether a `hex' construct is
+being aborted or not. So if you are using a lexical tie-in, you had
+better make sure your error recovery rules are not of this kind. Each
+rule must be such that you can be sure that it always will, or always
+won't, have to clear the flag.
+
+
+File: bison.info, Node: Debugging, Next: Invocation, Prev: Context Dependency, Up: Top
+
+Debugging Your Parser
+*********************
+
+ If a Bison grammar compiles properly but doesn't do what you want
+when it runs, the `yydebug' parser-trace feature can help you figure
+out why.
+
+ To enable compilation of trace facilities, you must define the macro
+`YYDEBUG' when you compile the parser. You could use `-DYYDEBUG=1' as
+a compiler option or you could put `#define YYDEBUG 1' in the C
+declarations section of the grammar file (*note The C Declarations
+Section: C Declarations.). Alternatively, use the `-t' option when you
+run Bison (*note Invoking Bison: Invocation.). We always define
+`YYDEBUG' so that debugging is always possible.
+
+ The trace facility uses `stderr', so you must add
+`#include <stdio.h>' to the C declarations section unless it is already
+there.
+
+ Once you have compiled the program with trace facilities, the way to
+request a trace is to store a nonzero value in the variable `yydebug'.
+You can do this by making the C code do it (in `main', perhaps), or you
+can alter the value with a C debugger.
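+
+   For example, a `main' function along these lines (purely
+illustrative) turns tracing on for the whole run:
+
+     int
+     main ()
+     {
+       extern int yydebug;   /* defined in the parser file */
+
+       yydebug = 1;          /* write parser traces to stderr */
+       return yyparse ();
+     }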
+
+ Each step taken by the parser when `yydebug' is nonzero produces a
+line or two of trace information, written on `stderr'. The trace
+messages tell you these things:
+
+ * Each time the parser calls `yylex', what kind of token was read.
+
+ * Each time a token is shifted, the depth and complete contents of
+ the state stack (*note Parser States::).
+
+ * Each time a rule is reduced, which rule it is, and the complete
+ contents of the state stack afterward.
+
+ To make sense of this information, it helps to refer to the listing
+file produced by the Bison `-v' option (*note Invoking Bison:
+Invocation.). This file shows the meaning of each state in terms of
+positions in various rules, and also what each state will do with each
+possible input token. As you read the successive trace messages, you
+can see that the parser is functioning according to its specification
+in the listing file. Eventually you will arrive at the place where
+something undesirable happens, and you will see which parts of the
+grammar are to blame.
+
+ The parser file is a C program and you can use C debuggers on it,
+but it's not easy to interpret what it is doing. The parser function
+is a finite-state machine interpreter, and aside from the actions it
+executes the same code over and over. Only the values of variables
+show where in the grammar it is working.
+
+ The debugging information normally gives the token type of each token
+read, but not its semantic value. You can optionally define a macro
+named `YYPRINT' to provide a way to print the value. If you define
+`YYPRINT', it should take three arguments. The parser will pass a
+standard I/O stream, the numeric code for the token type, and the token
+value (from `yylval').
+
+ Here is an example of `YYPRINT' suitable for the multi-function
+calculator (*note Declarations for `mfcalc': Mfcalc Decl.):
+
+ #define YYPRINT(file, type, value) yyprint (file, type, value)
+
+ static void
+ yyprint (file, type, value)
+ FILE *file;
+ int type;
+ YYSTYPE value;
+ {
+ if (type == VAR)
+ fprintf (file, " %s", value.tptr->name);
+ else if (type == NUM)
+ fprintf (file, " %d", value.val);
+ }
+
+
+File: bison.info, Node: Invocation, Next: Table of Symbols, Prev: Debugging, Up: Top
+
+Invoking Bison
+**************
+
+ The usual way to invoke Bison is as follows:
+
+ bison INFILE
+
+ Here INFILE is the grammar file name, which usually ends in `.y'.
+The parser file's name is made by replacing the `.y' with `.tab.c'.
+Thus, running `bison foo.y' yields `foo.tab.c', and running `bison
+hack/foo.y' yields `hack/foo.tab.c'.
+
+* Menu:
+
+* Bison Options:: All the options described in detail,
+ in alphabetical order by short options.
+* Option Cross Key:: Alphabetical list of long options.
+* VMS Invocation:: Bison command syntax on VMS.
+
+
+File: bison.info, Node: Bison Options, Next: Option Cross Key, Up: Invocation
+
+Bison Options
+=============
+
+ Bison supports both traditional single-letter options and mnemonic
+long option names. Long option names are indicated with `--' instead of
+`-'. Abbreviations for option names are allowed as long as they are
+unique. When a long option takes an argument, like `--file-prefix',
+connect the option name and the argument with `='.
+
+ Here is a list of options that can be used with Bison, alphabetized
+by short option. It is followed by a cross key alphabetized by long
+option.
+
+`-b FILE-PREFIX'
+`--file-prefix=PREFIX'
+ Specify a prefix to use for all Bison output file names. The
+ names are chosen as if the input file were named `PREFIX.c'.
+
+`-d'
+`--defines'
+ Write an extra output file containing macro definitions for the
+ token type names defined in the grammar and the semantic value type
+ `YYSTYPE', as well as a few `extern' variable declarations.
+
+ If the parser output file is named `NAME.c' then this file is
+ named `NAME.h'.
+
+ This output file is essential if you wish to put the definition of
+ `yylex' in a separate source file, because `yylex' needs to be
+ able to refer to token type codes and the variable `yylval'.
+ *Note Semantic Values of Tokens: Token Values.
+
+`-l'
+`--no-lines'
+ Don't put any `#line' preprocessor commands in the parser file.
+ Ordinarily Bison puts them in the parser file so that the C
+ compiler and debuggers will associate errors with your source
+ file, the grammar file. This option causes them to associate
+ errors with the parser file, treating it as an independent source
+ file in its own right.
+
+`-n'
+`--no-parser'
+ Do not include any C code in the parser file; generate tables
+ only. The parser file contains just `#define' directives and
+ static variable declarations.
+
+ This option also tells Bison to write the C code for the grammar
+ actions into a file named `FILENAME.act', in the form of a
+ brace-surrounded body fit for a `switch' statement.
+
+`-o OUTFILE'
+`--output-file=OUTFILE'
+ Specify the name OUTFILE for the parser file.
+
+ The other output files' names are constructed from OUTFILE as
+ described under the `-v' and `-d' options.
+
+`-p PREFIX'
+`--name-prefix=PREFIX'
+ Rename the external symbols used in the parser so that they start
+ with PREFIX instead of `yy'. The precise list of symbols renamed
+ is `yyparse', `yylex', `yyerror', `yynerrs', `yylval', `yychar'
+ and `yydebug'.
+
+ For example, if you use `-p c', the names become `cparse', `clex',
+ and so on.
+
+ *Note Multiple Parsers in the Same Program: Multiple Parsers.
+
+`-r'
+`--raw'
+ Pretend that `%raw' was specified. *Note Decl Summary::.
+
+`-t'
+`--debug'
+ Output a definition of the macro `YYDEBUG' into the parser file,
+ so that the debugging facilities are compiled. *Note Debugging
+ Your Parser: Debugging.
+
+`-v'
+`--verbose'
+ Write an extra output file containing verbose descriptions of the
+ parser states and what is done for each type of look-ahead token in
+ that state.
+
+ This file also describes all the conflicts, both those resolved by
+ operator precedence and the unresolved ones.
+
+ The file's name is made by removing `.tab.c' or `.c' from the
+ parser output file name, and adding `.output' instead.
+
+ Therefore, if the input file is `foo.y', then the parser file is
+ called `foo.tab.c' by default. As a consequence, the verbose
+ output file is called `foo.output'.
+
+`-V'
+`--version'
+ Print the version number of Bison and exit.
+
+`-h'
+`--help'
+ Print a summary of the command-line options to Bison and exit.
+
+`-y'
+`--yacc'
+`--fixed-output-files'
+ Equivalent to `-o y.tab.c'; the parser output file is called
+ `y.tab.c', and the other outputs are called `y.output' and
+ `y.tab.h'. The purpose of this option is to imitate Yacc's output
+ file name conventions. Thus, the following shell script can
+ substitute for Yacc:
+
+ bison -y $*
+
+
+File: bison.info, Node: Option Cross Key, Next: VMS Invocation, Prev: Bison Options, Up: Invocation
+
+Option Cross Key
+================
+
+ Here is a list of options, alphabetized by long option, to help you
+find the corresponding short option.
+
+ --debug -t
+ --defines -d
+ --file-prefix=PREFIX -b FILE-PREFIX
+ --fixed-output-files --yacc -y
+ --help -h
+     --name-prefix=PREFIX                  -p PREFIX
+ --no-lines -l
+ --no-parser -n
+ --output-file=OUTFILE -o OUTFILE
+ --raw -r
+ --token-table -k
+ --verbose -v
+ --version -V
+
+
+File: bison.info, Node: VMS Invocation, Prev: Option Cross Key, Up: Invocation
+
+Invoking Bison under VMS
+========================
+
+ The command line syntax for Bison on VMS is a variant of the usual
+Bison command syntax--adapted to fit VMS conventions.
+
+ To find the VMS equivalent for any Bison option, start with the long
+option, and substitute a `/' for the leading `--', and substitute a `_'
+for each `-' in the name of the long option. For example, the
+following invocation under VMS:
+
+ bison /debug/name_prefix=bar foo.y
+
+is equivalent to the following command under POSIX.
+
+ bison --debug --name-prefix=bar foo.y
+
+ The VMS file system does not permit filenames such as `foo.tab.c'.
+In the above example, the output file would instead be named
+`foo_tab.c'.
+
+
+File: bison.info, Node: Table of Symbols, Next: Glossary, Prev: Invocation, Up: Top
+
+Bison Symbols
+*************
+
+`error'
+ A token name reserved for error recovery. This token may be used
+ in grammar rules so as to allow the Bison parser to recognize an
+ error in the grammar without halting the process. In effect, a
+ sentence containing an error may be recognized as valid. On a
+ parse error, the token `error' becomes the current look-ahead
+ token. Actions corresponding to `error' are then executed, and
+ the look-ahead token is reset to the token that originally caused
+ the violation. *Note Error Recovery::.
+
+`YYABORT'
+ Macro to pretend that an unrecoverable syntax error has occurred,
+ by making `yyparse' return 1 immediately. The error reporting
+ function `yyerror' is not called. *Note The Parser Function
+ `yyparse': Parser Function.
+
+`YYACCEPT'
+ Macro to pretend that a complete utterance of the language has been
+ read, by making `yyparse' return 0 immediately. *Note The Parser
+ Function `yyparse': Parser Function.
+
+`YYBACKUP'
+ Macro to discard a value from the parser stack and fake a
+ look-ahead token. *Note Special Features for Use in Actions:
+ Action Features.
+
+`YYERROR'
+ Macro to pretend that a syntax error has just been detected: call
+ `yyerror' and then perform normal error recovery if possible
+ (*note Error Recovery::), or (if recovery is impossible) make
+ `yyparse' return 1. *Note Error Recovery::.
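+
+     For instance, an action might use it to reject input that is
+     syntactically acceptable but semantically impossible (the rule
+     below is hypothetical, not from this manual):
+
+          exp:  exp '/' exp
+                  { if ($3 == 0)
+                      YYERROR;     /* treat it as a syntax error */
+                    else
+                      $$ = $1 / $3;
+                  }
+             ;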
+
+`YYERROR_VERBOSE'
+ Macro that you define with `#define' in the Bison declarations
+ section to request verbose, specific error message strings when
+ `yyerror' is called.
+
+`YYINITDEPTH'
+ Macro for specifying the initial size of the parser stack. *Note
+ Stack Overflow::.
+
+`YYLEX_PARAM'
+ Macro for specifying an extra argument (or list of extra
+ arguments) for `yyparse' to pass to `yylex'. *Note Calling
+ Conventions for Pure Parsers: Pure Calling.
+
+`YYLTYPE'
+ Macro for the data type of `yylloc'; a structure with four
+ members. *Note Textual Positions of Tokens: Token Positions.
+
+`yyltype'
+ Default value for YYLTYPE.
+
+`YYMAXDEPTH'
+ Macro for specifying the maximum size of the parser stack. *Note
+ Stack Overflow::.
+
+`YYPARSE_PARAM'
+ Macro for specifying the name of a parameter that `yyparse' should
+ accept. *Note Calling Conventions for Pure Parsers: Pure Calling.
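+
+     A sketch of a typical definition, placed in the C declarations
+     section (the name `parm' is only an example; see the node cited
+     above for the complete calling conventions):
+
+          %{
+          #define YYPARSE_PARAM parm
+          /* yyparse is then declared as yyparse (void *parm),
+             and actions may cast and use `parm' as they wish.  */
+          %}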
+
+`YYRECOVERING'
+ Macro whose value indicates whether the parser is recovering from a
+ syntax error. *Note Special Features for Use in Actions: Action
+ Features.
+
+`YYSTYPE'
+ Macro for the data type of semantic values; `int' by default.
+ *Note Data Types of Semantic Values: Value Type.
+
+`yychar'
+ External integer variable that contains the integer value of the
+ current look-ahead token. (In a pure parser, it is a local
+ variable within `yyparse'.) Error-recovery rule actions may
+ examine this variable. *Note Special Features for Use in Actions:
+ Action Features.
+
+`yyclearin'
+ Macro used in error-recovery rule actions. It clears the previous
+ look-ahead token. *Note Error Recovery::.
+
+`yydebug'
+ External integer variable set to zero by default. If `yydebug' is
+ given a nonzero value, the parser will output information on input
+ symbols and parser action. *Note Debugging Your Parser: Debugging.
+
+`yyerrok'
+     Macro to cause the parser to recover immediately to its normal
+     mode after a parse error.  *Note Error Recovery::.
+
+`yyerror'
+ User-supplied function to be called by `yyparse' on error. The
+ function receives one argument, a pointer to a character string
+ containing an error message. *Note The Error Reporting Function
+ `yyerror': Error Reporting.
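+
+     A minimal definition might look like this (writing to `stderr' is
+     a common choice rather than a requirement of the interface):
+
+          #include <stdio.h>
+
+          void
+          yyerror (char *s)   /* s is the message from the parser */
+          {
+            fprintf (stderr, "%s\n", s);
+          }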
+
+`yylex'
+ User-supplied lexical analyzer function, called with no arguments
+ to get the next token. *Note The Lexical Analyzer Function
+ `yylex': Lexical.
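+
+     A very small hand-written scanner could look like the sketch
+     below; the token name `NUM', the header `calc.tab.h' produced by
+     `-d', and the default `int' value type are assumptions made only
+     for the example:
+
+          #include <ctype.h>
+          #include <stdio.h>
+          #include "calc.tab.h"  /* token codes, YYSTYPE, yylval */
+
+          int
+          yylex (void)
+          {
+            int c;
+            while ((c = getchar ()) == ' ' || c == '\t')
+              ;                  /* skip white space */
+            if (isdigit (c))
+              {
+                ungetc (c, stdin);
+                scanf ("%d", &yylval);  /* token's semantic value */
+                return NUM;
+              }
+            if (c == EOF)
+              return 0;          /* report end-of-input */
+            return c;            /* single-character token */
+          }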
+
+`yylval'
+ External variable in which `yylex' should place the semantic value
+ associated with a token. (In a pure parser, it is a local
+ variable within `yyparse', and its address is passed to `yylex'.)
+ *Note Semantic Values of Tokens: Token Values.
+
+`yylloc'
+ External variable in which `yylex' should place the line and
+ column numbers associated with a token. (In a pure parser, it is a
+ local variable within `yyparse', and its address is passed to
+ `yylex'.) You can ignore this variable if you don't use the `@'
+ feature in the grammar actions. *Note Textual Positions of
+ Tokens: Token Positions.
+
+`yynerrs'
+ Global variable which Bison increments each time there is a parse
+ error. (In a pure parser, it is a local variable within
+ `yyparse'.) *Note The Error Reporting Function `yyerror': Error
+ Reporting.
+
+`yyparse'
+ The parser function produced by Bison; call this function to start
+ parsing. *Note The Parser Function `yyparse': Parser Function.
+
+`%left'
+ Bison declaration to assign left associativity to token(s). *Note
+ Operator Precedence: Precedence Decl.
+
+`%no_lines'
+ Bison declaration to avoid generating `#line' directives in the
+ parser file. *Note Decl Summary::.
+
+`%nonassoc'
+ Bison declaration to assign nonassociativity to token(s). *Note
+ Operator Precedence: Precedence Decl.
+
+`%prec'
+ Bison declaration to assign a precedence to a specific rule.
+ *Note Context-Dependent Precedence: Contextual Precedence.
+
+`%pure_parser'
+ Bison declaration to request a pure (reentrant) parser. *Note A
+ Pure (Reentrant) Parser: Pure Decl.
+
+`%raw'
+ Bison declaration to use Bison internal token code numbers in token
+ tables instead of the usual Yacc-compatible token code numbers.
+ *Note Decl Summary::.
+
+`%right'
+ Bison declaration to assign right associativity to token(s).
+ *Note Operator Precedence: Precedence Decl.
+
+`%start'
+ Bison declaration to specify the start symbol. *Note The
+ Start-Symbol: Start Decl.
+
+`%token'
+ Bison declaration to declare token(s) without specifying
+ precedence. *Note Token Type Names: Token Decl.
+
+`%token_table'
+ Bison declaration to include a token name table in the parser file.
+ *Note Decl Summary::.
+
+`%type'
+ Bison declaration to declare nonterminals. *Note Nonterminal
+ Symbols: Type Decl.
+
+`%union'
+ Bison declaration to specify several possible data types for
+ semantic values. *Note The Collection of Value Types: Union Decl.
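+
+     For example (the member names are chosen only for illustration):
+
+          %union {
+            double val;        /* numeric tokens and expressions */
+            char  *name;       /* identifiers                    */
+          }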
+
+ These are the punctuation and delimiters used in Bison input:
+
+`%%'
+ Delimiter used to separate the grammar rule section from the Bison
+ declarations section or the additional C code section. *Note The
+ Overall Layout of a Bison Grammar: Grammar Layout.
+
+`%{ %}'
+ All code listed between `%{' and `%}' is copied directly to the
+ output file uninterpreted. Such code forms the "C declarations"
+ section of the input file. *Note Outline of a Bison Grammar:
+ Grammar Outline.
+
+`/*...*/'
+ Comment delimiters, as in C.
+
+`:'
+ Separates a rule's result from its components. *Note Syntax of
+ Grammar Rules: Rules.
+
+`;'
+ Terminates a rule. *Note Syntax of Grammar Rules: Rules.
+
+`|'
+ Separates alternate rules for the same result nonterminal. *Note
+ Syntax of Grammar Rules: Rules.
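+
+     A minimal, purely illustrative grammar file shows where each of
+     these delimiters appears:
+
+          %{  /* C declarations, copied to the parser verbatim */
+          #include <stdio.h>
+          %}
+
+          %token NUM           /* a Bison declaration */
+
+          %%                   /* grammar rules section */
+
+          line: exp '\n'       /* `:' joins result and components */
+              | '\n'           /* `|' introduces an alternative   */
+              ;                /* `;' terminates the rule         */
+
+          exp:  NUM
+              ;
+
+          %%                   /* additional C code section */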
+
+
+File: bison.info, Node: Glossary, Next: Index, Prev: Table of Symbols, Up: Top
+
+Glossary
+********
+
+Backus-Naur Form (BNF)
+ Formal method of specifying context-free grammars. BNF was first
+ used in the `ALGOL-60' report, 1963. *Note Languages and
+ Context-Free Grammars: Language and Grammar.
+
+Context-free grammars
+ Grammars specified as rules that can be applied regardless of
+ context. Thus, if there is a rule which says that an integer can
+ be used as an expression, integers are allowed _anywhere_ an
+ expression is permitted. *Note Languages and Context-Free
+ Grammars: Language and Grammar.
+
+Dynamic allocation
+ Allocation of memory that occurs during execution, rather than at
+ compile time or on entry to a function.
+
+Empty string
+ Analogous to the empty set in set theory, the empty string is a
+ character string of length zero.
+
+Finite-state stack machine
+ A "machine" that has discrete states in which it is said to exist
+ at each instant in time. As input to the machine is processed, the
+ machine moves from state to state as specified by the logic of the
+ machine. In the case of the parser, the input is the language
+ being parsed, and the states correspond to various stages in the
+ grammar rules. *Note The Bison Parser Algorithm: Algorithm.
+
+Grouping
+ A language construct that is (in general) grammatically divisible;
+ for example, `expression' or `declaration' in C. *Note Languages
+ and Context-Free Grammars: Language and Grammar.
+
+Infix operator
+ An arithmetic operator that is placed between the operands on
+ which it performs some operation.
+
+Input stream
+ A continuous flow of data between devices or programs.
+
+Language construct
+ One of the typical usage schemas of the language. For example,
+ one of the constructs of the C language is the `if' statement.
+ *Note Languages and Context-Free Grammars: Language and Grammar.
+
+Left associativity
+ Operators having left associativity are analyzed from left to
+ right: `a+b+c' first computes `a+b' and then combines with `c'.
+ *Note Operator Precedence: Precedence.
+
+Left recursion
+ A rule whose result symbol is also its first component symbol; for
+ example, `expseq1 : expseq1 ',' exp;'. *Note Recursive Rules:
+ Recursion.
+
+Left-to-right parsing
+ Parsing a sentence of a language by analyzing it token by token
+ from left to right. *Note The Bison Parser Algorithm: Algorithm.
+
+Lexical analyzer (scanner)
+ A function that reads an input stream and returns tokens one by
+ one. *Note The Lexical Analyzer Function `yylex': Lexical.
+
+Lexical tie-in
+ A flag, set by actions in the grammar rules, which alters the way
+ tokens are parsed. *Note Lexical Tie-ins::.
+
+Literal string token
+     A token which consists of two or more fixed characters.  *Note
+     Symbols::.
+
+Look-ahead token
+ A token already read but not yet shifted. *Note Look-Ahead
+ Tokens: Look-Ahead.
+
+LALR(1)
+ The class of context-free grammars that Bison (like most other
+ parser generators) can handle; a subset of LR(1). *Note
+ Mysterious Reduce/Reduce Conflicts: Mystery Conflicts.
+
+LR(1)
+ The class of context-free grammars in which at most one token of
+ look-ahead is needed to disambiguate the parsing of any piece of
+ input.
+
+Nonterminal symbol
+ A grammar symbol standing for a grammatical construct that can be
+ expressed through rules in terms of smaller constructs; in other
+ words, a construct that is not a token. *Note Symbols::.
+
+Parse error
+ An error encountered during parsing of an input stream due to
+ invalid syntax. *Note Error Recovery::.
+
+Parser
+ A function that recognizes valid sentences of a language by
+ analyzing the syntax structure of a set of tokens passed to it
+ from a lexical analyzer.
+
+Postfix operator
+ An arithmetic operator that is placed after the operands upon
+ which it performs some operation.
+
+Reduction
+ Replacing a string of nonterminals and/or terminals with a single
+ nonterminal, according to a grammar rule. *Note The Bison Parser
+ Algorithm: Algorithm.
+
+Reentrant
+     A reentrant subprogram is a subprogram which can be invoked any
+     number of times in parallel, without interference between the
+     various invocations.  *Note A Pure (Reentrant) Parser: Pure Decl.
+
+Reverse polish notation
+ A language in which all operators are postfix operators.
+
+Right recursion
+ A rule whose result symbol is also its last component symbol; for
+ example, `expseq1: exp ',' expseq1;'. *Note Recursive Rules:
+ Recursion.
+
+Semantics
+ In computer languages, the semantics are specified by the actions
+ taken for each instance of the language, i.e., the meaning of each
+ statement. *Note Defining Language Semantics: Semantics.
+
+Shift
+ A parser is said to shift when it makes the choice of analyzing
+ further input from the stream rather than reducing immediately some
+ already-recognized rule. *Note The Bison Parser Algorithm:
+ Algorithm.
+
+Single-character literal
+ A single character that is recognized and interpreted as is.
+ *Note From Formal Rules to Bison Input: Grammar in Bison.
+
+Start symbol
+ The nonterminal symbol that stands for a complete valid utterance
+ in the language being parsed. The start symbol is usually listed
+ as the first nonterminal symbol in a language specification.
+ *Note The Start-Symbol: Start Decl.
+
+Symbol table
+ A data structure where symbol names and associated data are stored
+ during parsing to allow for recognition and use of existing
+ information in repeated uses of a symbol. *Note Multi-function
+ Calc::.
+
+Token
+ A basic, grammatically indivisible unit of a language. The symbol
+ that describes a token in the grammar is a terminal symbol. The
+ input of the Bison parser is a stream of tokens which comes from
+ the lexical analyzer. *Note Symbols::.
+
+Terminal symbol
+ A grammar symbol that has no rules in the grammar and therefore is
+ grammatically indivisible. The piece of text it represents is a
+ token. *Note Languages and Context-Free Grammars: Language and
+ Grammar.
+
diff --git a/tools/bison++/bison.info-5 b/tools/bison++/bison.info-5
new file mode 100644
index 000000000..4b0ca2351
--- /dev/null
+++ b/tools/bison++/bison.info-5
@@ -0,0 +1,238 @@
+This is bison.info, produced by makeinfo version 4.1 from bison.texinfo.
+
+START-INFO-DIR-ENTRY
+* bison: (bison). GNU Project parser generator (yacc replacement).
+END-INFO-DIR-ENTRY
+
+ This file documents the Bison parser generator.
+
+ Copyright (C) 1988, 89, 90, 91, 92, 93, 95, 98, 1999 Free Software
+Foundation, Inc.
+
+ Permission is granted to make and distribute verbatim copies of this
+manual provided the copyright notice and this permission notice are
+preserved on all copies.
+
+ Permission is granted to copy and distribute modified versions of
+this manual under the conditions for verbatim copying, provided also
+that the sections entitled "GNU General Public License" and "Conditions
+for Using Bison" are included exactly as in the original, and provided
+that the entire resulting derived work is distributed under the terms
+of a permission notice identical to this one.
+
+ Permission is granted to copy and distribute translations of this
+manual into another language, under the above conditions for modified
+versions, except that the sections entitled "GNU General Public
+License", "Conditions for Using Bison" and this permission notice may be
+included in translations approved by the Free Software Foundation
+instead of in the original English.
+
+
+File: bison.info, Node: Index, Prev: Glossary, Up: Top
+
+Index
+*****
+
+* Menu:
+
+* $$: Actions.
+* $N: Actions.
+* %expect: Expect Decl.
+* %left: Using Precedence.
+* %nonassoc: Using Precedence.
+* %prec: Contextual Precedence.
+* %pure_parser: Pure Decl.
+* %right: Using Precedence.
+* %start: Start Decl.
+* %token: Token Decl.
+* %type: Type Decl.
+* %union: Union Decl.
+* @N: Action Features.
+* action: Actions.
+* action data types: Action Types.
+* action features summary: Action Features.
+* actions in mid-rule: Mid-Rule Actions.
+* actions, semantic: Semantic Actions.
+* additional C code section: C Code.
+* algorithm of parser: Algorithm.
+* associativity: Why Precedence.
+* Backus-Naur form: Language and Grammar.
+* Bison declaration summary: Decl Summary.
+* Bison declarations: Declarations.
+* Bison declarations (introduction): Bison Declarations.
+* Bison grammar: Grammar in Bison.
+* Bison invocation: Invocation.
+* Bison parser: Bison Parser.
+* Bison parser algorithm: Algorithm.
+* Bison symbols, table of: Table of Symbols.
+* Bison utility: Bison Parser.
+* BNF: Language and Grammar.
+* C code, section for additional: C Code.
+* C declarations section: C Declarations.
+* C-language interface: Interface.
+* calc: Infix Calc.
+* calculator, infix notation: Infix Calc.
+* calculator, multi-function: Multi-function Calc.
+* calculator, simple: RPN Calc.
+* character token: Symbols.
+* compiling the parser: Rpcalc Compile.
+* conflicts: Shift/Reduce.
+* conflicts, reduce/reduce: Reduce/Reduce.
+* conflicts, suppressing warnings of: Expect Decl.
+* context-dependent precedence: Contextual Precedence.
+* context-free grammar: Language and Grammar.
+* controlling function: Rpcalc Main.
+* dangling else: Shift/Reduce.
+* data types in actions: Action Types.
+* data types of semantic values: Value Type.
+* debugging: Debugging.
+* declaration summary: Decl Summary.
+* declarations, Bison: Declarations.
+* declarations, Bison (introduction): Bison Declarations.
+* declarations, C: C Declarations.
+* declaring literal string tokens: Token Decl.
+* declaring operator precedence: Precedence Decl.
+* declaring the start symbol: Start Decl.
+* declaring token type names: Token Decl.
+* declaring value types: Union Decl.
+* declaring value types, nonterminals: Type Decl.
+* default action: Actions.
+* default data type: Value Type.
+* default stack limit: Stack Overflow.
+* default start symbol: Start Decl.
+* defining language semantics: Semantics.
+* else, dangling: Shift/Reduce.
+* error: Error Recovery.
+* error recovery: Error Recovery.
+* error recovery, simple: Simple Error Recovery.
+* error reporting function: Error Reporting.
+* error reporting routine: Rpcalc Error.
+* examples, simple: Examples.
+* exercises: Exercises.
+* file format: Grammar Layout.
+* finite-state machine: Parser States.
+* formal grammar: Grammar in Bison.
+* format of grammar file: Grammar Layout.
+* glossary: Glossary.
+* grammar file: Grammar Layout.
+* grammar rule syntax: Rules.
+* grammar rules section: Grammar Rules.
+* grammar, Bison: Grammar in Bison.
+* grammar, context-free: Language and Grammar.
+* grouping, syntactic: Language and Grammar.
+* infix notation calculator: Infix Calc.
+* interface: Interface.
+* introduction: Introduction.
+* invoking Bison: Invocation.
+* invoking Bison under VMS: VMS Invocation.
+* LALR(1): Mystery Conflicts.
+* language semantics, defining: Semantics.
+* layout of Bison grammar: Grammar Layout.
+* left recursion: Recursion.
+* lexical analyzer: Lexical.
+* lexical analyzer, purpose: Bison Parser.
+* lexical analyzer, writing: Rpcalc Lexer.
+* lexical tie-in: Lexical Tie-ins.
+* literal string token: Symbols.
+* literal token: Symbols.
+* look-ahead token: Look-Ahead.
+* LR(1): Mystery Conflicts.
+* main function in simple example: Rpcalc Main.
+* mfcalc: Multi-function Calc.
+* mid-rule actions: Mid-Rule Actions.
+* multi-character literal: Symbols.
+* multi-function calculator: Multi-function Calc.
+* mutual recursion: Recursion.
+* nonterminal symbol: Symbols.
+* operator precedence: Precedence.
+* operator precedence, declaring: Precedence Decl.
+* options for invoking Bison: Invocation.
+* overflow of parser stack: Stack Overflow.
+* parse error: Error Reporting.
+* parser: Bison Parser.
+* parser stack: Algorithm.
+* parser stack overflow: Stack Overflow.
+* parser state: Parser States.
+* polish notation calculator: RPN Calc.
+* precedence declarations: Precedence Decl.
+* precedence of operators: Precedence.
+* precedence, context-dependent: Contextual Precedence.
+* precedence, unary operator: Contextual Precedence.
+* preventing warnings about conflicts: Expect Decl.
+* pure parser: Pure Decl.
+* recovery from errors: Error Recovery.
+* recursive rule: Recursion.
+* reduce/reduce conflict: Reduce/Reduce.
+* reduction: Algorithm.
+* reentrant parser: Pure Decl.
+* reverse polish notation: RPN Calc.
+* right recursion: Recursion.
+* rpcalc: RPN Calc.
+* rule syntax: Rules.
+* rules section for grammar: Grammar Rules.
+* running Bison (introduction): Rpcalc Gen.
+* semantic actions: Semantic Actions.
+* semantic value: Semantic Values.
+* semantic value type: Value Type.
+* shift/reduce conflicts: Shift/Reduce.
+* shifting: Algorithm.
+* simple examples: Examples.
+* single-character literal: Symbols.
+* stack overflow: Stack Overflow.
+* stack, parser: Algorithm.
+* stages in using Bison: Stages.
+* start symbol: Language and Grammar.
+* start symbol, declaring: Start Decl.
+* state (of parser): Parser States.
+* string token: Symbols.
+* summary, action features: Action Features.
+* summary, Bison declaration: Decl Summary.
+* suppressing conflict warnings: Expect Decl.
+* symbol: Symbols.
+* symbol table example: Mfcalc Symtab.
+* symbols (abstract): Language and Grammar.
+* symbols in Bison, table of: Table of Symbols.
+* syntactic grouping: Language and Grammar.
+* syntax error: Error Reporting.
+* syntax of grammar rules: Rules.
+* terminal symbol: Symbols.
+* token: Language and Grammar.
+* token type: Symbols.
+* token type names, declaring: Token Decl.
+* tracing the parser: Debugging.
+* unary operator precedence: Contextual Precedence.
+* using Bison: Stages.
+* value type, semantic: Value Type.
+* value types, declaring: Union Decl.
+* value types, nonterminals, declaring: Type Decl.
+* value, semantic: Semantic Values.
+* VMS: VMS Invocation.
+* warnings, preventing: Expect Decl.
+* writing a lexical analyzer: Rpcalc Lexer.
+* YYABORT: Parser Function.
+* YYACCEPT: Parser Function.
+* YYBACKUP: Action Features.
+* yychar: Look-Ahead.
+* yyclearin: Error Recovery.
+* yydebug: Debugging.
+* YYDEBUG: Debugging.
+* YYEMPTY: Action Features.
+* yyerrok: Error Recovery.
+* YYERROR: Action Features.
+* yyerror: Error Reporting.
+* YYERROR_VERBOSE: Error Reporting.
+* YYINITDEPTH: Stack Overflow.
+* yylex: Lexical.
+* YYLEX_PARAM: Pure Calling.
+* yylloc: Token Positions.
+* YYLTYPE: Token Positions.
+* yylval: Token Values.
+* YYMAXDEPTH: Stack Overflow.
+* yynerrs: Error Reporting.
+* yyparse: Parser Function.
+* YYPARSE_PARAM: Pure Calling.
+* YYPRINT: Debugging.
+* YYRECOVERING: Error Recovery.
+* |: Rules.
+
+
diff --git a/tools/bison++/bison.ps.gz b/tools/bison++/bison.ps.gz
new file mode 100644
index 000000000..d71c6a291
--- /dev/null
+++ b/tools/bison++/bison.ps.gz
Binary files differ
diff --git a/tools/bison++/bison.rnh b/tools/bison++/bison.rnh
new file mode 100644
index 000000000..544178149
--- /dev/null
+++ b/tools/bison++/bison.rnh
@@ -0,0 +1,167 @@
+.!
+.! RUNOFF source file for BISON.HLP
+.!
+.! This is a RUNOFF input file which will produce a VMS help file
+.! for the VMS HELP library.
+.!
+.! Date of last revision: June 21, 1992
+.!
+.!
+.! Eric Youngdale
+.!
+.literal
+.end literal
+.no paging
+.no flags all
+.right margin 70
+.left margin 1
+
+.indent -1
+1 BISON
+.skip
+ The BISON command invokes the GNU BISON parser generator.
+.skip
+.literal
+ BISON file-spec
+.end literal
+.skip
+.indent -1
+2 Parameters
+.skip
+ file-spec
+.skip
+Here file-spec is the grammar file name, which usually ends in
+.y. The parser file's name is made by replacing the .y
+with _tab.c. Thus, the command bison foo.y yields
+foo_tab.c.
+
+.skip
+.indent -1
+2 Qualifiers
+.skip
+ The following is the list of available qualifiers for BISON:
+.literal
+ /DEBUG
+ /DEFINES
+ /FILE_PREFIX=prefix
+ /FIXED_OUTFILES
+ /NAME_PREFIX=prefix
+ /NOLINES
+ /OUTPUT=outfile
+ /VERBOSE
+ /VERSION
+ /YACC
+.end literal
+.skip
+.indent -1
+2 /DEBUG
+.skip
+Output a definition of the macro YYDEBUG into the parser file,
+so that the debugging facilities are compiled.
+.skip
+.indent -1
+2 /DEFINES
+.skip
+Write an extra output file containing macro definitions for the token
+type names defined in the grammar and the semantic value type
+YYSTYPE, as well as a few extern variable declarations.
+.skip
+If the parser output file is named "name.c" then this file
+is named "name.h".
+.skip
+This output file is essential if you wish to put the definition of
+yylex in a separate source file, because yylex needs to
+be able to refer to token type codes and the variable
+yylval.
+.skip
+.indent -1
+2 /FILE_PREFIX
+.skip
+.literal
+ /FILE_PREFIX=prefix
+.end literal
+.skip
+ Specify a prefix to use for all Bison output file names. The names are
+chosen as if the input file were named prefix.c.
+
+.skip
+.indent -1
+2 /FIXED_OUTFILES
+.skip
+Equivalent to /OUTPUT=y_tab.c; the parser output file is called
+y_tab.c, and the other outputs are called y.output and
+y_tab.h. The purpose of this switch is to imitate Yacc's output
+file name conventions. The /YACC qualifier is functionally equivalent
+to /FIXED_OUTFILES. The following command definition will
+work as a substitute for Yacc:
+
+.literal
+$YACC:==BISON/FIXED_OUTFILES
+.end literal
+.skip
+.indent -1
+2 /NAME_PREFIX
+.skip
+.literal
+ /NAME_PREFIX=prefix
+.end literal
+.skip
+Rename the external symbols used in the parser so that they start with
+"prefix" instead of "yy". The precise list of symbols renamed
+is yyparse, yylex, yyerror, yylval, yychar and yydebug.
+
+For example, if you use /NAME_PREFIX="c", the names become cparse,
+clex, and so on.
+
+.skip
+.indent -1
+2 /NOLINES
+.skip
+Don't put any "#line" preprocessor commands in the parser file.
+Ordinarily Bison puts them in the parser file so that the C compiler
+and debuggers will associate errors with your source file, the
+grammar file. This option causes them to associate errors with the
+parser file, treating it as an independent source file in its own right.
+
+.skip
+.indent -1
+2 /OUTPUT
+.skip
+.literal
+ /OUTPUT=outfile
+.end literal
+.skip
+Specify the name "outfile" for the parser file.
+.skip
+.indent -1
+2 /VERBOSE
+.skip
+Write an extra output file containing verbose descriptions of the
+parser states and what is done for each type of look-ahead token in
+that state.
+.skip
+This file also describes all the conflicts, both those resolved by
+operator precedence and the unresolved ones.
+.skip
+The file's name is made by removing _tab.c or .c from
+the parser output file name, and adding .output instead.
+.skip
+Therefore, if the input file is foo.y, then the parser file is
+called foo_tab.c by default. As a consequence, the verbose
+output file is called foo.output.
+.skip
+.indent -1
+2 /VERSION
+.skip
+Print the version number of Bison.
+
+.skip
+.indent -1
+2 /YACC
+.skip
+See /FIXED_OUTFILES.
+.skip
+.indent -1
+
+
+
diff --git a/tools/bison++/bison.texinfo b/tools/bison++/bison.texinfo
new file mode 100644
index 000000000..b179e71de
--- /dev/null
+++ b/tools/bison++/bison.texinfo
@@ -0,0 +1,5452 @@
+\input texinfo @c -*-texinfo-*-
+@comment %**start of header
+@setfilename bison.info
+@include version.texi
+@settitle Bison @value{VERSION}
+@setchapternewpage odd
+
+@iftex
+@finalout
+@end iftex
+
+@c SMALL BOOK version
+@c This edition has been formatted so that you can format and print it in
+@c the smallbook format.
+@c @smallbook
+
+@c Set following if you have the new `shorttitlepage' command
+@c @clear shorttitlepage-enabled
+@c @set shorttitlepage-enabled
+
+@c ISPELL CHECK: done, 14 Jan 1993 --bob
+
+@c Check COPYRIGHT dates. should be updated in the titlepage, ifinfo
+@c titlepage; should NOT be changed in the GPL. --mew
+
+@iftex
+@syncodeindex fn cp
+@syncodeindex vr cp
+@syncodeindex tp cp
+@end iftex
+@ifinfo
+@synindex fn cp
+@synindex vr cp
+@synindex tp cp
+@end ifinfo
+@comment %**end of header
+
+@ifinfo
+@format
+START-INFO-DIR-ENTRY
+* bison: (bison). GNU Project parser generator (yacc replacement).
+END-INFO-DIR-ENTRY
+@end format
+@end ifinfo
+
+@ifinfo
+This file documents the Bison parser generator.
+
+Copyright (C) 1988, 89, 90, 91, 92, 93, 95, 98, 1999 Free Software Foundation, Inc.
+
+Permission is granted to make and distribute verbatim copies of
+this manual provided the copyright notice and this permission notice
+are preserved on all copies.
+
+@ignore
+Permission is granted to process this file through TeX and print the
+results, provided the printed document carries copying permission
+notice identical to this one except for the removal of this paragraph
+(this paragraph not being relevant to the printed manual).
+
+@end ignore
+Permission is granted to copy and distribute modified versions of this
+manual under the conditions for verbatim copying, provided also that the
+sections entitled ``GNU General Public License'' and ``Conditions for
+Using Bison'' are included exactly as in the original, and provided that
+the entire resulting derived work is distributed under the terms of a
+permission notice identical to this one.
+
+Permission is granted to copy and distribute translations of this manual
+into another language, under the above conditions for modified versions,
+except that the sections entitled ``GNU General Public License'',
+``Conditions for Using Bison'' and this permission notice may be
+included in translations approved by the Free Software Foundation
+instead of in the original English.
+@end ifinfo
+
+@ifset shorttitlepage-enabled
+@shorttitlepage Bison
+@end ifset
+@titlepage
+@title Bison
+@subtitle The YACC-compatible Parser Generator
+@subtitle @value{UPDATED}, Bison Version @value{VERSION}
+
+@author by Charles Donnelly and Richard Stallman
+
+@page
+@vskip 0pt plus 1filll
+Copyright @copyright{} 1988, 89, 90, 91, 92, 93, 95, 98, 1999 Free Software
+Foundation
+
+@sp 2
+Published by the Free Software Foundation @*
+59 Temple Place, Suite 330 @*
+Boston, MA 02111-1307 USA @*
+Printed copies are available for $15 each.@*
+ISBN 1-882114-45-0
+
+Permission is granted to make and distribute verbatim copies of
+this manual provided the copyright notice and this permission notice
+are preserved on all copies.
+
+@ignore
+Permission is granted to process this file through TeX and print the
+results, provided the printed document carries copying permission
+notice identical to this one except for the removal of this paragraph
+(this paragraph not being relevant to the printed manual).
+
+@end ignore
+Permission is granted to copy and distribute modified versions of this
+manual under the conditions for verbatim copying, provided also that the
+sections entitled ``GNU General Public License'' and ``Conditions for
+Using Bison'' are included exactly as in the original, and provided that
+the entire resulting derived work is distributed under the terms of a
+permission notice identical to this one.
+
+Permission is granted to copy and distribute translations of this manual
+into another language, under the above conditions for modified versions,
+except that the sections entitled ``GNU General Public License'',
+``Conditions for Using Bison'' and this permission notice may be
+included in translations approved by the Free Software Foundation
+instead of in the original English.
+@sp 2
+Cover art by Etienne Suvasa.
+@end titlepage
+@page
+
+@node Top, Introduction, (dir), (dir)
+
+@ifinfo
+This manual documents version @value{VERSION} of Bison.
+@end ifinfo
+
+@menu
+* Introduction::
+* Conditions::
+* Copying:: The GNU General Public License says
+ how you can copy and share Bison
+
+Tutorial sections:
+* Concepts:: Basic concepts for understanding Bison.
+* Examples:: Three simple explained examples of using Bison.
+
+Reference sections:
+* Grammar File:: Writing Bison declarations and rules.
+* Interface:: C-language interface to the parser function @code{yyparse}.
+* Algorithm:: How the Bison parser works at run-time.
+* Error Recovery:: Writing rules for error recovery.
+* Context Dependency:: What to do if your language syntax is too
+ messy for Bison to handle straightforwardly.
+* Debugging:: Debugging Bison parsers that parse wrong.
+* Invocation:: How to run Bison (to produce the parser source file).
+* Table of Symbols:: All the keywords of the Bison language are explained.
+* Glossary:: Basic concepts are explained.
+* Index:: Cross-references to the text.
+
+ --- The Detailed Node Listing ---
+
+The Concepts of Bison
+
+* Language and Grammar:: Languages and context-free grammars,
+ as mathematical ideas.
+* Grammar in Bison:: How we represent grammars for Bison's sake.
+* Semantic Values:: Each token or syntactic grouping can have
+ a semantic value (the value of an integer,
+ the name of an identifier, etc.).
+* Semantic Actions:: Each rule can have an action containing C code.
+* Bison Parser:: What are Bison's input and output,
+ how is the output used?
+* Stages:: Stages in writing and running Bison grammars.
+* Grammar Layout:: Overall structure of a Bison grammar file.
+
+Examples
+
+* RPN Calc:: Reverse polish notation calculator;
+ a first example with no operator precedence.
+* Infix Calc:: Infix (algebraic) notation calculator.
+ Operator precedence is introduced.
+* Simple Error Recovery:: Continuing after syntax errors.
+* Multi-function Calc:: Calculator with memory and trig functions.
+ It uses multiple data-types for semantic values.
+* Exercises:: Ideas for improving the multi-function calculator.
+
+Reverse Polish Notation Calculator
+
+* Decls: Rpcalc Decls. Bison and C declarations for rpcalc.
+* Rules: Rpcalc Rules. Grammar Rules for rpcalc, with explanation.
+* Lexer: Rpcalc Lexer. The lexical analyzer.
+* Main: Rpcalc Main. The controlling function.
+* Error: Rpcalc Error. The error reporting function.
+* Gen: Rpcalc Gen. Running Bison on the grammar file.
+* Comp: Rpcalc Compile. Run the C compiler on the output code.
+
+Grammar Rules for @code{rpcalc}
+
+* Rpcalc Input::
+* Rpcalc Line::
+* Rpcalc Expr::
+
+Multi-Function Calculator: @code{mfcalc}
+
+* Decl: Mfcalc Decl. Bison declarations for multi-function calculator.
+* Rules: Mfcalc Rules. Grammar rules for the calculator.
+* Symtab: Mfcalc Symtab. Symbol table management subroutines.
+
+Bison Grammar Files
+
+* Grammar Outline:: Overall layout of the grammar file.
+* Symbols:: Terminal and nonterminal symbols.
+* Rules:: How to write grammar rules.
+* Recursion:: Writing recursive rules.
+* Semantics:: Semantic values and actions.
+* Declarations:: All kinds of Bison declarations are described here.
+* Multiple Parsers:: Putting more than one Bison parser in one program.
+
+Outline of a Bison Grammar
+
+* C Declarations:: Syntax and usage of the C declarations section.
+* Bison Declarations:: Syntax and usage of the Bison declarations section.
+* Grammar Rules:: Syntax and usage of the grammar rules section.
+* C Code:: Syntax and usage of the additional C code section.
+
+Defining Language Semantics
+
+* Value Type:: Specifying one data type for all semantic values.
+* Multiple Types:: Specifying several alternative data types.
+* Actions:: An action is the semantic definition of a grammar rule.
+* Action Types:: Specifying data types for actions to operate on.
+* Mid-Rule Actions:: Most actions go at the end of a rule.
+ This says when, why and how to use the exceptional
+ action in the middle of a rule.
+
+Bison Declarations
+
+* Token Decl:: Declaring terminal symbols.
+* Precedence Decl:: Declaring terminals with precedence and associativity.
+* Union Decl:: Declaring the set of all semantic value types.
+* Type Decl:: Declaring the choice of type for a nonterminal symbol.
+* Expect Decl:: Suppressing warnings about shift/reduce conflicts.
+* Start Decl:: Specifying the start symbol.
+* Pure Decl:: Requesting a reentrant parser.
+* Decl Summary:: Table of all Bison declarations.
+
+Parser C-Language Interface
+
+* Parser Function:: How to call @code{yyparse} and what it returns.
+* Lexical:: You must supply a function @code{yylex}
+ which reads tokens.
+* Error Reporting:: You must supply a function @code{yyerror}.
+* Action Features:: Special features for use in actions.
+
+The Lexical Analyzer Function @code{yylex}
+
+* Calling Convention:: How @code{yyparse} calls @code{yylex}.
+* Token Values:: How @code{yylex} must return the semantic value
+ of the token it has read.
+* Token Positions:: How @code{yylex} must return the text position
+ (line number, etc.) of the token, if the
+ actions want that.
+* Pure Calling:: How the calling convention differs
+ in a pure parser (@pxref{Pure Decl, ,A Pure (Reentrant) Parser}).
+
+The Bison Parser Algorithm
+
+* Look-Ahead:: Parser looks one token ahead when deciding what to do.
+* Shift/Reduce:: Conflicts: when either shifting or reduction is valid.
+* Precedence:: Operator precedence works by resolving conflicts.
+* Contextual Precedence:: When an operator's precedence depends on context.
+* Parser States:: The parser is a finite-state-machine with stack.
+* Reduce/Reduce:: When two rules are applicable in the same situation.
+* Mystery Conflicts:: Reduce/reduce conflicts that look unjustified.
+* Stack Overflow:: What happens when stack gets full. How to avoid it.
+
+Operator Precedence
+
+* Why Precedence:: An example showing why precedence is needed.
+* Using Precedence:: How to specify precedence in Bison grammars.
+* Precedence Examples:: How these features are used in the previous example.
+* How Precedence:: How they work.
+
+Handling Context Dependencies
+
+* Semantic Tokens:: Token parsing can depend on the semantic context.
+* Lexical Tie-ins:: Token parsing can depend on the syntactic context.
+* Tie-in Recovery:: Lexical tie-ins have implications for how
+ error recovery rules must be written.
+
+Invoking Bison
+
+* Bison Options:: All the options described in detail,
+ in alphabetical order by short options.
+* Option Cross Key:: Alphabetical list of long options.
+* VMS Invocation:: Bison command syntax on VMS.
+@end menu
+
+@node Introduction, Conditions, Top, Top
+@unnumbered Introduction
+@cindex introduction
+
+@dfn{Bison} is a general-purpose parser generator that converts a
+grammar description for an LALR(1) context-free grammar into a C
+program to parse that grammar. Once you are proficient with Bison,
+you may use it to develop a wide range of language parsers, from those
+used in simple desk calculators to complex programming languages.
+
+Bison is upward compatible with Yacc: all properly-written Yacc grammars
+ought to work with Bison with no change. Anyone familiar with Yacc
+should be able to use Bison with little trouble. You need to be fluent in
+C programming in order to use Bison or to understand this manual.
+
+We begin with tutorial chapters that explain the basic concepts of using
+Bison and show three explained examples, each building on the last. If you
+don't know Bison or Yacc, start by reading these chapters. Reference
+chapters follow which describe specific aspects of Bison in detail.
+
+Bison was written primarily by Robert Corbett; Richard Stallman made it
+Yacc-compatible. Wilfred Hansen of Carnegie Mellon University added
+multicharacter string literals and other features.
+
+This edition corresponds to version @value{VERSION} of Bison.
+
+@node Conditions, Copying, Introduction, Top
+@unnumbered Conditions for Using Bison
+
+As of Bison version 1.24, we have changed the distribution terms for
+@code{yyparse} to permit using Bison's output in non-free programs.
+Formerly, Bison parsers could be used only in programs that were free
+software.
+
+The other GNU programming tools, such as the GNU C compiler, have never
+had such a requirement. They could always be used for non-free
+software. The reason Bison was different was not due to a special
+policy decision; it resulted from applying the usual General Public
+License to all of the Bison source code.
+
+The output of the Bison utility---the Bison parser file---contains a
+verbatim copy of a sizable piece of Bison, which is the code for the
+@code{yyparse} function. (The actions from your grammar are inserted
+into this function at one point, but the rest of the function is not
+changed.) When we applied the GPL terms to the code for @code{yyparse},
+the effect was to restrict the use of Bison output to free software.
+
+We didn't change the terms because of sympathy for people who want to
+make software proprietary. @strong{Software should be free.} But we
+concluded that limiting Bison's use to free software was doing little to
+encourage people to make other software free. So we decided to make the
+practical conditions for using Bison match the practical conditions for
+using the other GNU tools.
+
+@node Copying, Concepts, Conditions, Top
+@unnumbered GNU GENERAL PUBLIC LICENSE
+@center Version 2, June 1991
+
+@display
+Copyright @copyright{} 1989, 1991 Free Software Foundation, Inc.
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+@end display
+
+@unnumberedsec Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software---to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+@iftex
+@unnumberedsec TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+@end iftex
+@ifinfo
+@center TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+@end ifinfo
+
+@enumerate 0
+@item
+This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The ``Program'', below,
+refers to any such program or work, and a ``work based on the Program''
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term ``modification''.) Each licensee is addressed as ``you''.
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+@item
+You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+@item
+You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+@enumerate a
+@item
+You must cause the modified files to carry prominent notices
+stating that you changed the files and the date of any change.
+
+@item
+You must cause any work that you distribute or publish, that in
+whole or in part contains or is derived from the Program or any
+part thereof, to be licensed as a whole at no charge to all third
+parties under the terms of this License.
+
+@item
+If the modified program normally reads commands interactively
+when run, you must cause it, when started running for such
+interactive use in the most ordinary way, to print or display an
+announcement including an appropriate copyright notice and a
+notice that there is no warranty (or else, saying that you provide
+a warranty) and that users may redistribute the program under
+these conditions, and telling the user how to view a copy of this
+License. (Exception: if the Program itself is interactive but
+does not normally print such an announcement, your work based on
+the Program is not required to print an announcement.)
+@end enumerate
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+@item
+You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+@enumerate a
+@item
+Accompany it with the complete corresponding machine-readable
+source code, which must be distributed under the terms of Sections
+1 and 2 above on a medium customarily used for software interchange; or,
+
+@item
+Accompany it with a written offer, valid for at least three
+years, to give any third party, for a charge no more than your
+cost of physically performing source distribution, a complete
+machine-readable copy of the corresponding source code, to be
+distributed under the terms of Sections 1 and 2 above on a medium
+customarily used for software interchange; or,
+
+@item
+Accompany it with the information you received as to the offer
+to distribute corresponding source code. (This alternative is
+allowed only for noncommercial distribution and only if you
+received the program in object code or executable form with such
+an offer, in accord with Subsection b above.)
+@end enumerate
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+@item
+You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+@item
+You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+@item
+Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+@item
+If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+@item
+If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+@item
+The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and ``any
+later version'', you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+@item
+If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+@iftex
+@heading NO WARRANTY
+@end iftex
+@ifinfo
+@center NO WARRANTY
+@end ifinfo
+
+@item
+BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM ``AS IS'' WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+@item
+IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+@end enumerate
+
+@iftex
+@heading END OF TERMS AND CONDITIONS
+@end iftex
+@ifinfo
+@center END OF TERMS AND CONDITIONS
+@end ifinfo
+
+@page
+@unnumberedsec How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the ``copyright'' line and a pointer to where the full notice is found.
+
+@smallexample
+@var{one line to give the program's name and a brief idea of what it does.}
+Copyright (C) 19@var{yy} @var{name of author}
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+@end smallexample
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+@smallexample
+Gnomovision version 69, Copyright (C) 19@var{yy} @var{name of author}
+Gnomovision comes with ABSOLUTELY NO WARRANTY; for details
+type `show w'.
+This is free software, and you are welcome to redistribute it
+under certain conditions; type `show c' for details.
+@end smallexample
+
+The hypothetical commands @samp{show w} and @samp{show c} should show
+the appropriate parts of the General Public License. Of course, the
+commands you use may be called something other than @samp{show w} and
+@samp{show c}; they could even be mouse-clicks or menu items---whatever
+suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a ``copyright disclaimer'' for the program, if
+necessary. Here is a sample; alter the names:
+
+@smallexample
+Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+`Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+@var{signature of Ty Coon}, 1 April 1989
+Ty Coon, President of Vice
+@end smallexample
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
+
+@node Concepts, Examples, Copying, Top
+@chapter The Concepts of Bison
+
+This chapter introduces many of the basic concepts without which the
+details of Bison will not make sense. If you do not already know how to
+use Bison or Yacc, we suggest you start by reading this chapter carefully.
+
+@menu
+* Language and Grammar:: Languages and context-free grammars,
+ as mathematical ideas.
+* Grammar in Bison:: How we represent grammars for Bison's sake.
+* Semantic Values:: Each token or syntactic grouping can have
+ a semantic value (the value of an integer,
+ the name of an identifier, etc.).
+* Semantic Actions:: Each rule can have an action containing C code.
+* Bison Parser:: What are Bison's input and output,
+ how is the output used?
+* Stages:: Stages in writing and running Bison grammars.
+* Grammar Layout:: Overall structure of a Bison grammar file.
+@end menu
+
+@node Language and Grammar, Grammar in Bison, , Concepts
+@section Languages and Context-Free Grammars
+
+@cindex context-free grammar
+@cindex grammar, context-free
+In order for Bison to parse a language, it must be described by a
+@dfn{context-free grammar}. This means that you specify one or more
+@dfn{syntactic groupings} and give rules for constructing them from their
+parts. For example, in the C language, one kind of grouping is called an
+`expression'. One rule for making an expression might be, ``An expression
+can be made of a minus sign and another expression''. Another would be,
+``An expression can be an integer''. As you can see, rules are often
+recursive, but there must be at least one rule which leads out of the
+recursion.
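+
+Written informally in the same style, those two rules for an
+expression might look like this (just a sketch of the idea, not yet in
+Bison notation):
+
+@example
+expression ::= '-' expression
+expression ::= INTEGER
+@end example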
+
+@cindex BNF
+@cindex Backus-Naur form
+The most common formal system for presenting such rules for humans to read
+is @dfn{Backus-Naur Form} or ``BNF'', which was developed in order to
+specify the language Algol 60. Any grammar expressed in BNF is a
+context-free grammar. The input to Bison is essentially machine-readable
+BNF.
+
+Not all context-free languages can be handled by Bison, only those
+that are LALR(1). In brief, this means that it must be possible to
+tell how to parse any portion of an input string with just a single
+token of look-ahead. Strictly speaking, that is a description of an
+LR(1) grammar, and LALR(1) involves additional restrictions that are
+hard to explain simply; but it is rare in actual practice to find an
+LR(1) grammar that fails to be LALR(1). @xref{Mystery Conflicts, ,
+Mysterious Reduce/Reduce Conflicts}, for more information on this.
+
+@cindex symbols (abstract)
+@cindex token
+@cindex syntactic grouping
+@cindex grouping, syntactic
+In the formal grammatical rules for a language, each kind of syntactic unit
+or grouping is named by a @dfn{symbol}. Those which are built by grouping
+smaller constructs according to grammatical rules are called
+@dfn{nonterminal symbols}; those which can't be subdivided are called
+@dfn{terminal symbols} or @dfn{token types}. We call a piece of input
+corresponding to a single terminal symbol a @dfn{token}, and a piece
+corresponding to a single nonterminal symbol a @dfn{grouping}.@refill
+
+We can use the C language as an example of what symbols, terminal and
+nonterminal, mean. The tokens of C are identifiers, constants (numeric and
+string), and the various keywords, arithmetic operators and punctuation
+marks. So the terminal symbols of a grammar for C include `identifier',
+`number', `string', plus one symbol for each keyword, operator or
+punctuation mark: `if', `return', `const', `static', `int', `char',
+`plus-sign', `open-brace', `close-brace', `comma' and many more. (These
+tokens can be subdivided into characters, but that is a matter of
+lexicography, not grammar.)
+
+Here is a simple C function subdivided into tokens:
+
+@example
+int /* @r{keyword `int'} */
+square (x) /* @r{identifier, open-paren,} */
+ /* @r{identifier, close-paren} */
+ int x; /* @r{keyword `int', identifier, semicolon} */
+@{ /* @r{open-brace} */
+ return x * x; /* @r{keyword `return', identifier,} */
+ /* @r{asterisk, identifier, semicolon} */
+@} /* @r{close-brace} */
+@end example
+
+The syntactic groupings of C include the expression, the statement, the
+declaration, and the function definition. These are represented in the
+grammar of C by nonterminal symbols `expression', `statement',
+`declaration' and `function definition'. The full grammar uses dozens of
+additional language constructs, each with its own nonterminal symbol, in
+order to express the meanings of these four. The example above is a
+function definition; it contains one declaration, and one statement. In
+the statement, each @samp{x} is an expression and so is @samp{x * x}.
+
+Each nonterminal symbol must have grammatical rules showing how it is made
+out of simpler constructs. For example, one kind of C statement is the
+@code{return} statement; this would be described with a grammar rule which
+reads informally as follows:
+
+@quotation
+A `statement' can be made of a `return' keyword, an `expression' and a
+`semicolon'.
+@end quotation
+
+@noindent
+There would be many other rules for `statement', one for each kind of
+statement in C.
+
+@cindex start symbol
+One nonterminal symbol must be distinguished as the special one which
+defines a complete utterance in the language. It is called the @dfn{start
+symbol}. In a compiler, this means a complete input program. In the C
+language, the nonterminal symbol `sequence of definitions and declarations'
+plays this role.
+
+For example, @samp{1 + 2} is a valid C expression---a valid part of a C
+program---but it is not valid as an @emph{entire} C program. In the
+context-free grammar of C, this follows from the fact that `expression' is
+not the start symbol.
+
+The Bison parser reads a sequence of tokens as its input, and groups the
+tokens using the grammar rules. If the input is valid, the end result is
+that the entire token sequence reduces to a single grouping whose symbol is
+the grammar's start symbol. If we use a grammar for C, the entire input
+must be a `sequence of definitions and declarations'. If not, the parser
+reports a syntax error.
+
+@node Grammar in Bison, Semantic Values, Language and Grammar, Concepts
+@section From Formal Rules to Bison Input
+@cindex Bison grammar
+@cindex grammar, Bison
+@cindex formal grammar
+
+A formal grammar is a mathematical construct. To define the language
+for Bison, you must write a file expressing the grammar in Bison syntax:
+a @dfn{Bison grammar} file. @xref{Grammar File, ,Bison Grammar Files}.
+
+A nonterminal symbol in the formal grammar is represented in Bison input
+as an identifier, like an identifier in C. By convention, it should be
+in lower case, such as @code{expr}, @code{stmt} or @code{declaration}.
+
+The Bison representation for a terminal symbol is also called a @dfn{token
+type}. Token types as well can be represented as C-like identifiers. By
+convention, these identifiers should be upper case to distinguish them from
+nonterminals: for example, @code{INTEGER}, @code{IDENTIFIER}, @code{IF} or
+@code{RETURN}. A terminal symbol that stands for a particular keyword in
+the language should be named after that keyword converted to upper case.
+The terminal symbol @code{error} is reserved for error recovery.
+@xref{Symbols}.
+
+A terminal symbol can also be represented as a character literal, just like
+a C character constant. You should do this whenever a token is just a
+single character (parenthesis, plus-sign, etc.): use that same character in
+a literal as the terminal symbol for that token.
+
+A third way to represent a terminal symbol is with a C string constant
+containing several characters. @xref{Symbols}, for more information.
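+
+For instance, the three kinds of terminal symbols might appear
+together in a fragment like this (an illustrative sketch, not a
+complete grammar; the symbols @code{relation} and @code{expr} are
+merely made-up names):
+
+@example
+%token INTEGER IDENTIFIER   /* named token types */
+%%
+relation: expr '<' expr     /* '<' is a character literal token */
+        | expr "<=" expr    /* "<=" is a literal string token */
+;
+@end example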
+
+The grammar rules also have an expression in Bison syntax. For example,
+here is the Bison rule for a C @code{return} statement. The semicolon in
+quotes is a literal character token, representing part of the C syntax for
+the statement; the naked semicolon, and the colon, are Bison punctuation
+used in every rule.
+
+@example
+stmt: RETURN expr ';'
+ ;
+@end example
+
+@noindent
+@xref{Rules, ,Syntax of Grammar Rules}.
+
+@node Semantic Values, Semantic Actions, Grammar in Bison, Concepts
+@section Semantic Values
+@cindex semantic value
+@cindex value, semantic
+
+A formal grammar selects tokens only by their classifications: for example,
+if a rule mentions the terminal symbol `integer constant', it means that
+@emph{any} integer constant is grammatically valid in that position. The
+precise value of the constant is irrelevant to how to parse the input: if
+@samp{x+4} is grammatical then @samp{x+1} or @samp{x+3989} is equally
+grammatical.@refill
+
+But the precise value is very important for what the input means once it is
+parsed. A compiler is useless if it fails to distinguish between 4, 1 and
+3989 as constants in the program! Therefore, each token in a Bison grammar
+has both a token type and a @dfn{semantic value}. @xref{Semantics, ,Defining Language Semantics},
+for details.
+
+The token type is a terminal symbol defined in the grammar, such as
+@code{INTEGER}, @code{IDENTIFIER} or @code{','}. It tells everything
+you need to know to decide where the token may validly appear and how to
+group it with other tokens. The grammar rules know nothing about tokens
+except their types.@refill
+
+The semantic value has all the rest of the information about the
+meaning of the token, such as the value of an integer, or the name of an
+identifier. (A token such as @code{','} which is just punctuation doesn't
+need to have any semantic value.)
+
+For example, an input token might be classified as token type
+@code{INTEGER} and have the semantic value 4. Another input token might
+have the same token type @code{INTEGER} but value 3989. When a grammar
+rule says that @code{INTEGER} is allowed, either of these tokens is
+acceptable because each is an @code{INTEGER}. When the parser accepts the
+token, it keeps track of the token's semantic value.
+
+Each grouping can also have a semantic value as well as its nonterminal
+symbol. For example, in a calculator, an expression typically has a
+semantic value that is a number. In a compiler for a programming
+language, an expression typically has a semantic value that is a tree
+structure describing the meaning of the expression.
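+
+As a concrete sketch (the conventions involved, including the variable
+@code{yylval} that holds the value, are described in later chapters),
+a lexical analyzer might report an integer constant by returning the
+token type and storing the semantic value separately:
+
+@example
+@group
+/* Sketch only: assumes the default value type `int' and a
+   declared token type INTEGER.  */
+if (isdigit (c))
+  @{
+    ungetc (c, stdin);
+    scanf ("%d", &yylval);   /* the semantic value */
+    return INTEGER;          /* the token type */
+  @}
+@end group
+@end example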
+
+@node Semantic Actions, Bison Parser, Semantic Values, Concepts
+@section Semantic Actions
+@cindex semantic actions
+@cindex actions, semantic
+
+In order to be useful, a program must do more than parse input; it must
+also produce some output based on the input. In a Bison grammar, a grammar
+rule can have an @dfn{action} made up of C statements. Each time the
+parser recognizes a match for that rule, the action is executed.
+@xref{Actions}.
+
+Most of the time, the purpose of an action is to compute the semantic value
+of the whole construct from the semantic values of its parts. For example,
+suppose we have a rule which says an expression can be the sum of two
+expressions. When the parser recognizes such a sum, each of the
+subexpressions has a semantic value which describes how it was built up.
+The action for this rule should create a similar sort of value for the
+newly recognized larger expression.
+
+For example, here is a rule that says an expression can be the sum of
+two subexpressions:
+
+@example
+expr: expr '+' expr @{ $$ = $1 + $3; @}
+ ;
+@end example
+
+@noindent
+The action says how to produce the semantic value of the sum expression
+from the values of the two subexpressions.
+
+@node Bison Parser, Stages, Semantic Actions, Concepts
+@section Bison Output: the Parser File
+@cindex Bison parser
+@cindex Bison utility
+@cindex lexical analyzer, purpose
+@cindex parser
+
+When you run Bison, you give it a Bison grammar file as input. The output
+is a C source file that parses the language described by the grammar.
+This file is called a @dfn{Bison parser}. Keep in mind that the Bison
+utility and the Bison parser are two distinct programs: the Bison utility
+is a program whose output is the Bison parser that becomes part of your
+program.
+
+The job of the Bison parser is to group tokens into groupings according to
+the grammar rules---for example, to build identifiers and operators into
+expressions. As it does this, it runs the actions for the grammar rules it
+uses.
+
+The tokens come from a function called the @dfn{lexical analyzer} that you
+must supply in some fashion (such as by writing it in C). The Bison parser
+calls the lexical analyzer each time it wants a new token. It doesn't know
+what is ``inside'' the tokens (though their semantic values may reflect
+this). Typically the lexical analyzer makes the tokens by parsing
+characters of text, but Bison does not depend on this. @xref{Lexical, ,The Lexical Analyzer Function @code{yylex}}.
+
+The Bison parser file is C code which defines a function named
+@code{yyparse} which implements that grammar. This function does not make
+a complete C program: you must supply some additional functions. One is
+the lexical analyzer. Another is an error-reporting function which the
+parser calls to report an error. In addition, a complete C program must
+start with a function called @code{main}; you have to provide this, and
+arrange for it to call @code{yyparse} or the parser will never run.
+@xref{Interface, ,Parser C-Language Interface}.
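+
+As a minimal sketch (the full versions appear with the examples later
+in this manual), those supporting functions might look like this:
+
+@example
+@group
+#include <stdio.h>
+
+yyerror (s)     /* Called by yyparse on error */
+     char *s;
+@{
+  printf ("%s\n", s);
+@}
+
+main ()
+@{
+  return yyparse ();   /* returns 0 if the input was valid */
+@}
+@end group
+@end example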
+
+Aside from the token type names and the symbols in the actions you
+write, all variable and function names used in the Bison parser file
+begin with @samp{yy} or @samp{YY}. This includes interface functions
+such as the lexical analyzer function @code{yylex}, the error reporting
+function @code{yyerror} and the parser function @code{yyparse} itself.
+This also includes numerous identifiers used for internal purposes.
+Therefore, you should avoid using C identifiers starting with @samp{yy}
+or @samp{YY} in the Bison grammar file except for the ones defined in
+this manual.
+
+@node Stages, Grammar Layout, Bison Parser, Concepts
+@section Stages in Using Bison
+@cindex stages in using Bison
+@cindex using Bison
+
+The actual language-design process using Bison, from grammar specification
+to a working compiler or interpreter, has these parts:
+
+@enumerate
+@item
+Formally specify the grammar in a form recognized by Bison
+(@pxref{Grammar File, ,Bison Grammar Files}). For each grammatical rule in the language,
+describe the action that is to be taken when an instance of that rule
+is recognized. The action is described by a sequence of C statements.
+
+@item
+Write a lexical analyzer to process input and pass tokens to the
+parser. The lexical analyzer may be written by hand in C
+(@pxref{Lexical, ,The Lexical Analyzer Function @code{yylex}}). It could also be produced using Lex, but the use
+of Lex is not discussed in this manual.
+
+@item
+Write a controlling function that calls the Bison-produced parser.
+
+@item
+Write error-reporting routines.
+@end enumerate
+
+To turn this source code as written into a runnable program, you
+must follow these steps:
+
+@enumerate
+@item
+Run Bison on the grammar to produce the parser.
+
+@item
+Compile the code output by Bison, as well as any other source files.
+
+@item
+Link the object files to produce the finished product.
+@end enumerate
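+
+For instance, if everything were in a single hypothetical file
+@file{foo.y}, the commands might look like this (just a sketch; the
+details are covered in the chapters that follow):
+
+@example
+@group
+# @r{Produce the parser @file{foo.tab.c} from the grammar.}
+% bison foo.y
+
+# @r{Compile and link; @samp{-lm} searches the math library.}
+% cc foo.tab.c -lm -o foo
+@end group
+@end example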
+
+@node Grammar Layout, , Stages, Concepts
+@section The Overall Layout of a Bison Grammar
+@cindex grammar file
+@cindex file format
+@cindex format of grammar file
+@cindex layout of Bison grammar
+
+The input file for the Bison utility is a @dfn{Bison grammar file}. The
+general form of a Bison grammar file is as follows:
+
+@example
+%@{
+@var{C declarations}
+%@}
+
+@var{Bison declarations}
+
+%%
+@var{Grammar rules}
+%%
+@var{Additional C code}
+@end example
+
+@noindent
+The @samp{%%}, @samp{%@{} and @samp{%@}} are punctuation that appears
+in every Bison grammar file to separate the sections.
+
+The C declarations may define types and variables used in the actions.
+You can also use preprocessor commands to define macros used there, and use
+@code{#include} to include header files that do any of these things.
+
+The Bison declarations declare the names of the terminal and nonterminal
+symbols, and may also describe operator precedence and the data types of
+semantic values of various symbols.
+
+The grammar rules define how to construct each nonterminal symbol from its
+parts.
+
+The additional C code can contain any C code you want to use. Often the
+definition of the lexical analyzer @code{yylex} goes here, plus subroutines
+called by the actions in the grammar rules. In a simple program, all the
+rest of the program can go here.
+
+@node Examples, Grammar File, Concepts, Top
+@chapter Examples
+@cindex simple examples
+@cindex examples, simple
+
+Now we show and explain three sample programs written using Bison: a
+reverse polish notation calculator, an algebraic (infix) notation
+calculator, and a multi-function calculator. All three have been tested
+under BSD Unix 4.3; each produces a usable, though limited, interactive
+desk-top calculator.
+
+These examples are simple, but Bison grammars for real programming
+languages are written the same way.
+@ifinfo
+You can copy these examples out of the Info file and into a source file
+to try them.
+@end ifinfo
+
+@menu
+* RPN Calc:: Reverse polish notation calculator;
+ a first example with no operator precedence.
+* Infix Calc:: Infix (algebraic) notation calculator.
+ Operator precedence is introduced.
+* Simple Error Recovery:: Continuing after syntax errors.
+* Multi-function Calc:: Calculator with memory and trig functions.
+ It uses multiple data-types for semantic values.
+* Exercises:: Ideas for improving the multi-function calculator.
+@end menu
+
+@node RPN Calc, Infix Calc, , Examples
+@section Reverse Polish Notation Calculator
+@cindex reverse polish notation
+@cindex polish notation calculator
+@cindex @code{rpcalc}
+@cindex calculator, simple
+
+The first example is that of a simple double-precision @dfn{reverse polish
+notation} calculator (a calculator using postfix operators). This example
+provides a good starting point, since operator precedence is not an issue.
+The second example will illustrate how operator precedence is handled.
+
+The source code for this calculator is named @file{rpcalc.y}. The
+@samp{.y} extension is a convention used for Bison input files.
+
+@menu
+* Decls: Rpcalc Decls. Bison and C declarations for rpcalc.
+* Rules: Rpcalc Rules. Grammar Rules for rpcalc, with explanation.
+* Lexer: Rpcalc Lexer. The lexical analyzer.
+* Main: Rpcalc Main. The controlling function.
+* Error: Rpcalc Error. The error reporting function.
+* Gen: Rpcalc Gen. Running Bison on the grammar file.
+* Comp: Rpcalc Compile. Run the C compiler on the output code.
+@end menu
+
+@node Rpcalc Decls, Rpcalc Rules, , RPN Calc
+@subsection Declarations for @code{rpcalc}
+
+Here are the C and Bison declarations for the reverse polish notation
+calculator. As in C, comments are placed between @samp{/*@dots{}*/}.
+
+@example
+/* Reverse polish notation calculator. */
+
+%@{
+#define YYSTYPE double
+#include <math.h>
+%@}
+
+%token NUM
+
+%% /* Grammar rules and actions follow */
+@end example
+
+The C declarations section (@pxref{C Declarations, ,The C Declarations Section}) contains two
+preprocessor directives.
+
+The @code{#define} directive defines the macro @code{YYSTYPE}, thus
+specifying the C data type for semantic values of both tokens and groupings
+(@pxref{Value Type, ,Data Types of Semantic Values}). The Bison parser will use whatever type
+@code{YYSTYPE} is defined as; if you don't define it, @code{int} is the
+default. Because we specify @code{double}, each token and each expression
+has an associated value, which is a floating point number.
+
+The @code{#include} directive is used to declare the exponentiation
+function @code{pow}.
+
+The second section, Bison declarations, provides information to Bison about
+the token types (@pxref{Bison Declarations, ,The Bison Declarations Section}). Each terminal symbol that is
+not a single-character literal must be declared here. (Single-character
+literals normally don't need to be declared.) In this example, all the
+arithmetic operators are designated by single-character literals, so the
+only terminal symbol that needs to be declared is @code{NUM}, the token
+type for numeric constants.
+
+@node Rpcalc Rules, Rpcalc Lexer, Rpcalc Decls, RPN Calc
+@subsection Grammar Rules for @code{rpcalc}
+
+Here are the grammar rules for the reverse polish notation calculator.
+
+@example
+input: /* empty */
+ | input line
+;
+
+line: '\n'
+ | exp '\n' @{ printf ("\t%.10g\n", $1); @}
+;
+
+exp: NUM @{ $$ = $1; @}
+ | exp exp '+' @{ $$ = $1 + $2; @}
+ | exp exp '-' @{ $$ = $1 - $2; @}
+ | exp exp '*' @{ $$ = $1 * $2; @}
+ | exp exp '/' @{ $$ = $1 / $2; @}
+ /* Exponentiation */
+ | exp exp '^' @{ $$ = pow ($1, $2); @}
+ /* Unary minus */
+ | exp 'n' @{ $$ = -$1; @}
+;
+%%
+@end example
+
+The groupings of the rpcalc ``language'' defined here are the expression
+(given the name @code{exp}), the line of input (@code{line}), and the
+complete input transcript (@code{input}). Each of these nonterminal
+symbols has several alternate rules, joined by the @samp{|} punctuator
+which is read as ``or''. The following sections explain what these rules
+mean.
+
+The semantics of the language is determined by the actions taken when a
+grouping is recognized. The actions are the C code that appears inside
+braces. @xref{Actions}.
+
+You must specify these actions in C, but Bison provides the means for
+passing semantic values between the rules. In each action, the
+pseudo-variable @code{$$} stands for the semantic value for the grouping
+that the rule is going to construct. Assigning a value to @code{$$} is the
+main job of most actions. The semantic values of the components of the
+rule are referred to as @code{$1}, @code{$2}, and so on.
+
+@menu
+* Rpcalc Input::
+* Rpcalc Line::
+* Rpcalc Expr::
+@end menu
+
+@node Rpcalc Input, Rpcalc Line, , Rpcalc Rules
+@subsubsection Explanation of @code{input}
+
+Consider the definition of @code{input}:
+
+@example
+input: /* empty */
+ | input line
+;
+@end example
+
+This definition reads as follows: ``A complete input is either an empty
+string, or a complete input followed by an input line''. Notice that
+``complete input'' is defined in terms of itself. This definition is said
+to be @dfn{left recursive} since @code{input} always appears as the
+leftmost symbol in the sequence. @xref{Recursion, ,Recursive Rules}.
+
+The first alternative is empty because there are no symbols between the
+colon and the first @samp{|}; this means that @code{input} can match an
+empty string of input (no tokens). We write the rules this way because it
+is legitimate to type @kbd{Ctrl-d} right after you start the calculator.
+It's conventional to put an empty alternative first and write the comment
+@samp{/* empty */} in it.
+
+The second alternate rule (@code{input line}) handles all nontrivial input.
+It means, ``After reading any number of lines, read one more line if
+possible.'' The left recursion makes this rule into a loop. Since the
+first alternative matches empty input, the loop can be executed zero or
+more times.
+
+The parser function @code{yyparse} continues to process input until a
+grammatical error is seen or the lexical analyzer says there are no more
+input tokens; we will arrange for the latter to happen at end of file.
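+
+For contrast, the same language could be described with a
+right-recursive rule (shown here only as a sketch; rpcalc does not use
+it, and right recursion consumes parser stack space in proportion to
+the number of lines read):
+
+@example
+input:    /* empty */
+        | line input
+;
+@end example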
+
+@node Rpcalc Line, Rpcalc Expr, Rpcalc Input, Rpcalc Rules
+@subsubsection Explanation of @code{line}
+
+Now consider the definition of @code{line}:
+
+@example
+line: '\n'
+ | exp '\n' @{ printf ("\t%.10g\n", $1); @}
+;
+@end example
+
+The first alternative is a token which is a newline character; this means
+that rpcalc accepts a blank line (and ignores it, since there is no
+action). The second alternative is an expression followed by a newline.
+This is the alternative that makes rpcalc useful. The semantic value of
+the @code{exp} grouping is the value of @code{$1} because the @code{exp} in
+question is the first symbol in the alternative. The action prints this
+value, which is the result of the computation the user asked for.
+
+This action is unusual because it does not assign a value to @code{$$}. As
+a consequence, the semantic value associated with the @code{line} is
+uninitialized (its value will be unpredictable). This would be a bug if
+that value were ever used, but we don't use it: once rpcalc has printed the
+value of the user's input line, that value is no longer needed.
+
+@node Rpcalc Expr, , Rpcalc Line, Rpcalc Rules
+@subsubsection Explanation of @code{expr}
+
+The @code{exp} grouping has several rules, one for each kind of expression.
+The first rule handles the simplest expressions: those that are just numbers.
+The second handles an addition-expression, which looks like two expressions
+followed by a plus-sign. The third handles subtraction, and so on.
+
+@example
+exp: NUM
+ | exp exp '+' @{ $$ = $1 + $2; @}
+ | exp exp '-' @{ $$ = $1 - $2; @}
+ @dots{}
+ ;
+@end example
+
+We have used @samp{|} to join all the rules for @code{exp}, but we could
+equally well have written them separately:
+
+@example
+exp: NUM ;
+exp: exp exp '+' @{ $$ = $1 + $2; @} ;
+exp: exp exp '-' @{ $$ = $1 - $2; @} ;
+ @dots{}
+@end example
+
+Most of the rules have actions that compute the value of the expression in
+terms of the value of its parts. For example, in the rule for addition,
+@code{$1} refers to the first component @code{exp} and @code{$2} refers to
+the second one. The third component, @code{'+'}, has no meaningful
+associated semantic value, but if it had one you could refer to it as
+@code{$3}. When @code{yyparse} recognizes a sum expression using this
+rule, the sum of the two subexpressions' values is produced as the value of
+the entire expression. @xref{Actions}.
+
+You don't have to give an action for every rule. When a rule has no
+action, Bison by default copies the value of @code{$1} into @code{$$}.
+This is what happens in the first rule (the one that uses @code{NUM}).
+
+The formatting shown here is the recommended convention, but Bison does
+not require it. You can add or change whitespace as much as you wish.
+For example, this:
+
+@example
+exp : NUM | exp exp '+' @{$$ = $1 + $2; @} | @dots{}
+@end example
+
+@noindent
+means the same thing as this:
+
+@example
+exp: NUM
+ | exp exp '+' @{ $$ = $1 + $2; @}
+ | @dots{}
+@end example
+
+@noindent
+The latter, however, is much more readable.
+
+@node Rpcalc Lexer, Rpcalc Main, Rpcalc Rules, RPN Calc
+@subsection The @code{rpcalc} Lexical Analyzer
+@cindex writing a lexical analyzer
+@cindex lexical analyzer, writing
+
+The lexical analyzer's job is low-level parsing: converting characters or
+sequences of characters into tokens. The Bison parser gets its tokens by
+calling the lexical analyzer. @xref{Lexical, ,The Lexical Analyzer Function @code{yylex}}.
+
+Only a simple lexical analyzer is needed for the RPN calculator. This
+lexical analyzer skips blanks and tabs, then reads in numbers as
+@code{double} and returns them as @code{NUM} tokens. Any other character
+that isn't part of a number is a separate token. Note that the token-code
+for such a single-character token is the character itself.
+
+The return value of the lexical analyzer function is a numeric code which
+represents a token type. The same text used in Bison rules to stand for
+this token type is also a C expression for the numeric code for the type.
+This works in two ways. If the token type is a character literal, then its
+numeric code is the ASCII code for that character; you can use the same
+character literal in the lexical analyzer to express the number. If the
+token type is an identifier, that identifier is defined by Bison as a C
+macro whose definition is the appropriate number. In this example,
+therefore, @code{NUM} becomes a macro for @code{yylex} to use.
+
+The semantic value of the token (if it has one) is stored into the global
+variable @code{yylval}, which is where the Bison parser will look for it.
+(The C data type of @code{yylval} is @code{YYSTYPE}, which was defined
+at the beginning of the grammar; @pxref{Rpcalc Decls, ,Declarations for @code{rpcalc}}.)
+
+A token type code of zero is returned if the end-of-file is encountered.
+(Bison recognizes any nonpositive value as indicating the end of the
+input.)
+
+Here is the code for the lexical analyzer:
+
+@example
+@group
+/* Lexical analyzer returns a double floating point
+ number on the stack and the token NUM, or the ASCII
+ character read if not a number. Skips all blanks
+ and tabs, returns 0 for EOF. */
+
+#include <ctype.h>
+@end group
+
+@group
+yylex ()
+@{
+ int c;
+
+ /* skip white space */
+ while ((c = getchar ()) == ' ' || c == '\t')
+ ;
+@end group
+@group
+ /* process numbers */
+ if (c == '.' || isdigit (c))
+ @{
+ ungetc (c, stdin);
+ scanf ("%lf", &yylval);
+ return NUM;
+ @}
+@end group
+@group
+ /* return end-of-file */
+ if (c == EOF)
+ return 0;
+ /* return single chars */
+ return c;
+@}
+@end group
+@end example
+
+@node Rpcalc Main, Rpcalc Error, Rpcalc Lexer, RPN Calc
+@subsection The Controlling Function
+@cindex controlling function
+@cindex main function in simple example
+
+In keeping with the spirit of this example, the controlling function is
+kept to the bare minimum. The only requirement is that it call
+@code{yyparse} to start the process of parsing.
+
+@example
+@group
+main ()
+@{
+ yyparse ();
+@}
+@end group
+@end example
+
+@node Rpcalc Error, Rpcalc Gen, Rpcalc Main, RPN Calc
+@subsection The Error Reporting Routine
+@cindex error reporting routine
+
+When @code{yyparse} detects a syntax error, it calls the error reporting
+function @code{yyerror} to print an error message (usually but not always
+@code{"parse error"}). It is up to the programmer to supply @code{yyerror}
+(@pxref{Interface, ,Parser C-Language Interface}), so here is the definition we will use:
+
+@example
+@group
+#include <stdio.h>
+
+yyerror (s) /* Called by yyparse on error */
+ char *s;
+@{
+ printf ("%s\n", s);
+@}
+@end group
+@end example
+
+After @code{yyerror} returns, the Bison parser may recover from the error
+and continue parsing if the grammar contains a suitable error rule
+(@pxref{Error Recovery}). Otherwise, @code{yyparse} returns nonzero. We
+have not written any error rules in this example, so any invalid input will
+cause the calculator program to exit. This is not clean behavior for a
+real calculator, but it is adequate in the first example.
+
+@node Rpcalc Gen, Rpcalc Compile, Rpcalc Error, RPN Calc
+@subsection Running Bison to Make the Parser
+@cindex running Bison (introduction)
+
+Before running Bison to produce a parser, we need to decide how to arrange
+all the source code in one or more source files. For such a simple example,
+the easiest thing is to put everything in one file. The definitions of
+@code{yylex}, @code{yyerror} and @code{main} go at the end, in the
+``additional C code'' section of the file (@pxref{Grammar Layout, ,The Overall Layout of a Bison Grammar}).
+
+For a large project, you would probably have several source files, and use
+@code{make} to arrange to recompile them.
+
+With all the source in a single file, you use the following command to
+convert it into a parser file:
+
+@example
+bison @var{file_name}.y
+@end example
+
+@noindent
+In this example the file was called @file{rpcalc.y} (for ``Reverse Polish
+CALCulator''). Bison produces a file named @file{@var{file_name}.tab.c},
+removing the @samp{.y} from the original file name. The file output by
+Bison contains the source code for @code{yyparse}. The additional
+functions in the input file (@code{yylex}, @code{yyerror} and @code{main})
+are copied verbatim to the output.
+
+@node Rpcalc Compile, , Rpcalc Gen, RPN Calc
+@subsection Compiling the Parser File
+@cindex compiling the parser
+
+Here is how to compile and run the parser file:
+
+@example
+@group
+# @r{List files in current directory.}
+% ls
+rpcalc.tab.c rpcalc.y
+@end group
+
+@group
+# @r{Compile the Bison parser.}
+# @r{@samp{-lm} tells compiler to search math library for @code{pow}.}
+% cc rpcalc.tab.c -lm -o rpcalc
+@end group
+
+@group
+# @r{List files again.}
+% ls
+rpcalc rpcalc.tab.c rpcalc.y
+@end group
+@end example
+
+The file @file{rpcalc} now contains the executable code. Here is an
+example session using @code{rpcalc}.
+
+@example
+% rpcalc
+4 9 +
+13
+3 7 + 3 4 5 *+-
+-13
+3 7 + 3 4 5 * + - n @r{Note the unary minus, @samp{n}}
+13
+5 6 / 4 n +
+-3.166666667
+3 4 ^ @r{Exponentiation}
+81
+^D @r{End-of-file indicator}
+%
+@end example
+
+@node Infix Calc, Simple Error Recovery, RPN Calc, Examples
+@section Infix Notation Calculator: @code{calc}
+@cindex infix notation calculator
+@cindex @code{calc}
+@cindex calculator, infix notation
+
+We now modify rpcalc to handle infix operators instead of postfix. Infix
+notation involves the concept of operator precedence and the need for
+parentheses nested to arbitrary depth. Here is the Bison code for
+@file{calc.y}, an infix desk-top calculator.
+
+@example
+/* Infix notation calculator--calc */
+
+%@{
+#define YYSTYPE double
+#include <math.h>
+%@}
+
+/* BISON Declarations */
+%token NUM
+%left '-' '+'
+%left '*' '/'
+%left NEG /* negation--unary minus */
+%right '^' /* exponentiation */
+
+/* Grammar follows */
+%%
+input: /* empty string */
+ | input line
+;
+
+line: '\n'
+ | exp '\n' @{ printf ("\t%.10g\n", $1); @}
+;
+
+exp: NUM @{ $$ = $1; @}
+ | exp '+' exp @{ $$ = $1 + $3; @}
+ | exp '-' exp @{ $$ = $1 - $3; @}
+ | exp '*' exp @{ $$ = $1 * $3; @}
+ | exp '/' exp @{ $$ = $1 / $3; @}
+ | '-' exp %prec NEG @{ $$ = -$2; @}
+ | exp '^' exp @{ $$ = pow ($1, $3); @}
+ | '(' exp ')' @{ $$ = $2; @}
+;
+%%
+@end example
+
+@noindent
+The functions @code{yylex}, @code{yyerror} and @code{main} can be the same
+as before.
+
+There are two important new features shown in this code.
+
+In the second section (Bison declarations), @code{%left} declares token
+types and says they are left-associative operators. The declarations
+@code{%left} and @code{%right} (right associativity) take the place of
+@code{%token} which is used to declare a token type name without
+associativity. (These tokens are single-character literals, which
+ordinarily don't need to be declared. We declare them here to specify
+the associativity.)
+
+Operator precedence is determined by the line ordering of the
+declarations; the higher the line number of the declaration (lower on
+the page or screen), the higher the precedence. Hence, exponentiation
+has the highest precedence, unary minus (@code{NEG}) is next, followed
+by @samp{*} and @samp{/}, and so on. @xref{Precedence, ,Operator Precedence}.
+
+The other important new feature is the @code{%prec} in the grammar section
+for the unary minus operator. The @code{%prec} simply instructs Bison that
+the rule @samp{| '-' exp} has the same precedence as @code{NEG}---in this
+case the next-to-highest. @xref{Contextual Precedence, ,Context-Dependent Precedence}.
+
+Here is a sample run of @file{calc.y}:
+
+@need 500
+@example
+% calc
+4 + 4.5 - (34/(8*3+-3))
+6.880952381
+-56 + 2
+-54
+3 ^ 2
+9
+@end example
+
+@node Simple Error Recovery, Multi-function Calc, Infix Calc, Examples
+@section Simple Error Recovery
+@cindex error recovery, simple
+
+Up to this point, this manual has not addressed the issue of @dfn{error
+recovery}---how to continue parsing after the parser detects a syntax
+error. All we have handled is error reporting with @code{yyerror}. Recall
+that by default @code{yyparse} returns after calling @code{yyerror}. This
+means that an erroneous input line causes the calculator program to exit.
+Now we show how to rectify this deficiency.
+
+The Bison language itself includes the reserved word @code{error}, which
+may be included in the grammar rules. In the example below it has
+been added to one of the alternatives for @code{line}:
+
+@example
+@group
+line: '\n'
+ | exp '\n' @{ printf ("\t%.10g\n", $1); @}
+ | error '\n' @{ yyerrok; @}
+;
+@end group
+@end example
+
+This addition to the grammar allows for simple error recovery in the event
+of a parse error. If an expression that cannot be evaluated is read, the
+error will be recognized by the third rule for @code{line}, and parsing
+will continue. (The @code{yyerror} function is still called upon to print
+its message as well.) The action executes the statement @code{yyerrok}, a
+macro defined automatically by Bison; its meaning is that error recovery is
+complete (@pxref{Error Recovery}). Note the difference between
+@code{yyerrok} and @code{yyerror}; neither one is a misprint.@refill
+
+This form of error recovery deals with syntax errors. There are other
+kinds of errors; for example, division by zero, which raises an exception
+signal that is normally fatal. A real calculator program must handle this
+signal and use @code{longjmp} to return to @code{main} and resume parsing
+input lines; it would also have to discard the rest of the current line of
+input. We won't discuss this issue further because it is not specific to
+Bison programs.
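+
+A rough sketch of that approach (it is not specific to Bison, and the
+work of discarding the rest of the input line is omitted here) might
+look like this:
+
+@smallexample
+@group
+#include <stdio.h>
+#include <setjmp.h>
+#include <signal.h>
+
+jmp_buf calc_restart;
+
+handle_fpe (sig)     /* Called when the signal is raised */
+     int sig;
+@{
+  signal (SIGFPE, handle_fpe);   /* re-arm the handler */
+  longjmp (calc_restart, 1);     /* abandon the current line */
+@}
+@end group
+
+@group
+main ()
+@{
+  signal (SIGFPE, handle_fpe);
+  if (setjmp (calc_restart))
+    printf ("arithmetic error\n");
+  yyparse ();
+@}
+@end group
+@end smallexample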
+
+@node Multi-function Calc, Exercises, Simple Error Recovery, Examples
+@section Multi-Function Calculator: @code{mfcalc}
+@cindex multi-function calculator
+@cindex @code{mfcalc}
+@cindex calculator, multi-function
+
+Now that the basics of Bison have been discussed, it is time to move on to
+a more advanced problem. The above calculators provided only five
+functions, @samp{+}, @samp{-}, @samp{*}, @samp{/} and @samp{^}. It would
+be nice to have a calculator that provides other mathematical functions such
+as @code{sin}, @code{cos}, etc.
+
+It is easy to add new operators to the infix calculator as long as they are
+only single-character literals. The lexical analyzer @code{yylex} passes
+back all non-number characters as tokens, so new grammar rules suffice for
+adding a new operator. But we want something more flexible: built-in
+functions whose syntax has this form:
+
+@example
+@var{function_name} (@var{argument})
+@end example
+
+@noindent
+At the same time, we will add memory to the calculator, by allowing you
+to create named variables, store values in them, and use them later.
+Here is a sample session with the multi-function calculator:
+
+@example
+% mfcalc
+pi = 3.141592653589
+3.1415926536
+sin(pi)
+0.0000000000
+alpha = beta1 = 2.3
+2.3000000000
+alpha
+2.3000000000
+ln(alpha)
+0.8329091229
+exp(ln(beta1))
+2.3000000000
+%
+@end example
+
+Note that multiple assignment and nested function calls are permitted.
+
+@menu
+* Decl: Mfcalc Decl. Bison declarations for multi-function calculator.
+* Rules: Mfcalc Rules. Grammar rules for the calculator.
+* Symtab: Mfcalc Symtab. Symbol table management subroutines.
+@end menu
+
+@node Mfcalc Decl, Mfcalc Rules, , Multi-function Calc
+@subsection Declarations for @code{mfcalc}
+
+Here are the C and Bison declarations for the multi-function calculator.
+
+@smallexample
+%@{
+#include <math.h> /* For math functions, cos(), sin(), etc. */
+#include "calc.h" /* Contains definition of `symrec' */
+%@}
+%union @{
+double val; /* For returning numbers. */
+symrec *tptr; /* For returning symbol-table pointers */
+@}
+
+%token <val> NUM /* Simple double precision number */
+%token <tptr> VAR FNCT /* Variable and Function */
+%type <val> exp
+
+%right '='
+%left '-' '+'
+%left '*' '/'
+%left NEG /* Negation--unary minus */
+%right '^' /* Exponentiation */
+
+/* Grammar follows */
+
+%%
+@end smallexample
+
+The above grammar introduces only two new features of the Bison language.
+These features allow semantic values to have various data types
+(@pxref{Multiple Types, ,More Than One Value Type}).
+
+The @code{%union} declaration specifies the entire list of possible types;
+this is instead of defining @code{YYSTYPE}. The allowable types are now
+double-floats (for @code{exp} and @code{NUM}) and pointers to entries in
+the symbol table. @xref{Union Decl, ,The Collection of Value Types}.
+
+Since values can now have various types, it is necessary to associate a
+type with each grammar symbol whose semantic value is used. These symbols
+are @code{NUM}, @code{VAR}, @code{FNCT}, and @code{exp}. Their
+declarations are augmented with information about their data type (placed
+between angle brackets).
+
+The Bison construct @code{%type} is used for declaring nonterminal symbols,
+just as @code{%token} is used for declaring token types. We have not used
+@code{%type} before because nonterminal symbols are normally declared
+implicitly by the rules that define them. But @code{exp} must be declared
+explicitly so we can specify its value type. @xref{Type Decl, ,Nonterminal Symbols}.
+
+@node Mfcalc Rules, Mfcalc Symtab, Mfcalc Decl, Multi-function Calc
+@subsection Grammar Rules for @code{mfcalc}
+
+Here are the grammar rules for the multi-function calculator.
+Most of them are copied directly from @code{calc}; three rules,
+those which mention @code{VAR} or @code{FNCT}, are new.
+
+@smallexample
+input: /* empty */
+ | input line
+;
+
+line:
+ '\n'
+ | exp '\n' @{ printf ("\t%.10g\n", $1); @}
+ | error '\n' @{ yyerrok; @}
+;
+
+exp: NUM @{ $$ = $1; @}
+ | VAR @{ $$ = $1->value.var; @}
+ | VAR '=' exp @{ $$ = $3; $1->value.var = $3; @}
+ | FNCT '(' exp ')' @{ $$ = (*($1->value.fnctptr))($3); @}
+ | exp '+' exp @{ $$ = $1 + $3; @}
+ | exp '-' exp @{ $$ = $1 - $3; @}
+ | exp '*' exp @{ $$ = $1 * $3; @}
+ | exp '/' exp @{ $$ = $1 / $3; @}
+ | '-' exp %prec NEG @{ $$ = -$2; @}
+ | exp '^' exp @{ $$ = pow ($1, $3); @}
+ | '(' exp ')' @{ $$ = $2; @}
+;
+/* End of grammar */
+%%
+@end smallexample
+
+@node Mfcalc Symtab, , Mfcalc Rules, Multi-function Calc
+@subsection The @code{mfcalc} Symbol Table
+@cindex symbol table example
+
+The multi-function calculator requires a symbol table to keep track of the
+names and meanings of variables and functions. This doesn't affect the
+grammar rules (except for the actions) or the Bison declarations, but it
+requires some additional C functions for support.
+
+The symbol table itself consists of a linked list of records. Its
+definition, which is kept in the header @file{calc.h}, is as follows. It
+provides for either functions or variables to be placed in the table.
+
+@smallexample
+@group
+/* Data type for links in the chain of symbols. */
+struct symrec
+@{
+ char *name; /* name of symbol */
+ int type; /* type of symbol: either VAR or FNCT */
+ union @{
+ double var; /* value of a VAR */
+ double (*fnctptr)(); /* value of a FNCT */
+ @} value;
+ struct symrec *next; /* link field */
+@};
+@end group
+
+@group
+typedef struct symrec symrec;
+
+/* The symbol table: a chain of `struct symrec'. */
+extern symrec *sym_table;
+
+symrec *putsym ();
+symrec *getsym ();
+@end group
+@end smallexample
+
+The new version of @code{main} includes a call to @code{init_table}, a
+function that initializes the symbol table. Here it is, and
+@code{init_table} as well:
+
+@smallexample
+@group
+#include <stdio.h>
+
+main ()
+@{
+ init_table ();
+ yyparse ();
+@}
+@end group
+
+@group
+yyerror (s) /* Called by yyparse on error */
+ char *s;
+@{
+ printf ("%s\n", s);
+@}
+
+struct init
+@{
+ char *fname;
+ double (*fnct)();
+@};
+@end group
+
+@group
+struct init arith_fncts[]
+ = @{
+ "sin", sin,
+ "cos", cos,
+ "atan", atan,
+ "ln", log,
+ "exp", exp,
+ "sqrt", sqrt,
+ 0, 0
+ @};
+
+/* The symbol table: a chain of `struct symrec'. */
+symrec *sym_table = (symrec *)0;
+@end group
+
+@group
+init_table () /* puts arithmetic functions in table. */
+@{
+ int i;
+ symrec *ptr;
+ for (i = 0; arith_fncts[i].fname != 0; i++)
+ @{
+ ptr = putsym (arith_fncts[i].fname, FNCT);
+ ptr->value.fnctptr = arith_fncts[i].fnct;
+ @}
+@}
+@end group
+@end smallexample
+
+By simply editing the initialization list and adding the necessary include
+files, you can add additional functions to the calculator.
+
+Two important functions allow look-up and installation of symbols in the
+symbol table. The function @code{putsym} is passed a name and the type
+(@code{VAR} or @code{FNCT}) of the object to be installed. The object is
+linked to the front of the list, and a pointer to the object is returned.
+The function @code{getsym} is passed the name of the symbol to look up. If
+found, a pointer to that symbol is returned; otherwise zero is returned.
+
+@smallexample
+symrec *
+putsym (sym_name,sym_type)
+ char *sym_name;
+ int sym_type;
+@{
+ symrec *ptr;
+ ptr = (symrec *) malloc (sizeof (symrec));
+ ptr->name = (char *) malloc (strlen (sym_name) + 1);
+ strcpy (ptr->name,sym_name);
+ ptr->type = sym_type;
+ ptr->value.var = 0; /* set value to 0 even if fctn. */
+ ptr->next = (struct symrec *)sym_table;
+ sym_table = ptr;
+ return ptr;
+@}
+
+symrec *
+getsym (sym_name)
+ char *sym_name;
+@{
+ symrec *ptr;
+ for (ptr = sym_table; ptr != (symrec *) 0;
+ ptr = (symrec *)ptr->next)
+ if (strcmp (ptr->name,sym_name) == 0)
+ return ptr;
+ return 0;
+@}
+@end smallexample
+
+The function @code{yylex} must now recognize variables, numeric values, and
+the single-character arithmetic operators. Strings of alphanumeric
+characters with a leading nondigit are recognized as either variables or
+functions depending on what the symbol table says about them.
+
+The string is passed to @code{getsym} for look up in the symbol table. If
+the name appears in the table, a pointer to its location and its type
+(@code{VAR} or @code{FNCT}) is returned to @code{yyparse}. If it is not
+already in the table, then it is installed as a @code{VAR} using
+@code{putsym}. Again, a pointer and its type (which must be @code{VAR}) are
+returned to @code{yyparse}.@refill
+
+No change is needed in the handling of numeric values and arithmetic
+operators in @code{yylex}.
+
+@smallexample
+@group
+#include <ctype.h>
+yylex ()
+@{
+ int c;
+
+ /* Ignore whitespace, get first nonwhite character. */
+ while ((c = getchar ()) == ' ' || c == '\t');
+
+ if (c == EOF)
+ return 0;
+@end group
+
+@group
+ /* Char starts a number => parse the number. */
+ if (c == '.' || isdigit (c))
+ @{
+ ungetc (c, stdin);
+ scanf ("%lf", &yylval.val);
+ return NUM;
+ @}
+@end group
+
+@group
+ /* Char starts an identifier => read the name. */
+ if (isalpha (c))
+ @{
+ symrec *s;
+ static char *symbuf = 0;
+ static int length = 0;
+ int i;
+@end group
+
+@group
+ /* Initially make the buffer long enough
+ for a 40-character symbol name. */
+ if (length == 0)
+ length = 40, symbuf = (char *)malloc (length + 1);
+
+ i = 0;
+ do
+@end group
+@group
+ @{
+ /* If buffer is full, make it bigger. */
+ if (i == length)
+ @{
+ length *= 2;
+ symbuf = (char *)realloc (symbuf, length + 1);
+ @}
+ /* Add this character to the buffer. */
+ symbuf[i++] = c;
+ /* Get another character. */
+ c = getchar ();
+ @}
+@end group
+@group
+ while (c != EOF && isalnum (c));
+
+ ungetc (c, stdin);
+ symbuf[i] = '\0';
+@end group
+
+@group
+ s = getsym (symbuf);
+ if (s == 0)
+ s = putsym (symbuf, VAR);
+ yylval.tptr = s;
+ return s->type;
+ @}
+
+ /* Any other character is a token by itself. */
+ return c;
+@}
+@end group
+@end smallexample
+
+This program is both powerful and flexible. You may easily add new
+functions, and it is a simple job to modify this code to install predefined
+variables such as @code{pi} or @code{e} as well.
+
+@node Exercises, , Multi-function Calc, Examples
+@section Exercises
+@cindex exercises
+
+@enumerate
+@item
+Add some new functions from @file{math.h} to the initialization list.
+
+@item
+Add another array that contains constants and their values. Then
+modify @code{init_table} to add these constants to the symbol table.
+It will be easiest to give the constants type @code{VAR}.
+
+@item
+Make the program report an error if the user refers to an
+uninitialized variable in any way except to store a value in it.
+@end enumerate
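+
+As a hint for the second exercise, the constants table can be shaped
+like @code{arith_fncts} (a sketch only; the names and values here are
+merely illustrative):
+
+@smallexample
+@group
+struct init_const
+@{
+  char *cname;
+  double cval;
+@};
+
+struct init_const const_table[]
+  = @{
+      "pi", 3.14159265358979,
+      "e",  2.71828182845905,
+      0, 0
+    @};
+@end group
+
+@group
+/* Inside init_table, after the loop over arith_fncts: */
+for (i = 0; const_table[i].cname != 0; i++)
+  @{
+    ptr = putsym (const_table[i].cname, VAR);
+    ptr->value.var = const_table[i].cval;
+  @}
+@end group
+@end smallexample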
+
+@node Grammar File, Interface, Examples, Top
+@chapter Bison Grammar Files
+
+Bison takes as input a context-free grammar specification and produces a
+C-language function that recognizes correct instances of the grammar.
+
+The Bison grammar input file conventionally has a name ending in @samp{.y}.
+
+@menu
+* Grammar Outline:: Overall layout of the grammar file.
+* Symbols:: Terminal and nonterminal symbols.
+* Rules:: How to write grammar rules.
+* Recursion:: Writing recursive rules.
+* Semantics:: Semantic values and actions.
+* Declarations:: All kinds of Bison declarations are described here.
+* Multiple Parsers:: Putting more than one Bison parser in one program.
+@end menu
+
+@node Grammar Outline, Symbols, , Grammar File
+@section Outline of a Bison Grammar
+
+A Bison grammar file has four main sections, shown here with the
+appropriate delimiters:
+
+@example
+%@{
+@var{C declarations}
+%@}
+
+@var{Bison declarations}
+
+%%
+@var{Grammar rules}
+%%
+
+@var{Additional C code}
+@end example
+
+Comments enclosed in @samp{/* @dots{} */} may appear in any of the sections.
+
+@menu
+* C Declarations:: Syntax and usage of the C declarations section.
+* Bison Declarations:: Syntax and usage of the Bison declarations section.
+* Grammar Rules:: Syntax and usage of the grammar rules section.
+* C Code:: Syntax and usage of the additional C code section.
+@end menu
+
+@node C Declarations, Bison Declarations, , Grammar Outline
+@subsection The C Declarations Section
+@cindex C declarations section
+@cindex declarations, C
+
+The @var{C declarations} section contains macro definitions and
+declarations of functions and variables that are used in the actions in the
+grammar rules. These are copied to the beginning of the parser file so
+that they precede the definition of @code{yyparse}. You can use
+@samp{#include} to get the declarations from a header file. If you don't
+need any C declarations, you may omit the @samp{%@{} and @samp{%@}}
+delimiters that bracket this section.
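+
+For instance, a C declarations section might look like this (the names
+shown are only illustrative):
+
+@example
+%@{
+#include <stdio.h>     /* declarations used by the actions */
+#include "defs.h"      /* a hypothetical project header */
+#define MAXDEPTH 20    /* a macro used by the actions */
+%@}
+@end example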
+
+@node Bison Declarations, Grammar Rules, C Declarations, Grammar Outline
+@subsection The Bison Declarations Section
+@cindex Bison declarations (introduction)
+@cindex declarations, Bison (introduction)
+
+The @var{Bison declarations} section contains declarations that define
+terminal and nonterminal symbols, specify precedence, and so on.
+In some simple grammars you may not need any declarations.
+@xref{Declarations, ,Bison Declarations}.
+
+@node Grammar Rules, C Code, Bison Declarations, Grammar Outline
+@subsection The Grammar Rules Section
+@cindex grammar rules section
+@cindex rules section for grammar
+
+The @dfn{grammar rules} section contains one or more Bison grammar
+rules, and nothing else. @xref{Rules, ,Syntax of Grammar Rules}.
+
+There must always be at least one grammar rule, and the first
+@samp{%%} (which precedes the grammar rules) may never be omitted even
+if it is the first thing in the file.
+
+@node C Code, , Grammar Rules, Grammar Outline
+@subsection The Additional C Code Section
+@cindex additional C code section
+@cindex C code, section for additional
+
+The @var{additional C code} section is copied verbatim to the end of
+the parser file, just as the @var{C declarations} section is copied to
+the beginning. This is the most convenient place to put anything
+that you want to have in the parser file but which need not come before
+the definition of @code{yyparse}. For example, the definitions of
+@code{yylex} and @code{yyerror} often go here. @xref{Interface, ,Parser C-Language Interface}.
+
+If the last section is empty, you may omit the @samp{%%} that separates it
+from the grammar rules.
+
+The Bison parser itself contains many static variables whose names start
+with @samp{yy} and many macros whose names start with @samp{YY}. It is a
+good idea to avoid using any such names (except those documented in this
+manual) in the additional C code section of the grammar file.
+
+@node Symbols, Rules, Grammar Outline, Grammar File
+@section Symbols, Terminal and Nonterminal
+@cindex nonterminal symbol
+@cindex terminal symbol
+@cindex token type
+@cindex symbol
+
+@dfn{Symbols} in Bison grammars represent the grammatical classifications
+of the language.
+
+A @dfn{terminal symbol} (also known as a @dfn{token type}) represents a
+class of syntactically equivalent tokens. You use the symbol in grammar
+rules to mean that a token in that class is allowed. The symbol is
+represented in the Bison parser by a numeric code, and the @code{yylex}
+function returns a token type code to indicate what kind of token has been
+read. You don't need to know what the code value is; you can use the
+symbol to stand for it.
+
+A @dfn{nonterminal symbol} stands for a class of syntactically equivalent
+groupings. The symbol name is used in writing grammar rules. By convention,
+it should be all lower case.
+
+Symbol names can contain letters, digits (not at the beginning),
+underscores and periods. Periods make sense only in nonterminals.
+
+There are three ways of writing terminal symbols in the grammar:
+
+@itemize @bullet
+@item
+A @dfn{named token type} is written with an identifier, like an
+identifier in C. By convention, it should be all upper case. Each
+such name must be defined with a Bison declaration such as
+@code{%token}. @xref{Token Decl, ,Token Type Names}.
+
+@item
+@cindex character token
+@cindex literal token
+@cindex single-character literal
+A @dfn{character token type} (or @dfn{literal character token}) is
+written in the grammar using the same syntax used in C for character
+constants; for example, @code{'+'} is a character token type. A
+character token type doesn't need to be declared unless you need to
+specify its semantic value data type (@pxref{Value Type, ,Data Types of
+Semantic Values}), associativity, or precedence (@pxref{Precedence,
+,Operator Precedence}).
+
+By convention, a character token type is used only to represent a
+token that consists of that particular character. Thus, the token
+type @code{'+'} is used to represent the character @samp{+} as a
+token. Nothing enforces this convention, but if you depart from it,
+your program will confuse other readers.
+
+All the usual escape sequences used in character literals in C can be
+used in Bison as well, but you must not use the null character as a
+character literal because its ASCII code, zero, is the code @code{yylex}
+returns for end-of-input (@pxref{Calling Convention, ,Calling Convention
+for @code{yylex}}).
+
+@item
+@cindex string token
+@cindex literal string token
+@cindex multi-character literal
+A @dfn{literal string token} is written like a C string constant; for
+example, @code{"<="} is a literal string token. A literal string token
+doesn't need to be declared unless you need to specify its semantic
+value data type (@pxref{Value Type}), associativity, or precedence
+(@pxref{Precedence}).
+
+You can associate the literal string token with a symbolic name as an
+alias, using the @code{%token} declaration (@pxref{Token Decl, ,Token
+Declarations}). If you don't do that, the lexical analyzer has to
+retrieve the token number for the literal string token from the
+@code{yytname} table (@pxref{Calling Convention}).
+
+@strong{WARNING}: literal string tokens do not work in Yacc.
+
+By convention, a literal string token is used only to represent a token
+that consists of that particular string. Thus, you should use the token
+type @code{"<="} to represent the string @samp{<=} as a token. Bison
+does not enforce this convention, but if you depart from it, people who
+read your program will be confused.
+
+All the escape sequences used in string literals in C can be used in
+Bison as well. A literal string token must contain two or more
+characters; for a token containing just one character, use a character
+token (see above).
+@end itemize
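+
+As an illustrative sketch (the particular symbols here are invented),
+all three forms might appear together in one grammar:
+
+@example
+@group
+%token NUM          /* named token type */
+%token LE "<="      /* literal string token with an alias */
+
+%%
+
+exp: exp '+' exp    /* character token type */
+   | exp "<=" exp
+   | NUM
+   ;
+@end group
+@end example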
+
+How you choose to write a terminal symbol has no effect on its
+grammatical meaning. That depends only on where it appears in rules and
+on when the parser function returns that symbol.
+
+The value returned by @code{yylex} is always one of the terminal symbols
+(or 0 for end-of-input). Whichever way you write the token type in the
+grammar rules, you write it the same way in the definition of @code{yylex}.
+The numeric code for a character token type is simply the ASCII code for
+the character, so @code{yylex} can use the identical character constant to
+generate the requisite code. Each named token type becomes a C macro in
+the parser file, so @code{yylex} can use the name to stand for the code.
+(This is why periods don't make sense in terminal symbols.)
+@xref{Calling Convention, ,Calling Convention for @code{yylex}}.
+
+If @code{yylex} is defined in a separate file, you need to arrange for the
+token-type macro definitions to be available there. Use the @samp{-d}
+option when you run Bison, so that it will write these macro definitions
+into a separate header file @file{@var{name}.tab.h} which you can include
+in the other source files that need it. @xref{Invocation, ,Invoking Bison}.
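+
+For example (the file name here is hypothetical), if the header
+generated for your grammar is @file{calc.tab.h}, the file that
+defines @code{yylex} would simply include it:
+
+@example
+/* In the source file that defines yylex.  */
+#include "calc.tab.h"   /* token-type macro definitions */
+@end example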
+
+The symbol @code{error} is a terminal symbol reserved for error recovery
+(@pxref{Error Recovery}); you shouldn't use it for any other purpose.
+In particular, @code{yylex} should never return this value.
+
+@node Rules, Recursion, Symbols, Grammar File
+@section Syntax of Grammar Rules
+@cindex rule syntax
+@cindex grammar rule syntax
+@cindex syntax of grammar rules
+
+A Bison grammar rule has the following general form:
+
+@example
+@group
+@var{result}: @var{components}@dots{}
+ ;
+@end group
+@end example
+
+@noindent
+where @var{result} is the nonterminal symbol that this rule describes
+and @var{components} are various terminal and nonterminal symbols that
+are put together by this rule (@pxref{Symbols}).
+
+For example,
+
+@example
+@group
+exp: exp '+' exp
+ ;
+@end group
+@end example
+
+@noindent
+says that two groupings of type @code{exp}, with a @samp{+} token in between,
+can be combined into a larger grouping of type @code{exp}.
+
+Whitespace in rules is significant only to separate symbols. You can add
+extra whitespace as you wish.
+
+Scattered among the components can be @var{actions} that determine
+the semantics of the rule. An action looks like this:
+
+@example
+@{@var{C statements}@}
+@end example
+
+@noindent
+Usually there is only one action and it follows the components.
+@xref{Actions}.
+
+@findex |
+Multiple rules for the same @var{result} can be written separately or can
+be joined with the vertical-bar character @samp{|} as follows:
+
+@ifinfo
+@example
+@var{result}: @var{rule1-components}@dots{}
+ | @var{rule2-components}@dots{}
+ @dots{}
+ ;
+@end example
+@end ifinfo
+@iftex
+@example
+@group
+@var{result}: @var{rule1-components}@dots{}
+ | @var{rule2-components}@dots{}
+ @dots{}
+ ;
+@end group
+@end example
+@end iftex
+
+@noindent
+They are still considered distinct rules even when joined in this way.
+
+If @var{components} in a rule is empty, it means that @var{result} can
+match the empty string. For example, here is how to define a
+comma-separated sequence of zero or more @code{exp} groupings:
+
+@example
+@group
+expseq: /* empty */
+ | expseq1
+ ;
+@end group
+
+@group
+expseq1: exp
+ | expseq1 ',' exp
+ ;
+@end group
+@end example
+
+@noindent
+It is customary to write a comment @samp{/* empty */} in each rule
+with no components.
+
+@node Recursion, Semantics, Rules, Grammar File
+@section Recursive Rules
+@cindex recursive rule
+
+A rule is called @dfn{recursive} when its @var{result} nonterminal appears
+also on its right hand side. Nearly all Bison grammars need to use
+recursion, because that is the only way to define a sequence of any number
+of somethings. Consider this recursive definition of a comma-separated
+sequence of one or more expressions:
+
+@example
+@group
+expseq1: exp
+ | expseq1 ',' exp
+ ;
+@end group
+@end example
+
+@cindex left recursion
+@cindex right recursion
+@noindent
+Since the recursive use of @code{expseq1} is the leftmost symbol in the
+right hand side, we call this @dfn{left recursion}. By contrast, here
+the same construct is defined using @dfn{right recursion}:
+
+@example
+@group
+expseq1: exp
+ | exp ',' expseq1
+ ;
+@end group
+@end example
+
+@noindent
+Any kind of sequence can be defined using either left recursion or
+right recursion, but you should always use left recursion, because it
+can parse a sequence of any number of elements with bounded stack
+space. Right recursion uses up space on the Bison stack in proportion
+to the number of elements in the sequence, because all the elements
+must be shifted onto the stack before the rule can be applied even
+once. @xref{Algorithm, ,The Bison Parser Algorithm}, for
+further explanation of this.
+
+@cindex mutual recursion
+@dfn{Indirect} or @dfn{mutual} recursion occurs when the result of the
+rule does not appear directly on its right hand side, but does appear
+in rules for other nonterminals which do appear on its right hand
+side.
+
+For example:
+
+@example
+@group
+expr: primary
+ | primary '+' primary
+ ;
+@end group
+
+@group
+primary: constant
+ | '(' expr ')'
+ ;
+@end group
+@end example
+
+@noindent
+defines two mutually-recursive nonterminals, since each refers to the
+other.
+
+@node Semantics, Declarations, Recursion, Grammar File
+@section Defining Language Semantics
+@cindex defining language semantics
+@cindex language semantics, defining
+
+The grammar rules for a language determine only the syntax. The semantics
+are determined by the semantic values associated with various tokens and
+groupings, and by the actions taken when various groupings are recognized.
+
+For example, the calculator calculates properly because the value
+associated with each expression is the proper number; it adds properly
+because the action for the grouping @w{@samp{@var{x} + @var{y}}} is to add
+the numbers associated with @var{x} and @var{y}.
+
+@menu
+* Value Type:: Specifying one data type for all semantic values.
+* Multiple Types:: Specifying several alternative data types.
+* Actions:: An action is the semantic definition of a grammar rule.
+* Action Types:: Specifying data types for actions to operate on.
+* Mid-Rule Actions:: Most actions go at the end of a rule.
+ This says when, why and how to use the exceptional
+ action in the middle of a rule.
+@end menu
+
+@node Value Type, Multiple Types, , Semantics
+@subsection Data Types of Semantic Values
+@cindex semantic value type
+@cindex value type, semantic
+@cindex data types of semantic values
+@cindex default data type
+
+In a simple program it may be sufficient to use the same data type for
+the semantic values of all language constructs. This was true in the
+RPN and infix calculator examples (@pxref{RPN Calc, ,Reverse Polish Notation Calculator}).
+
+Bison's default is to use type @code{int} for all semantic values. To
+specify some other type, define @code{YYSTYPE} as a macro, like this:
+
+@example
+#define YYSTYPE double
+@end example
+
+@noindent
+This macro definition must go in the C declarations section of the grammar
+file (@pxref{Grammar Outline, ,Outline of a Bison Grammar}).
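+
+Concretely, that section is the part of the grammar file delimited by
+@samp{%@{} and @samp{%@}}, so the definition would appear like this:
+
+@example
+@group
+%@{
+#define YYSTYPE double
+%@}
+@end group
+@end example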
+
+@node Multiple Types, Actions, Value Type, Semantics
+@subsection More Than One Value Type
+
+In most programs, you will need different data types for different kinds
+of tokens and groupings. For example, a numeric constant may need type
+@code{int} or @code{long}, while a string constant needs type @code{char *},
+and an identifier might need a pointer to an entry in the symbol table.
+
+To use more than one data type for semantic values in one parser, Bison
+requires you to do two things:
+
+@itemize @bullet
+@item
+Specify the entire collection of possible data types, with the
+@code{%union} Bison declaration (@pxref{Union Decl, ,The Collection of Value Types}).
+
+@item
+Choose one of those types for each symbol (terminal or nonterminal)
+for which semantic values are used. This is done for tokens with the
+@code{%token} Bison declaration (@pxref{Token Decl, ,Token Type Names}) and for groupings
+with the @code{%type} Bison declaration (@pxref{Type Decl, ,Nonterminal Symbols}).
+@end itemize
+
+@node Actions, Action Types, Multiple Types, Semantics
+@subsection Actions
+@cindex action
+@vindex $$
+@vindex $@var{n}
+
+An action accompanies a syntactic rule and contains C code to be executed
+each time an instance of that rule is recognized. The task of most actions
+is to compute a semantic value for the grouping built by the rule from the
+semantic values associated with tokens or smaller groupings.
+
+An action consists of C statements surrounded by braces, much like a
+compound statement in C. It can be placed at any position in the rule; it
+is executed at that position. Most rules have just one action at the end
+of the rule, following all the components. Actions in the middle of a rule
+are tricky and used only for special purposes (@pxref{Mid-Rule Actions, ,Actions in Mid-Rule}).
+
+The C code in an action can refer to the semantic values of the components
+matched by the rule with the construct @code{$@var{n}}, which stands for
+the value of the @var{n}th component. The semantic value for the grouping
+being constructed is @code{$$}. (Bison translates both of these constructs
+into array element references when it copies the actions into the parser
+file.)
+
+Here is a typical example:
+
+@example
+@group
+exp: @dots{}
+ | exp '+' exp
+ @{ $$ = $1 + $3; @}
+@end group
+@end example
+
+@noindent
+This rule constructs an @code{exp} from two smaller @code{exp} groupings
+connected by a plus-sign token. In the action, @code{$1} and @code{$3}
+refer to the semantic values of the two component @code{exp} groupings,
+which are the first and third symbols on the right hand side of the rule.
+The sum is stored into @code{$$} so that it becomes the semantic value of
+the addition-expression just recognized by the rule. If there were a
+useful semantic value associated with the @samp{+} token, it could be
+referred to as @code{$2}.@refill
+
+@cindex default action
+If you don't specify an action for a rule, Bison supplies a default:
+@w{@code{$$ = $1}.} Thus, the value of the first symbol in the rule becomes
+the value of the whole rule. Of course, the default rule is valid only
+if the two data types match. There is no meaningful default action for
+an empty rule; every empty rule must have an explicit action unless the
+rule's value does not matter.
+
+@code{$@var{n}} with @var{n} zero or negative is allowed for reference
+to tokens and groupings on the stack @emph{before} those that match the
+current rule. This is a very risky practice, and to use it reliably
+you must be certain of the context in which the rule is applied. Here
+is a case in which you can use this reliably:
+
+@example
+@group
+foo: expr bar '+' expr @{ @dots{} @}
+ | expr bar '-' expr @{ @dots{} @}
+ ;
+@end group
+
+@group
+bar: /* empty */
+ @{ previous_expr = $0; @}
+ ;
+@end group
+@end example
+
+As long as @code{bar} is used only in the fashion shown here, @code{$0}
+always refers to the @code{expr} which precedes @code{bar} in the
+definition of @code{foo}.
+
+@node Action Types, Mid-Rule Actions, Actions, Semantics
+@subsection Data Types of Values in Actions
+@cindex action data types
+@cindex data types in actions
+
+If you have chosen a single data type for semantic values, the @code{$$}
+and @code{$@var{n}} constructs always have that data type.
+
+If you have used @code{%union} to specify a variety of data types, then you
+must declare a choice among these types for each terminal or nonterminal
+symbol that can have a semantic value. Then each time you use @code{$$} or
+@code{$@var{n}}, its data type is determined by which symbol it refers to
+in the rule. In this example,@refill
+
+@example
+@group
+exp: @dots{}
+ | exp '+' exp
+ @{ $$ = $1 + $3; @}
+@end group
+@end example
+
+@noindent
+@code{$1} and @code{$3} refer to instances of @code{exp}, so they all
+have the data type declared for the nonterminal symbol @code{exp}. If
+@code{$2} were used, it would have the data type declared for the
+terminal symbol @code{'+'}, whatever that might be.@refill
+
+Alternatively, you can specify the data type when you refer to the value,
+by inserting @samp{<@var{type}>} after the @samp{$} at the beginning of the
+reference. For example, if you have defined types as shown here:
+
+@example
+@group
+%union @{
+ int itype;
+ double dtype;
+@}
+@end group
+@end example
+
+@noindent
+then you can write @code{$<itype>1} to refer to the first subunit of the
+rule as an integer, or @code{$<dtype>1} to refer to it as a double.
+
+@node Mid-Rule Actions, , Action Types, Semantics
+@subsection Actions in Mid-Rule
+@cindex actions in mid-rule
+@cindex mid-rule actions
+
+Occasionally it is useful to put an action in the middle of a rule.
+These actions are written just like usual end-of-rule actions, but they
+are executed before the parser even recognizes the following components.
+
+A mid-rule action may refer to the components preceding it using
+@code{$@var{n}}, but it may not refer to subsequent components because
+it is run before they are parsed.
+
+The mid-rule action itself counts as one of the components of the rule.
+This makes a difference when there is another action later in the same rule
+(and usually there is another at the end): you have to count the actions
+along with the symbols when working out which number @var{n} to use in
+@code{$@var{n}}.
+
+The mid-rule action can also have a semantic value. The action can set
+its value with an assignment to @code{$$}, and actions later in the rule
+can refer to the value using @code{$@var{n}}. Since there is no symbol
+to name the action, there is no way to declare a data type for the value
+in advance, so you must use the @samp{$<@dots{}>} construct to specify a
+data type each time you refer to this value.
+
+There is no way to set the value of the entire rule with a mid-rule
+action, because assignments to @code{$$} do not have that effect. The
+only way to set the value for the entire rule is with an ordinary action
+at the end of the rule.
+
+Here is an example from a hypothetical compiler, handling a @code{let}
+statement that looks like @samp{let (@var{variable}) @var{statement}} and
+serves to create a variable named @var{variable} temporarily for the
+duration of @var{statement}. To parse this construct, we must put
+@var{variable} into the symbol table while @var{statement} is parsed, then
+remove it afterward. Here is how it is done:
+
+@example
+@group
+stmt: LET '(' var ')'
+ @{ $<context>$ = push_context ();
+ declare_variable ($3); @}
+ stmt @{ $$ = $6;
+ pop_context ($<context>5); @}
+@end group
+@end example
+
+@noindent
+As soon as @samp{let (@var{variable})} has been recognized, the first
+action is run. It saves a copy of the current semantic context (the
+list of accessible variables) as its semantic value, using alternative
+@code{context} in the data-type union. Then it calls
+@code{declare_variable} to add the new variable to that list. Once the
+first action is finished, the embedded statement @code{stmt} can be
+parsed. Note that the mid-rule action is component number 5, so the
+@samp{stmt} is component number 6.
+
+After the embedded statement is parsed, its semantic value becomes the
+value of the entire @code{let}-statement. Then the semantic value from the
+earlier action is used to restore the prior list of variables. This
+removes the temporary @code{let}-variable from the list so that it won't
+appear to exist while the rest of the program is parsed.
+
+Taking action before a rule is completely recognized often leads to
+conflicts since the parser must commit to a parse in order to execute the
+action. For example, the following two rules, without mid-rule actions,
+can coexist in a working parser because the parser can shift the open-brace
+token and look at what follows before deciding whether there is a
+declaration or not:
+
+@example
+@group
+compound: '@{' declarations statements '@}'
+ | '@{' statements '@}'
+ ;
+@end group
+@end example
+
+@noindent
+But when we add a mid-rule action as follows, the rules become nonfunctional:
+
+@example
+@group
+compound: @{ prepare_for_local_variables (); @}
+ '@{' declarations statements '@}'
+@end group
+@group
+ | '@{' statements '@}'
+ ;
+@end group
+@end example
+
+@noindent
+Now the parser is forced to decide whether to run the mid-rule action
+when it has read no farther than the open-brace. In other words, it
+must commit to using one rule or the other, without sufficient
+information to do it correctly. (The open-brace token is what is called
+the @dfn{look-ahead} token at this time, since the parser is still
+deciding what to do about it. @xref{Look-Ahead, ,Look-Ahead Tokens}.)
+
+You might think that you could correct the problem by putting identical
+actions into the two rules, like this:
+
+@example
+@group
+compound: @{ prepare_for_local_variables (); @}
+ '@{' declarations statements '@}'
+ | @{ prepare_for_local_variables (); @}
+ '@{' statements '@}'
+ ;
+@end group
+@end example
+
+@noindent
+But this does not help, because Bison does not realize that the two actions
+are identical. (Bison never tries to understand the C code in an action.)
+
+If the grammar is such that a declaration can be distinguished from a
+statement by the first token (which is true in C), then one solution which
+does work is to put the action after the open-brace, like this:
+
+@example
+@group
+compound: '@{' @{ prepare_for_local_variables (); @}
+ declarations statements '@}'
+ | '@{' statements '@}'
+ ;
+@end group
+@end example
+
+@noindent
+Now the first token of the following declaration or statement,
+which would in any case tell Bison which rule to use, can still do so.
+
+Another solution is to bury the action inside a nonterminal symbol which
+serves as a subroutine:
+
+@example
+@group
+subroutine: /* empty */
+ @{ prepare_for_local_variables (); @}
+ ;
+
+@end group
+
+@group
+compound: subroutine
+ '@{' declarations statements '@}'
+ | subroutine
+ '@{' statements '@}'
+ ;
+@end group
+@end example
+
+@noindent
+Now Bison can execute the action in the rule for @code{subroutine} without
+deciding which rule for @code{compound} it will eventually use. Note that
+the action is now at the end of its rule. Any mid-rule action can be
+converted to an end-of-rule action in this way, and this is what Bison
+actually does to implement mid-rule actions.
+
+@node Declarations, Multiple Parsers, Semantics, Grammar File
+@section Bison Declarations
+@cindex declarations, Bison
+@cindex Bison declarations
+
+The @dfn{Bison declarations} section of a Bison grammar defines the symbols
+used in formulating the grammar and the data types of semantic values.
+@xref{Symbols}.
+
+All token type names (but not single-character literal tokens such as
+@code{'+'} and @code{'*'}) must be declared. Nonterminal symbols must be
+declared if you need to specify which data type to use for the semantic
+value (@pxref{Multiple Types, ,More Than One Value Type}).
+
+The first rule in the file also specifies the start symbol, by default.
+If you want some other symbol to be the start symbol, you must declare
+it explicitly (@pxref{Start Decl, ,The Start-Symbol}).
+
+@menu
+* Token Decl:: Declaring terminal symbols.
+* Precedence Decl:: Declaring terminals with precedence and associativity.
+* Union Decl:: Declaring the set of all semantic value types.
+* Type Decl:: Declaring the choice of type for a nonterminal symbol.
+* Expect Decl:: Suppressing warnings about shift/reduce conflicts.
+* Start Decl:: Specifying the start symbol.
+* Pure Decl:: Requesting a reentrant parser.
+* Decl Summary:: Table of all Bison declarations.
+@end menu
+
+@node Token Decl, Precedence Decl, , Declarations
+@subsection Token Type Names
+@cindex declaring token type names
+@cindex token type names, declaring
+@cindex declaring literal string tokens
+@findex %token
+
+The basic way to declare a token type name (terminal symbol) is as follows:
+
+@example
+%token @var{name}
+@end example
+
+Bison will convert this into a @code{#define} directive in
+the parser, so that the function @code{yylex} (if it is in this file)
+can use the name @var{name} to stand for this token type's code.
+
+Alternatively, you can use @code{%left}, @code{%right}, or @code{%nonassoc}
+instead of @code{%token}, if you wish to specify precedence.
+@xref{Precedence Decl, ,Operator Precedence}.
+
+You can explicitly specify the numeric code for a token type by appending
+an integer value in the field immediately following the token name:
+
+@example
+%token NUM 300
+@end example
+
+@noindent
+It is generally best, however, to let Bison choose the numeric codes for
+all token types. Bison will automatically select codes that don't conflict
+with each other or with ASCII characters.
+
+In the event that the stack type is a union, you must augment the
+@code{%token} or other token declaration to include the data type
+alternative delimited by angle-brackets (@pxref{Multiple Types, ,More Than One Value Type}).
+
+For example:
+
+@example
+@group
+%union @{ /* define stack type */
+ double val;
+ symrec *tptr;
+@}
+%token <val> NUM /* define token NUM and its type */
+@end group
+@end example
+
+You can associate a literal string token with a token type name by
+writing the literal string at the end of a @code{%token}
+declaration which declares the name. For example:
+
+@example
+%token arrow "=>"
+@end example
+
+@noindent
+For example, a grammar for the C language might specify these names with
+equivalent literal string tokens:
+
+@example
+%token <operator> OR "||"
+%token <operator> LE 134 "<="
+%left OR "<="
+@end example
+
+@noindent
+Once you equate the literal string and the token name, you can use them
+interchangeably in further declarations or the grammar rules. The
+@code{yylex} function can use the token name or the literal string to
+obtain the token type code number (@pxref{Calling Convention}).
+
+@node Precedence Decl, Union Decl, Token Decl, Declarations
+@subsection Operator Precedence
+@cindex precedence declarations
+@cindex declaring operator precedence
+@cindex operator precedence, declaring
+
+Use the @code{%left}, @code{%right} or @code{%nonassoc} declaration to
+declare a token and specify its precedence and associativity, all at
+once. These are called @dfn{precedence declarations}.
+@xref{Precedence, ,Operator Precedence}, for general information on operator precedence.
+
+The syntax of a precedence declaration is the same as that of
+@code{%token}: either
+
+@example
+%left @var{symbols}@dots{}
+@end example
+
+@noindent
+or
+
+@example
+%left <@var{type}> @var{symbols}@dots{}
+@end example
+
+And indeed any of these declarations serves the purposes of @code{%token}.
+But in addition, they specify the associativity and relative precedence for
+all the @var{symbols}:
+
+@itemize @bullet
+@item
+The associativity of an operator @var{op} determines how repeated uses
+of the operator nest: whether @samp{@var{x} @var{op} @var{y} @var{op}
+@var{z}} is parsed by grouping @var{x} with @var{y} first or by
+grouping @var{y} with @var{z} first. @code{%left} specifies
+left-associativity (grouping @var{x} with @var{y} first) and
+@code{%right} specifies right-associativity (grouping @var{y} with
+@var{z} first). @code{%nonassoc} specifies no associativity, which
+means that @samp{@var{x} @var{op} @var{y} @var{op} @var{z}} is
+considered a syntax error.
+
+@item
+The precedence of an operator determines how it nests with other operators.
+All the tokens declared in a single precedence declaration have equal
+precedence and nest together according to their associativity.
+When two tokens declared in different precedence declarations associate,
+the one declared later has the higher precedence and is grouped first.
+@end itemize
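+
+For example, a typical set of precedence declarations for an
+arithmetic grammar might look like this sketch, where each line binds
+more tightly than the lines above it:
+
+@example
+@group
+%nonassoc '<' '>'
+%left '+' '-'
+%left '*' '/'
+%right '^'    /* exponentiation */
+@end group
+@end example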
+
+@node Union Decl, Type Decl, Precedence Decl, Declarations
+@subsection The Collection of Value Types
+@cindex declaring value types
+@cindex value types, declaring
+@findex %union
+
+The @code{%union} declaration specifies the entire collection of possible
+data types for semantic values. The keyword @code{%union} is followed by a
+pair of braces containing the same thing that goes inside a @code{union} in
+C.
+
+For example:
+
+@example
+@group
+%union @{
+ double val;
+ symrec *tptr;
+@}
+@end group
+@end example
+
+@noindent
+This says that the two alternative types are @code{double} and @code{symrec
+*}. They are given names @code{val} and @code{tptr}; these names are used
+in the @code{%token} and @code{%type} declarations to pick one of the types
+for a terminal or nonterminal symbol (@pxref{Type Decl, ,Nonterminal Symbols}).
+
+Note that, unlike making a @code{union} declaration in C, you do not write
+a semicolon after the closing brace.
+
+@node Type Decl, Expect Decl, Union Decl, Declarations
+@subsection Nonterminal Symbols
+@cindex declaring value types, nonterminals
+@cindex value types, nonterminals, declaring
+@findex %type
+
+@noindent
+When you use @code{%union} to specify multiple value types, you must
+declare the value type of each nonterminal symbol for which values are
+used. This is done with a @code{%type} declaration, like this:
+
+@example
+%type <@var{type}> @var{nonterminal}@dots{}
+@end example
+
+@noindent
+Here @var{nonterminal} is the name of a nonterminal symbol, and @var{type}
+is the name given in the @code{%union} to the alternative that you want
+(@pxref{Union Decl, ,The Collection of Value Types}). You can give any number of nonterminal symbols in
+the same @code{%type} declaration, if they have the same value type. Use
+spaces to separate the symbol names.
+
+You can also declare the value type of a terminal symbol. To do this,
+use the same @code{<@var{type}>} construction in a declaration for the
+terminal symbol. All kinds of token declarations allow
+@code{<@var{type}>}.
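+
+For instance (a sketch using invented nonterminal names), with the
+@code{%union} shown earlier you might write:
+
+@example
+@group
+%type <val> exp term
+%type <tptr> id
+@end group
+@end example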
+
+@node Expect Decl, Start Decl, Type Decl, Declarations
+@subsection Suppressing Conflict Warnings
+@cindex suppressing conflict warnings
+@cindex preventing warnings about conflicts
+@cindex warnings, preventing
+@cindex conflicts, suppressing warnings of
+@findex %expect
+
+Bison normally warns if there are any conflicts in the grammar
+(@pxref{Shift/Reduce, ,Shift/Reduce Conflicts}), but most real grammars have harmless shift/reduce
+conflicts which are resolved in a predictable way and would be difficult to
+eliminate. It is desirable to suppress the warning about these conflicts
+unless the number of conflicts changes. You can do this with the
+@code{%expect} declaration.
+
+The declaration looks like this:
+
+@example
+%expect @var{n}
+@end example
+
+Here @var{n} is a decimal integer. The declaration says there should be no
+warning if there are @var{n} shift/reduce conflicts and no reduce/reduce
+conflicts. The usual warning is given if there are either more or fewer
+conflicts, or if there are any reduce/reduce conflicts.
+
+In general, using @code{%expect} involves these steps:
+
+@itemize @bullet
+@item
+Compile your grammar without @code{%expect}. Use the @samp{-v} option
+to get a verbose list of where the conflicts occur. Bison will also
+print the number of conflicts.
+
+@item
+Check each of the conflicts to make sure that Bison's default
+resolution is what you really want. If not, rewrite the grammar and
+go back to the beginning.
+
+@item
+Add an @code{%expect} declaration, copying the number @var{n} from the
+number which Bison printed.
+@end itemize
+
+Now Bison will stop annoying you about the conflicts you have checked, but
+it will warn you again if changes in the grammar result in additional
+conflicts.
+
+@node Start Decl, Pure Decl, Expect Decl, Declarations
+@subsection The Start-Symbol
+@cindex declaring the start symbol
+@cindex start symbol, declaring
+@cindex default start symbol
+@findex %start
+
+Bison assumes by default that the start symbol for the grammar is the first
+nonterminal specified in the grammar specification section. The programmer
+may override this restriction with the @code{%start} declaration as follows:
+
+@example
+%start @var{symbol}
+@end example
+
+@node Pure Decl, Decl Summary, Start Decl, Declarations
+@subsection A Pure (Reentrant) Parser
+@cindex reentrant parser
+@cindex pure parser
+@findex %pure_parser
+
+A @dfn{reentrant} program is one which does not alter in the course of
+execution; in other words, it consists entirely of @dfn{pure} (read-only)
+code. Reentrancy is important whenever asynchronous execution is possible;
+for example, a nonreentrant program may not be safe to call from a signal
+handler. In systems with multiple threads of control, a nonreentrant
+program must be called only within interlocks.
+
+Normally, Bison generates a parser which is not reentrant. This is
+suitable for most uses, and it permits compatibility with YACC. (The
+standard YACC interfaces are inherently nonreentrant, because they use
+statically allocated variables for communication with @code{yylex},
+including @code{yylval} and @code{yylloc}.)
+
+Alternatively, you can generate a pure, reentrant parser. The Bison
+declaration @code{%pure_parser} says that you want the parser to be
+reentrant. It looks like this:
+
+@example
+%pure_parser
+@end example
+
+The result is that the communication variables @code{yylval} and
+@code{yylloc} become local variables in @code{yyparse}, and a different
+calling convention is used for the lexical analyzer function
+@code{yylex}. @xref{Pure Calling, ,Calling Conventions for Pure
+Parsers}, for the details of this. The variable @code{yynerrs} also
+becomes local in @code{yyparse} (@pxref{Error Reporting, ,The Error
+Reporting Function @code{yyerror}}). The convention for calling
+@code{yyparse} itself is unchanged.
+
+Whether the parser is pure has nothing to do with the grammar rules.
+You can generate either a pure parser or a nonreentrant parser from any
+valid grammar.
+
+@node Decl Summary, , Pure Decl, Declarations
+@subsection Bison Declaration Summary
+@cindex Bison declaration summary
+@cindex declaration summary
+@cindex summary, Bison declaration
+
+Here is a summary of all Bison declarations:
+
+@table @code
+@item %union
+Declare the collection of data types that semantic values may have
+(@pxref{Union Decl, ,The Collection of Value Types}).
+
+@item %token
+Declare a terminal symbol (token type name) with no precedence
+or associativity specified (@pxref{Token Decl, ,Token Type Names}).
+
+@item %right
+Declare a terminal symbol (token type name) that is right-associative
+(@pxref{Precedence Decl, ,Operator Precedence}).
+
+@item %left
+Declare a terminal symbol (token type name) that is left-associative
+(@pxref{Precedence Decl, ,Operator Precedence}).
+
+@item %nonassoc
+Declare a terminal symbol (token type name) that is nonassociative
+(using it in a way that would be associative is a syntax error)
+(@pxref{Precedence Decl, ,Operator Precedence}).
+
+@item %type
+Declare the type of semantic values for a nonterminal symbol
+(@pxref{Type Decl, ,Nonterminal Symbols}).
+
+@item %start
+Specify the grammar's start symbol (@pxref{Start Decl, ,The Start-Symbol}).
+
+@item %expect
+Declare the expected number of shift-reduce conflicts
+(@pxref{Expect Decl, ,Suppressing Conflict Warnings}).
+
+@item %pure_parser
+Request a pure (reentrant) parser program (@pxref{Pure Decl, ,A Pure (Reentrant) Parser}).
+
+@item %no_lines
+Don't generate any @code{#line} preprocessor commands in the parser
+file. Ordinarily Bison writes these commands in the parser file so that
+the C compiler and debuggers will associate errors and object code with
+your source file (the grammar file). This directive causes them to
+associate errors with the parser file, treating it as an independent
+source file in its own right.
+
+@item %raw
+The output file @file{@var{name}.h} normally defines the tokens with
+Yacc-compatible token numbers. If this option is specified, the
+internal Bison numbers are used instead. (Yacc-compatible numbers start
+at 257 except for single character tokens; Bison assigns token numbers
+sequentially for all tokens starting at 3.)
+
+@item %token_table
+Generate an array of token names in the parser file. The name of the
+array is @code{yytname}; @code{yytname[@var{i}]} is the name of the
+token whose internal Bison token code number is @var{i}. The first three
+elements of @code{yytname} are always @code{"$"}, @code{"error"}, and
+@code{"$illegal"}; after these come the symbols defined in the grammar
+file.
+
+For single-character literal tokens and literal string tokens, the name
+in the table includes the single-quote or double-quote characters: for
+example, @code{"'+'"} is a single-character literal and @code{"\"<=\""}
+is a literal string token. All the characters of the literal string
+token appear verbatim in the string found in the table; even
+double-quote characters are not escaped. For example, if the token
+consists of three characters @samp{*"*}, its string in @code{yytname}
+contains @samp{"*"*"}. (In C, that would be written as
+@code{"\"*\"*\""}).
+
+When you specify @code{%token_table}, Bison also generates macro
+definitions for the macros @code{YYNTOKENS}, @code{YYNNTS},
+@code{YYNRULES}, and @code{YYNSTATES}:
+
+@table @code
+@item YYNTOKENS
+The highest token number, plus one.
+@item YYNNTS
+The number of non-terminal symbols.
+@item YYNRULES
+The number of grammar rules.
+@item YYNSTATES
+The number of parser states (@pxref{Parser States}).
+@end table
+@end table
+
+@node Multiple Parsers,, Declarations, Grammar File
+@section Multiple Parsers in the Same Program
+
+Most programs that use Bison parse only one language and therefore contain
+only one Bison parser. But what if you want to parse more than one
+language with the same program? Then you need to avoid a name conflict
+between different definitions of @code{yyparse}, @code{yylval}, and so on.
+
+The easy way to do this is to use the option @samp{-p @var{prefix}}
+(@pxref{Invocation, ,Invoking Bison}). This renames the interface functions and
+variables of the Bison parser to start with @var{prefix} instead of
+@samp{yy}. You can use this to give each parser distinct names that do
+not conflict.
+
+The precise list of symbols renamed is @code{yyparse}, @code{yylex},
+@code{yyerror}, @code{yynerrs}, @code{yylval}, @code{yychar} and
+@code{yydebug}. For example, if you use @samp{-p c}, the names become
+@code{cparse}, @code{clex}, and so on.
+
+@strong{All the other variables and macros associated with Bison are not
+renamed.} These others are not global; there is no conflict if the same
+name is used in different parsers. For example, @code{YYSTYPE} is not
+renamed, but defining this in different ways in different parsers causes
+no trouble (@pxref{Value Type, ,Data Types of Semantic Values}).
+
+The @samp{-p} option works by adding macro definitions to the beginning
+of the parser source file, defining @code{yyparse} as
+@code{@var{prefix}parse}, and so on. This effectively substitutes one
+name for the other in the entire parser file.
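+
+Conceptually (this is a sketch of the effect, not the literal text
+Bison emits), @samp{-p c} behaves as if the parser file began with:
+
+@example
+@group
+#define yyparse cparse
+#define yylex   clex
+#define yyerror cerror
+#define yynerrs cnerrs
+#define yylval  clval
+#define yychar  cchar
+#define yydebug cdebug
+@end group
+@end example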
+
+@node Interface, Algorithm, Grammar File, Top
+@chapter Parser C-Language Interface
+@cindex C-language interface
+@cindex interface
+
+The Bison parser is actually a C function named @code{yyparse}. Here we
+describe the interface conventions of @code{yyparse} and the other
+functions that it needs to use.
+
+Keep in mind that the parser uses many C identifiers starting with
+@samp{yy} and @samp{YY} for internal purposes. If you use such an
+identifier (aside from those in this manual) in an action or in additional
+C code in the grammar file, you are likely to run into trouble.
+
+@menu
+* Parser Function:: How to call @code{yyparse} and what it returns.
+* Lexical:: You must supply a function @code{yylex}
+ which reads tokens.
+* Error Reporting:: You must supply a function @code{yyerror}.
+* Action Features:: Special features for use in actions.
+@end menu
+
+@node Parser Function, Lexical, , Interface
+@section The Parser Function @code{yyparse}
+@findex yyparse
+
+You call the function @code{yyparse} to cause parsing to occur. This
+function reads tokens, executes actions, and ultimately returns when it
+encounters end-of-input or an unrecoverable syntax error. You can also
+write an action which directs @code{yyparse} to return immediately without
+reading further.
+
+The value returned by @code{yyparse} is 0 if parsing was successful (return
+is due to end-of-input).
+
+The value is 1 if parsing failed (return is due to a syntax error).
+
+In an action, you can cause immediate return from @code{yyparse} by using
+these macros:
+
+@table @code
+@item YYACCEPT
+@findex YYACCEPT
+Return immediately with value 0 (to report success).
+
+@item YYABORT
+@findex YYABORT
+Return immediately with value 1 (to report failure).
+@end table
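+
+For example, a minimal @code{main} function (a sketch; it is not part
+of the generated parser) simply hands the result of @code{yyparse}
+back to its caller:
+
+@example
+@group
+int
+main ()
+@{
+  return yyparse ();
+@}
+@end group
+@end example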
+
+@node Lexical, Error Reporting, Parser Function, Interface
+@section The Lexical Analyzer Function @code{yylex}
+@findex yylex
+@cindex lexical analyzer
+
+The @dfn{lexical analyzer} function, @code{yylex}, recognizes tokens from
+the input stream and returns them to the parser. Bison does not create
+this function automatically; you must write it so that @code{yyparse} can
+call it. The function is sometimes referred to as a lexical scanner.
+
+In simple programs, @code{yylex} is often defined at the end of the Bison
+grammar file. If @code{yylex} is defined in a separate source file, you
+need to arrange for the token-type macro definitions to be available there.
+To do this, use the @samp{-d} option when you run Bison, so that it will
+write these macro definitions into a separate header file
+@file{@var{name}.tab.h} which you can include in the other source files
+that need it. @xref{Invocation, ,Invoking Bison}.@refill
+
+@menu
+* Calling Convention:: How @code{yyparse} calls @code{yylex}.
+* Token Values:: How @code{yylex} must return the semantic value
+ of the token it has read.
+* Token Positions:: How @code{yylex} must return the text position
+ (line number, etc.) of the token, if the
+ actions want that.
+* Pure Calling:: How the calling convention differs
+ in a pure parser (@pxref{Pure Decl, ,A Pure (Reentrant) Parser}).
+@end menu
+
+@node Calling Convention, Token Values, , Lexical
+@subsection Calling Convention for @code{yylex}
+
+The value that @code{yylex} returns must be the numeric code for the type
+of token it has just found, or 0 for end-of-input.
+
+When a token is referred to in the grammar rules by a name, that name
+in the parser file becomes a C macro whose definition is the proper
+numeric code for that token type. So @code{yylex} can use the name
+to indicate that type. @xref{Symbols}.
+
+When a token is referred to in the grammar rules by a character literal,
+the numeric code for that character is also the code for the token type.
+So @code{yylex} can simply return that character code. The null character
+must not be used this way, because its code is zero and that is what
+signifies end-of-input.
+
+Here is an example showing these things:
+
+@example
+yylex ()
+@{
+ @dots{}
+ if (c == EOF) /* Detect end of file. */
+ return 0;
+ @dots{}
+ if (c == '+' || c == '-')
+ return c; /* Assume token type for `+' is '+'. */
+ @dots{}
+ return INT; /* Return the type of the token. */
+ @dots{}
+@}
+@end example
+
+@noindent
+This interface has been designed so that the output from the @code{lex}
+utility can be used without change as the definition of @code{yylex}.
+
+If the grammar uses literal string tokens, there are two ways that
+@code{yylex} can determine the token type codes for them:
+
+@itemize @bullet
+@item
+If the grammar defines symbolic token names as aliases for the
+literal string tokens, @code{yylex} can use these symbolic names like
+all others. In this case, the use of the literal string tokens in
+the grammar file has no effect on @code{yylex}.
+
+@item
+@code{yylex} can find the multi-character token in the @code{yytname}
+table. The index of the token in the table is the token type's code.
+The name of a multi-character token is recorded in @code{yytname} with a
+double-quote, the token's characters, and another double-quote. The
+token's characters are not escaped in any way; they appear verbatim in
+the contents of the string in the table.
+
+Here's code for looking up a token in @code{yytname}, assuming that the
+characters of the token are stored in @code{token_buffer}.
+
+@smallexample
+for (i = 0; i < YYNTOKENS; i++)
+  @{
+    if (yytname[i] != 0
+        && yytname[i][0] == '"'
+        && ! strncmp (yytname[i] + 1, token_buffer,
+                      strlen (token_buffer))
+        && yytname[i][strlen (token_buffer) + 1] == '"'
+        && yytname[i][strlen (token_buffer) + 2] == 0)
+ break;
+ @}
+@end smallexample
+
+The @code{yytname} table is generated only if you use the
+@code{%token_table} declaration. @xref{Decl Summary}.
+@end itemize
+
+@node Token Values, Token Positions, Calling Convention, Lexical
+@subsection Semantic Values of Tokens
+
+@vindex yylval
+In an ordinary (nonreentrant) parser, the semantic value of the token must
+be stored into the global variable @code{yylval}. When you are using
+just one data type for semantic values, @code{yylval} has that type.
+Thus, if the type is @code{int} (the default), you might write this in
+@code{yylex}:
+
+@example
+@group
+ @dots{}
+ yylval = value; /* Put value onto Bison stack. */
+ return INT; /* Return the type of the token. */
+ @dots{}
+@end group
+@end example
+
+When you are using multiple data types, @code{yylval}'s type is a union
+made from the @code{%union} declaration (@pxref{Union Decl, ,The Collection of Value Types}). So when
+you store a token's value, you must use the proper member of the union.
+If the @code{%union} declaration looks like this:
+
+@example
+@group
+%union @{
+ int intval;
+ double val;
+ symrec *tptr;
+@}
+@end group
+@end example
+
+@noindent
+then the code in @code{yylex} might look like this:
+
+@example
+@group
+ @dots{}
+ yylval.intval = value; /* Put value onto Bison stack. */
+ return INT; /* Return the type of the token. */
+ @dots{}
+@end group
+@end example
+
+@node Token Positions, Pure Calling, Token Values, Lexical
+@subsection Textual Positions of Tokens
+
+@vindex yylloc
+If you are using the @samp{@@@var{n}}-feature (@pxref{Action Features, ,Special Features for Use in Actions}) in
+actions to keep track of the textual locations of tokens and groupings,
+then you must provide this information in @code{yylex}. The function
+@code{yyparse} expects to find the textual location of a token just parsed
+in the global variable @code{yylloc}. So @code{yylex} must store the
+proper data in that variable. The value of @code{yylloc} is a structure
+and you need only initialize the members that are going to be used by the
+actions. The four members are called @code{first_line},
+@code{first_column}, @code{last_line} and @code{last_column}. Note that
+the use of this feature makes the parser noticeably slower.
+
+@tindex YYLTYPE
+The data type of @code{yylloc} has the name @code{YYLTYPE}.
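+
+For example (a sketch; @code{lineno}, @code{token_start_column} and
+@code{token_end_column} stand for bookkeeping that your own scanner
+code would have to maintain), @code{yylex} might record a token's
+position like this before returning:
+
+@example
+@group
+  yylloc.first_line = yylloc.last_line = lineno;
+  yylloc.first_column = token_start_column;
+  yylloc.last_column = token_end_column;
+  return INT;       /* Return the type of the token.  */
+@end group
+@end example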
+
+@node Pure Calling, , Token Positions, Lexical
+@subsection Calling Conventions for Pure Parsers
+
+When you use the Bison declaration @code{%pure_parser} to request a
+pure, reentrant parser, the global communication variables @code{yylval}
+and @code{yylloc} cannot be used. (@xref{Pure Decl, ,A Pure (Reentrant)
+Parser}.) In such parsers the two global variables are replaced by
+pointers passed as arguments to @code{yylex}. You must declare them as
+shown here, and pass the information back by storing it through those
+pointers.
+
+@example
+yylex (lvalp, llocp)
+ YYSTYPE *lvalp;
+ YYLTYPE *llocp;
+@{
+ @dots{}
+ *lvalp = value; /* Put value onto Bison stack. */
+ return INT; /* Return the type of the token. */
+ @dots{}
+@}
+@end example
+
+If the grammar file does not use the @samp{@@} constructs to refer to
+textual positions, then the type @code{YYLTYPE} will not be defined. In
+this case, omit the second argument; @code{yylex} will be called with
+only one argument.
+
+@vindex YYPARSE_PARAM
+If you use a reentrant parser, you can optionally pass additional
+parameter information to it in a reentrant way. To do so, define the
+macro @code{YYPARSE_PARAM} as a variable name. This modifies the
+@code{yyparse} function to accept one argument, of type @code{void *},
+with that name.
+
+When you call @code{yyparse}, pass the address of an object, casting the
+address to @code{void *}. The grammar actions can refer to the contents
+of the object by casting the pointer value back to its proper type and
+then dereferencing it. Here's an example. Write this in the parser:
+
+@example
+%@{
+struct parser_control
+@{
+ int nastiness;
+ int randomness;
+@};
+
+#define YYPARSE_PARAM parm
+%@}
+@end example
+
+@noindent
+Then call the parser like this:
+
+@example
+struct parser_control
+@{
+ int nastiness;
+ int randomness;
+@};
+
+@dots{}
+
+@{
+ struct parser_control foo;
+ @dots{} /* @r{Store proper data in @code{foo}.} */
+ value = yyparse ((void *) &foo);
+ @dots{}
+@}
+@end example
+
+@noindent
+In the grammar actions, use expressions like this to refer to the data:
+
+@example
+((struct parser_control *) parm)->randomness
+@end example
+
+@vindex YYLEX_PARAM
+If you wish to pass the additional parameter data to @code{yylex},
+define the macro @code{YYLEX_PARAM} just like @code{YYPARSE_PARAM}, as
+shown here:
+
+@example
+%@{
+struct parser_control
+@{
+ int nastiness;
+ int randomness;
+@};
+
+#define YYPARSE_PARAM parm
+#define YYLEX_PARAM parm
+%@}
+@end example
+
+You should then define @code{yylex} to accept one additional
+argument---the value of @code{parm}. (This makes either two or three
+arguments in total, depending on whether an argument of type
+@code{YYLTYPE} is passed.) You can declare the argument as a pointer to
+the proper object type, or you can declare it as @code{void *} and
+access the contents as shown above.
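+
+For instance, a sketch of such a @code{yylex} (assuming the grammar
+does not use the @samp{@@} constructs, so there is no @code{YYLTYPE}
+argument) might begin:
+
+@example
+@group
+yylex (lvalp, parm)
+     YYSTYPE *lvalp;
+     void *parm;
+@{
+  struct parser_control *pc = (struct parser_control *) parm;
+  @dots{}
+@}
+@end group
+@end example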
+
+You can use @samp{%pure_parser} to request a reentrant parser without
+also using @code{YYPARSE_PARAM}. Then you should call @code{yyparse}
+with no arguments, as usual.
+
+@node Error Reporting, Action Features, Lexical, Interface
+@section The Error Reporting Function @code{yyerror}
+@cindex error reporting function
+@findex yyerror
+@cindex parse error
+@cindex syntax error
+
+The Bison parser detects a @dfn{parse error} or @dfn{syntax error}
+whenever it reads a token which cannot satisfy any syntax rule. An
+action in the grammar can also explicitly proclaim an error, using the
+macro @code{YYERROR} (@pxref{Action Features, ,Special Features for Use in Actions}).
+
+The Bison parser expects to report the error by calling an error
+reporting function named @code{yyerror}, which you must supply. It is
+called by @code{yyparse} whenever a syntax error is found, and it
+receives one argument. For a parse error, the string is normally
+@w{@code{"parse error"}}.
+
+@findex YYERROR_VERBOSE
+If you define the macro @code{YYERROR_VERBOSE} in the Bison declarations
+section (@pxref{Bison Declarations, ,The Bison Declarations Section}), then Bison provides a more verbose
+and specific error message string instead of just plain @w{@code{"parse
+error"}}. It doesn't matter what definition you use for
+@code{YYERROR_VERBOSE}, just whether you define it.
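+
+One way to do this (a sketch) is to put an empty definition near the
+top of the grammar file, inside @samp{%@{} and @samp{%@}}:
+
+@example
+%@{
+#define YYERROR_VERBOSE
+%@}
+@end example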
+
+The parser can detect one other kind of error: stack overflow. This
+happens when the input contains constructions that are very deeply
+nested. It isn't likely you will encounter this, since the Bison
+parser extends its stack automatically up to a very large limit. But
+if overflow happens, @code{yyparse} calls @code{yyerror} in the usual
+fashion, except that the argument string is @w{@code{"parser stack
+overflow"}}.
+
+The following definition suffices in simple programs:
+
+@example
+@group
+yyerror (s)
+ char *s;
+@{
+@end group
+@group
+ fprintf (stderr, "%s\n", s);
+@}
+@end group
+@end example
+
+After @code{yyerror} returns to @code{yyparse}, the latter will attempt
+error recovery if you have written suitable error recovery grammar rules
+(@pxref{Error Recovery}). If recovery is impossible, @code{yyparse} will
+immediately return 1.
+
+@vindex yynerrs
+The variable @code{yynerrs} contains the number of syntax errors
+encountered so far. Normally this variable is global; but if you
+request a pure parser (@pxref{Pure Decl, ,A Pure (Reentrant) Parser}) then it is a local variable
+which only the actions can access.
+
+@node Action Features, , Error Reporting, Interface
+@section Special Features for Use in Actions
+@cindex summary, action features
+@cindex action features summary
+
+Here is a table of Bison constructs, variables and macros that
+are useful in actions.
+
+@table @samp
+@item $$
+Acts like a variable that contains the semantic value for the
+grouping made by the current rule. @xref{Actions}.
+
+@item $@var{n}
+Acts like a variable that contains the semantic value for the
+@var{n}th component of the current rule. @xref{Actions}.
+
+@item $<@var{typealt}>$
+Like @code{$$} but specifies alternative @var{typealt} in the union
+specified by the @code{%union} declaration. @xref{Action Types, ,Data Types of Values in Actions}.
+
+@item $<@var{typealt}>@var{n}
+Like @code{$@var{n}} but specifies alternative @var{typealt} in the
+union specified by the @code{%union} declaration.
+@xref{Action Types, ,Data Types of Values in Actions}.@refill
+
+@item YYABORT;
+Return immediately from @code{yyparse}, indicating failure.
+@xref{Parser Function, ,The Parser Function @code{yyparse}}.
+
+@item YYACCEPT;
+Return immediately from @code{yyparse}, indicating success.
+@xref{Parser Function, ,The Parser Function @code{yyparse}}.
+
+@item YYBACKUP (@var{token}, @var{value});
+@findex YYBACKUP
+Unshift a token. This macro is allowed only for rules that reduce
+a single value, and only when there is no look-ahead token.
+It installs a look-ahead token with token type @var{token} and
+semantic value @var{value}; then it discards the value that was
+going to be reduced by this rule.
+
+If the macro is used when it is not valid, such as when there is
+a look-ahead token already, then it reports a syntax error with
+a message @samp{cannot back up} and performs ordinary error
+recovery.
+
+In either case, the rest of the action is not executed.
+
+@item YYEMPTY
+@vindex YYEMPTY
+Value stored in @code{yychar} when there is no look-ahead token.
+
+@item YYERROR;
+@findex YYERROR
+Cause an immediate syntax error. This statement initiates error
+recovery just as if the parser itself had detected an error; however, it
+does not call @code{yyerror}, and does not print any message. If you
+want to print an error message, call @code{yyerror} explicitly before
+the @samp{YYERROR;} statement. @xref{Error Recovery}.
+
+@item YYRECOVERING
+This macro stands for an expression that has the value 1 when the parser
+is recovering from a syntax error, and 0 the rest of the time.
+@xref{Error Recovery}.
+
+@item yychar
+Variable containing the current look-ahead token. (In a pure parser,
+this is actually a local variable within @code{yyparse}.) When there is
+no look-ahead token, the value @code{YYEMPTY} is stored in the variable.
+@xref{Look-Ahead, ,Look-Ahead Tokens}.
+
+@item yyclearin;
+Discard the current look-ahead token. This is useful primarily in
+error rules. @xref{Error Recovery}.
+
+@item yyerrok;
+Resume generating error messages immediately for subsequent syntax
+errors. This is useful primarily in error rules.
+@xref{Error Recovery}.
+
+@item @@@var{n}
+@findex @@@var{n}
+Acts like a structure variable containing information on the line
+numbers and column numbers of the @var{n}th component of the current
+rule. The structure has four members, like this:
+
+@example
+struct @{
+ int first_line, last_line;
+ int first_column, last_column;
+@};
+@end example
+
+Thus, to get the starting line number of the third component, you would
+use @samp{@@3.first_line}.
+
+In order for the members of this structure to contain valid information,
+you must make @code{yylex} supply this information about each token.
+If you need only certain members, then @code{yylex} need only fill in
+those members.
+
+The use of this feature makes the parser noticeably slower.
+@end table
+
+@node Algorithm, Error Recovery, Interface, Top
+@chapter The Bison Parser Algorithm
+@cindex Bison parser algorithm
+@cindex algorithm of parser
+@cindex shifting
+@cindex reduction
+@cindex parser stack
+@cindex stack, parser
+
+As Bison reads tokens, it pushes them onto a stack along with their
+semantic values. The stack is called the @dfn{parser stack}. Pushing a
+token is traditionally called @dfn{shifting}.
+
+For example, suppose the infix calculator has read @samp{1 + 5 *}, with a
+@samp{3} to come. The stack will have four elements, one for each token
+that was shifted.
+
+But the stack does not always have an element for each token read. When
+the last @var{n} tokens and groupings shifted match the components of a
+grammar rule, they can be combined according to that rule. This is called
+@dfn{reduction}. Those tokens and groupings are replaced on the stack by a
+single grouping whose symbol is the result (left hand side) of that rule.
+Running the rule's action is part of the process of reduction, because this
+is what computes the semantic value of the resulting grouping.
+
+For example, if the infix calculator's parser stack contains this:
+
+@example
+1 + 5 * 3
+@end example
+
+@noindent
+and the next input token is a newline character, then the last three
+elements can be reduced to 15 via the rule:
+
+@example
+expr: expr '*' expr;
+@end example
+
+@noindent
+Then the stack contains just these three elements:
+
+@example
+1 + 15
+@end example
+
+@noindent
+At this point, another reduction can be made, resulting in the single value
+16. Then the newline token can be shifted.
+
+The parser tries, by shifts and reductions, to reduce the entire input down
+to a single grouping whose symbol is the grammar's start-symbol
+(@pxref{Language and Grammar, ,Languages and Context-Free Grammars}).
+
+This kind of parser is known in the literature as a bottom-up parser.
+
+@menu
+* Look-Ahead:: Parser looks one token ahead when deciding what to do.
+* Shift/Reduce:: Conflicts: when either shifting or reduction is valid.
+* Precedence:: Operator precedence works by resolving conflicts.
+* Contextual Precedence:: When an operator's precedence depends on context.
+* Parser States:: The parser is a finite-state-machine with stack.
+* Reduce/Reduce:: When two rules are applicable in the same situation.
+* Mystery Conflicts:: Reduce/reduce conflicts that look unjustified.
+* Stack Overflow:: What happens when stack gets full. How to avoid it.
+@end menu
+
+@node Look-Ahead, Shift/Reduce, , Algorithm
+@section Look-Ahead Tokens
+@cindex look-ahead token
+
+The Bison parser does @emph{not} always reduce immediately as soon as the
+last @var{n} tokens and groupings match a rule. This is because such a
+simple strategy is inadequate to handle most languages. Instead, when a
+reduction is possible, the parser sometimes ``looks ahead'' at the next
+token in order to decide what to do.
+
+When a token is read, it is not immediately shifted; first it becomes the
+@dfn{look-ahead token}, which is not on the stack. Now the parser can
+perform one or more reductions of tokens and groupings on the stack, while
+the look-ahead token remains off to the side. When no more reductions
+should take place, the look-ahead token is shifted onto the stack. This
+does not mean that all possible reductions have been done; depending on the
+token type of the look-ahead token, some rules may choose to delay their
+application.
+
+Here is a simple case where look-ahead is needed. These three rules define
+expressions which contain binary addition operators and postfix unary
+factorial operators (@samp{!}), and allow parentheses for grouping.
+
+@example
+@group
+expr: term '+' expr
+ | term
+ ;
+@end group
+
+@group
+term: '(' expr ')'
+ | term '!'
+ | NUMBER
+ ;
+@end group
+@end example
+
+Suppose that the tokens @w{@samp{1 + 2}} have been read and shifted; what
+should be done? If the following token is @samp{)}, then the first three
+tokens must be reduced to form an @code{expr}. This is the only valid
+course, because shifting the @samp{)} would produce a sequence of symbols
+@w{@code{term ')'}}, and no rule allows this.
+
+If the following token is @samp{!}, then it must be shifted immediately so
+that @w{@samp{2 !}} can be reduced to make a @code{term}. If instead the
+parser were to reduce before shifting, @w{@samp{1 + 2}} would become an
+@code{expr}. It would then be impossible to shift the @samp{!} because
+doing so would produce on the stack the sequence of symbols @code{expr
+'!'}. No rule allows that sequence.
+
+@vindex yychar
+The current look-ahead token is stored in the variable @code{yychar}.
+@xref{Action Features, ,Special Features for Use in Actions}.
+
+@node Shift/Reduce, Precedence, Look-Ahead, Algorithm
+@section Shift/Reduce Conflicts
+@cindex conflicts
+@cindex shift/reduce conflicts
+@cindex dangling @code{else}
+@cindex @code{else}, dangling
+
+Suppose we are parsing a language which has if-then and if-then-else
+statements, with a pair of rules like this:
+
+@example
+@group
+if_stmt:
+ IF expr THEN stmt
+ | IF expr THEN stmt ELSE stmt
+ ;
+@end group
+@end example
+
+@noindent
+Here we assume that @code{IF}, @code{THEN} and @code{ELSE} are
+terminal symbols for specific keyword tokens.
+
+When the @code{ELSE} token is read and becomes the look-ahead token, the
+contents of the stack (assuming the input is valid) are just right for
+reduction by the first rule. But it is also legitimate to shift the
+@code{ELSE}, because that would lead to eventual reduction by the second
+rule.
+
+This situation, where either a shift or a reduction would be valid, is
+called a @dfn{shift/reduce conflict}. Bison is designed to resolve
+these conflicts by choosing to shift, unless otherwise directed by
+operator precedence declarations. To see the reason for this, let's
+contrast it with the other alternative.
+
+Since the parser prefers to shift the @code{ELSE}, the result is to attach
+the else-clause to the innermost if-statement, making these two inputs
+equivalent:
+
+@example
+if x then if y then win (); else lose;
+
+if x then do; if y then win (); else lose; end;
+@end example
+
+But if the parser chose to reduce when possible rather than shift, the
+result would be to attach the else-clause to the outermost if-statement,
+making these two inputs equivalent:
+
+@example
+if x then if y then win (); else lose;
+
+if x then do; if y then win (); end; else lose;
+@end example
+
+The conflict exists because the grammar as written is ambiguous: either
+parsing of the simple nested if-statement is legitimate. The established
+convention is that these ambiguities are resolved by attaching the
+else-clause to the innermost if-statement; this is what Bison accomplishes
+by choosing to shift rather than reduce. (It would ideally be cleaner to
+write an unambiguous grammar, but that is very hard to do in this case.)
+This particular ambiguity was first encountered in the specifications of
+Algol 60 and is called the ``dangling @code{else}'' ambiguity.
+
+To avoid warnings from Bison about predictable, legitimate shift/reduce
+conflicts, use the @code{%expect @var{n}} declaration. There will be no
+warning as long as the number of shift/reduce conflicts is exactly @var{n}.
+@xref{Expect Decl, ,Suppressing Conflict Warnings}.
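+
+For instance, a grammar whose only conflict is the dangling @code{else}
+(such as the complete example below) could declare:
+
+@example
+%expect 1
+@end example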
+
+The definition of @code{if_stmt} above is solely to blame for the
+conflict, but the conflict does not actually appear without additional
+rules. Here is a complete Bison input file that actually manifests the
+conflict:
+
+@example
+@group
+%token IF THEN ELSE variable
+%%
+@end group
+@group
+stmt: expr
+ | if_stmt
+ ;
+@end group
+
+@group
+if_stmt:
+ IF expr THEN stmt
+ | IF expr THEN stmt ELSE stmt
+ ;
+@end group
+
+expr: variable
+ ;
+@end example
+
+@node Precedence, Contextual Precedence, Shift/Reduce, Algorithm
+@section Operator Precedence
+@cindex operator precedence
+@cindex precedence of operators
+
+Another situation where shift/reduce conflicts appear is in arithmetic
+expressions. Here shifting is not always the preferred resolution; the
+Bison declarations for operator precedence allow you to specify when to
+shift and when to reduce.
+
+@menu
+* Why Precedence:: An example showing why precedence is needed.
+* Using Precedence:: How to specify precedence in Bison grammars.
+* Precedence Examples:: How these features are used in the previous example.
+* How Precedence:: How they work.
+@end menu
+
+@node Why Precedence, Using Precedence, , Precedence
+@subsection When Precedence is Needed
+
+Consider the following ambiguous grammar fragment (ambiguous because the
+input @w{@samp{1 - 2 * 3}} can be parsed in two different ways):
+
+@example
+@group
+expr: expr '-' expr
+ | expr '*' expr
+ | expr '<' expr
+ | '(' expr ')'
+ @dots{}
+ ;
+@end group
+@end example
+
+@noindent
+Suppose the parser has seen the tokens @samp{1}, @samp{-} and @samp{2};
+should it reduce them via the rule for the subtraction operator? It depends
+on the next token. Of course, if the next token is @samp{)}, we must
+reduce; shifting is invalid because no single rule can reduce the token
+sequence @w{@samp{- 2 )}} or anything starting with that. But if the next
+token is @samp{*} or @samp{<}, we have a choice: either shifting or
+reduction would allow the parse to complete, but with different
+results.
+
+To decide which one Bison should do, we must consider the
+results. If the next operator token @var{op} is shifted, then it
+must be reduced first in order to permit another opportunity to
+reduce the difference. The result is (in effect) @w{@samp{1 - (2
+@var{op} 3)}}. On the other hand, if the subtraction is reduced
+before shifting @var{op}, the result is @w{@samp{(1 - 2) @var{op}
+3}}. Clearly, then, the choice of shift or reduce should depend
+on the relative precedence of the operators @samp{-} and
+@var{op}: @samp{*} should be shifted first, but not @samp{<}.
+
+@cindex associativity
+What about input such as @w{@samp{1 - 2 - 5}}; should this be
+@w{@samp{(1 - 2) - 5}} or should it be @w{@samp{1 - (2 - 5)}}? For
+most operators we prefer the former, which is called @dfn{left
+association}. The latter alternative, @dfn{right association}, is
+desirable for assignment operators. The choice of left or right
+association is a matter of whether the parser chooses to shift or
+reduce when the stack contains @w{@samp{1 - 2}} and the look-ahead
+token is @samp{-}: shifting yields right association, while reducing
+yields left association.
+
+@node Using Precedence, Precedence Examples, Why Precedence, Precedence
+@subsection Specifying Operator Precedence
+@findex %left
+@findex %right
+@findex %nonassoc
+
+Bison allows you to specify these choices with the operator precedence
+declarations @code{%left} and @code{%right}. Each such declaration
+contains a list of tokens, which are operators whose precedence and
+associativity is being declared. The @code{%left} declaration makes all
+those operators left-associative and the @code{%right} declaration makes
+them right-associative. A third alternative is @code{%nonassoc}, which
+declares that it is a syntax error to find the same operator twice ``in a
+row''.
+
+The relative precedence of different operators is controlled by the
+order in which they are declared. The first @code{%left} or
+@code{%right} declaration in the file declares the operators whose
+precedence is lowest, the next such declaration declares the operators
+whose precedence is a little higher, and so on.
+
+@node Precedence Examples, How Precedence, Using Precedence, Precedence
+@subsection Precedence Examples
+
+In our example, we would want the following declarations:
+
+@example
+%left '<'
+%left '-'
+%left '*'
+@end example
+
+In a more complete example, which supports other operators as well, we
+would declare them in groups of equal precedence. For example, @code{'+'} is
+declared with @code{'-'}:
+
+@example
+%left '<' '>' '=' NE LE GE
+%left '+' '-'
+%left '*' '/'
+@end example
+
+@noindent
+(Here @code{NE} and so on stand for the operators for ``not equal''
+and so on. We assume that these tokens are more than one character long
+and therefore are represented by names, not character literals.)
+
+@node How Precedence, , Precedence Examples, Precedence
+@subsection How Precedence Works
+
+The first effect of the precedence declarations is to assign precedence
+levels to the terminal symbols declared. The second effect is to assign
+precedence levels to certain rules: each rule gets its precedence from the
+last terminal symbol mentioned in the components. (You can also specify
+explicitly the precedence of a rule. @xref{Contextual Precedence, ,Context-Dependent Precedence}.)
+
+Finally, the resolution of conflicts works by comparing the
+precedence of the rule being considered with that of the
+look-ahead token. If the token's precedence is higher, the
+choice is to shift. If the rule's precedence is higher, the
+choice is to reduce. If they have equal precedence, the choice
+is made based on the associativity of that precedence level. The
+verbose output file made by @samp{-v} (@pxref{Invocation, ,Invoking Bison}) says
+how each conflict was resolved.
+
+Not all rules and not all tokens have precedence. If either the rule or
+the look-ahead token has no precedence, then the default is to shift.
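+
+As a sketch that puts these pieces together (the token @code{NUM} is
+assumed here to stand for numbers), each rule below takes its precedence
+from the operator it mentions.  After @w{@samp{1 - 2}} is on the stack, a
+look-ahead @samp{*} has higher precedence than the subtraction rule and is
+shifted, while a look-ahead @samp{<} has lower precedence and causes a
+reduction:
+
+@example
+%token NUM
+%left '<'
+%left '-'
+%left '*'
+%%
+expr: expr '<' expr
+ | expr '-' expr
+ | expr '*' expr
+ | NUM
+ ;
+@end example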
+
+@node Contextual Precedence, Parser States, Precedence, Algorithm
+@section Context-Dependent Precedence
+@cindex context-dependent precedence
+@cindex unary operator precedence
+@cindex precedence, context-dependent
+@cindex precedence, unary operator
+@findex %prec
+
+Often the precedence of an operator depends on the context. This sounds
+outlandish at first, but it is really very common. For example, a minus
+sign typically has a very high precedence as a unary operator, and a
+somewhat lower precedence (lower than multiplication) as a binary operator.
+
+The Bison precedence declarations, @code{%left}, @code{%right} and
+@code{%nonassoc}, can only be used once for a given token; so a token has
+only one precedence declared in this way. For context-dependent
+precedence, you need to use an additional mechanism: the @code{%prec}
+modifier for rules.@refill
+
+The @code{%prec} modifier declares the precedence of a particular rule by
+specifying a terminal symbol whose precedence should be used for that rule.
+It's not necessary for that symbol to appear otherwise in the rule. The
+modifier's syntax is:
+
+@example
+%prec @var{terminal-symbol}
+@end example
+
+@noindent
+and it is written after the components of the rule. Its effect is to
+assign the rule the precedence of @var{terminal-symbol}, overriding
+the precedence that would be deduced for it in the ordinary way. The
+altered rule precedence then affects how conflicts involving that rule
+are resolved (@pxref{Precedence, ,Operator Precedence}).
+
+Here is how @code{%prec} solves the problem of unary minus. First, declare
+a precedence for a fictitious terminal symbol named @code{UMINUS}. There
+are no tokens of this type, but the symbol serves to stand for its
+precedence:
+
+@example
+@dots{}
+%left '+' '-'
+%left '*'
+%left UMINUS
+@end example
+
+Now the precedence of @code{UMINUS} can be used in specific rules:
+
+@example
+@group
+exp: @dots{}
+ | exp '-' exp
+ @dots{}
+ | '-' exp %prec UMINUS
+@end group
+@end example
+
+@node Parser States, Reduce/Reduce, Contextual Precedence, Algorithm
+@section Parser States
+@cindex finite-state machine
+@cindex parser state
+@cindex state (of parser)
+
+The function @code{yyparse} is implemented using a finite-state machine.
+The values pushed on the parser stack are not simply token type codes; they
+represent the entire sequence of terminal and nonterminal symbols at or
+near the top of the stack. The current state collects all the information
+about previous input which is relevant to deciding what to do next.
+
+Each time a look-ahead token is read, the current parser state together
+with the type of look-ahead token are looked up in a table. This table
+entry can say, ``Shift the look-ahead token.'' In this case, it also
+specifies the new parser state, which is pushed onto the top of the
+parser stack. Or it can say, ``Reduce using rule number @var{n}.''
+This means that a certain number of tokens or groupings are taken off
+the top of the stack, and replaced by one grouping. In other words,
+that number of states are popped from the stack, and one new state is
+pushed.
+
+There is one other alternative: the table can say that the look-ahead token
+is erroneous in the current state. This causes error processing to begin
+(@pxref{Error Recovery}).
+
+@node Reduce/Reduce, Mystery Conflicts, Parser States, Algorithm
+@section Reduce/Reduce Conflicts
+@cindex reduce/reduce conflict
+@cindex conflicts, reduce/reduce
+
+A reduce/reduce conflict occurs if there are two or more rules that apply
+to the same sequence of input. This usually indicates a serious error
+in the grammar.
+
+For example, here is an erroneous attempt to define a sequence
+of zero or more @code{word} groupings.
+
+@example
+sequence: /* empty */
+ @{ printf ("empty sequence\n"); @}
+ | maybeword
+ | sequence word
+ @{ printf ("added word %s\n", $2); @}
+ ;
+
+maybeword: /* empty */
+ @{ printf ("empty maybeword\n"); @}
+ | word
+ @{ printf ("single word %s\n", $1); @}
+ ;
+@end example
+
+@noindent
+The error is an ambiguity: there is more than one way to parse a single
+@code{word} into a @code{sequence}. It could be reduced to a
+@code{maybeword} and then into a @code{sequence} via the second rule.
+Alternatively, nothing-at-all could be reduced into a @code{sequence}
+via the first rule, and this could be combined with the @code{word}
+using the third rule for @code{sequence}.
+
+There is also more than one way to reduce nothing-at-all into a
+@code{sequence}. This can be done directly via the first rule,
+or indirectly via @code{maybeword} and then the second rule.
+
+You might think that this is a distinction without a difference, because it
+does not change whether any particular input is valid or not. But it does
+affect which actions are run. One parsing order runs the second rule's
+action; the other runs the first rule's action and the third rule's action.
+In this example, the output of the program changes.
+
+Bison resolves a reduce/reduce conflict by choosing to use the rule that
+appears first in the grammar, but it is very risky to rely on this. Every
+reduce/reduce conflict must be studied and usually eliminated. Here is the
+proper way to define @code{sequence}:
+
+@example
+sequence: /* empty */
+ @{ printf ("empty sequence\n"); @}
+ | sequence word
+ @{ printf ("added word %s\n", $2); @}
+ ;
+@end example
+
+Here is another common error that yields a reduce/reduce conflict:
+
+@example
+sequence: /* empty */
+ | sequence words
+ | sequence redirects
+ ;
+
+words: /* empty */
+ | words word
+ ;
+
+redirects: /* empty */
+ | redirects redirect
+ ;
+@end example
+
+@noindent
+The intention here is to define a sequence which can contain either
+@code{word} or @code{redirect} groupings. The individual definitions of
+@code{sequence}, @code{words} and @code{redirects} are error-free, but the
+three together make a subtle ambiguity: even an empty input can be parsed
+in infinitely many ways!
+
+Consider: nothing-at-all could be a @code{words}. Or it could be two
+@code{words} in a row, or three, or any number. It could equally well be a
+@code{redirects}, or two, or any number. Or it could be a @code{words}
+followed by three @code{redirects} and another @code{words}. And so on.
+
+Here are two ways to correct these rules. First, to make it a single level
+of sequence:
+
+@example
+sequence: /* empty */
+ | sequence word
+ | sequence redirect
+ ;
+@end example
+
+Second, to prevent either a @code{words} or a @code{redirects}
+from being empty:
+
+@example
+sequence: /* empty */
+ | sequence words
+ | sequence redirects
+ ;
+
+words: word
+ | words word
+ ;
+
+redirects: redirect
+ | redirects redirect
+ ;
+@end example
+
+@node Mystery Conflicts, Stack Overflow, Reduce/Reduce, Algorithm
+@section Mysterious Reduce/Reduce Conflicts
+
+Sometimes reduce/reduce conflicts can occur that don't look warranted.
+Here is an example:
+
+@example
+@group
+%token ID
+
+%%
+def: param_spec return_spec ','
+ ;
+param_spec:
+ type
+ | name_list ':' type
+ ;
+@end group
+@group
+return_spec:
+ type
+ | name ':' type
+ ;
+@end group
+@group
+type: ID
+ ;
+@end group
+@group
+name: ID
+ ;
+name_list:
+ name
+ | name ',' name_list
+ ;
+@end group
+@end example
+
+It would seem that this grammar can be parsed with only a single token
+of look-ahead: when a @code{param_spec} is being read, an @code{ID} is
+a @code{name} if a comma or colon follows, or a @code{type} if another
+@code{ID} follows. In other words, this grammar is LR(1).
+
+@cindex LR(1)
+@cindex LALR(1)
+However, Bison, like most parser generators, cannot actually handle all
+LR(1) grammars. In this grammar, two contexts, that after an @code{ID}
+at the beginning of a @code{param_spec} and likewise at the beginning of
+a @code{return_spec}, are similar enough that Bison assumes they are the
+same. They appear similar because the same set of rules would be
+active---the rule for reducing to a @code{name} and that for reducing to
+a @code{type}. Bison is unable to determine at that stage of processing
+that the rules would require different look-ahead tokens in the two
+contexts, so it makes a single parser state for them both. Combining
+the two contexts causes a conflict later. In parser terminology, this
+occurrence means that the grammar is not LALR(1).
+
+In general, it is better to fix deficiencies than to document them. But
+this particular deficiency is intrinsically hard to fix; parser
+generators that can handle LR(1) grammars are hard to write and tend to
+produce parsers that are very large. In practice, Bison is more useful
+as it is now.
+
+When the problem arises, you can often fix it by identifying the two
+parser states that are being confused, and adding something to make them
+look distinct. In the above example, adding one rule to
+@code{return_spec} as follows makes the problem go away:
+
+@example
+@group
+%token BOGUS
+@dots{}
+%%
+@dots{}
+return_spec:
+ type
+ | name ':' type
+ /* This rule is never used. */
+ | ID BOGUS
+ ;
+@end group
+@end example
+
+This corrects the problem because it introduces the possibility of an
+additional active rule in the context after the @code{ID} at the beginning of
+@code{return_spec}. This rule is not active in the corresponding context
+in a @code{param_spec}, so the two contexts receive distinct parser states.
+As long as the token @code{BOGUS} is never generated by @code{yylex},
+the added rule cannot alter the way actual input is parsed.
+
+In this particular example, there is another way to solve the problem:
+rewrite the rule for @code{return_spec} to use @code{ID} directly
+instead of via @code{name}. This also causes the two confusing
+contexts to have different sets of active rules, because the one for
+@code{return_spec} activates the altered rule for @code{return_spec}
+rather than the one for @code{name}.
+
+@example
+param_spec:
+ type
+ | name_list ':' type
+ ;
+return_spec:
+ type
+ | ID ':' type
+ ;
+@end example
+
+@node Stack Overflow, , Mystery Conflicts, Algorithm
+@section Stack Overflow, and How to Avoid It
+@cindex stack overflow
+@cindex parser stack overflow
+@cindex overflow of parser stack
+
+The Bison parser stack can overflow if too many tokens are shifted and
+not reduced. When this happens, the parser function @code{yyparse}
+returns a nonzero value, pausing only to call @code{yyerror} to report
+the overflow.
+
+@vindex YYMAXDEPTH
+By defining the macro @code{YYMAXDEPTH}, you can control how deep the
+parser stack can become before a stack overflow occurs. Define the
+macro with a value that is an integer. This value is the maximum number
+of tokens that can be shifted (and not reduced) before overflow.
+It must be a constant expression whose value is known at compile time.
+
+The stack space allowed is not necessarily allocated. If you specify a
+large value for @code{YYMAXDEPTH}, the parser actually allocates a small
+stack at first, and then makes it bigger by stages as needed. This
+increasing allocation happens automatically and silently. Therefore,
+you do not need to make @code{YYMAXDEPTH} painfully small merely to save
+space for ordinary inputs that do not need much stack.
+
+@cindex default stack limit
+The default value of @code{YYMAXDEPTH}, if you do not define it, is
+10000.
+
+@vindex YYINITDEPTH
+You can control how much stack is allocated initially by defining the
+macro @code{YYINITDEPTH}. This value too must be a compile-time
+constant integer. The default is 200.
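+
+For example, both macros can be defined in the C declarations section of
+the grammar file; the values below are only an illustrative sketch, not
+recommendations:
+
+@example
+%@{
+#define YYINITDEPTH 500     /* initial stack allocation */
+#define YYMAXDEPTH 15000    /* overflow if this depth is exceeded */
+%@}
+@end example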
+
+@node Error Recovery, Context Dependency, Algorithm, Top
+@chapter Error Recovery
+@cindex error recovery
+@cindex recovery from errors
+
+It is not usually acceptable to have a program terminate on a parse
+error. For example, a compiler should recover sufficiently to parse the
+rest of the input file and check it for errors; a calculator should accept
+another expression.
+
+In a simple interactive command parser where each input is one line, it may
+be sufficient to allow @code{yyparse} to return 1 on error and have the
+caller ignore the rest of the input line when that happens (and then call
+@code{yyparse} again). But this is inadequate for a compiler, because it
+forgets all the syntactic context leading up to the error. A syntax error
+deep within a function in the compiler input should not cause the compiler
+to treat the following line like the beginning of a source file.
+
+@findex error
+You can define how to recover from a syntax error by writing rules to
+recognize the special token @code{error}. This is a terminal symbol that
+is always defined (you need not declare it) and reserved for error
+handling. The Bison parser generates an @code{error} token whenever a
+syntax error happens; if you have provided a rule to recognize this token
+in the current context, the parse can continue.
+
+For example:
+
+@example
+stmnts: /* empty string */
+ | stmnts '\n'
+ | stmnts exp '\n'
+ | stmnts error '\n'
+@end example
+
+The fourth rule in this example says that an error followed by a newline
+makes a valid addition to any @code{stmnts}.
+
+What happens if a syntax error occurs in the middle of an @code{exp}? The
+error recovery rule, interpreted strictly, applies to the precise sequence
+of a @code{stmnts}, an @code{error} and a newline. If an error occurs in
+the middle of an @code{exp}, there will probably be some additional tokens
+and subexpressions on the stack after the last @code{stmnts}, and there
+will be tokens to read before the next newline. So the rule is not
+applicable in the ordinary way.
+
+But Bison can force the situation to fit the rule, by discarding part of
+the semantic context and part of the input. First it discards states and
+objects from the stack until it gets back to a state in which the
+@code{error} token is acceptable. (This means that the subexpressions
+already parsed are discarded, back to the last complete @code{stmnts}.) At
+this point the @code{error} token can be shifted. Then, if the old
+look-ahead token is not acceptable to be shifted next, the parser reads
+tokens and discards them until it finds a token which is acceptable. In
+this example, Bison reads and discards input until the next newline
+so that the fourth rule can apply.
+
+The choice of error rules in the grammar is a choice of strategies for
+error recovery. A simple and useful strategy is simply to skip the rest of
+the current input line or current statement if an error is detected:
+
+@example
+stmnt: error ';' /* on error, skip until ';' is read */
+@end example
+
+It is also useful to recover to the matching close-delimiter of an
+opening-delimiter that has already been parsed. Otherwise the
+close-delimiter will probably appear to be unmatched, and generate another,
+spurious error message:
+
+@example
+primary: '(' expr ')'
+ | '(' error ')'
+ @dots{}
+ ;
+@end example
+
+Error recovery strategies are necessarily guesses. When they guess wrong,
+one syntax error often leads to another. In the above example, the error
+recovery rule guesses that an error is due to bad input within one
+@code{stmnt}. Suppose that instead a spurious semicolon is inserted in the
+middle of a valid @code{stmnt}. After the error recovery rule recovers
+from the first error, another syntax error will be found straightaway,
+since the text following the spurious semicolon is also an invalid
+@code{stmnt}.
+
+To prevent an outpouring of error messages, the parser will output no error
+message for another syntax error that happens shortly after the first; only
+after three consecutive input tokens have been successfully shifted will
+error messages resume.
+
+Note that rules which accept the @code{error} token may have actions, just
+as any other rules can.
+
+@findex yyerrok
+You can make error messages resume immediately by using the macro
+@code{yyerrok} in an action. If you do this in the error rule's action, no
+error messages will be suppressed. This macro requires no arguments;
+@samp{yyerrok;} is a valid C statement.
+
+@findex yyclearin
+The previous look-ahead token is reanalyzed immediately after an error. If
+this is unacceptable, then the macro @code{yyclearin} may be used to clear
+this token. Write the statement @samp{yyclearin;} in the error rule's
+action.
+
+For example, suppose that on a parse error, an error handling routine is
+called that advances the input stream to some point where parsing should
+once again commence. The next symbol returned by the lexical scanner is
+probably correct. The previous look-ahead token ought to be discarded
+with @samp{yyclearin;}.
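+
+Here is a sketch combining the two macros in an error rule's action; the
+routine @code{resynchronize_input} is hypothetical and stands for whatever
+input skipping your application performs:
+
+@example
+stmnt: @dots{}
+ | error
+ @{ resynchronize_input ();  /* hypothetical input-skipping routine */
+ yyclearin;   /* discard the look-ahead read before recovery */
+ yyerrok; @}  /* resume error messages immediately */
+ ;
+@end example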
+
+@vindex YYRECOVERING
+The macro @code{YYRECOVERING} stands for an expression that has the
+value 1 when the parser is recovering from a syntax error, and 0 the
+rest of the time. A value of 1 indicates that error messages are
+currently suppressed for new syntax errors.
+
+@node Context Dependency, Debugging, Error Recovery, Top
+@chapter Handling Context Dependencies
+
+The Bison paradigm is to parse tokens first, then group them into larger
+syntactic units. In many languages, the meaning of a token is affected by
+its context. Although this violates the Bison paradigm, certain techniques
+(known as @dfn{kludges}) may enable you to write Bison parsers for such
+languages.
+
+@menu
+* Semantic Tokens:: Token parsing can depend on the semantic context.
+* Lexical Tie-ins:: Token parsing can depend on the syntactic context.
+* Tie-in Recovery:: Lexical tie-ins have implications for how
+ error recovery rules must be written.
+@end menu
+
+(Actually, ``kludge'' means any technique that gets its job done but is
+neither clean nor robust.)
+
+@node Semantic Tokens, Lexical Tie-ins, , Context Dependency
+@section Semantic Info in Token Types
+
+The C language has a context dependency: the way an identifier is used
+depends on what its current meaning is. For example, consider this:
+
+@example
+foo (x);
+@end example
+
+This looks like a function call statement, but if @code{foo} is a typedef
+name, then this is actually a declaration of @code{x}. How can a Bison
+parser for C decide how to parse this input?
+
+The method used in GNU C is to have two different token types,
+@code{IDENTIFIER} and @code{TYPENAME}. When @code{yylex} finds an
+identifier, it looks up the current declaration of the identifier in order
+to decide which token type to return: @code{TYPENAME} if the identifier is
+declared as a typedef, @code{IDENTIFIER} otherwise.
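+
+Inside @code{yylex}, that decision might look like the following sketch;
+@code{identifier_is_typedef} is a hypothetical query of the compiler's
+symbol table:
+
+@example
+/* Inside yylex, after an identifier has been scanned into `name': */
+if (identifier_is_typedef (name))   /* hypothetical symbol-table lookup */
+ return TYPENAME;
+else
+ return IDENTIFIER;
+@end example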
+
+The grammar rules can then express the context dependency by the choice of
+token type to recognize. @code{IDENTIFIER} is accepted as an expression,
+but @code{TYPENAME} is not. @code{TYPENAME} can start a declaration, but
+@code{IDENTIFIER} cannot. In contexts where the meaning of the identifier
+is @emph{not} significant, such as in declarations that can shadow a
+typedef name, either @code{TYPENAME} or @code{IDENTIFIER} is
+accepted---there is one rule for each of the two token types.
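+
+A sketch of rules shaped this way (the nonterminal @code{declarator} and
+the exact rule bodies are illustrative, not the real C grammar):
+
+@example
+expr: IDENTIFIER
+ | @dots{}
+ ;
+
+declaration: TYPENAME declarator ';'
+ | @dots{}
+ ;
+@end example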
+
+This technique is simple to use if the decision of which kinds of
+identifiers to allow is made at a place close to where the identifier is
+parsed. But in C this is not always so: C allows a declaration to
+redeclare a typedef name provided an explicit type has been specified
+earlier:
+
+@example
+typedef int foo, bar, lose;
+static foo (bar); /* @r{redeclare @code{bar} as static variable} */
+static int foo (lose); /* @r{redeclare @code{foo} as function} */
+@end example
+
+Unfortunately, the name being declared is separated from the declaration
+construct itself by a complicated syntactic structure---the ``declarator''.
+
+As a result, part of the Bison parser for C needs to be duplicated, with
+all the nonterminal names changed: once for parsing a declaration in which
+a typedef name can be redefined, and once for parsing a declaration in
+which that can't be done. Here is a part of the duplication, with actions
+omitted for brevity:
+
+@example
+initdcl:
+ declarator maybeasm '='
+ init
+ | declarator maybeasm
+ ;
+
+notype_initdcl:
+ notype_declarator maybeasm '='
+ init
+ | notype_declarator maybeasm
+ ;
+@end example
+
+@noindent
+Here @code{initdcl} can redeclare a typedef name, but @code{notype_initdcl}
+cannot. The distinction between @code{declarator} and
+@code{notype_declarator} is the same sort of thing.
+
+There is some similarity between this technique and a lexical tie-in
+(described next), in that information which alters the lexical analysis is
+changed during parsing by other parts of the program. The difference is that
+here the information is global, and is used for other purposes in the
+program. A true lexical tie-in has a special-purpose flag controlled by
+the syntactic context.
+
+@node Lexical Tie-ins, Tie-in Recovery, Semantic Tokens, Context Dependency
+@section Lexical Tie-ins
+@cindex lexical tie-in
+
+One way to handle context-dependency is the @dfn{lexical tie-in}: a flag
+which is set by Bison actions, whose purpose is to alter the way tokens are
+parsed.
+
+For example, suppose we have a language vaguely like C, but with a special
+construct @samp{hex (@var{hex-expr})}. After the keyword @code{hex} comes
+an expression in parentheses in which all integers are hexadecimal. In
+particular, the token @samp{a1b} must be treated as an integer rather than
+as an identifier if it appears in that context. Here is how you can do it:
+
+@example
+@group
+%@{
+int hexflag;
+%@}
+%%
+@dots{}
+@end group
+@group
+expr: IDENTIFIER
+ | constant
+ | HEX '('
+ @{ hexflag = 1; @}
+ expr ')'
+ @{ hexflag = 0;
+ $$ = $4; @}
+ | expr '+' expr
+ @{ $$ = make_sum ($1, $3); @}
+ @dots{}
+ ;
+@end group
+
+@group
+constant:
+ INTEGER
+ | STRING
+ ;
+@end group
+@end example
+
+@noindent
+Here we assume that @code{yylex} looks at the value of @code{hexflag}; when
+it is nonzero, all integers are parsed in hexadecimal, and tokens starting
+with letters are parsed as integers if possible.
+
+The declaration of @code{hexflag} shown in the C declarations section of
+the parser file is needed to make it accessible to the actions
+(@pxref{C Declarations, ,The C Declarations Section}). You must also write the code in @code{yylex}
+to obey the flag.
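+
+A sketch of the corresponding test in @code{yylex}, assuming the default
+@code{int} semantic value type; the two scanning helpers are hypothetical:
+
+@example
+if (hexflag)
+ yylval = scan_hexadecimal_integer ();  /* hypothetical */
+else
+ yylval = scan_decimal_integer ();      /* hypothetical */
+return INTEGER;
+@end example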
+
+@node Tie-in Recovery, , Lexical Tie-ins, Context Dependency
+@section Lexical Tie-ins and Error Recovery
+
+Lexical tie-ins make strict demands on any error recovery rules you have.
+@xref{Error Recovery}.
+
+The reason for this is that the purpose of an error recovery rule is to
+abort the parsing of one construct and resume in some larger construct.
+For example, in C-like languages, a typical error recovery rule is to skip
+tokens until the next semicolon, and then start a new statement, like this:
+
+@example
+stmt: expr ';'
+ | IF '(' expr ')' stmt @{ @dots{} @}
+ @dots{}
+ error ';'
+ @{ hexflag = 0; @}
+ ;
+@end example
+
+If there is a syntax error in the middle of a @samp{hex (@var{expr})}
+construct, this error rule will apply, and then the action for the
+completed @samp{hex (@var{expr})} will never run. So @code{hexflag} would
+remain set for the entire rest of the input, or until the next @code{hex}
+keyword, causing identifiers to be misinterpreted as integers.
+
+To avoid this problem the error recovery rule itself clears @code{hexflag}.
+
+There may also be an error recovery rule that works within expressions.
+For example, there could be a rule which applies within parentheses
+and skips to the close-parenthesis:
+
+@example
+@group
+expr: @dots{}
+ | '(' expr ')'
+ @{ $$ = $2; @}
+ | '(' error ')'
+ @dots{}
+@end group
+@end example
+
+If this rule acts within the @code{hex} construct, it is not going to abort
+that construct (since it applies to an inner level of parentheses within
+the construct). Therefore, it should not clear the flag: the rest of
+the @code{hex} construct should be parsed with the flag still in effect.
+
+What if there is an error recovery rule which might abort out of the
+@code{hex} construct or might not, depending on circumstances? There is no
+way you can write the action to determine whether a @code{hex} construct is
+being aborted or not. So if you are using a lexical tie-in, you had better
+make sure your error recovery rules are not of this kind. Each rule must
+be such that you can be sure that it always will, or always won't, have to
+clear the flag.
+
+@node Debugging, Invocation, Context Dependency, Top
+@chapter Debugging Your Parser
+@findex YYDEBUG
+@findex yydebug
+@cindex debugging
+@cindex tracing the parser
+
+If a Bison grammar compiles properly but doesn't do what you want when it
+runs, the @code{yydebug} parser-trace feature can help you figure out why.
+
+To enable compilation of trace facilities, you must define the macro
+@code{YYDEBUG} when you compile the parser. You could use
+@samp{-DYYDEBUG=1} as a compiler option or you could put @samp{#define
+YYDEBUG 1} in the C declarations section of the grammar file
+(@pxref{C Declarations, ,The C Declarations Section}). Alternatively, use the @samp{-t} option when
+you run Bison (@pxref{Invocation, ,Invoking Bison}). We always define @code{YYDEBUG} so that
+debugging is always possible.
+
+The trace facility uses @code{stderr}, so you must add @w{@code{#include
+<stdio.h>}} to the C declarations section unless it is already there.
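+
+Putting these two requirements together, the C declarations section might
+contain (a minimal sketch):
+
+@example
+%@{
+#define YYDEBUG 1
+#include <stdio.h>
+%@}
+@end example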
+
+Once you have compiled the program with trace facilities, the way to
+request a trace is to store a nonzero value in the variable @code{yydebug}.
+You can do this by making the C code do it (in @code{main}, perhaps), or
+you can alter the value with a C debugger.
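+
+For example, @code{main} might turn on tracing before starting the parse.
+This is a minimal sketch; in a separate source file you would also declare
+@code{extern int yydebug;}:
+
+@example
+int
+main (void)
+@{
+ yydebug = 1;       /* write a trace of each parser step to stderr */
+ return yyparse ();
+@}
+@end example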
+
+Each step taken by the parser when @code{yydebug} is nonzero produces a
+line or two of trace information, written on @code{stderr}. The trace
+messages tell you these things:
+
+@itemize @bullet
+@item
+Each time the parser calls @code{yylex}, what kind of token was read.
+
+@item
+Each time a token is shifted, the depth and complete contents of the
+state stack (@pxref{Parser States}).
+
+@item
+Each time a rule is reduced, which rule it is, and the complete contents
+of the state stack afterward.
+@end itemize
+
+To make sense of this information, it helps to refer to the listing file
+produced by the Bison @samp{-v} option (@pxref{Invocation, ,Invoking Bison}). This file
+shows the meaning of each state in terms of positions in various rules, and
+also what each state will do with each possible input token. As you read
+the successive trace messages, you can see that the parser is functioning
+according to its specification in the listing file. Eventually you will
+arrive at the place where something undesirable happens, and you will see
+which parts of the grammar are to blame.
+
+The parser file is a C program and you can use C debuggers on it, but it's
+not easy to interpret what it is doing. The parser function is a
+finite-state machine interpreter, and aside from the actions it executes
+the same code over and over. Only the values of variables show where in
+the grammar it is working.
+
+@findex YYPRINT
+The debugging information normally gives the token type of each token
+read, but not its semantic value. You can optionally define a macro
+named @code{YYPRINT} to provide a way to print the value. If you define
+@code{YYPRINT}, it should take three arguments. The parser will pass a
+standard I/O stream, the numeric code for the token type, and the token
+value (from @code{yylval}).
+
+Here is an example of @code{YYPRINT} suitable for the multi-function
+calculator (@pxref{Mfcalc Decl, ,Declarations for @code{mfcalc}}):
+
+@smallexample
+#define YYPRINT(file, type, value) yyprint (file, type, value)
+
+static void
+yyprint (file, type, value)
+ FILE *file;
+ int type;
+ YYSTYPE value;
+@{
+ if (type == VAR)
+ fprintf (file, " %s", value.tptr->name);
+ else if (type == NUM)
+ fprintf (file, " %d", value.val);
+@}
+@end smallexample
+
+@node Invocation, Table of Symbols, Debugging, Top
+@chapter Invoking Bison
+@cindex invoking Bison
+@cindex Bison invocation
+@cindex options for invoking Bison
+
+The usual way to invoke Bison is as follows:
+
+@example
+bison @var{infile}
+@end example
+
+Here @var{infile} is the grammar file name, which usually ends in
+@samp{.y}. The parser file's name is made by replacing the @samp{.y}
+with @samp{.tab.c}. Thus, running @samp{bison foo.y} yields
+@file{foo.tab.c}, and running @samp{bison hack/foo.y} yields
+@file{hack/foo.tab.c}.@refill
+
+@menu
+* Bison Options:: All the options described in detail,
+ in alphabetical order by short options.
+* Option Cross Key:: Alphabetical list of long options.
+* VMS Invocation:: Bison command syntax on VMS.
+@end menu
+
+@node Bison Options, Option Cross Key, , Invocation
+@section Bison Options
+
+Bison supports both traditional single-letter options and mnemonic long
+option names. Long option names are indicated with @samp{--} instead of
+@samp{-}. Abbreviations for option names are allowed as long as they
+are unique. When a long option takes an argument, like
+@samp{--file-prefix}, connect the option name and the argument with
+@samp{=}.
+
+Here is a list of options that can be used with Bison, alphabetized by
+short option. It is followed by a cross key alphabetized by long
+option.
+
+@table @samp
+@item -b @var{file-prefix}
+@itemx --file-prefix=@var{prefix}
+Specify a prefix to use for all Bison output file names. The names are
+chosen as if the input file were named @file{@var{prefix}.c}.
+
+@item -d
+@itemx --defines
+Write an extra output file containing macro definitions for the token
+type names defined in the grammar and the semantic value type
+@code{YYSTYPE}, as well as a few @code{extern} variable declarations.
+
+If the parser output file is named @file{@var{name}.c} then this file
+is named @file{@var{name}.h}.@refill
+
+This output file is essential if you wish to put the definition of
+@code{yylex} in a separate source file, because @code{yylex} needs to
+be able to refer to token type codes and the variable
+@code{yylval}. @xref{Token Values, ,Semantic Values of Tokens}.@refill
+
+@item -k
+@itemx --token-table
+Pretend that @code{%token_table} was specified. @xref{Decl Summary}.
+
+@item -l
+@itemx --no-lines
+Don't put any @code{#line} preprocessor commands in the parser file.
+Ordinarily Bison puts them in the parser file so that the C compiler
+and debuggers will associate errors with your source file, the
+grammar file. This option causes them to associate errors with the
+parser file, treating it as an independent source file in its own right.
+
+@item -n
+@itemx --no-parser
+Do not include any C code in the parser file; generate tables only. The
+parser file contains just @code{#define} directives and static variable
+declarations.
+
+This option also tells Bison to write the C code for the grammar actions
+into a file named @file{@var{filename}.act}, in the form of a
+brace-surrounded body fit for a @code{switch} statement.
+
+@item -o @var{outfile}
+@itemx --output-file=@var{outfile}
+Specify the name @var{outfile} for the parser file.
+
+The other output files' names are constructed from @var{outfile}
+as described under the @samp{-v} and @samp{-d} options.
+
+@item -p @var{prefix}
+@itemx --name-prefix=@var{prefix}
+Rename the external symbols used in the parser so that they start with
+@var{prefix} instead of @samp{yy}. The precise list of symbols renamed
+is @code{yyparse}, @code{yylex}, @code{yyerror}, @code{yynerrs},
+@code{yylval}, @code{yychar} and @code{yydebug}.
+
+For example, if you use @samp{-p c}, the names become @code{cparse},
+@code{clex}, and so on.
+
+@xref{Multiple Parsers, ,Multiple Parsers in the Same Program}.
+
+@item -r
+@itemx --raw
+Pretend that @code{%raw} was specified. @xref{Decl Summary}.
+
+@item -t
+@itemx --debug
+Output a definition of the macro @code{YYDEBUG} into the parser file,
+so that the debugging facilities are compiled. @xref{Debugging, ,Debugging Your Parser}.
+
+@item -v
+@itemx --verbose
+Write an extra output file containing verbose descriptions of the
+parser states and what is done for each type of look-ahead token in
+that state.
+
+This file also describes all the conflicts, both those resolved by
+operator precedence and the unresolved ones.
+
+The file's name is made by removing @samp{.tab.c} or @samp{.c} from
+the parser output file name, and adding @samp{.output} instead.@refill
+
+Therefore, if the input file is @file{foo.y}, then the parser file is
+called @file{foo.tab.c} by default. As a consequence, the verbose
+output file is called @file{foo.output}.@refill
+
+@item -V
+@itemx --version
+Print the version number of Bison and exit.
+
+@item -h
+@itemx --help
+Print a summary of the command-line options to Bison and exit.
+
+@need 1750
+@item -y
+@itemx --yacc
+@itemx --fixed-output-files
+Equivalent to @samp{-o y.tab.c}; the parser output file is called
+@file{y.tab.c}, and the other outputs are called @file{y.output} and
+@file{y.tab.h}. The purpose of this option is to imitate Yacc's output
+file name conventions. Thus, the following shell script can substitute
+for Yacc:@refill
+
+@example
+bison -y $*
+@end example
+@end table
+
+@node Option Cross Key, VMS Invocation, Bison Options, Invocation
+@section Option Cross Key
+
+Here is a list of options, alphabetized by long option, to help you find
+the corresponding short option.
+
+@tex
+\def\leaderfill{\leaders\hbox to 1em{\hss.\hss}\hfill}
+
+{\tt
+\line{ --debug \leaderfill -t}
+\line{ --defines \leaderfill -d}
+\line{ --file-prefix \leaderfill -b}
+\line{ --fixed-output-files \leaderfill -y}
+\line{ --help \leaderfill -h}
+\line{ --name-prefix \leaderfill -p}
+\line{ --no-lines \leaderfill -l}
+\line{ --no-parser \leaderfill -n}
+\line{ --output-file \leaderfill -o}
+\line{ --raw \leaderfill -r}
+\line{ --token-table \leaderfill -k}
+\line{ --verbose \leaderfill -v}
+\line{ --version \leaderfill -V}
+\line{ --yacc \leaderfill -y}
+}
+@end tex
+
+@ifinfo
+@example
+--debug -t
+--defines -d
+--file-prefix=@var{prefix} -b @var{file-prefix}
+--fixed-output-files --yacc -y
+--help -h
+--name-prefix=@var{prefix} -p @var{name-prefix}
+--no-lines -l
+--no-parser -n
+--output-file=@var{outfile} -o @var{outfile}
+--raw -r
+--token-table -k
+--verbose -v
+--version -V
+@end example
+@end ifinfo
+
+@node VMS Invocation, , Option Cross Key, Invocation
+@section Invoking Bison under VMS
+@cindex invoking Bison under VMS
+@cindex VMS
+
+The command line syntax for Bison on VMS is a variant of the usual
+Bison command syntax---adapted to fit VMS conventions.
+
+To find the VMS equivalent for any Bison option, start with the long
+option, and substitute a @samp{/} for the leading @samp{--}, and
+substitute a @samp{_} for each @samp{-} in the name of the long option.
+For example, the following invocation under VMS:
+
+@example
+bison /debug/name_prefix=bar foo.y
+@end example
+
+@noindent
+is equivalent to the following command under POSIX.
+
+@example
+bison --debug --name-prefix=bar foo.y
+@end example
+
+The VMS file system does not permit filenames such as
+@file{foo.tab.c}. In the above example, the output file
+would instead be named @file{foo_tab.c}.
+
+@node Table of Symbols, Glossary, Invocation, Top
+@appendix Bison Symbols
+@cindex Bison symbols, table of
+@cindex symbols in Bison, table of
+
+@table @code
+@item error
+A token name reserved for error recovery. This token may be used in
+grammar rules so as to allow the Bison parser to recognize an error in
+the grammar without halting the process. In effect, a sentence
+containing an error may be recognized as valid. On a parse error, the
+token @code{error} becomes the current look-ahead token. Actions
+corresponding to @code{error} are then executed, and the look-ahead
+token is reset to the token that originally caused the violation.
+@xref{Error Recovery}.
+
+@item YYABORT
+Macro to pretend that an unrecoverable syntax error has occurred, by
+making @code{yyparse} return 1 immediately. The error reporting
+function @code{yyerror} is not called. @xref{Parser Function, ,The Parser Function @code{yyparse}}.
+
+@item YYACCEPT
+Macro to pretend that a complete utterance of the language has been
+read, by making @code{yyparse} return 0 immediately.
+@xref{Parser Function, ,The Parser Function @code{yyparse}}.
+
+@item YYBACKUP
+Macro to discard a value from the parser stack and fake a look-ahead
+token. @xref{Action Features, ,Special Features for Use in Actions}.
+
+@item YYERROR
+Macro to pretend that a syntax error has just been detected: call
+@code{yyerror} and then perform normal error recovery if possible
+(@pxref{Error Recovery}), or (if recovery is impossible) make
+@code{yyparse} return 1. @xref{Error Recovery}.
+
+@item YYERROR_VERBOSE
+Macro that you define with @code{#define} in the Bison declarations
+section to request verbose, specific error message strings when
+@code{yyerror} is called.
+
+@item YYINITDEPTH
+Macro for specifying the initial size of the parser stack.
+@xref{Stack Overflow}.
+
+@item YYLEX_PARAM
+Macro for specifying an extra argument (or list of extra arguments) for
+@code{yyparse} to pass to @code{yylex}. @xref{Pure Calling,, Calling
+Conventions for Pure Parsers}.
+
+@item YYLTYPE
+Macro for the data type of @code{yylloc}; a structure with four
+members. @xref{Token Positions, ,Textual Positions of Tokens}.
+
+@item yyltype
+Default value for @code{YYLTYPE}.
+
+@item YYMAXDEPTH
+Macro for specifying the maximum size of the parser stack.
+@xref{Stack Overflow}.
+
+@item YYPARSE_PARAM
+Macro for specifying the name of a parameter that @code{yyparse} should
+accept. @xref{Pure Calling,, Calling Conventions for Pure Parsers}.
+
+@item YYRECOVERING
+Macro whose value indicates whether the parser is recovering from a
+syntax error. @xref{Action Features, ,Special Features for Use in Actions}.
+
+@item YYSTYPE
+Macro for the data type of semantic values; @code{int} by default.
+@xref{Value Type, ,Data Types of Semantic Values}.
+
+@item yychar
+External integer variable that contains the integer value of the
+current look-ahead token. (In a pure parser, it is a local variable
+within @code{yyparse}.) Error-recovery rule actions may examine this
+variable. @xref{Action Features, ,Special Features for Use in Actions}.
+
+@item yyclearin
+Macro used in error-recovery rule actions. It clears the previous
+look-ahead token. @xref{Error Recovery}.
+
+@item yydebug
+External integer variable set to zero by default. If @code{yydebug}
+is given a nonzero value, the parser will output information on input
+symbols and parser action. @xref{Debugging, ,Debugging Your Parser}.
+
+@item yyerrok
+Macro to cause parser to recover immediately to its normal mode
+after a parse error. @xref{Error Recovery}.
+
+@item yyerror
+User-supplied function to be called by @code{yyparse} on error. The
+function receives one argument, a pointer to a character string
+containing an error message. @xref{Error Reporting, ,The Error Reporting Function @code{yyerror}}.
+
+@item yylex
+User-supplied lexical analyzer function, called with no arguments
+to get the next token. @xref{Lexical, ,The Lexical Analyzer Function @code{yylex}}.
+
+@item yylval
+External variable in which @code{yylex} should place the semantic
+value associated with a token. (In a pure parser, it is a local
+variable within @code{yyparse}, and its address is passed to
+@code{yylex}.) @xref{Token Values, ,Semantic Values of Tokens}.
+
+@item yylloc
+External variable in which @code{yylex} should place the line and
+column numbers associated with a token. (In a pure parser, it is a
+local variable within @code{yyparse}, and its address is passed to
+@code{yylex}.) You can ignore this variable if you don't use the
+@samp{@@} feature in the grammar actions. @xref{Token Positions, ,Textual Positions of Tokens}.
+
+@item yynerrs
+Global variable which Bison increments each time there is a parse
+error. (In a pure parser, it is a local variable within
+@code{yyparse}.) @xref{Error Reporting, ,The Error Reporting Function @code{yyerror}}.
+
+@item yyparse
+The parser function produced by Bison; call this function to start
+parsing. @xref{Parser Function, ,The Parser Function @code{yyparse}}.
+
+@item %left
+Bison declaration to assign left associativity to token(s).
+@xref{Precedence Decl, ,Operator Precedence}.
+
+@item %no_lines
+Bison declaration to avoid generating @code{#line} directives in the
+parser file. @xref{Decl Summary}.
+
+@item %nonassoc
+Bison declaration to assign nonassociativity to token(s).
+@xref{Precedence Decl, ,Operator Precedence}.
+
+@item %prec
+Bison declaration to assign a precedence to a specific rule.
+@xref{Contextual Precedence, ,Context-Dependent Precedence}.
+
+@item %pure_parser
+Bison declaration to request a pure (reentrant) parser.
+@xref{Pure Decl, ,A Pure (Reentrant) Parser}.
+
+@item %raw
+Bison declaration to use Bison internal token code numbers in token
+tables instead of the usual Yacc-compatible token code numbers.
+@xref{Decl Summary}.
+
+@item %right
+Bison declaration to assign right associativity to token(s).
+@xref{Precedence Decl, ,Operator Precedence}.
+
+@item %start
+Bison declaration to specify the start symbol. @xref{Start Decl, ,The Start-Symbol}.
+
+@item %token
+Bison declaration to declare token(s) without specifying precedence.
+@xref{Token Decl, ,Token Type Names}.
+
+@item %token_table
+Bison declaration to include a token name table in the parser file.
+@xref{Decl Summary}.
+
+@item %type
+Bison declaration to declare nonterminals. @xref{Type Decl, ,Nonterminal Symbols}.
+
+@item %union
+Bison declaration to specify several possible data types for semantic
+values. @xref{Union Decl, ,The Collection of Value Types}.
+@end table
+
+These are the punctuation and delimiters used in Bison input:
+
+@table @samp
+@item %%
+Delimiter used to separate the grammar rule section from the
+Bison declarations section or the additional C code section.
+@xref{Grammar Layout, ,The Overall Layout of a Bison Grammar}.
+
+@item %@{ %@}
+All code listed between @samp{%@{} and @samp{%@}} is copied directly
+to the output file uninterpreted. Such code forms the ``C
+declarations'' section of the input file. @xref{Grammar Outline, ,Outline of a Bison Grammar}.
+
+@item /*@dots{}*/
+Comment delimiters, as in C.
+
+@item :
+Separates a rule's result from its components. @xref{Rules, ,Syntax of Grammar Rules}.
+
+@item ;
+Terminates a rule. @xref{Rules, ,Syntax of Grammar Rules}.
+
+@item |
+Separates alternate rules for the same result nonterminal.
+@xref{Rules, ,Syntax of Grammar Rules}.
+@end table
+
+@node Glossary, Index, Table of Symbols, Top
+@appendix Glossary
+@cindex glossary
+
+@table @asis
+@item Backus-Naur Form (BNF)
+Formal method of specifying context-free grammars. BNF was first used
+in the @cite{ALGOL-60} report, 1963. @xref{Language and Grammar, ,Languages and Context-Free Grammars}.
+
+@item Context-free grammars
+Grammars specified as rules that can be applied regardless of context.
+Thus, if there is a rule which says that an integer can be used as an
+expression, integers are allowed @emph{anywhere} an expression is
+permitted. @xref{Language and Grammar, ,Languages and Context-Free Grammars}.
+
+@item Dynamic allocation
+Allocation of memory that occurs during execution, rather than at
+compile time or on entry to a function.
+
+@item Empty string
+Analogous to the empty set in set theory, the empty string is a
+character string of length zero.
+
+@item Finite-state stack machine
+A ``machine'' that has discrete states in which it is said to exist at
+each instant in time. As input to the machine is processed, the
+machine moves from state to state as specified by the logic of the
+machine. In the case of the parser, the input is the language being
+parsed, and the states correspond to various stages in the grammar
+rules. @xref{Algorithm, ,The Bison Parser Algorithm }.
+
+@item Grouping
+A language construct that is (in general) grammatically divisible;
+for example, `expression' or `declaration' in C.
+@xref{Language and Grammar, ,Languages and Context-Free Grammars}.
+
+@item Infix operator
+An arithmetic operator that is placed between the operands on which it
+performs some operation.
+
+@item Input stream
+A continuous flow of data between devices or programs.
+
+@item Language construct
+One of the typical usage schemas of the language. For example, one of
+the constructs of the C language is the @code{if} statement.
+@xref{Language and Grammar, ,Languages and Context-Free Grammars}.
+
+@item Left associativity
+Operators having left associativity are analyzed from left to right:
+@samp{a+b+c} first computes @samp{a+b} and then combines with
+@samp{c}. @xref{Precedence, ,Operator Precedence}.
+
+@item Left recursion
+A rule whose result symbol is also its first component symbol;
+for example, @samp{expseq1 : expseq1 ',' exp;}. @xref{Recursion, ,Recursive Rules}.
+
+@item Left-to-right parsing
+Parsing a sentence of a language by analyzing it token by token from
+left to right. @xref{Algorithm, ,The Bison Parser Algorithm }.
+
+@item Lexical analyzer (scanner)
+A function that reads an input stream and returns tokens one by one.
+@xref{Lexical, ,The Lexical Analyzer Function @code{yylex}}.
+
+@item Lexical tie-in
+A flag, set by actions in the grammar rules, which alters the way
+tokens are parsed. @xref{Lexical Tie-ins}.
+
+@item Literal string token
+A token which consists of two or more fixed characters.
+@xref{Symbols}.
+
+@item Look-ahead token
+A token already read but not yet shifted. @xref{Look-Ahead, ,Look-Ahead Tokens}.
+
+@item LALR(1)
+The class of context-free grammars that Bison (like most other parser
+generators) can handle; a subset of LR(1). @xref{Mystery Conflicts, ,
+Mysterious Reduce/Reduce Conflicts}.
+
+@item LR(1)
+The class of context-free grammars in which at most one token of
+look-ahead is needed to disambiguate the parsing of any piece of input.
+
+@item Nonterminal symbol
+A grammar symbol standing for a grammatical construct that can
+be expressed through rules in terms of smaller constructs; in other
+words, a construct that is not a token. @xref{Symbols}.
+
+@item Parse error
+An error encountered during parsing of an input stream due to invalid
+syntax. @xref{Error Recovery}.
+
+@item Parser
+A function that recognizes valid sentences of a language by analyzing
+the syntax structure of a set of tokens passed to it from a lexical
+analyzer.
+
+@item Postfix operator
+An arithmetic operator that is placed after the operands upon which it
+performs some operation.
+
+@item Reduction
+Replacing a string of nonterminals and/or terminals with a single
+nonterminal, according to a grammar rule. @xref{Algorithm, ,The Bison Parser Algorithm }.
+
+@item Reentrant
+A reentrant subprogram is a subprogram which can be invoked any
+number of times in parallel, without interference between the various
+invocations. @xref{Pure Decl, ,A Pure (Reentrant) Parser}.
+
+@item Reverse Polish notation
+A language in which all operators are postfix operators.
+
+@item Right recursion
+A rule whose result symbol is also its last component symbol;
+for example, @samp{expseq1: exp ',' expseq1;}. @xref{Recursion, ,Recursive Rules}.
+
+@item Semantics
+In computer languages, the semantics are specified by the actions
+taken for each instance of the language, i.e., the meaning of
+each statement. @xref{Semantics, ,Defining Language Semantics}.
+
+@item Shift
+A parser is said to shift when it makes the choice of analyzing
+further input from the stream rather than reducing immediately some
+already-recognized rule. @xref{Algorithm, ,The Bison Parser Algorithm }.
+
+@item Single-character literal
+A single character that is recognized and interpreted as is.
+@xref{Grammar in Bison, ,From Formal Rules to Bison Input}.
+
+@item Start symbol
+The nonterminal symbol that stands for a complete valid utterance in
+the language being parsed. The start symbol is usually listed as the
+first nonterminal symbol in a language specification.
+@xref{Start Decl, ,The Start-Symbol}.
+
+@item Symbol table
+A data structure where symbol names and associated data are stored
+during parsing to allow for recognition and use of existing
+information in repeated uses of a symbol. @xref{Multi-function Calc}.
+
+@item Token
+A basic, grammatically indivisible unit of a language. The symbol
+that describes a token in the grammar is a terminal symbol.
+The input of the Bison parser is a stream of tokens which comes from
+the lexical analyzer. @xref{Symbols}.
+
+@item Terminal symbol
+A grammar symbol that has no rules in the grammar and therefore
+is grammatically indivisible. The piece of text it represents
+is a token. @xref{Language and Grammar, ,Languages and Context-Free Grammars}.
+@end table
+
+@node Index, , Glossary, Top
+@unnumbered Index
+
+@printindex cp
+
+@contents
+
+@bye
+
+
+
+
+@c old menu
+
+* Introduction::
+* Conditions::
+* Copying:: The GNU General Public License says
+ how you can copy and share Bison
+
+Tutorial sections:
+* Concepts:: Basic concepts for understanding Bison.
+* Examples:: Three simple explained examples of using Bison.
+
+Reference sections:
+* Grammar File:: Writing Bison declarations and rules.
+* Interface:: C-language interface to the parser function @code{yyparse}.
+* Algorithm:: How the Bison parser works at run-time.
+* Error Recovery:: Writing rules for error recovery.
+* Context Dependency::What to do if your language syntax is too
+ messy for Bison to handle straightforwardly.
+* Debugging:: Debugging Bison parsers that parse wrong.
+* Invocation:: How to run Bison (to produce the parser source file).
+* Table of Symbols:: All the keywords of the Bison language are explained.
+* Glossary:: Basic concepts are explained.
+* Index:: Cross-references to the text.
+
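The glossary's entries for "shift", "reduction" and left versus right recursion are easier to see in motion. The sketch below is not part of the bison++ sources: it is a minimal, hand-written trace with invented symbol names, using the left-recursive rule from the glossary, expseq1 : expseq1 ',' exp, together with an assumed base case expseq1 : exp, over the input 1,2,3. Because each list element can be reduced as soon as it has been shifted, the stack stays shallow; with the right-recursive variant nothing can be reduced until the whole list has been read, so the stack grows with the input.

#include <cctype>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    const std::string input = "1,2,3";   // token stream: NUM ',' NUM ',' NUM
    std::vector<std::string> stack;      // parser stack of grammar symbols

    for (std::size_t i = 0; i < input.size(); ++i)
    {
        // Shift: push the next token onto the stack.
        bool is_num = std::isdigit(static_cast<unsigned char>(input[i])) != 0;
        stack.push_back(is_num ? "exp" : ",");
        std::cout << "shift " << input[i] << '\n';

        // Reduce as soon as a complete right-hand side sits on top of the stack.
        if (stack.size() >= 3 && stack[stack.size() - 3] == "expseq1"
            && stack[stack.size() - 2] == "," && stack.back() == "exp")
        {
            stack.resize(stack.size() - 3);
            stack.push_back("expseq1");           // expseq1 : expseq1 ',' exp
            std::cout << "reduce expseq1 : expseq1 ',' exp\n";
        }
        else if (stack.size() == 1 && stack.back() == "exp")
        {
            stack.back() = "expseq1";             // expseq1 : exp
            std::cout << "reduce expseq1 : exp\n";
        }
    }

    // The left-recursive rule keeps the stack at three symbols or fewer,
    // no matter how long the comma-separated list is.
    std::cout << "final stack depth: " << stack.size() << '\n';
    return 0;
}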
diff --git a/tools/bison++/bison_pp.mak b/tools/bison++/bison_pp.mak
new file mode 100644
index 000000000..339705c15
--- /dev/null
+++ b/tools/bison++/bison_pp.mak
@@ -0,0 +1,369 @@
+ORIGIN = PWB
+ORIGIN_VER = 2.0
+PROJ = BISON_PP
+PROJFILE = BISON_PP.MAK
+DEBUG = 1
+
+CC = cl
+CFLAGS_G = /AL /W4 /Za /BATCH /Gt8 /DSTDC_HEADERS /DHAVE_STRERROR
+CFLAGS_D = /f /Od /Zi /Zr
+CFLAGS_R = /f- /Ot /Ol /Og /Oe /Oi /Gs
+CXX = cl
+CXXFLAGS_G = /W2 /BATCH
+CXXFLAGS_D = /f /Zi /Od
+CXXFLAGS_R = /f- /Ot /Oi /Ol /Oe /Og /Gs
+MAPFILE_D = NUL
+MAPFILE_R = NUL
+LFLAGS_G = /NOI /STACK:32000 /BATCH /ONERROR:NOEXE
+LFLAGS_D = /CO /FAR /PACKC
+LFLAGS_R = /EXE /FAR /PACKC
+LINKER = link
+ILINK = ilink
+LRF = echo > NUL
+ILFLAGS = /a /e
+RUNFLAGS = -dtv -o d:\tmp\test.cpp -h d:\tmp\test.h d:\tmp\test.y
+
+FILES = ALLOCATE.C CLOSURE.C DERIVES.C FILES.C GETARGS.C GETOPT.C GETOPT1.C\
+ GRAM.C LALR.C LEX.C MAIN.C NULLABLE.C OUTPUT.C PRINT.C READER.C\
+ REDUCE.C SYMTAB.C VERSION.C WARSHALL.C LR0.C CONFLICT.C
+OBJS = ALLOCATE.obj CLOSURE.obj DERIVES.obj FILES.obj GETARGS.obj GETOPT.obj\
+ GETOPT1.obj GRAM.obj LALR.obj LEX.obj MAIN.obj NULLABLE.obj OUTPUT.obj\
+ PRINT.obj READER.obj REDUCE.obj SYMTAB.obj VERSION.obj WARSHALL.obj\
+ LR0.obj CONFLICT.obj
+
+all: $(PROJ).exe
+
+.SUFFIXES:
+.SUFFIXES: .obj .c
+.SUFFIXES: .obj .c
+
+ALLOCATE.obj : ALLOCATE.C
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoALLOCATE.obj ALLOCATE.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoALLOCATE.obj ALLOCATE.C
+<<
+!ENDIF
+
+CLOSURE.obj : CLOSURE.C system.h machine.h new.h gram.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoCLOSURE.obj CLOSURE.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoCLOSURE.obj CLOSURE.C
+<<
+!ENDIF
+
+DERIVES.obj : DERIVES.C system.h new.h types.h gram.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoDERIVES.obj DERIVES.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoDERIVES.obj DERIVES.C
+<<
+!ENDIF
+
+FILES.obj : FILES.C system.h files.h new.h gram.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoFILES.obj FILES.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoFILES.obj FILES.C
+<<
+!ENDIF
+
+GETARGS.obj : GETARGS.C getopt.h system.h files.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoGETARGS.obj GETARGS.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoGETARGS.obj GETARGS.C
+<<
+!ENDIF
+
+GETOPT.obj : GETOPT.C getopt.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoGETOPT.obj GETOPT.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoGETOPT.obj GETOPT.C
+<<
+!ENDIF
+
+GETOPT1.obj : GETOPT1.C getopt.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoGETOPT1.obj GETOPT1.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoGETOPT1.obj GETOPT1.C
+<<
+!ENDIF
+
+GRAM.obj : GRAM.C
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoGRAM.obj GRAM.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoGRAM.obj GRAM.C
+<<
+!ENDIF
+
+LALR.obj : LALR.C system.h machine.h types.h state.h new.h gram.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoLALR.obj LALR.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoLALR.obj LALR.C
+<<
+!ENDIF
+
+LEX.obj : LEX.C system.h files.h symtab.h lex.h new.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoLEX.obj LEX.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoLEX.obj LEX.C
+<<
+!ENDIF
+
+MAIN.obj : MAIN.C system.h machine.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoMAIN.obj MAIN.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoMAIN.obj MAIN.C
+<<
+!ENDIF
+
+NULLABLE.obj : NULLABLE.C system.h types.h gram.h new.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoNULLABLE.obj NULLABLE.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoNULLABLE.obj NULLABLE.C
+<<
+!ENDIF
+
+OUTPUT.obj : OUTPUT.C system.h machine.h new.h files.h gram.h state.h symtab.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoOUTPUT.obj OUTPUT.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoOUTPUT.obj OUTPUT.C
+<<
+!ENDIF
+
+PRINT.obj : PRINT.C system.h machine.h new.h files.h gram.h state.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoPRINT.obj PRINT.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoPRINT.obj PRINT.C
+<<
+!ENDIF
+
+READER.obj : READER.C system.h files.h new.h symtab.h lex.h gram.h machine.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoREADER.obj READER.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoREADER.obj READER.C
+<<
+!ENDIF
+
+REDUCE.obj : REDUCE.C system.h files.h gram.h machine.h new.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoREDUCE.obj REDUCE.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoREDUCE.obj REDUCE.C
+<<
+!ENDIF
+
+SYMTAB.obj : SYMTAB.C system.h new.h symtab.h gram.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoSYMTAB.obj SYMTAB.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoSYMTAB.obj SYMTAB.C
+<<
+!ENDIF
+
+VERSION.obj : VERSION.C
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoVERSION.obj VERSION.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoVERSION.obj VERSION.C
+<<
+!ENDIF
+
+WARSHALL.obj : WARSHALL.C system.h machine.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoWARSHALL.obj WARSHALL.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoWARSHALL.obj WARSHALL.C
+<<
+!ENDIF
+
+LR0.obj : LR0.C system.h machine.h new.h gram.h state.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoLR0.obj LR0.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoLR0.obj LR0.C
+<<
+!ENDIF
+
+CONFLICT.obj : CONFLICT.C system.h machine.h new.h files.h gram.h state.h
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /FoCONFLICT.obj CONFLICT.C
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /FoCONFLICT.obj CONFLICT.C
+<<
+!ENDIF
+
+
+$(PROJ).exe : $(OBJS)
+!IF $(DEBUG)
+ $(LRF) @<<$(PROJ).lrf
+$(RT_OBJS: = +^
+) $(OBJS: = +^
+)
+$@
+$(MAPFILE_D)
+$(LIBS: = +^
+) +
+$(LLIBS_G: = +^
+) +
+$(LLIBS_D: = +^
+)
+$(DEF_FILE) $(LFLAGS_G) $(LFLAGS_D);
+<<
+!ELSE
+ $(LRF) @<<$(PROJ).lrf
+$(RT_OBJS: = +^
+) $(OBJS: = +^
+)
+$@
+$(MAPFILE_R)
+$(LIBS: = +^
+) +
+$(LLIBS_G: = +^
+) +
+$(LLIBS_R: = +^
+)
+$(DEF_FILE) $(LFLAGS_G) $(LFLAGS_R);
+<<
+!ENDIF
+ $(LINKER) @$(PROJ).lrf
+
+
+.c.obj :
+!IF $(DEBUG)
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_D) /Fo$@ $<
+<<
+!ELSE
+ @$(CC) @<<$(PROJ).rsp
+/c $(CFLAGS_G)
+$(CFLAGS_R) /Fo$@ $<
+<<
+!ENDIF
+
+
+run: $(PROJ).exe
+ $(PROJ).exe $(RUNFLAGS)
+
+debug: $(PROJ).exe
+ CV $(CVFLAGS) $(PROJ).exe $(RUNFLAGS)
+
+# << User_supplied_information >>
diff --git a/tools/bison++/build-stamp b/tools/bison++/build-stamp
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tools/bison++/build-stamp
diff --git a/tools/bison++/closure.cc b/tools/bison++/closure.cc
new file mode 100644
index 000000000..0a3a68446
--- /dev/null
+++ b/tools/bison++/closure.cc
@@ -0,0 +1,347 @@
+/* Subroutines for bison
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* subroutines of file LR0.c.
+
+Entry points:
+
+ closure (items, n)
+
+Given a vector of item numbers items, of length n,
+set up ruleset and itemset to indicate what rules could be run
+and which items could be accepted when those items are the active ones.
+
+ruleset contains a bit for each rule. closure sets the bits
+for all rules which could potentially describe the next input to be read.
+
+itemset is a vector of item numbers; itemsetend points to just beyond the end
+ of the part of it that is significant.
+closure places there the indices of all items which represent units of
+input that could arrive next.
+
+ initialize_closure (n)
+
+Allocates the itemset and ruleset vectors,
+and precomputes useful data so that closure can be called.
+n is the number of elements to allocate for itemset.
+
+ finalize_closure ()
+
+Frees itemset, ruleset and internal data.
+
+*/
+
+#include <stdio.h>
+#include "system.h"
+#include "machine.h"
+#include "new.h"
+#include "gram.h"
+
+
+extern short **derives;
+extern char **tags;
+
+void set_fderives();
+void set_firsts();
+#ifdef DEBUG
+/* debugging printers, defined at the bottom of this file */
+void print_closure(int n);
+void print_firsts();
+void print_fderives();
+#endif
+
+extern void RTC(unsigned* R, int n);
+
+short *itemset;
+short *itemsetend;
+static unsigned *ruleset;
+
+/* internal data. See comments before set_fderives and set_firsts. */
+static unsigned *fderives;
+static unsigned *firsts;
+
+/* number of words required to hold a bit for each rule */
+static int rulesetsize;
+
+/* number of words required to hold a bit for each variable */
+static int varsetsize;
+
+
+void
+initialize_closure(int n)
+{
+ itemset = NEW2(n, short);
+
+ rulesetsize = WORDSIZE(nrules + 1);
+ ruleset = NEW2(rulesetsize, unsigned);
+
+ set_fderives();
+}
+
+
+
+/* set fderives to an nvars by nrules matrix of bits
+ indicating which rules can help derive the beginning of the data
+ for each nonterminal. For example, if symbol 5 can be derived as
+ the sequence of symbols 8 3 20, and one of the rules for deriving
+ symbol 8 is rule 4, then the [5 - ntokens, 4] bit in fderives is set. */
+void
+set_fderives()
+{
+ register unsigned *rrow;
+ register unsigned *vrow;
+ register int j;
+ register unsigned cword;
+ register short *rp;
+ register int b;
+
+ int ruleno;
+ int i;
+
+ fderives = NEW2(nvars * rulesetsize, unsigned) - ntokens * rulesetsize;
+
+ set_firsts();
+
+ rrow = fderives + ntokens * rulesetsize;
+
+ for (i = ntokens; i < nsyms; i++)
+ {
+ vrow = firsts + ((i - ntokens) * varsetsize);
+ cword = *vrow++;
+ b = 0;
+ for (j = ntokens; j < nsyms; j++)
+ {
+ if (cword & (1 << b))
+ {
+ rp = derives[j];
+ while ((ruleno = *rp++) > 0)
+ {
+ SETBIT(rrow, ruleno);
+ }
+ }
+
+ b++;
+ if (b >= BITS_PER_WORD && j + 1 < nsyms)
+ {
+ cword = *vrow++;
+ b = 0;
+ }
+ }
+
+ rrow += rulesetsize;
+ }
+
+#ifdef DEBUG
+ print_fderives();
+#endif
+
+ FREE(firsts);
+}
+
+
+
+/* set firsts to be an nvars by nvars bit matrix indicating which items
+ can represent the beginning of the input corresponding to which other items.
+ For example, if some rule expands symbol 5 into the sequence of symbols 8 3 20,
+ the symbol 8 can be the beginning of the data for symbol 5,
+ so the bit [8 - ntokens, 5 - ntokens] in firsts is set. */
+void
+set_firsts()
+{
+ register unsigned *row;
+/* register int done; JF unused */
+ register int symbol;
+ register short *sp;
+ register int rowsize;
+
+ int i;
+
+ varsetsize = rowsize = WORDSIZE(nvars);
+
+ firsts = NEW2(nvars * rowsize, unsigned);
+
+ row = firsts;
+ for (i = ntokens; i < nsyms; i++)
+ {
+ sp = derives[i];
+ while (*sp >= 0)
+ {
+ symbol = ritem[rrhs[*sp++]];
+ if (ISVAR(symbol))
+ {
+ symbol -= ntokens;
+ SETBIT(row, symbol);
+ }
+ }
+
+ row += rowsize;
+ }
+
+ RTC(firsts, nvars);
+
+#ifdef DEBUG
+ print_firsts();
+#endif
+}
+
+
+void
+closure(short* core, int n)
+{
+ register int ruleno;
+ register unsigned word;
+ register short *csp;
+ register unsigned *dsp;
+ register unsigned *rsp;
+
+ short *csend;
+ unsigned *rsend;
+ int symbol;
+ int itemno;
+
+ rsp = ruleset;
+ rsend = ruleset + rulesetsize;
+ csend = core + n;
+
+ if (n == 0)
+ {
+ dsp = fderives + start_symbol * rulesetsize;
+ while (rsp < rsend)
+ *rsp++ = *dsp++;
+ }
+ else
+ {
+ while (rsp < rsend)
+ *rsp++ = 0;
+
+ csp = core;
+ while (csp < csend)
+ {
+ symbol = ritem[*csp++];
+ if (ISVAR(symbol))
+ {
+ dsp = fderives + symbol * rulesetsize;
+ rsp = ruleset;
+ while (rsp < rsend)
+ *rsp++ |= *dsp++;
+ }
+ }
+ }
+
+ ruleno = 0;
+ itemsetend = itemset;
+ csp = core;
+ rsp = ruleset;
+ while (rsp < rsend)
+ {
+ word = *rsp++;
+ if (word == 0)
+ {
+ ruleno += BITS_PER_WORD;
+ }
+ else
+ {
+ register int b;
+
+ for (b = 0; b < BITS_PER_WORD; b++)
+ {
+ if (word & (1 << b))
+ {
+ itemno = rrhs[ruleno];
+ while (csp < csend && *csp < itemno)
+ *itemsetend++ = *csp++;
+ *itemsetend++ = itemno;
+ }
+
+ ruleno++;
+ }
+ }
+ }
+
+ while (csp < csend)
+ *itemsetend++ = *csp++;
+
+#ifdef DEBUG
+ print_closure(n);
+#endif
+}
+
+
+void
+finalize_closure()
+{
+ FREE(itemset);
+ FREE(ruleset);
+ FREE(fderives + ntokens * rulesetsize);
+}
+
+
+
+#ifdef DEBUG
+
+void
+print_closure(int n)
+{
+ register short *isp;
+
+ printf("\n\nn = %d\n\n", n);
+ for (isp = itemset; isp < itemsetend; isp++)
+ printf(" %d\n", *isp);
+}
+
+
+
+void
+print_firsts()
+{
+ register int i;
+ register int j;
+ register unsigned *rowp;
+
+ printf("\n\n\nFIRSTS\n\n");
+
+ for (i = ntokens; i < nsyms; i++)
+ {
+ printf("\n\n%s firsts\n\n", tags[i]);
+
+ rowp = firsts + ((i - ntokens) * varsetsize);
+
+ for (j = 0; j < nvars; j++)
+ if (BITISSET (rowp, j))
+ printf(" %s\n", tags[j + ntokens]);
+ }
+}
+
+
+
+void
+print_fderives()
+{
+ register int i;
+ register int j;
+ register unsigned *rp;
+
+ printf("\n\n\nFDERIVES\n");
+
+ for (i = ntokens; i < nsyms; i++)
+ {
+ printf("\n\n%s derives\n\n", tags[i]);
+ rp = fderives + i * rulesetsize;
+
+ for (j = 0; j <= nrules; j++)
+ if (BITISSET (rp, j))
+ printf(" %d\n", j);
+ }
+
+ fflush(stdout);
+}
+
+#endif
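The comment block at the top of closure.cc describes fderives as one row of rule bits per nonterminal, and closure() as OR-ing together the rows of every nonterminal that can appear next in the active items. The sketch below restates that idea on a hypothetical three-rule grammar; the rule numbers, symbols and bit rows are invented, and std::bitset stands in for the packed unsigned words that the real code manipulates through the WORDSIZE/SETBIT macros (and closes transitively with RTC(), presumably the Warshall routine listed in the makefile above).

#include <bitset>
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main()
{
    // Hypothetical grammar:
    //   rule 1:  E -> E '+' T
    //   rule 2:  E -> T
    //   rule 3:  T -> NUM
    // fderives: one bit per rule that can derive the beginning of each
    // nonterminal (bit 0 is unused, matching the 1-based rule numbers).
    std::map<std::string, std::bitset<4> > fderives;
    fderives["E"] = std::bitset<4>("1110");   // rules 1, 2 and 3 can start an E
    fderives["T"] = std::bitset<4>("1000");   // only rule 3 can start a T

    // Symbols that follow the dot in the active (core) items,
    // e.g. the single item "$accept -> . E" of the initial state.
    std::vector<std::string> after_dot;
    after_dot.push_back("E");

    // closure(): OR the fderives row of every such nonterminal into ruleset;
    // the result marks every rule that could describe the next input.
    std::bitset<4> ruleset;
    for (std::size_t i = 0; i < after_dot.size(); ++i)
        if (fderives.count(after_dot[i]))     // terminals contribute nothing
            ruleset |= fderives[after_dot[i]];

    for (std::size_t r = 1; r < ruleset.size(); ++r)
        if (ruleset.test(r))
            std::cout << "rule " << r << " is in the closure\n";
    return 0;
}

In the real closure() the same OR runs word by word over ruleset, and the second half of the function turns the set rule bits back into item numbers (via rrhs) while merging them with the already sorted core items.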
diff --git a/tools/bison++/config.log b/tools/bison++/config.log
new file mode 100644
index 000000000..9e62ad789
--- /dev/null
+++ b/tools/bison++/config.log
@@ -0,0 +1,577 @@
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by FULL-PACKAGE-NAME configure VERSION, which was
+generated by GNU Autoconf 2.57. Invocation command line was
+
+ $ ./configure --prefix=/usr --mandir=${prefix}/share/man --infodir=${prefix}/share/info
+
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = oxtan
+uname -m = i686
+uname -r = 2.4.20-pre5
+uname -s = Linux
+uname -v = #2 SMP Mon Sep 2 12:32:10 CEST 2002
+
+/usr/bin/uname -p = unknown
+/bin/uname -X = unknown
+
+/bin/arch = i686
+/usr/bin/arch -k = unknown
+/usr/convex/getsysinfo = unknown
+hostinfo = unknown
+/bin/machine = unknown
+/usr/bin/oslevel = unknown
+/bin/universe = unknown
+
+PATH: /usr/local/bin
+PATH: /usr/bin
+PATH: /bin
+PATH: /usr/bin/X11
+PATH: /usr/games
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+configure:1321: checking for a BSD-compatible install
+configure:1375: result: /usr/bin/install -c
+configure:1386: checking whether build environment is sane
+configure:1429: result: yes
+configure:1444: checking whether make sets $(MAKE)
+configure:1464: result: yes
+configure:1496: checking for working aclocal
+configure:1503: result: found
+configure:1511: checking for working autoconf
+configure:1518: result: found
+configure:1526: checking for working automake
+configure:1533: result: found
+configure:1541: checking for working autoheader
+configure:1548: result: found
+configure:1556: checking for working makeinfo
+configure:1563: result: found
+configure:1580: checking for gawk
+configure:1609: result: no
+configure:1580: checking for mawk
+configure:1596: found /usr/bin/mawk
+configure:1606: result: mawk
+configure:1668: checking for g++
+configure:1684: found /usr/bin/g++
+configure:1694: result: g++
+configure:1710: checking for C++ compiler version
+configure:1713: g++ --version </dev/null >&5
+g++ (GCC) 3.3 (Debian)
+Copyright (C) 2003 Free Software Foundation, Inc.
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+configure:1716: $? = 0
+configure:1718: g++ -v </dev/null >&5
+Reading specs from /usr/lib/gcc-lib/i386-linux/3.3/specs
+Configured with: ../src/configure -v --enable-languages=c,c++,java,f77,pascal,objc,ada,treelang --prefix=/usr --mandir=/usr/share/man --infodir=/usr/share/info --with-gxx-include-dir=/usr/include/c++/3.3 --enable-shared --with-system-zlib --enable-nls --without-included-gettext --enable-__cxa_atexit --enable-clocale=gnu --enable-debug --enable-java-gc=boehm --enable-java-awt=xlib --enable-objc-gc i386-linux
+Thread model: posix
+gcc version 3.3 (Debian)
+configure:1721: $? = 0
+configure:1723: g++ -V </dev/null >&5
+g++: `-V' option must have argument
+configure:1726: $? = 1
+configure:1750: checking for C++ compiler default output
+configure:1753: g++ conftest.cc >&5
+configure:1756: $? = 0
+configure:1802: result: a.out
+configure:1807: checking whether the C++ compiler works
+configure:1813: ./a.out
+configure:1816: $? = 0
+configure:1833: result: yes
+configure:1840: checking whether we are cross compiling
+configure:1842: result: no
+configure:1845: checking for suffix of executables
+configure:1847: g++ -o conftest conftest.cc >&5
+configure:1850: $? = 0
+configure:1875: result:
+configure:1881: checking for suffix of object files
+configure:1903: g++ -c conftest.cc >&5
+configure:1906: $? = 0
+configure:1928: result: o
+configure:1932: checking whether we are using the GNU C++ compiler
+configure:1957: g++ -c conftest.cc >&5
+configure:1960: $? = 0
+configure:1963: test -s conftest.o
+configure:1966: $? = 0
+configure:1979: result: yes
+configure:1985: checking whether g++ accepts -g
+configure:2007: g++ -c -g conftest.cc >&5
+configure:2010: $? = 0
+configure:2013: test -s conftest.o
+configure:2016: $? = 0
+configure:2027: result: yes
+configure:2071: g++ -c -g -O2 conftest.cc >&5
+configure:2074: $? = 0
+configure:2077: test -s conftest.o
+configure:2080: $? = 0
+configure:2107: g++ -c -g -O2 conftest.cc >&5
+configure: In function `int main()':
+configure:2104: error: `exit' undeclared (first use this function)
+configure:2104: error: (Each undeclared identifier is reported only once for
+ each function it appears in.)
+configure:2110: $? = 1
+configure: failed program was:
+| #line 2090 "configure"
+| /* confdefs.h. */
+|
+| #define PACKAGE_NAME "FULL-PACKAGE-NAME"
+| #define PACKAGE_TARNAME "full-package-name"
+| #define PACKAGE_VERSION "VERSION"
+| #define PACKAGE_STRING "FULL-PACKAGE-NAME VERSION"
+| #define PACKAGE_BUGREPORT "BUG-REPORT-ADDRESS"
+| #define PACKAGE "bison++"
+| #define VERSION "2.21.5"
+| /* end confdefs.h. */
+|
+| int
+| main ()
+| {
+| exit (42);
+| ;
+| return 0;
+| }
+configure:2071: g++ -c -g -O2 conftest.cc >&5
+configure:2074: $? = 0
+configure:2077: test -s conftest.o
+configure:2080: $? = 0
+configure:2107: g++ -c -g -O2 conftest.cc >&5
+configure:2110: $? = 0
+configure:2113: test -s conftest.o
+configure:2116: $? = 0
+configure:2185: checking for gcc
+configure:2201: found /usr/bin/gcc
+configure:2211: result: gcc
+configure:2455: checking for C compiler version
+configure:2458: gcc --version </dev/null >&5
+gcc (GCC) 3.3 (Debian)
+Copyright (C) 2003 Free Software Foundation, Inc.
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+configure:2461: $? = 0
+configure:2463: gcc -v </dev/null >&5
+Reading specs from /usr/lib/gcc-lib/i386-linux/3.3/specs
+Configured with: ../src/configure -v --enable-languages=c,c++,java,f77,pascal,objc,ada,treelang --prefix=/usr --mandir=/usr/share/man --infodir=/usr/share/info --with-gxx-include-dir=/usr/include/c++/3.3 --enable-shared --with-system-zlib --enable-nls --without-included-gettext --enable-__cxa_atexit --enable-clocale=gnu --enable-debug --enable-java-gc=boehm --enable-java-awt=xlib --enable-objc-gc i386-linux
+Thread model: posix
+gcc version 3.3 (Debian)
+configure:2466: $? = 0
+configure:2468: gcc -V </dev/null >&5
+gcc: `-V' option must have argument
+configure:2471: $? = 1
+configure:2474: checking whether we are using the GNU C compiler
+configure:2499: gcc -c conftest.c >&5
+configure:2502: $? = 0
+configure:2505: test -s conftest.o
+configure:2508: $? = 0
+configure:2521: result: yes
+configure:2527: checking whether gcc accepts -g
+configure:2549: gcc -c -g conftest.c >&5
+configure:2552: $? = 0
+configure:2555: test -s conftest.o
+configure:2558: $? = 0
+configure:2569: result: yes
+configure:2586: checking for gcc option to accept ANSI C
+configure:2647: gcc -c -g -O2 conftest.c >&5
+configure:2650: $? = 0
+configure:2653: test -s conftest.o
+configure:2656: $? = 0
+configure:2674: result: none needed
+configure:2692: gcc -c -g -O2 conftest.c >&5
+conftest.c:2: error: parse error before "me"
+configure:2695: $? = 1
+configure: failed program was:
+| #ifndef __cplusplus
+| choke me
+| #endif
+configure:2816: checking for a BSD-compatible install
+configure:2870: result: /usr/bin/install -c
+configure:2881: checking whether ln -s works
+configure:2885: result: yes
+configure:2901: checking how to run the C preprocessor
+configure:2937: gcc -E conftest.c
+configure:2943: $? = 0
+configure:2975: gcc -E conftest.c
+configure:2979:28: ac_nonexistent.h: No such file or directory
+configure:2981: $? = 1
+configure: failed program was:
+| #line 2966 "configure"
+| /* confdefs.h. */
+|
+| #define PACKAGE_NAME "FULL-PACKAGE-NAME"
+| #define PACKAGE_TARNAME "full-package-name"
+| #define PACKAGE_VERSION "VERSION"
+| #define PACKAGE_STRING "FULL-PACKAGE-NAME VERSION"
+| #define PACKAGE_BUGREPORT "BUG-REPORT-ADDRESS"
+| #define PACKAGE "bison++"
+| #define VERSION "2.21.5"
+| #ifdef __cplusplus
+| extern "C" void std::exit (int) throw (); using std::exit;
+| #endif
+| /* end confdefs.h. */
+| #include <ac_nonexistent.h>
+configure:3019: result: gcc -E
+configure:3044: gcc -E conftest.c
+configure:3050: $? = 0
+configure:3082: gcc -E conftest.c
+configure:3086:28: ac_nonexistent.h: No such file or directory
+configure:3088: $? = 1
+configure: failed program was:
+| #line 3073 "configure"
+| /* confdefs.h. */
+|
+| #define PACKAGE_NAME "FULL-PACKAGE-NAME"
+| #define PACKAGE_TARNAME "full-package-name"
+| #define PACKAGE_VERSION "VERSION"
+| #define PACKAGE_STRING "FULL-PACKAGE-NAME VERSION"
+| #define PACKAGE_BUGREPORT "BUG-REPORT-ADDRESS"
+| #define PACKAGE "bison++"
+| #define VERSION "2.21.5"
+| #ifdef __cplusplus
+| extern "C" void std::exit (int) throw (); using std::exit;
+| #endif
+| /* end confdefs.h. */
+| #include <ac_nonexistent.h>
+configure:3131: checking for egrep
+configure:3141: result: grep -E
+configure:3146: checking for ANSI C header files
+configure:3172: gcc -c -g -O2 conftest.c >&5
+configure:3175: $? = 0
+configure:3178: test -s conftest.o
+configure:3181: $? = 0
+configure:3270: gcc -o conftest -g -O2 conftest.c >&5
+configure:3273: $? = 0
+configure:3275: ./conftest
+configure:3278: $? = 0
+configure:3293: result: yes
+configure:3317: checking for sys/types.h
+configure:3334: gcc -c -g -O2 conftest.c >&5
+configure:3337: $? = 0
+configure:3340: test -s conftest.o
+configure:3343: $? = 0
+configure:3354: result: yes
+configure:3317: checking for sys/stat.h
+configure:3334: gcc -c -g -O2 conftest.c >&5
+configure:3337: $? = 0
+configure:3340: test -s conftest.o
+configure:3343: $? = 0
+configure:3354: result: yes
+configure:3317: checking for stdlib.h
+configure:3334: gcc -c -g -O2 conftest.c >&5
+configure:3337: $? = 0
+configure:3340: test -s conftest.o
+configure:3343: $? = 0
+configure:3354: result: yes
+configure:3317: checking for string.h
+configure:3334: gcc -c -g -O2 conftest.c >&5
+configure:3337: $? = 0
+configure:3340: test -s conftest.o
+configure:3343: $? = 0
+configure:3354: result: yes
+configure:3317: checking for memory.h
+configure:3334: gcc -c -g -O2 conftest.c >&5
+configure:3337: $? = 0
+configure:3340: test -s conftest.o
+configure:3343: $? = 0
+configure:3354: result: yes
+configure:3317: checking for strings.h
+configure:3334: gcc -c -g -O2 conftest.c >&5
+configure:3337: $? = 0
+configure:3340: test -s conftest.o
+configure:3343: $? = 0
+configure:3354: result: yes
+configure:3317: checking for inttypes.h
+configure:3334: gcc -c -g -O2 conftest.c >&5
+configure:3337: $? = 0
+configure:3340: test -s conftest.o
+configure:3343: $? = 0
+configure:3354: result: yes
+configure:3317: checking for stdint.h
+configure:3334: gcc -c -g -O2 conftest.c >&5
+configure:3337: $? = 0
+configure:3340: test -s conftest.o
+configure:3343: $? = 0
+configure:3354: result: yes
+configure:3317: checking for unistd.h
+configure:3334: gcc -c -g -O2 conftest.c >&5
+configure:3337: $? = 0
+configure:3340: test -s conftest.o
+configure:3343: $? = 0
+configure:3354: result: yes
+configure:3386: checking alloca.h usability
+configure:3399: gcc -c -g -O2 conftest.c >&5
+configure:3402: $? = 0
+configure:3405: test -s conftest.o
+configure:3408: $? = 0
+configure:3418: result: yes
+configure:3422: checking alloca.h presence
+configure:3433: gcc -E conftest.c
+configure:3439: $? = 0
+configure:3458: result: yes
+configure:3494: checking for alloca.h
+configure:3501: result: yes
+configure:3386: checking malloc.h usability
+configure:3399: gcc -c -g -O2 conftest.c >&5
+configure:3402: $? = 0
+configure:3405: test -s conftest.o
+configure:3408: $? = 0
+configure:3418: result: yes
+configure:3422: checking malloc.h presence
+configure:3433: gcc -E conftest.c
+configure:3439: $? = 0
+configure:3458: result: yes
+configure:3494: checking for malloc.h
+configure:3501: result: yes
+configure:3377: checking for memory.h
+configure:3382: result: yes
+configure:3386: checking stddef.h usability
+configure:3399: gcc -c -g -O2 conftest.c >&5
+configure:3402: $? = 0
+configure:3405: test -s conftest.o
+configure:3408: $? = 0
+configure:3418: result: yes
+configure:3422: checking stddef.h presence
+configure:3433: gcc -E conftest.c
+configure:3439: $? = 0
+configure:3458: result: yes
+configure:3494: checking for stddef.h
+configure:3501: result: yes
+configure:3377: checking for stdlib.h
+configure:3382: result: yes
+configure:3377: checking for string.h
+configure:3382: result: yes
+configure:3377: checking for strings.h
+configure:3382: result: yes
+configure:3516: checking for an ANSI C-conforming const
+configure:3584: gcc -c -g -O2 conftest.c >&5
+configure:3587: $? = 0
+configure:3590: test -s conftest.o
+configure:3593: $? = 0
+configure:3604: result: yes
+configure:3614: checking for size_t
+configure:3639: gcc -c -g -O2 conftest.c >&5
+configure:3642: $? = 0
+configure:3645: test -s conftest.o
+configure:3648: $? = 0
+configure:3659: result: yes
+configure:3675: checking for working alloca.h
+configure:3697: gcc -o conftest -g -O2 conftest.c >&5
+configure:3700: $? = 0
+configure:3703: test -s conftest
+configure:3706: $? = 0
+configure:3717: result: yes
+configure:3727: checking for alloca
+configure:3769: gcc -o conftest -g -O2 conftest.c >&5
+configure:3772: $? = 0
+configure:3775: test -s conftest
+configure:3778: $? = 0
+configure:3789: result: yes
+configure:4001: checking for stdlib.h
+configure:4006: result: yes
+configure:4138: checking for GNU libc compatible malloc
+configure:4168: gcc -o conftest -g -O2 conftest.c >&5
+configure:4171: $? = 0
+configure:4173: ./conftest
+configure:4176: $? = 0
+configure:4190: result: yes
+configure:4342: creating ./config.status
+
+## ---------------------- ##
+## Running config.status. ##
+## ---------------------- ##
+
+This file was extended by FULL-PACKAGE-NAME config.status VERSION, which was
+generated by GNU Autoconf 2.57. Invocation command line was
+
+ CONFIG_FILES =
+ CONFIG_HEADERS =
+ CONFIG_LINKS =
+ CONFIG_COMMANDS =
+ $ ./config.status
+
+on oxtan
+
+config.status:621: creating Makefile
+
+## ---------------- ##
+## Cache variables. ##
+## ---------------- ##
+
+ac_cv_c_compiler_gnu=yes
+ac_cv_c_const=yes
+ac_cv_cxx_compiler_gnu=yes
+ac_cv_env_CC_set=
+ac_cv_env_CC_value=
+ac_cv_env_CFLAGS_set=
+ac_cv_env_CFLAGS_value=
+ac_cv_env_CPPFLAGS_set=
+ac_cv_env_CPPFLAGS_value=
+ac_cv_env_CPP_set=
+ac_cv_env_CPP_value=
+ac_cv_env_CXXFLAGS_set=
+ac_cv_env_CXXFLAGS_value=
+ac_cv_env_CXX_set=
+ac_cv_env_CXX_value=
+ac_cv_env_LDFLAGS_set=
+ac_cv_env_LDFLAGS_value=
+ac_cv_env_build_alias_set=
+ac_cv_env_build_alias_value=
+ac_cv_env_host_alias_set=
+ac_cv_env_host_alias_value=
+ac_cv_env_target_alias_set=
+ac_cv_env_target_alias_value=
+ac_cv_exeext=
+ac_cv_func_alloca_works=yes
+ac_cv_func_malloc_0_nonnull=yes
+ac_cv_header_alloca_h=yes
+ac_cv_header_inttypes_h=yes
+ac_cv_header_malloc_h=yes
+ac_cv_header_memory_h=yes
+ac_cv_header_stdc=yes
+ac_cv_header_stddef_h=yes
+ac_cv_header_stdint_h=yes
+ac_cv_header_stdlib_h=yes
+ac_cv_header_string_h=yes
+ac_cv_header_strings_h=yes
+ac_cv_header_sys_stat_h=yes
+ac_cv_header_sys_types_h=yes
+ac_cv_header_unistd_h=yes
+ac_cv_objext=o
+ac_cv_path_install='/usr/bin/install -c'
+ac_cv_prog_AWK=mawk
+ac_cv_prog_CPP='gcc -E'
+ac_cv_prog_ac_ct_CC=gcc
+ac_cv_prog_ac_ct_CXX=g++
+ac_cv_prog_cc_g=yes
+ac_cv_prog_cc_stdc=
+ac_cv_prog_cxx_g=yes
+ac_cv_prog_egrep='grep -E'
+ac_cv_prog_make_make_set=yes
+ac_cv_type_size_t=yes
+ac_cv_working_alloca_h=yes
+
+## ----------------- ##
+## Output variables. ##
+## ----------------- ##
+
+ACLOCAL='aclocal'
+ALLOCA=''
+AUTOCONF='autoconf'
+AUTOHEADER='autoheader'
+AUTOMAKE='automake'
+AWK='mawk'
+CC='gcc'
+CFLAGS='-g -O2'
+CPP='gcc -E'
+CPPFLAGS=''
+CXX='g++'
+CXXFLAGS='-g -O2'
+DEFS='-DPACKAGE_NAME=\"FULL-PACKAGE-NAME\" -DPACKAGE_TARNAME=\"full-package-name\" -DPACKAGE_VERSION=\"VERSION\" -DPACKAGE_STRING=\"FULL-PACKAGE-NAME\ VERSION\" -DPACKAGE_BUGREPORT=\"BUG-REPORT-ADDRESS\" -DPACKAGE=\"bison++\" -DVERSION=\"2.21.5\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_ALLOCA_H=1 -DHAVE_MALLOC_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STDDEF_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_STRINGS_H=1 -DHAVE_ALLOCA_H=1 -DHAVE_ALLOCA=1 -DHAVE_STDLIB_H=1 -DHAVE_MALLOC=1 '
+ECHO_C=''
+ECHO_N='-n'
+ECHO_T=''
+EGREP='grep -E'
+EXEEXT=''
+INSTALL_DATA='${INSTALL} -m 644'
+INSTALL_PROGRAM='${INSTALL}'
+INSTALL_SCRIPT='${INSTALL}'
+LDFLAGS=''
+LIBOBJS=''
+LIBS=''
+LN_S='ln -s'
+LTLIBOBJS=''
+MAKEINFO='makeinfo'
+OBJEXT='o'
+PACKAGE='bison++'
+PACKAGE_BUGREPORT='BUG-REPORT-ADDRESS'
+PACKAGE_NAME='FULL-PACKAGE-NAME'
+PACKAGE_STRING='FULL-PACKAGE-NAME VERSION'
+PACKAGE_TARNAME='full-package-name'
+PACKAGE_VERSION='VERSION'
+PATH_SEPARATOR=':'
+SET_MAKE=''
+SHELL='/bin/sh'
+VERSION='2.21.5'
+ac_ct_CC='gcc'
+ac_ct_CXX='g++'
+bindir='${exec_prefix}/bin'
+build_alias=''
+datadir='${prefix}/share'
+exec_prefix='${prefix}'
+host_alias=''
+includedir='${prefix}/include'
+infodir='${prefix}/share/info'
+libdir='${exec_prefix}/lib'
+libexecdir='${exec_prefix}/libexec'
+localstatedir='${prefix}/var'
+mandir='${prefix}/share/man'
+oldincludedir='/usr/include'
+prefix='/usr'
+program_transform_name='s,x,x,'
+sbindir='${exec_prefix}/sbin'
+sharedstatedir='${prefix}/com'
+sysconfdir='${prefix}/etc'
+target_alias=''
+
+## ----------- ##
+## confdefs.h. ##
+## ----------- ##
+
+#define HAVE_ALLOCA 1
+#define HAVE_ALLOCA_H 1
+#define HAVE_ALLOCA_H 1
+#define HAVE_INTTYPES_H 1
+#define HAVE_MALLOC 1
+#define HAVE_MALLOC_H 1
+#define HAVE_MEMORY_H 1
+#define HAVE_MEMORY_H 1
+#define HAVE_STDDEF_H 1
+#define HAVE_STDINT_H 1
+#define HAVE_STDLIB_H 1
+#define HAVE_STDLIB_H 1
+#define HAVE_STDLIB_H 1
+#define HAVE_STRINGS_H 1
+#define HAVE_STRINGS_H 1
+#define HAVE_STRING_H 1
+#define HAVE_STRING_H 1
+#define HAVE_SYS_STAT_H 1
+#define HAVE_SYS_TYPES_H 1
+#define HAVE_UNISTD_H 1
+#define PACKAGE "bison++"
+#define PACKAGE_BUGREPORT "BUG-REPORT-ADDRESS"
+#define PACKAGE_NAME "FULL-PACKAGE-NAME"
+#define PACKAGE_STRING "FULL-PACKAGE-NAME VERSION"
+#define PACKAGE_TARNAME "full-package-name"
+#define PACKAGE_VERSION "VERSION"
+#define STDC_HEADERS 1
+#define VERSION "2.21.5"
+#endif
+#ifdef __cplusplus
+extern "C" void std::exit (int) throw (); using std::exit;
+
+configure: exit 0
+
+## ---------------------- ##
+## Running config.status. ##
+## ---------------------- ##
+
+This file was extended by FULL-PACKAGE-NAME config.status VERSION, which was
+generated by GNU Autoconf 2.57. Invocation command line was
+
+ CONFIG_FILES = Makefile
+ CONFIG_HEADERS =
+ CONFIG_LINKS =
+ CONFIG_COMMANDS =
+ $ ./config.status
+
+on oxtan
+
+config.status:621: creating Makefile
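One failure recorded above deserves a note: the conftest at configure:2104 calls exit (42) without any declaration, which g++ 3.3 rejects in ISO C++ mode. This appears to be Autoconf 2.57 probing for a usable declaration of exit under C++; the declaration that finally works, extern "C" void std::exit (int) throw (); using std::exit;, shows up in confdefs.h later in this log. A stand-alone program sidesteps the whole issue by including the standard header, as in this small illustration (not generated by configure):

// Minimal illustration: declaring exit via the standard header is all the
// failed conftest above was missing.
#include <cstdlib>

int main()
{
    std::exit(42);   // same status value the generated test program used
}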
diff --git a/tools/bison++/config.status b/tools/bison++/config.status
new file mode 100644
index 000000000..5be880d67
--- /dev/null
+++ b/tools/bison++/config.status
@@ -0,0 +1,695 @@
+#! /bin/sh
+# Generated by configure.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+SHELL=${CONFIG_SHELL-/bin/sh}
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then
+ set -o posix
+fi
+
+# Support unset when possible.
+if (FOO=FOO; unset FOO) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# Work around bugs in pre-3.0 UWIN ksh.
+$as_unset ENV MAIL MAILPATH
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -n "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)$' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; }
+ /^X\/\(\/\/\)$/{ s//\1/; q; }
+ /^X\/\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+
+
+# PATH needs CR, and LINENO needs CR and PATH.
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" || {
+ # Find who we are. Look in the path if we contain no path at all
+ # relative or not.
+ case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+
+ ;;
+ esac
+ # We did not find ourselves, most probably we were run as `sh COMMAND'
+ # in which case we are not to be found in the path.
+ if test "x$as_myself" = x; then
+ as_myself=$0
+ fi
+ if test ! -f "$as_myself"; then
+ { { echo "$as_me:$LINENO: error: cannot find myself; rerun with an absolute path" >&5
+echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ case $CONFIG_SHELL in
+ '')
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for as_base in sh bash ksh sh5; do
+ case $as_dir in
+ /*)
+ if ("$as_dir/$as_base" -c '
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then
+ $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; }
+ $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; }
+ CONFIG_SHELL=$as_dir/$as_base
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$0" ${1+"$@"}
+ fi;;
+ esac
+ done
+done
+;;
+ esac
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line before each line; the second 'sed' does the real
+ # work. The second script uses 'N' to pair each line-number line
+ # with the numbered line, and appends trailing '-' during
+ # substitution so that $LINENO is not a special case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-)
+ sed '=' <$as_myself |
+ sed '
+ N
+ s,$,-,
+ : loop
+ s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3,
+ t loop
+ s,-$,,
+ s,^['$as_cr_digits']*\n,,
+ ' >$as_me.lineno &&
+ chmod +x $as_me.lineno ||
+ { { echo "$as_me:$LINENO: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&5
+echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2;}
+ { (exit 1); exit 1; }; }
+
+ # Don't try to exec as it changes $[0], causing all sort of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensible to this).
+ . ./$as_me.lineno
+ # Exit status is that of the last command.
+ exit
+}
+
+
+case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in
+ *c*,-n*) ECHO_N= ECHO_C='
+' ECHO_T=' ' ;;
+ *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;;
+ *) ECHO_N= ECHO_C='\c' ECHO_T= ;;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+ # We could just check for DJGPP; but this test a) works b) is more generic
+ # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04).
+ if test -f conf$$.exe; then
+ # Don't use ln at all; we don't have any links
+ as_ln_s='cp -p'
+ else
+ as_ln_s='ln -s'
+ fi
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.file
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ as_mkdir_p=false
+fi
+
+as_executable_p="test -f"
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="sed y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="sed y%*+%pp%;s%[^_$as_cr_alnum]%_%g"
+
+
+# IFS
+# We need space, tab and new line, in precisely that order.
+as_nl='
+'
+IFS=" $as_nl"
+
+# CDPATH.
+$as_unset CDPATH
+
+exec 6>&1
+
+# Open the log real soon, to keep \$[0] and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling. Logging --version etc. is OK.
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+} >&5
+cat >&5 <<_CSEOF
+
+This file was extended by FULL-PACKAGE-NAME $as_me VERSION, which was
+generated by GNU Autoconf 2.57. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+_CSEOF
+echo "on `(hostname || uname -n) 2>/dev/null | sed 1q`" >&5
+echo >&5
+config_files=" Makefile"
+
+ac_cs_usage="\
+\`$as_me' instantiates files from templates according to the
+current configuration.
+
+Usage: $0 [OPTIONS] [FILE]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number, then exit
+ -q, --quiet do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+
+Configuration files:
+$config_files
+
+Report bugs to <bug-autoconf@gnu.org>."
+ac_cs_version="\
+FULL-PACKAGE-NAME config.status VERSION
+configured by ./configure, generated by GNU Autoconf 2.57,
+ with options \"'--prefix=/usr' '--mandir=\${prefix}/share/man' '--infodir=\${prefix}/share/info'\"
+
+Copyright 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001
+Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+srcdir=.
+INSTALL="/usr/bin/install -c"
+# If no file are specified by the user, then we need to provide default
+# value. By we need to know if files were specified by the user.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=*)
+ ac_option=`expr "x$1" : 'x\([^=]*\)='`
+ ac_optarg=`expr "x$1" : 'x[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ -*)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ *) # This is not an option, so the user has probably given explicit
+ # arguments.
+ ac_option=$1
+ ac_need_defaults=false;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --vers* | -V )
+ echo "$ac_cs_version"; exit 0 ;;
+ --he | --h)
+ # Conflict between --help and --header
+ { { echo "$as_me:$LINENO: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&5
+echo "$as_me: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&2;}
+ { (exit 1); exit 1; }; };;
+ --help | --hel | -h )
+ echo "$ac_cs_usage"; exit 0 ;;
+ --debug | --d* | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ CONFIG_FILES="$CONFIG_FILES $ac_optarg"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg"
+ ac_need_defaults=false;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) { { echo "$as_me:$LINENO: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&5
+echo "$as_me: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&2;}
+ { (exit 1); exit 1; }; } ;;
+
+ *) ac_config_targets="$ac_config_targets $1" ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+if $ac_cs_recheck; then
+ echo "running /bin/sh ./configure " '--prefix=/usr' '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' $ac_configure_extra_args " --no-create --no-recursion" >&6
+ exec /bin/sh ./configure '--prefix=/usr' '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' $ac_configure_extra_args --no-create --no-recursion
+fi
+
+for ac_config_target in $ac_config_targets
+do
+ case "$ac_config_target" in
+ # Handling of arguments.
+ "Makefile" ) CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+ *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
+echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason to put it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Create a temporary directory, and hook for its removal unless debugging.
+$debug ||
+{
+ trap 'exit_status=$?; rm -rf $tmp && exit $exit_status' 0
+ trap '{ (exit 1); exit 1; }' 1 2 13 15
+}
+
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d -q "./confstatXXXXXX") 2>/dev/null` &&
+ test -n "$tmp" && test -d "$tmp"
+} ||
+{
+ tmp=./confstat$$-$RANDOM
+ (umask 077 && mkdir $tmp)
+} ||
+{
+ echo "$me: cannot create a temporary directory in ." >&2
+ { (exit 1); exit 1; }
+}
+
+
+#
+# CONFIG_FILES section.
+#
+
+# No need to generate the scripts if there are no CONFIG_FILES.
+# This happens for instance when ./config.status config.h
+if test -n "$CONFIG_FILES"; then
+ # Protect against being on the right side of a sed subst in config.status.
+ sed 's/,@/@@/; s/@,/@@/; s/,;t t$/@;t t/; /@;t t$/s/[\\&,]/\\&/g;
+ s/@@/,@/; s/@@/@,/; s/@;t t$/,;t t/' >$tmp/subs.sed <<\CEOF
+s,@SHELL@,/bin/sh,;t t
+s,@PATH_SEPARATOR@,:,;t t
+s,@PACKAGE_NAME@,FULL-PACKAGE-NAME,;t t
+s,@PACKAGE_TARNAME@,full-package-name,;t t
+s,@PACKAGE_VERSION@,VERSION,;t t
+s,@PACKAGE_STRING@,FULL-PACKAGE-NAME VERSION,;t t
+s,@PACKAGE_BUGREPORT@,BUG-REPORT-ADDRESS,;t t
+s,@exec_prefix@,${prefix},;t t
+s,@prefix@,/usr,;t t
+s,@program_transform_name@,s,x,x,,;t t
+s,@bindir@,${exec_prefix}/bin,;t t
+s,@sbindir@,${exec_prefix}/sbin,;t t
+s,@libexecdir@,${exec_prefix}/libexec,;t t
+s,@datadir@,${prefix}/share,;t t
+s,@sysconfdir@,${prefix}/etc,;t t
+s,@sharedstatedir@,${prefix}/com,;t t
+s,@localstatedir@,${prefix}/var,;t t
+s,@libdir@,${exec_prefix}/lib,;t t
+s,@includedir@,${prefix}/include,;t t
+s,@oldincludedir@,/usr/include,;t t
+s,@infodir@,${prefix}/share/info,;t t
+s,@mandir@,${prefix}/share/man,;t t
+s,@build_alias@,,;t t
+s,@host_alias@,,;t t
+s,@target_alias@,,;t t
+s,@DEFS@,-DPACKAGE_NAME=\"FULL-PACKAGE-NAME\" -DPACKAGE_TARNAME=\"full-package-name\" -DPACKAGE_VERSION=\"VERSION\" -DPACKAGE_STRING=\"FULL-PACKAGE-NAME\ VERSION\" -DPACKAGE_BUGREPORT=\"BUG-REPORT-ADDRESS\" -DPACKAGE=\"bison++\" -DVERSION=\"2.21.5\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_ALLOCA_H=1 -DHAVE_MALLOC_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STDDEF_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_STRINGS_H=1 -DHAVE_ALLOCA_H=1 -DHAVE_ALLOCA=1 -DHAVE_STDLIB_H=1 -DHAVE_MALLOC=1 ,;t t
+s,@ECHO_C@,,;t t
+s,@ECHO_N@,-n,;t t
+s,@ECHO_T@,,;t t
+s,@LIBS@,,;t t
+s,@INSTALL_PROGRAM@,${INSTALL},;t t
+s,@INSTALL_SCRIPT@,${INSTALL},;t t
+s,@INSTALL_DATA@,${INSTALL} -m 644,;t t
+s,@PACKAGE@,bison++,;t t
+s,@VERSION@,2.21.5,;t t
+s,@ACLOCAL@,aclocal,;t t
+s,@AUTOCONF@,autoconf,;t t
+s,@AUTOMAKE@,automake,;t t
+s,@AUTOHEADER@,autoheader,;t t
+s,@MAKEINFO@,makeinfo,;t t
+s,@SET_MAKE@,,;t t
+s,@AWK@,mawk,;t t
+s,@CXX@,g++,;t t
+s,@CXXFLAGS@,-g -O2,;t t
+s,@LDFLAGS@,,;t t
+s,@CPPFLAGS@,,;t t
+s,@ac_ct_CXX@,g++,;t t
+s,@EXEEXT@,,;t t
+s,@OBJEXT@,o,;t t
+s,@CC@,gcc,;t t
+s,@CFLAGS@,-g -O2,;t t
+s,@ac_ct_CC@,gcc,;t t
+s,@LN_S@,ln -s,;t t
+s,@CPP@,gcc -E,;t t
+s,@EGREP@,grep -E,;t t
+s,@ALLOCA@,,;t t
+s,@LIBOBJS@,,;t t
+s,@LTLIBOBJS@,,;t t
+CEOF
+
+ # Split the substitutions into bite-sized pieces for seds with
+ # small command number limits, like on Digital OSF/1 and HP-UX.
+ ac_max_sed_lines=48
+ ac_sed_frag=1 # Number of current file.
+ ac_beg=1 # First line for current file.
+ ac_end=$ac_max_sed_lines # Line after last line for current file.
+ ac_more_lines=:
+ ac_sed_cmds=
+ while $ac_more_lines; do
+ if test $ac_beg -gt 1; then
+ sed "1,${ac_beg}d; ${ac_end}q" $tmp/subs.sed >$tmp/subs.frag
+ else
+ sed "${ac_end}q" $tmp/subs.sed >$tmp/subs.frag
+ fi
+ if test ! -s $tmp/subs.frag; then
+ ac_more_lines=false
+ else
+ # The purpose of the label and of the branching condition is to
+ # speed up the sed processing (if there are no `@' at all, there
+ # is no need to browse any of the substitutions).
+ # These are the two extra sed commands mentioned above.
+ (echo ':t
+ /@[a-zA-Z_][a-zA-Z_0-9]*@/!b' && cat $tmp/subs.frag) >$tmp/subs-$ac_sed_frag.sed
+ if test -z "$ac_sed_cmds"; then
+ ac_sed_cmds="sed -f $tmp/subs-$ac_sed_frag.sed"
+ else
+ ac_sed_cmds="$ac_sed_cmds | sed -f $tmp/subs-$ac_sed_frag.sed"
+ fi
+ ac_sed_frag=`expr $ac_sed_frag + 1`
+ ac_beg=$ac_end
+ ac_end=`expr $ac_end + $ac_max_sed_lines`
+ fi
+ done
+ if test -z "$ac_sed_cmds"; then
+ ac_sed_cmds=cat
+ fi
+fi # test -n "$CONFIG_FILES"
+
+for ac_file in : $CONFIG_FILES; do test "x$ac_file" = x: && continue
+ # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in".
+ case $ac_file in
+ - | *:- | *:-:* ) # input from stdin
+ cat >$tmp/stdin
+ ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
+ ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
+ *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
+ ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
+ * ) ac_file_in=$ac_file.in ;;
+ esac
+
+ # Compute @srcdir@, @top_srcdir@, and @INSTALL@ for subdirectories.
+ ac_dir=`(dirname "$ac_file") 2>/dev/null ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ { if $as_mkdir_p; then
+ mkdir -p "$ac_dir"
+ else
+ as_dir="$ac_dir"
+ as_dirs=
+ while test ! -d "$as_dir"; do
+ as_dirs="$as_dir $as_dirs"
+ as_dir=`(dirname "$as_dir") 2>/dev/null ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ done
+ test ! -n "$as_dirs" || mkdir $as_dirs
+ fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5
+echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;}
+ { (exit 1); exit 1; }; }; }
+
+ ac_builddir=.
+
+if test "$ac_dir" != .; then
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A "../" for each directory in $ac_dir_suffix.
+ ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'`
+else
+ ac_dir_suffix= ac_top_builddir=
+fi
+
+case $srcdir in
+ .) # No --srcdir option. We are building in place.
+ ac_srcdir=.
+ if test -z "$ac_top_builddir"; then
+ ac_top_srcdir=.
+ else
+ ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'`
+ fi ;;
+ [\\/]* | ?:[\\/]* ) # Absolute path.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir ;;
+ *) # Relative path.
+ ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_builddir$srcdir ;;
+esac
+# Don't blindly perform a `cd "$ac_dir"/$ac_foo && pwd` since $ac_foo can be
+# absolute.
+ac_abs_builddir=`cd "$ac_dir" && cd $ac_builddir && pwd`
+ac_abs_top_builddir=`cd "$ac_dir" && cd ${ac_top_builddir}. && pwd`
+ac_abs_srcdir=`cd "$ac_dir" && cd $ac_srcdir && pwd`
+ac_abs_top_srcdir=`cd "$ac_dir" && cd $ac_top_srcdir && pwd`
+
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_builddir$INSTALL ;;
+ esac
+
+ if test x"$ac_file" != x-; then
+ { echo "$as_me:$LINENO: creating $ac_file" >&5
+echo "$as_me: creating $ac_file" >&6;}
+ rm -f "$ac_file"
+ fi
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ if test x"$ac_file" = x-; then
+ configure_input=
+ else
+ configure_input="$ac_file. "
+ fi
+ configure_input=$configure_input"Generated from `echo $ac_file_in |
+ sed 's,.*/,,'` by configure."
+
+ # First look for the input files in the build tree, otherwise in the
+ # src tree.
+ ac_file_inputs=`IFS=:
+ for f in $ac_file_in; do
+ case $f in
+ -) echo $tmp/stdin ;;
+ [\\/$]*)
+ # Absolute (can't be DOS-style, as IFS=:)
+ test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
+echo "$as_me: error: cannot find input file: $f" >&2;}
+ { (exit 1); exit 1; }; }
+ echo $f;;
+ *) # Relative
+ if test -f "$f"; then
+ # Build tree
+ echo $f
+ elif test -f "$srcdir/$f"; then
+ # Source tree
+ echo $srcdir/$f
+ else
+ # /dev/null tree
+ { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
+echo "$as_me: error: cannot find input file: $f" >&2;}
+ { (exit 1); exit 1; }; }
+ fi;;
+ esac
+ done` || { (exit 1); exit 1; }
+ sed "/^[ ]*VPATH[ ]*=/{
+s/:*\$(srcdir):*/:/;
+s/:*\${srcdir}:*/:/;
+s/:*@srcdir@:*/:/;
+s/^\([^=]*=[ ]*\):*/\1/;
+s/:*$//;
+s/^[^=]*=[ ]*$//;
+}
+
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s,@configure_input@,$configure_input,;t t
+s,@srcdir@,$ac_srcdir,;t t
+s,@abs_srcdir@,$ac_abs_srcdir,;t t
+s,@top_srcdir@,$ac_top_srcdir,;t t
+s,@abs_top_srcdir@,$ac_abs_top_srcdir,;t t
+s,@builddir@,$ac_builddir,;t t
+s,@abs_builddir@,$ac_abs_builddir,;t t
+s,@top_builddir@,$ac_top_builddir,;t t
+s,@abs_top_builddir@,$ac_abs_top_builddir,;t t
+s,@INSTALL@,$ac_INSTALL,;t t
+" $ac_file_inputs | (eval "$ac_sed_cmds") >$tmp/out
+ rm -f $tmp/stdin
+ if test x"$ac_file" != x-; then
+ mv $tmp/out $ac_file
+ else
+ cat $tmp/out
+ rm -f $tmp/out
+ fi
+
+done
+
+{ (exit 0); exit 0; }
diff --git a/tools/bison++/configure b/tools/bison++/configure
new file mode 100644
index 000000000..d204fce62
--- /dev/null
+++ b/tools/bison++/configure
@@ -0,0 +1,5115 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.57 for FULL-PACKAGE-NAME VERSION.
+#
+# Report bugs to <BUG-REPORT-ADDRESS>.
+#
+# Copyright 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002
+# Free Software Foundation, Inc.
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then
+ set -o posix
+fi
+
+# Support unset when possible.
+if (FOO=FOO; unset FOO) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# Work around bugs in pre-3.0 UWIN ksh.
+$as_unset ENV MAIL MAILPATH
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -n "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)$' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; }
+ /^X\/\(\/\/\)$/{ s//\1/; q; }
+ /^X\/\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+
+
+# PATH needs CR, and LINENO needs CR and PATH.
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" || {
+  # Find who we are.  Look in the path if we contain no path at all,
+  # relative or not.
+ case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+
+ ;;
+ esac
+  # We did not find ourselves; most probably we were run as `sh COMMAND',
+  # in which case we are not to be found in the path.
+ if test "x$as_myself" = x; then
+ as_myself=$0
+ fi
+ if test ! -f "$as_myself"; then
+ { echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2
+ { (exit 1); exit 1; }; }
+ fi
+ case $CONFIG_SHELL in
+ '')
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for as_base in sh bash ksh sh5; do
+ case $as_dir in
+ /*)
+ if ("$as_dir/$as_base" -c '
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then
+ $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; }
+ $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; }
+ CONFIG_SHELL=$as_dir/$as_base
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$0" ${1+"$@"}
+ fi;;
+ esac
+ done
+done
+;;
+ esac
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line before each line; the second 'sed' does the real
+ # work. The second script uses 'N' to pair each line-number line
+ # with the numbered line, and appends trailing '-' during
+ # substitution so that $LINENO is not a special case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-)
+ sed '=' <$as_myself |
+ sed '
+ N
+ s,$,-,
+ : loop
+ s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3,
+ t loop
+ s,-$,,
+ s,^['$as_cr_digits']*\n,,
+ ' >$as_me.lineno &&
+ chmod +x $as_me.lineno ||
+ { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+  # Don't try to exec as it changes $[0], causing all sorts of problems
+  # (the dirname of $[0] is not the place where we might find the
+  # original, and so on.  Autoconf is especially sensitive to this).
+ . ./$as_me.lineno
+ # Exit status is that of the last command.
+ exit
+}
+
+
+case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in
+ *c*,-n*) ECHO_N= ECHO_C='
+' ECHO_T=' ' ;;
+ *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;;
+ *) ECHO_N= ECHO_C='\c' ECHO_T= ;;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+  # We could just check for DJGPP; but this test a) works, b) is more generic,
+  # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04).
+ if test -f conf$$.exe; then
+ # Don't use ln at all; we don't have any links
+ as_ln_s='cp -p'
+ else
+ as_ln_s='ln -s'
+ fi
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.file
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ as_mkdir_p=false
+fi
+
+as_executable_p="test -f"
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="sed y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="sed y%*+%pp%;s%[^_$as_cr_alnum]%_%g"
+
+
+# IFS
+# We need space, tab and new line, in precisely that order.
+as_nl='
+'
+IFS=" $as_nl"
+
+# CDPATH.
+$as_unset CDPATH
+
+
+# Name of the host.
+# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+exec 6>&1
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_config_libobj_dir=.
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+# Maximum number of lines to put in a shell here document.
+# This variable seems obsolete. It should probably be removed, and
+# only ac_max_sed_lines should be used.
+: ${ac_max_here_lines=38}
+
+# Identity of this package.
+PACKAGE_NAME='FULL-PACKAGE-NAME'
+PACKAGE_TARNAME='full-package-name'
+PACKAGE_VERSION='VERSION'
+PACKAGE_STRING='FULL-PACKAGE-NAME VERSION'
+PACKAGE_BUGREPORT='BUG-REPORT-ADDRESS'
+
+ac_unique_file="bison.cc"
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#if HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#if HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#if STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# if HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#if HAVE_STRING_H
+# if !STDC_HEADERS && HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#if HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#else
+# if HAVE_STDINT_H
+# include <stdint.h>
+# endif
+#endif
+#if HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO SET_MAKE AWK CXX CXXFLAGS LDFLAGS CPPFLAGS ac_ct_CXX EXEEXT OBJEXT CC CFLAGS ac_ct_CC LN_S CPP EGREP ALLOCA LIBOBJS LTLIBOBJS'
+ac_subst_files=''
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datadir='${prefix}/share'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+libdir='${exec_prefix}/lib'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+infodir='${prefix}/info'
+mandir='${prefix}/man'
+
+ac_prev=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval "$ac_prev=\$ac_option"
+ ac_prev=
+ continue
+ fi
+
+ ac_optarg=`expr "x$ac_option" : 'x[^=]*=\(.*\)'`
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_option in
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad | --data | --dat | --da)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \
+ | --da=*)
+ datadir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid feature name: $ac_feature" >&2
+ { (exit 1); exit 1; }; }
+ ac_feature=`echo $ac_feature | sed 's/-/_/g'`
+ eval "enable_$ac_feature=no" ;;
+
+ -enable-* | --enable-*)
+ ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid feature name: $ac_feature" >&2
+ { (exit 1); exit 1; }; }
+ ac_feature=`echo $ac_feature | sed 's/-/_/g'`
+ case $ac_option in
+ *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) ac_optarg=yes ;;
+ esac
+ eval "enable_$ac_feature='$ac_optarg'" ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst \
+ | --locals | --local | --loca | --loc | --lo)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* \
+ | --locals=* | --local=* | --loca=* | --loc=* | --lo=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid package name: $ac_package" >&2
+ { (exit 1); exit 1; }; }
+ ac_package=`echo $ac_package| sed 's/-/_/g'`
+ case $ac_option in
+ *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) ac_optarg=yes ;;
+ esac
+ eval "with_$ac_package='$ac_optarg'" ;;
+
+ -without-* | --without-*)
+ ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid package name: $ac_package" >&2
+ { (exit 1); exit 1; }; }
+ ac_package=`echo $ac_package | sed 's/-/_/g'`
+ eval "with_$ac_package=no" ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) { echo "$as_me: error: unrecognized option: $ac_option
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; }
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid variable name: $ac_envvar" >&2
+ { (exit 1); exit 1; }; }
+ ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`
+ eval "$ac_envvar='$ac_optarg'"
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ { echo "$as_me: error: missing argument to $ac_option" >&2
+ { (exit 1); exit 1; }; }
+fi
+
+# Be sure to have absolute paths.
+for ac_var in exec_prefix prefix
+do
+ eval ac_val=$`echo $ac_var`
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* | NONE | '' ) ;;
+ *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+# Be sure to have absolute paths.
+for ac_var in bindir sbindir libexecdir datadir sysconfdir sharedstatedir \
+ localstatedir libdir includedir oldincludedir infodir mandir
+do
+ eval ac_val=$`echo $ac_var`
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) ;;
+ *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
+ If a cross compiler is detected then cross compile mode will be used." >&2
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then its parent.
+ ac_confdir=`(dirname "$0") 2>/dev/null ||
+$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$0" : 'X\(//\)[^/]' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$0" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r $srcdir/$ac_unique_file; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r $srcdir/$ac_unique_file; then
+ if test "$ac_srcdir_defaulted" = yes; then
+ { echo "$as_me: error: cannot find sources ($ac_unique_file) in $ac_confdir or .." >&2
+ { (exit 1); exit 1; }; }
+ else
+ { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2
+ { (exit 1); exit 1; }; }
+ fi
+fi
+(cd $srcdir && test -r ./$ac_unique_file) 2>/dev/null ||
+ { echo "$as_me: error: sources are in $srcdir, but \`cd $srcdir' does not work" >&2
+ { (exit 1); exit 1; }; }
+srcdir=`echo "$srcdir" | sed 's%\([^\\/]\)[\\/]*$%\1%'`
+ac_env_build_alias_set=${build_alias+set}
+ac_env_build_alias_value=$build_alias
+ac_cv_env_build_alias_set=${build_alias+set}
+ac_cv_env_build_alias_value=$build_alias
+ac_env_host_alias_set=${host_alias+set}
+ac_env_host_alias_value=$host_alias
+ac_cv_env_host_alias_set=${host_alias+set}
+ac_cv_env_host_alias_value=$host_alias
+ac_env_target_alias_set=${target_alias+set}
+ac_env_target_alias_value=$target_alias
+ac_cv_env_target_alias_set=${target_alias+set}
+ac_cv_env_target_alias_value=$target_alias
+ac_env_CXX_set=${CXX+set}
+ac_env_CXX_value=$CXX
+ac_cv_env_CXX_set=${CXX+set}
+ac_cv_env_CXX_value=$CXX
+ac_env_CXXFLAGS_set=${CXXFLAGS+set}
+ac_env_CXXFLAGS_value=$CXXFLAGS
+ac_cv_env_CXXFLAGS_set=${CXXFLAGS+set}
+ac_cv_env_CXXFLAGS_value=$CXXFLAGS
+ac_env_LDFLAGS_set=${LDFLAGS+set}
+ac_env_LDFLAGS_value=$LDFLAGS
+ac_cv_env_LDFLAGS_set=${LDFLAGS+set}
+ac_cv_env_LDFLAGS_value=$LDFLAGS
+ac_env_CPPFLAGS_set=${CPPFLAGS+set}
+ac_env_CPPFLAGS_value=$CPPFLAGS
+ac_cv_env_CPPFLAGS_set=${CPPFLAGS+set}
+ac_cv_env_CPPFLAGS_value=$CPPFLAGS
+ac_env_CC_set=${CC+set}
+ac_env_CC_value=$CC
+ac_cv_env_CC_set=${CC+set}
+ac_cv_env_CC_value=$CC
+ac_env_CFLAGS_set=${CFLAGS+set}
+ac_env_CFLAGS_value=$CFLAGS
+ac_cv_env_CFLAGS_set=${CFLAGS+set}
+ac_cv_env_CFLAGS_value=$CFLAGS
+ac_env_CPP_set=${CPP+set}
+ac_env_CPP_value=$CPP
+ac_cv_env_CPP_set=${CPP+set}
+ac_cv_env_CPP_value=$CPP
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures FULL-PACKAGE-NAME VERSION to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+_ACEOF
+
+ cat <<_ACEOF
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --datadir=DIR read-only architecture-independent data [PREFIX/share]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --infodir=DIR info documentation [PREFIX/info]
+ --mandir=DIR man documentation [PREFIX/man]
+_ACEOF
+
+ cat <<\_ACEOF
+
+Program names:
+ --program-prefix=PREFIX prepend PREFIX to installed program names
+ --program-suffix=SUFFIX append SUFFIX to installed program names
+ --program-transform-name=PROGRAM run sed PROGRAM on installed program names
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+ case $ac_init_help in
+ short | recursive ) echo "Configuration of FULL-PACKAGE-NAME VERSION:";;
+ esac
+ cat <<\_ACEOF
+
+Some influential environment variables:
+ CXX C++ compiler command
+ CXXFLAGS C++ compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ CPPFLAGS C/C++ preprocessor flags, e.g. -I<include dir> if you have
+ headers in a nonstandard directory <include dir>
+ CC C compiler command
+ CFLAGS C compiler flags
+ CPP C preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to <BUG-REPORT-ADDRESS>.
+_ACEOF
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ ac_popdir=`pwd`
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d $ac_dir || continue
+ ac_builddir=.
+
+if test "$ac_dir" != .; then
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A "../" for each directory in $ac_dir_suffix.
+ ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'`
+else
+ ac_dir_suffix= ac_top_builddir=
+fi
+
+case $srcdir in
+ .) # No --srcdir option. We are building in place.
+ ac_srcdir=.
+ if test -z "$ac_top_builddir"; then
+ ac_top_srcdir=.
+ else
+ ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'`
+ fi ;;
+ [\\/]* | ?:[\\/]* ) # Absolute path.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir ;;
+ *) # Relative path.
+ ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_builddir$srcdir ;;
+esac
+# Don't blindly perform a `cd "$ac_dir"/$ac_foo && pwd` since $ac_foo can be
+# absolute.
+ac_abs_builddir=`cd "$ac_dir" && cd $ac_builddir && pwd`
+ac_abs_top_builddir=`cd "$ac_dir" && cd ${ac_top_builddir}. && pwd`
+ac_abs_srcdir=`cd "$ac_dir" && cd $ac_srcdir && pwd`
+ac_abs_top_srcdir=`cd "$ac_dir" && cd $ac_top_srcdir && pwd`
+
+ cd $ac_dir
+ # Check for guested configure; otherwise get Cygnus style configure.
+ if test -f $ac_srcdir/configure.gnu; then
+ echo
+ $SHELL $ac_srcdir/configure.gnu --help=recursive
+ elif test -f $ac_srcdir/configure; then
+ echo
+ $SHELL $ac_srcdir/configure --help=recursive
+ elif test -f $ac_srcdir/configure.ac ||
+ test -f $ac_srcdir/configure.in; then
+ echo
+ $ac_configure --help
+ else
+ echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi
+ cd $ac_popdir
+ done
+fi
+
+test -n "$ac_init_help" && exit 0
+if $ac_init_version; then
+ cat <<\_ACEOF
+FULL-PACKAGE-NAME configure VERSION
+generated by GNU Autoconf 2.57
+
+Copyright 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002
+Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit 0
+fi
+exec 5>config.log
+cat >&5 <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by FULL-PACKAGE-NAME $as_me VERSION, which was
+generated by GNU Autoconf 2.57. Invocation command line was
+
+ $ $0 $@
+
+_ACEOF
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+hostinfo = `(hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ echo "PATH: $as_dir"
+done
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_sep=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*)
+ ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;;
+ 2)
+ ac_configure_args1="$ac_configure_args1 '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ ac_configure_args="$ac_configure_args$ac_sep'$ac_arg'"
+ # Get rid of the leading space.
+ ac_sep=" "
+ ;;
+ esac
+ done
+done
+$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; }
+$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; }
+
+# When interrupted or exited, clean up temporary files and complete
+# config.log.  We remove comments because the quotes in them would
+# cause problems or look ugly anyway.
+# WARNING: Be sure not to use single quotes in there, as some shells,
+# such as our DU 5.0 friend, will then `close' the trap.
+trap 'exit_status=$?
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ cat <<\_ASBOX
+## ---------------- ##
+## Cache variables. ##
+## ---------------- ##
+_ASBOX
+ echo
+ # The following way of writing the cache mishandles newlines in values,
+{
+ (set) 2>&1 |
+ case `(ac_space='"'"' '"'"'; set | grep ac_space) 2>&1` in
+ *ac_space=\ *)
+ sed -n \
+ "s/'"'"'/'"'"'\\\\'"'"''"'"'/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='"'"'\\2'"'"'/p"
+ ;;
+ *)
+ sed -n \
+ "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p"
+ ;;
+ esac;
+}
+ echo
+
+ cat <<\_ASBOX
+## ----------------- ##
+## Output variables. ##
+## ----------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=$`echo $ac_var`
+ echo "$ac_var='"'"'$ac_val'"'"'"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ cat <<\_ASBOX
+## ------------- ##
+## Output files. ##
+## ------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=$`echo $ac_var`
+ echo "$ac_var='"'"'$ac_val'"'"'"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ cat <<\_ASBOX
+## ----------- ##
+## confdefs.h. ##
+## ----------- ##
+_ASBOX
+ echo
+ sed "/^$/d" confdefs.h | sort
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ echo "$as_me: caught signal $ac_signal"
+ echo "$as_me: exit $exit_status"
+ } >&5
+ rm -f core core.* *.core &&
+ rm -rf conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+ ' 0
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -rf conftest* confdefs.h
+# AIX cpp loses on an empty file, so make sure it contains at least a newline.
+echo >confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer explicitly selected file to automatically selected ones.
+if test -z "$CONFIG_SITE"; then
+ if test "x$prefix" != xNONE; then
+ CONFIG_SITE="$prefix/share/config.site $prefix/etc/config.site"
+ else
+ CONFIG_SITE="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site"
+ fi
+fi
+for ac_site_file in $CONFIG_SITE; do
+ if test -r "$ac_site_file"; then
+ { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5
+echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file"
+ fi
+done
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special
+ # files actually), so we avoid doing that.
+ if test -f "$cache_file"; then
+ { echo "$as_me:$LINENO: loading cache $cache_file" >&5
+echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . $cache_file;;
+ *) . ./$cache_file;;
+ esac
+ fi
+else
+ { echo "$as_me:$LINENO: creating cache $cache_file" >&5
+echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in `(set) 2>&1 |
+ sed -n 's/^ac_env_\([a-zA-Z_0-9]*\)_set=.*/\1/p'`; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val="\$ac_cv_env_${ac_var}_value"
+ eval ac_new_val="\$ac_env_${ac_var}_value"
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5
+echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5
+echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ { echo "$as_me:$LINENO: former value: $ac_old_val" >&5
+echo "$as_me: former value: $ac_old_val" >&2;}
+ { echo "$as_me:$LINENO: current value: $ac_new_val" >&5
+echo "$as_me: current value: $ac_new_val" >&2;}
+ ac_cache_corrupted=:
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*)
+ ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) ac_configure_args="$ac_configure_args '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5
+echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5
+echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ac_aux_dir=
+for ac_dir in $srcdir $srcdir/.. $srcdir/../..; do
+ if test -f $ac_dir/install-sh; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f $ac_dir/install.sh; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f $ac_dir/shtool; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ { { echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in $srcdir $srcdir/.. $srcdir/../.." >&5
+echo "$as_me: error: cannot find install-sh or install.sh in $srcdir $srcdir/.. $srcdir/../.." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+ac_config_guess="$SHELL $ac_aux_dir/config.guess"
+ac_config_sub="$SHELL $ac_aux_dir/config.sub"
+ac_configure="$SHELL $ac_aux_dir/configure" # This should be Cygnus configure.
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# ./install, which can be erroneously created by make from ./install.sh.
+echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5
+echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6
+if test -z "$INSTALL"; then
+if test "${ac_cv_path_install+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in
+ ./ | .// | /cC/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+done
+
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. We don't cache a
+ # path for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the path is relative.
+ INSTALL=$ac_install_sh
+ fi
+fi
+echo "$as_me:$LINENO: result: $INSTALL" >&5
+echo "${ECHO_T}$INSTALL" >&6
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+echo "$as_me:$LINENO: checking whether build environment is sane" >&5
+echo $ECHO_N "checking whether build environment is sane... $ECHO_C" >&6
+# Just in case
+sleep 1
+echo timestamp > conftestfile
+# Do `set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ set X `ls -Lt $srcdir/configure conftestfile 2> /dev/null`
+ if test "$*" = "X"; then
+ # -L didn't work.
+ set X `ls -t $srcdir/configure conftestfile`
+ fi
+ if test "$*" != "X $srcdir/configure conftestfile" \
+ && test "$*" != "X conftestfile $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ { { echo "$as_me:$LINENO: error: ls -t appears to fail. Make sure there is not a broken
+alias in your environment" >&5
+echo "$as_me: error: ls -t appears to fail. Make sure there is not a broken
+alias in your environment" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+ test "$2" = conftestfile
+ )
+then
+ # Ok.
+ :
+else
+ { { echo "$as_me:$LINENO: error: newly created file is older than distributed files!
+Check your system clock" >&5
+echo "$as_me: error: newly created file is older than distributed files!
+Check your system clock" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+rm -f conftest*
+echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6
+test "$program_prefix" != NONE &&
+ program_transform_name="s,^,$program_prefix,;$program_transform_name"
+# Use a double $ so make ignores it.
+test "$program_suffix" != NONE &&
+ program_transform_name="s,\$,$program_suffix,;$program_transform_name"
+# Double any \ or $. echo might interpret backslashes.
+# The default was `s,x,x'; remove it if useless.
+cat <<\_ACEOF >conftest.sed
+s/[\\$]/&&/g;s/;s,x,x,$//
+_ACEOF
+program_transform_name=`echo $program_transform_name | sed -f conftest.sed`
+rm conftest.sed
+
+echo "$as_me:$LINENO: checking whether ${MAKE-make} sets \$(MAKE)" >&5
+echo $ECHO_N "checking whether ${MAKE-make} sets \$(MAKE)... $ECHO_C" >&6
+set dummy ${MAKE-make}; ac_make=`echo "$2" | sed 'y,./+-,__p_,'`
+if eval "test \"\${ac_cv_prog_make_${ac_make}_set+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.make <<\_ACEOF
+all:
+ @echo 'ac_maketemp="$(MAKE)"'
+_ACEOF
+# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
+eval `${MAKE-make} -f conftest.make 2>/dev/null | grep temp=`
+if test -n "$ac_maketemp"; then
+ eval ac_cv_prog_make_${ac_make}_set=yes
+else
+ eval ac_cv_prog_make_${ac_make}_set=no
+fi
+rm -f conftest.make
+fi
+if eval "test \"`echo '$ac_cv_prog_make_'${ac_make}_set`\" = yes"; then
+ echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6
+ SET_MAKE=
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+ SET_MAKE="MAKE=${MAKE-make}"
+fi
+
+
+PACKAGE=bison++
+
+VERSION=2.21.5
+
+if test "`cd $srcdir && pwd`" != "`pwd`" && test -f $srcdir/config.status; then
+ { { echo "$as_me:$LINENO: error: source directory already configured; run \"make distclean\" there first" >&5
+echo "$as_me: error: source directory already configured; run \"make distclean\" there first" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE "$PACKAGE"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define VERSION "$VERSION"
+_ACEOF
+
+
+
+missing_dir=`cd $ac_aux_dir && pwd`
+echo "$as_me:$LINENO: checking for working aclocal" >&5
+echo $ECHO_N "checking for working aclocal... $ECHO_C" >&6
+# Run test in a subshell; some versions of sh will print an error if
+# an executable is not found, even if stderr is redirected.
+# Redirect stdin to placate older versions of autoconf. Sigh.
+if (aclocal --version) < /dev/null > /dev/null 2>&1; then
+ ACLOCAL=aclocal
+ echo "$as_me:$LINENO: result: found" >&5
+echo "${ECHO_T}found" >&6
+else
+ ACLOCAL="$missing_dir/missing aclocal"
+ echo "$as_me:$LINENO: result: missing" >&5
+echo "${ECHO_T}missing" >&6
+fi
+
+echo "$as_me:$LINENO: checking for working autoconf" >&5
+echo $ECHO_N "checking for working autoconf... $ECHO_C" >&6
+# Run test in a subshell; some versions of sh will print an error if
+# an executable is not found, even if stderr is redirected.
+# Redirect stdin to placate older versions of autoconf. Sigh.
+if (autoconf --version) < /dev/null > /dev/null 2>&1; then
+ AUTOCONF=autoconf
+ echo "$as_me:$LINENO: result: found" >&5
+echo "${ECHO_T}found" >&6
+else
+ AUTOCONF="$missing_dir/missing autoconf"
+ echo "$as_me:$LINENO: result: missing" >&5
+echo "${ECHO_T}missing" >&6
+fi
+
+echo "$as_me:$LINENO: checking for working automake" >&5
+echo $ECHO_N "checking for working automake... $ECHO_C" >&6
+# Run test in a subshell; some versions of sh will print an error if
+# an executable is not found, even if stderr is redirected.
+# Redirect stdin to placate older versions of autoconf. Sigh.
+if (automake --version) < /dev/null > /dev/null 2>&1; then
+ AUTOMAKE=automake
+ echo "$as_me:$LINENO: result: found" >&5
+echo "${ECHO_T}found" >&6
+else
+ AUTOMAKE="$missing_dir/missing automake"
+ echo "$as_me:$LINENO: result: missing" >&5
+echo "${ECHO_T}missing" >&6
+fi
+
+echo "$as_me:$LINENO: checking for working autoheader" >&5
+echo $ECHO_N "checking for working autoheader... $ECHO_C" >&6
+# Run test in a subshell; some versions of sh will print an error if
+# an executable is not found, even if stderr is redirected.
+# Redirect stdin to placate older versions of autoconf. Sigh.
+if (autoheader --version) < /dev/null > /dev/null 2>&1; then
+ AUTOHEADER=autoheader
+ echo "$as_me:$LINENO: result: found" >&5
+echo "${ECHO_T}found" >&6
+else
+ AUTOHEADER="$missing_dir/missing autoheader"
+ echo "$as_me:$LINENO: result: missing" >&5
+echo "${ECHO_T}missing" >&6
+fi
+
+echo "$as_me:$LINENO: checking for working makeinfo" >&5
+echo $ECHO_N "checking for working makeinfo... $ECHO_C" >&6
+# Run test in a subshell; some versions of sh will print an error if
+# an executable is not found, even if stderr is redirected.
+# Redirect stdin to placate older versions of autoconf. Sigh.
+if (makeinfo --version) < /dev/null > /dev/null 2>&1; then
+ MAKEINFO=makeinfo
+ echo "$as_me:$LINENO: result: found" >&5
+echo "${ECHO_T}found" >&6
+else
+ MAKEINFO="$missing_dir/missing makeinfo"
+ echo "$as_me:$LINENO: result: missing" >&5
+echo "${ECHO_T}missing" >&6
+fi
+
+
+
+#AM_CONFIG_HEADER([config.h])
+
+# Checks for programs.
+for ac_prog in gawk mawk nawk awk
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_AWK+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$AWK"; then
+ ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_AWK="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+AWK=$ac_cv_prog_AWK
+if test -n "$AWK"; then
+ echo "$as_me:$LINENO: result: $AWK" >&5
+echo "${ECHO_T}$AWK" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$AWK" && break
+done
+
+ac_ext=cc
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CXX+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CXX"; then
+ ac_cv_prog_CXX="$CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+CXX=$ac_cv_prog_CXX
+if test -n "$CXX"; then
+ echo "$as_me:$LINENO: result: $CXX" >&5
+echo "${ECHO_T}$CXX" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$CXX" && break
+ done
+fi
+if test -z "$CXX"; then
+ ac_ct_CXX=$CXX
+ for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CXX"; then
+ ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CXX="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+ echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5
+echo "${ECHO_T}$ac_ct_CXX" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$ac_ct_CXX" && break
+done
+test -n "$ac_ct_CXX" || ac_ct_CXX="g++"
+
+ CXX=$ac_ct_CXX
+fi
+
+
+# Provide some information about the compiler.
+echo "$as_me:$LINENO:" \
+ "checking for C++ compiler version" >&5
+ac_compiler=`set X $ac_compile; echo $2`
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version </dev/null >&5\"") >&5
+ (eval $ac_compiler --version </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v </dev/null >&5\"") >&5
+ (eval $ac_compiler -v </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V </dev/null >&5\"") >&5
+ (eval $ac_compiler -V </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It will help us diagnose broken compilers and get an idea of exeext.
+echo "$as_me:$LINENO: checking for C++ compiler default output" >&5
+echo $ECHO_N "checking for C++ compiler default output... $ECHO_C" >&6
+ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+if { (eval echo "$as_me:$LINENO: \"$ac_link_default\"") >&5
+ (eval $ac_link_default) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # Find the output, starting from the most likely. This scheme is
+# not robust to junk in `.', hence go to wildcards (a.*) only as a last
+# resort.
+
+# Be careful to initialize this variable, since it used to be cached.
+# Otherwise an old cache value of `no' led to `EXEEXT = no' in a Makefile.
+ac_cv_exeext=
+# b.out is created by i960 compilers.
+for ac_file in a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out
+do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj )
+ ;;
+ conftest.$ac_ext )
+ # This is the source file.
+ ;;
+ [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+ *.* )
+ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ # FIXME: I believe we export ac_cv_exeext for Libtool,
+ # but it would be cool to find out if it's true. Does anybody
+ # maintain Libtool? --akim.
+ export ac_cv_exeext
+ break;;
+ * )
+ break;;
+ esac
+done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { echo "$as_me:$LINENO: error: C++ compiler cannot create executables
+See \`config.log' for more details." >&5
+echo "$as_me: error: C++ compiler cannot create executables
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; }
+fi
+
+ac_exeext=$ac_cv_exeext
+echo "$as_me:$LINENO: result: $ac_file" >&5
+echo "${ECHO_T}$ac_file" >&6
+
+# Check the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+echo "$as_me:$LINENO: checking whether the C++ compiler works" >&5
+echo $ECHO_N "checking whether the C++ compiler works... $ECHO_C" >&6
+# FIXME: These cross compiler hacks should be removed for Autoconf 3.0
+# If not cross compiling, check that we can run a simple program.
+if test "$cross_compiling" != yes; then
+ if { ac_try='./$ac_file'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { echo "$as_me:$LINENO: error: cannot run C++ compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run C++ compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ fi
+fi
+echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6
+
+rm -f a.out a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+# Check the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+echo "$as_me:$LINENO: checking whether we are cross compiling" >&5
+echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6
+echo "$as_me:$LINENO: result: $cross_compiling" >&5
+echo "${ECHO_T}$cross_compiling" >&6
+
+echo "$as_me:$LINENO: checking for suffix of executables" >&5
+echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ export ac_cv_exeext
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+rm -f conftest$ac_cv_exeext
+echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5
+echo "${ECHO_T}$ac_cv_exeext" >&6
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+echo "$as_me:$LINENO: checking for suffix of object files" >&5
+echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6
+if test "${ac_cv_objext+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ for ac_file in `(ls conftest.o conftest.obj; ls conftest.*) 2>/dev/null`; do
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_objext" >&5
+echo "${ECHO_T}$ac_cv_objext" >&6
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5
+echo $ECHO_N "checking whether we are using the GNU C++ compiler... $ECHO_C" >&6
+if test "${ac_cv_cxx_compiler_gnu+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_compiler_gnu=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_compiler_gnu=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+
+fi
+echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5
+echo "${ECHO_T}$ac_cv_cxx_compiler_gnu" >&6
+GXX=`test $ac_compiler_gnu = yes && echo yes`
+ac_test_CXXFLAGS=${CXXFLAGS+set}
+ac_save_CXXFLAGS=$CXXFLAGS
+CXXFLAGS="-g"
+echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5
+echo $ECHO_N "checking whether $CXX accepts -g... $ECHO_C" >&6
+if test "${ac_cv_prog_cxx_g+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cxx_g=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_prog_cxx_g=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5
+echo "${ECHO_T}$ac_cv_prog_cxx_g" >&6
+if test "$ac_test_CXXFLAGS" = set; then
+ CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+ if test "$GXX" = yes; then
+ CXXFLAGS="-g -O2"
+ else
+ CXXFLAGS="-g"
+ fi
+else
+ if test "$GXX" = yes; then
+ CXXFLAGS="-O2"
+ else
+ CXXFLAGS=
+ fi
+fi
+for ac_declaration in \
+ ''\
+ 'extern "C" void std::exit (int) throw (); using std::exit;' \
+ 'extern "C" void std::exit (int); using std::exit;' \
+ 'extern "C" void exit (int) throw ();' \
+ 'extern "C" void exit (int);' \
+ 'void exit (int);' \
+ '#include <stdlib.h>'
+do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+$ac_declaration
+int
+main ()
+{
+exit (42);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+continue
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_declaration
+int
+main ()
+{
+exit (42);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+rm -f conftest*
+if test -n "$ac_declaration"; then
+ echo '#ifdef __cplusplus' >>confdefs.h
+ echo $ac_declaration >>confdefs.h
+ echo '#endif' >>confdefs.h
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="${ac_tool_prefix}gcc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="gcc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+echo "${ECHO_T}$ac_ct_CC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ CC=$ac_ct_CC
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="${ac_tool_prefix}cc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="cc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+echo "${ECHO_T}$ac_ct_CC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ CC=$ac_ct_CC
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+fi
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# != 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+ fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in cl
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cl
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+echo "${ECHO_T}$ac_ct_CC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$ac_ct_CC" && break
+done
+
+ CC=$ac_ct_CC
+fi
+
+fi
+
+
+test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&5
+echo "$as_me: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+
+# Provide some information about the compiler.
+echo "$as_me:$LINENO:" \
+ "checking for C compiler version" >&5
+ac_compiler=`set X $ac_compile; echo $2`
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version </dev/null >&5\"") >&5
+ (eval $ac_compiler --version </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v </dev/null >&5\"") >&5
+ (eval $ac_compiler -v </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V </dev/null >&5\"") >&5
+ (eval $ac_compiler -V </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+
+echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5
+echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6
+if test "${ac_cv_c_compiler_gnu+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_compiler_gnu=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_compiler_gnu=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5
+echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6
+GCC=`test $ac_compiler_gnu = yes && echo yes`
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+CFLAGS="-g"
+echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5
+echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6
+if test "${ac_cv_prog_cc_g+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cc_g=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_prog_cc_g=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5
+echo "${ECHO_T}$ac_cv_prog_cc_g" >&6
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+echo "$as_me:$LINENO: checking for $CC option to accept ANSI C" >&5
+echo $ECHO_N "checking for $CC option to accept ANSI C... $ECHO_C" >&6
+if test "${ac_cv_prog_cc_stdc+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_prog_cc_stdc=no
+ac_save_CC=$CC
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+# Don't try gcc -ansi; that turns off useful extensions and
+# breaks some systems' header files.
+# AIX -qlanglvl=ansi
+# Ultrix and OSF/1 -std1
+# HP-UX 10.20 and later -Ae
+# HP-UX older versions -Aa -D_HPUX_SOURCE
+# SVR4 -Xc -D__EXTENSIONS__
+for ac_arg in "" -qlanglvl=ansi -std1 -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cc_stdc=$ac_arg
+break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext
+done
+rm -f conftest.$ac_ext conftest.$ac_objext
+CC=$ac_save_CC
+
+fi
+
+case "x$ac_cv_prog_cc_stdc" in
+ x|xno)
+ echo "$as_me:$LINENO: result: none needed" >&5
+echo "${ECHO_T}none needed" >&6 ;;
+ *)
+ echo "$as_me:$LINENO: result: $ac_cv_prog_cc_stdc" >&5
+echo "${ECHO_T}$ac_cv_prog_cc_stdc" >&6
+ CC="$CC $ac_cv_prog_cc_stdc" ;;
+esac
+
+# Some people use a C++ compiler to compile C. Since we use `exit',
+# in C++ we need to declare it. In case someone uses the same compiler
+# for both compiling C and C++ we need to have the C++ compiler decide
+# the declaration of exit, since it's the most demanding environment.
+cat >conftest.$ac_ext <<_ACEOF
+#ifndef __cplusplus
+ choke me
+#endif
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ for ac_declaration in \
+ ''\
+ 'extern "C" void std::exit (int) throw (); using std::exit;' \
+ 'extern "C" void std::exit (int); using std::exit;' \
+ 'extern "C" void exit (int) throw ();' \
+ 'extern "C" void exit (int);' \
+ 'void exit (int);' \
+ '#include <stdlib.h>'
+do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+$ac_declaration
+int
+main ()
+{
+exit (42);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+continue
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_declaration
+int
+main ()
+{
+exit (42);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+rm -f conftest*
+if test -n "$ac_declaration"; then
+ echo '#ifdef __cplusplus' >>confdefs.h
+ echo $ac_declaration >>confdefs.h
+ echo '#endif' >>confdefs.h
+fi
+
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# ./install, which can be erroneously created by make from ./install.sh.
+echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5
+echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6
+if test -z "$INSTALL"; then
+if test "${ac_cv_path_install+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in
+ ./ | .// | /cC/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+done
+
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. We don't cache a
+ # path for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the path is relative.
+ INSTALL=$ac_install_sh
+ fi
+fi
+echo "$as_me:$LINENO: result: $INSTALL" >&5
+echo "${ECHO_T}$INSTALL" >&6
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+echo "$as_me:$LINENO: checking whether ln -s works" >&5
+echo $ECHO_N "checking whether ln -s works... $ECHO_C" >&6
+LN_S=$as_ln_s
+if test "$LN_S" = "ln -s"; then
+ echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6
+else
+ echo "$as_me:$LINENO: result: no, using $LN_S" >&5
+echo "${ECHO_T}no, using $LN_S" >&6
+fi
+
+# Checks for libraries.
+
+# Checks for header files.
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5
+echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+ CPP=
+fi
+if test -z "$CPP"; then
+ if test "${ac_cv_prog_CPP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ # Double quotes because CPP needs to be expanded
+ for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether non-existent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ # Broken: success on invalid input.
+continue
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ break
+fi
+
+ done
+ ac_cv_prog_CPP=$CPP
+
+fi
+ CPP=$ac_cv_prog_CPP
+else
+ ac_cv_prog_CPP=$CPP
+fi
+echo "$as_me:$LINENO: result: $CPP" >&5
+echo "${ECHO_T}$CPP" >&6
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether non-existent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ # Broken: success on invalid input.
+continue
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ :
+else
+ { { echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&5
+echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+echo "$as_me:$LINENO: checking for egrep" >&5
+echo $ECHO_N "checking for egrep... $ECHO_C" >&6
+if test "${ac_cv_prog_egrep+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if echo a | (grep -E '(a|b)') >/dev/null 2>&1
+ then ac_cv_prog_egrep='grep -E'
+ else ac_cv_prog_egrep='egrep'
+ fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_prog_egrep" >&5
+echo "${ECHO_T}$ac_cv_prog_egrep" >&6
+ EGREP=$ac_cv_prog_egrep
+
+
+echo "$as_me:$LINENO: checking for ANSI C header files" >&5
+echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6
+if test "${ac_cv_header_stdc+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_header_stdc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_header_stdc=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then
+ :
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ctype.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ if (XOR (islower (i), ISLOWER (i))
+ || toupper (i) != TOUPPER (i))
+ exit(2);
+ exit (0);
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+ac_cv_header_stdc=no
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5
+echo "${ECHO_T}$ac_cv_header_stdc" >&6
+if test $ac_cv_header_stdc = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define STDC_HEADERS 1
+_ACEOF
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+
+
+
+
+
+
+
+
+
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+ inttypes.h stdint.h unistd.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_Header=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_Header=no"
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+
+
+
+
+
+
+
+for ac_header in alloca.h malloc.h memory.h stddef.h stdlib.h string.h strings.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6
+else
+ # Is the header compilable?
+echo "$as_me:$LINENO: checking $ac_header usability" >&5
+echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_header_compiler=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_header_compiler=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+echo "${ECHO_T}$ac_header_compiler" >&6
+
+# Is the header present?
+echo "$as_me:$LINENO: checking $ac_header presence" >&5
+echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <$ac_header>
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ ac_header_preproc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+rm -f conftest.err conftest.$ac_ext
+echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+echo "${ECHO_T}$ac_header_preproc" >&6
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc in
+ yes:no )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ (
+ cat <<\_ASBOX
+## ------------------------------------ ##
+## Report this to bug-autoconf@gnu.org. ##
+## ------------------------------------ ##
+_ASBOX
+ ) |
+ sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+ no:yes )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5
+echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ (
+ cat <<\_ASBOX
+## ------------------------------------ ##
+## Report this to bug-autoconf@gnu.org. ##
+## ------------------------------------ ##
+_ASBOX
+ ) |
+ sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ eval "$as_ac_Header=$ac_header_preproc"
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6
+
+fi
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+# Checks for typedefs, structures, and compiler characteristics.
+echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5
+echo $ECHO_N "checking for an ANSI C-conforming const... $ECHO_C" >&6
+if test "${ac_cv_c_const+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+/* FIXME: Include the comments suggested by Paul. */
+#ifndef __cplusplus
+ /* Ultrix mips cc rejects this. */
+ typedef int charset[2];
+ const charset x;
+ /* SunOS 4.1.1 cc rejects this. */
+ char const *const *ccp;
+ char **p;
+ /* NEC SVR4.0.2 mips cc rejects this. */
+ struct point {int x, y;};
+ static struct point const zero = {0,0};
+ /* AIX XL C 1.02.0.0 rejects this.
+ It does not let you subtract one const X* pointer from another in
+ an arm of an if-expression whose if-part is not a constant
+ expression */
+ const char *g = "string";
+ ccp = &g + (g ? g-g : 0);
+ /* HPUX 7.0 cc rejects these. */
+ ++ccp;
+ p = (char**) ccp;
+ ccp = (char const *const *) p;
+ { /* SCO 3.2v4 cc rejects this. */
+ char *t;
+ char const *s = 0 ? (char *) 0 : (char const *) 0;
+
+ *t++ = 0;
+ }
+ { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */
+ int x[] = {25, 17};
+ const int *foo = &x[0];
+ ++foo;
+ }
+ { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */
+ typedef const int *iptr;
+ iptr p = 0;
+ ++p;
+ }
+ { /* AIX XL C 1.02.0.0 rejects this saying
+ "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
+ struct s { int j; const int *ap[3]; };
+ struct s *b; b->j = 5;
+ }
+ { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
+ const int foo = 10;
+ }
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_c_const=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_c_const=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_c_const" >&5
+echo "${ECHO_T}$ac_cv_c_const" >&6
+if test $ac_cv_c_const = no; then
+
+cat >>confdefs.h <<\_ACEOF
+#define const
+_ACEOF
+
+fi
+
+echo "$as_me:$LINENO: checking for size_t" >&5
+echo $ECHO_N "checking for size_t... $ECHO_C" >&6
+if test "${ac_cv_type_size_t+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+if ((size_t *) 0)
+ return 0;
+if (sizeof (size_t))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_size_t=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_size_t=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_size_t" >&5
+echo "${ECHO_T}$ac_cv_type_size_t" >&6
+if test $ac_cv_type_size_t = yes; then
+ :
+else
+
+cat >>confdefs.h <<_ACEOF
+#define size_t unsigned
+_ACEOF
+
+fi
+
+
+# Checks for library functions.
+# The Ultrix 4.2 mips builtin alloca declared by alloca.h only works
+# for constant arguments. Useless!
+echo "$as_me:$LINENO: checking for working alloca.h" >&5
+echo $ECHO_N "checking for working alloca.h... $ECHO_C" >&6
+if test "${ac_cv_working_alloca_h+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <alloca.h>
+int
+main ()
+{
+char *p = (char *) alloca (2 * sizeof (int));
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_working_alloca_h=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_working_alloca_h=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_working_alloca_h" >&5
+echo "${ECHO_T}$ac_cv_working_alloca_h" >&6
+if test $ac_cv_working_alloca_h = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ALLOCA_H 1
+_ACEOF
+
+fi
+
+echo "$as_me:$LINENO: checking for alloca" >&5
+echo $ECHO_N "checking for alloca... $ECHO_C" >&6
+if test "${ac_cv_func_alloca_works+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __GNUC__
+# define alloca __builtin_alloca
+#else
+# ifdef _MSC_VER
+# include <malloc.h>
+# define alloca _alloca
+# else
+# if HAVE_ALLOCA_H
+# include <alloca.h>
+# else
+# ifdef _AIX
+ #pragma alloca
+# else
+# ifndef alloca /* predefined by HP cc +Olibcalls */
+char *alloca ();
+# endif
+# endif
+# endif
+# endif
+#endif
+
+int
+main ()
+{
+char *p = (char *) alloca (1);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_func_alloca_works=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_func_alloca_works=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_func_alloca_works" >&5
+echo "${ECHO_T}$ac_cv_func_alloca_works" >&6
+
+if test $ac_cv_func_alloca_works = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ALLOCA 1
+_ACEOF
+
+else
+ # The SVR3 libPW and SVR4 libucb both contain incompatible functions
+# that cause trouble. Some versions do not even contain alloca or
+# contain a buggy version. If you still want to use their alloca,
+# use ar to extract alloca.o from them instead of compiling alloca.c.
+
+ALLOCA=alloca.$ac_objext
+
+cat >>confdefs.h <<\_ACEOF
+#define C_ALLOCA 1
+_ACEOF
+
+
+echo "$as_me:$LINENO: checking whether \`alloca.c' needs Cray hooks" >&5
+echo $ECHO_N "checking whether \`alloca.c' needs Cray hooks... $ECHO_C" >&6
+if test "${ac_cv_os_cray+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#if defined(CRAY) && ! defined(CRAY2)
+webecray
+#else
+wenotbecray
+#endif
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "webecray" >/dev/null 2>&1; then
+ ac_cv_os_cray=yes
+else
+ ac_cv_os_cray=no
+fi
+rm -f conftest*
+
+fi
+echo "$as_me:$LINENO: result: $ac_cv_os_cray" >&5
+echo "${ECHO_T}$ac_cv_os_cray" >&6
+if test $ac_cv_os_cray = yes; then
+ for ac_func in _getb67 GETB67 getb67; do
+ as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_var=no"
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+
+cat >>confdefs.h <<_ACEOF
+#define CRAY_STACKSEG_END $ac_func
+_ACEOF
+
+ break
+fi
+
+ done
+fi
+
+echo "$as_me:$LINENO: checking stack direction for C alloca" >&5
+echo $ECHO_N "checking stack direction for C alloca... $ECHO_C" >&6
+if test "${ac_cv_c_stack_direction+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$cross_compiling" = yes; then
+ ac_cv_c_stack_direction=0
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+int
+find_stack_direction ()
+{
+ static char *addr = 0;
+ auto char dummy;
+ if (addr == 0)
+ {
+ addr = &dummy;
+ return find_stack_direction ();
+ }
+ else
+ return (&dummy > addr) ? 1 : -1;
+}
+
+int
+main ()
+{
+ exit (find_stack_direction () < 0);
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_c_stack_direction=1
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+ac_cv_c_stack_direction=-1
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_c_stack_direction" >&5
+echo "${ECHO_T}$ac_cv_c_stack_direction" >&6
+
+cat >>confdefs.h <<_ACEOF
+#define STACK_DIRECTION $ac_cv_c_stack_direction
+_ACEOF
+
+
+fi
+
+
+for ac_header in stdlib.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6
+else
+ # Is the header compilable?
+echo "$as_me:$LINENO: checking $ac_header usability" >&5
+echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_header_compiler=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_header_compiler=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+echo "${ECHO_T}$ac_header_compiler" >&6
+
+# Is the header present?
+echo "$as_me:$LINENO: checking $ac_header presence" >&5
+echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <$ac_header>
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ ac_header_preproc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+rm -f conftest.err conftest.$ac_ext
+echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+echo "${ECHO_T}$ac_header_preproc" >&6
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc in
+ yes:no )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ (
+ cat <<\_ASBOX
+## ------------------------------------ ##
+## Report this to bug-autoconf@gnu.org. ##
+## ------------------------------------ ##
+_ASBOX
+ ) |
+ sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+ no:yes )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5
+echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ (
+ cat <<\_ASBOX
+## ------------------------------------ ##
+## Report this to bug-autoconf@gnu.org. ##
+## ------------------------------------ ##
+_ASBOX
+ ) |
+ sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ eval "$as_ac_Header=$ac_header_preproc"
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6
+
+fi
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+echo "$as_me:$LINENO: checking for GNU libc compatible malloc" >&5
+echo $ECHO_N "checking for GNU libc compatible malloc... $ECHO_C" >&6
+if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$cross_compiling" = yes; then
+ ac_cv_func_malloc_0_nonnull=no
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#if STDC_HEADERS || HAVE_STDLIB_H
+# include <stdlib.h>
+#else
+char *malloc ();
+#endif
+
+int
+main ()
+{
+exit (malloc (0) ? 0 : 1);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_func_malloc_0_nonnull=yes
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+ac_cv_func_malloc_0_nonnull=no
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_func_malloc_0_nonnull" >&5
+echo "${ECHO_T}$ac_cv_func_malloc_0_nonnull" >&6
+if test $ac_cv_func_malloc_0_nonnull = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_MALLOC 1
+_ACEOF
+
+else
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MALLOC 0
+_ACEOF
+
+ LIBOBJS="$LIBOBJS malloc.$ac_objext"
+
+cat >>confdefs.h <<\_ACEOF
+#define malloc rpl_malloc
+_ACEOF
+
+fi
+
+
+
+
+ ac_config_files="$ac_config_files Makefile"
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, don't put newlines in cache variables' values.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+{
+ (set) 2>&1 |
+ case `(ac_space=' '; set | grep ac_space) 2>&1` in
+ *ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;;
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n \
+ "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p"
+ ;;
+ esac;
+} |
+ sed '
+ t clear
+ : clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ /^ac_cv_env/!s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ : end' >>confcache
+if diff $cache_file confcache >/dev/null 2>&1; then :; else
+ if test -w $cache_file; then
+ test "x$cache_file" != "x/dev/null" && echo "updating cache $cache_file"
+ cat confcache >$cache_file
+ else
+ echo "not updating unwritable cache $cache_file"
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+# VPATH may cause trouble with some makes, so we remove $(srcdir),
+# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=/{
+s/:*\$(srcdir):*/:/;
+s/:*\${srcdir}:*/:/;
+s/:*@srcdir@:*/:/;
+s/^\([^=]*=[ ]*\):*/\1/;
+s/:*$//;
+s/^[^=]*=[ ]*$//;
+}'
+fi
+
+# Transform confdefs.h into DEFS.
+# Protect against shell expansion while executing Makefile rules.
+# Protect against Makefile macro expansion.
+#
+# If the first sed substitution is executed (which looks for macros that
+# take arguments), then we branch to the quote section. Otherwise,
+# look for a macro that doesn't take arguments.
+cat >confdef2opt.sed <<\_ACEOF
+t clear
+: clear
+s,^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\),-D\1=\2,g
+t quote
+s,^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\),-D\1=\2,g
+t quote
+d
+: quote
+s,[ `~#$^&*(){}\\|;'"<>?],\\&,g
+s,\[,\\&,g
+s,\],\\&,g
+s,\$,$$,g
+p
+_ACEOF
+# We use echo to avoid assuming a particular line-breaking character.
+# The extra dot is to prevent the shell from consuming trailing
+# line-breaks from the sub-command output. A line-break within
+# single-quotes doesn't work because, if this script is created in a
+# platform that uses two characters for line-breaks (e.g., DOS), tr
+# would break.
+ac_LF_and_DOT=`echo; echo .`
+DEFS=`sed -n -f confdef2opt.sed confdefs.h | tr "$ac_LF_and_DOT" ' .'`
+rm -f confdef2opt.sed
+
+
+ac_libobjs=
+ac_ltlibobjs=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_i=`echo "$ac_i" |
+ sed 's/\$U\././;s/\.o$//;s/\.obj$//'`
+ # 2. Add them.
+ ac_libobjs="$ac_libobjs $ac_i\$U.$ac_objext"
+ ac_ltlibobjs="$ac_ltlibobjs $ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+
+: ${CONFIG_STATUS=./config.status}
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5
+echo "$as_me: creating $CONFIG_STATUS" >&6;}
+cat >$CONFIG_STATUS <<_ACEOF
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+SHELL=\${CONFIG_SHELL-$SHELL}
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then
+ set -o posix
+fi
+
+# Support unset when possible.
+if (FOO=FOO; unset FOO) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# Work around bugs in pre-3.0 UWIN ksh.
+$as_unset ENV MAIL MAILPATH
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -n "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)$' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; }
+ /^X\/\(\/\/\)$/{ s//\1/; q; }
+ /^X\/\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+
+
+# PATH needs CR, and LINENO needs CR and PATH.
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" || {
+ # Find who we are. Look in the path if we contain no path at all
+ # relative or not.
+ case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+
+ ;;
+ esac
+ # We did not find ourselves, most probably we were run as `sh COMMAND'
+ # in which case we are not to be found in the path.
+ if test "x$as_myself" = x; then
+ as_myself=$0
+ fi
+ if test ! -f "$as_myself"; then
+ { { echo "$as_me:$LINENO: error: cannot find myself; rerun with an absolute path" >&5
+echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ case $CONFIG_SHELL in
+ '')
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for as_base in sh bash ksh sh5; do
+ case $as_dir in
+ /*)
+ if ("$as_dir/$as_base" -c '
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then
+ $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; }
+ $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; }
+ CONFIG_SHELL=$as_dir/$as_base
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$0" ${1+"$@"}
+ fi;;
+ esac
+ done
+done
+;;
+ esac
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line before each line; the second 'sed' does the real
+ # work. The second script uses 'N' to pair each line-number line
+ # with the numbered line, and appends trailing '-' during
+ # substitution so that $LINENO is not a special case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-)
+ sed '=' <$as_myself |
+ sed '
+ N
+ s,$,-,
+ : loop
+ s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3,
+ t loop
+ s,-$,,
+ s,^['$as_cr_digits']*\n,,
+ ' >$as_me.lineno &&
+ chmod +x $as_me.lineno ||
+ { { echo "$as_me:$LINENO: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&5
+echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2;}
+ { (exit 1); exit 1; }; }
+
+  # Don't try to exec as it changes $[0], causing all sorts of problems
+  # (the dirname of $[0] is not the place where we might find the
+  # original and so on.  Autoconf is especially sensitive to this).
+ . ./$as_me.lineno
+ # Exit status is that of the last command.
+ exit
+}
+
+
+case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in
+ *c*,-n*) ECHO_N= ECHO_C='
+' ECHO_T=' ' ;;
+ *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;;
+ *) ECHO_N= ECHO_C='\c' ECHO_T= ;;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+ # We could just check for DJGPP; but this test a) works b) is more generic
+ # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04).
+ if test -f conf$$.exe; then
+ # Don't use ln at all; we don't have any links
+ as_ln_s='cp -p'
+ else
+ as_ln_s='ln -s'
+ fi
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.file
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ as_mkdir_p=false
+fi
+
+as_executable_p="test -f"
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="sed y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="sed y%*+%pp%;s%[^_$as_cr_alnum]%_%g"
+
+
+# IFS
+# We need space, tab and new line, in precisely that order.
+as_nl='
+'
+IFS=" $as_nl"
+
+# CDPATH.
+$as_unset CDPATH
+
+exec 6>&1
+
+# Open the log real soon, to keep \$[0] and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling. Logging --version etc. is OK.
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+} >&5
+cat >&5 <<_CSEOF
+
+This file was extended by FULL-PACKAGE-NAME $as_me VERSION, which was
+generated by GNU Autoconf 2.57. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+_CSEOF
+echo "on `(hostname || uname -n) 2>/dev/null | sed 1q`" >&5
+echo >&5
+_ACEOF
+
+# Files that config.status was made for.
+if test -n "$ac_config_files"; then
+ echo "config_files=\"$ac_config_files\"" >>$CONFIG_STATUS
+fi
+
+if test -n "$ac_config_headers"; then
+ echo "config_headers=\"$ac_config_headers\"" >>$CONFIG_STATUS
+fi
+
+if test -n "$ac_config_links"; then
+ echo "config_links=\"$ac_config_links\"" >>$CONFIG_STATUS
+fi
+
+if test -n "$ac_config_commands"; then
+ echo "config_commands=\"$ac_config_commands\"" >>$CONFIG_STATUS
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+
+ac_cs_usage="\
+\`$as_me' instantiates files from templates according to the
+current configuration.
+
+Usage: $0 [OPTIONS] [FILE]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number, then exit
+ -q, --quiet do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+
+Configuration files:
+$config_files
+
+Report bugs to <bug-autoconf@gnu.org>."
+_ACEOF
+
+cat >>$CONFIG_STATUS <<_ACEOF
+ac_cs_version="\\
+FULL-PACKAGE-NAME config.status VERSION
+configured by $0, generated by GNU Autoconf 2.57,
+ with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\"
+
+Copyright 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001
+Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+srcdir=$srcdir
+INSTALL="$INSTALL"
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+# If no files are specified by the user, then we need to provide default
+# values.  But we need to know whether files were specified by the user.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=*)
+ ac_option=`expr "x$1" : 'x\([^=]*\)='`
+ ac_optarg=`expr "x$1" : 'x[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ -*)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ *) # This is not an option, so the user has probably given explicit
+ # arguments.
+ ac_option=$1
+ ac_need_defaults=false;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --vers* | -V )
+ echo "$ac_cs_version"; exit 0 ;;
+ --he | --h)
+ # Conflict between --help and --header
+ { { echo "$as_me:$LINENO: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&5
+echo "$as_me: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&2;}
+ { (exit 1); exit 1; }; };;
+ --help | --hel | -h )
+ echo "$ac_cs_usage"; exit 0 ;;
+ --debug | --d* | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ CONFIG_FILES="$CONFIG_FILES $ac_optarg"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg"
+ ac_need_defaults=false;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) { { echo "$as_me:$LINENO: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&5
+echo "$as_me: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&2;}
+ { (exit 1); exit 1; }; } ;;
+
+ *) ac_config_targets="$ac_config_targets $1" ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+if \$ac_cs_recheck; then
+ echo "running $SHELL $0 " $ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6
+ exec $SHELL $0 $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+fi
+
+_ACEOF
+
+
+
+
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+for ac_config_target in $ac_config_targets
+do
+ case "$ac_config_target" in
+ # Handling of arguments.
+ "Makefile" ) CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+ *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
+echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason to put it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Create a temporary directory, and hook for its removal unless debugging.
+$debug ||
+{
+ trap 'exit_status=$?; rm -rf $tmp && exit $exit_status' 0
+ trap '{ (exit 1); exit 1; }' 1 2 13 15
+}
+
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d -q "./confstatXXXXXX") 2>/dev/null` &&
+ test -n "$tmp" && test -d "$tmp"
+} ||
+{
+ tmp=./confstat$$-$RANDOM
+ (umask 077 && mkdir $tmp)
+} ||
+{
+ echo "$me: cannot create a temporary directory in ." >&2
+ { (exit 1); exit 1; }
+}
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<_ACEOF
+
+#
+# CONFIG_FILES section.
+#
+
+# No need to generate the scripts if there are no CONFIG_FILES.
+# This happens for instance when ./config.status config.h
+if test -n "\$CONFIG_FILES"; then
+ # Protect against being on the right side of a sed subst in config.status.
+ sed 's/,@/@@/; s/@,/@@/; s/,;t t\$/@;t t/; /@;t t\$/s/[\\\\&,]/\\\\&/g;
+ s/@@/,@/; s/@@/@,/; s/@;t t\$/,;t t/' >\$tmp/subs.sed <<\\CEOF
+s,@SHELL@,$SHELL,;t t
+s,@PATH_SEPARATOR@,$PATH_SEPARATOR,;t t
+s,@PACKAGE_NAME@,$PACKAGE_NAME,;t t
+s,@PACKAGE_TARNAME@,$PACKAGE_TARNAME,;t t
+s,@PACKAGE_VERSION@,$PACKAGE_VERSION,;t t
+s,@PACKAGE_STRING@,$PACKAGE_STRING,;t t
+s,@PACKAGE_BUGREPORT@,$PACKAGE_BUGREPORT,;t t
+s,@exec_prefix@,$exec_prefix,;t t
+s,@prefix@,$prefix,;t t
+s,@program_transform_name@,$program_transform_name,;t t
+s,@bindir@,$bindir,;t t
+s,@sbindir@,$sbindir,;t t
+s,@libexecdir@,$libexecdir,;t t
+s,@datadir@,$datadir,;t t
+s,@sysconfdir@,$sysconfdir,;t t
+s,@sharedstatedir@,$sharedstatedir,;t t
+s,@localstatedir@,$localstatedir,;t t
+s,@libdir@,$libdir,;t t
+s,@includedir@,$includedir,;t t
+s,@oldincludedir@,$oldincludedir,;t t
+s,@infodir@,$infodir,;t t
+s,@mandir@,$mandir,;t t
+s,@build_alias@,$build_alias,;t t
+s,@host_alias@,$host_alias,;t t
+s,@target_alias@,$target_alias,;t t
+s,@DEFS@,$DEFS,;t t
+s,@ECHO_C@,$ECHO_C,;t t
+s,@ECHO_N@,$ECHO_N,;t t
+s,@ECHO_T@,$ECHO_T,;t t
+s,@LIBS@,$LIBS,;t t
+s,@INSTALL_PROGRAM@,$INSTALL_PROGRAM,;t t
+s,@INSTALL_SCRIPT@,$INSTALL_SCRIPT,;t t
+s,@INSTALL_DATA@,$INSTALL_DATA,;t t
+s,@PACKAGE@,$PACKAGE,;t t
+s,@VERSION@,$VERSION,;t t
+s,@ACLOCAL@,$ACLOCAL,;t t
+s,@AUTOCONF@,$AUTOCONF,;t t
+s,@AUTOMAKE@,$AUTOMAKE,;t t
+s,@AUTOHEADER@,$AUTOHEADER,;t t
+s,@MAKEINFO@,$MAKEINFO,;t t
+s,@SET_MAKE@,$SET_MAKE,;t t
+s,@AWK@,$AWK,;t t
+s,@CXX@,$CXX,;t t
+s,@CXXFLAGS@,$CXXFLAGS,;t t
+s,@LDFLAGS@,$LDFLAGS,;t t
+s,@CPPFLAGS@,$CPPFLAGS,;t t
+s,@ac_ct_CXX@,$ac_ct_CXX,;t t
+s,@EXEEXT@,$EXEEXT,;t t
+s,@OBJEXT@,$OBJEXT,;t t
+s,@CC@,$CC,;t t
+s,@CFLAGS@,$CFLAGS,;t t
+s,@ac_ct_CC@,$ac_ct_CC,;t t
+s,@LN_S@,$LN_S,;t t
+s,@CPP@,$CPP,;t t
+s,@EGREP@,$EGREP,;t t
+s,@ALLOCA@,$ALLOCA,;t t
+s,@LIBOBJS@,$LIBOBJS,;t t
+s,@LTLIBOBJS@,$LTLIBOBJS,;t t
+CEOF
+
+_ACEOF
+
+ cat >>$CONFIG_STATUS <<\_ACEOF
+ # Split the substitutions into bite-sized pieces for seds with
+ # small command number limits, like on Digital OSF/1 and HP-UX.
+ ac_max_sed_lines=48
+ ac_sed_frag=1 # Number of current file.
+ ac_beg=1 # First line for current file.
+ ac_end=$ac_max_sed_lines # Line after last line for current file.
+ ac_more_lines=:
+ ac_sed_cmds=
+ while $ac_more_lines; do
+ if test $ac_beg -gt 1; then
+ sed "1,${ac_beg}d; ${ac_end}q" $tmp/subs.sed >$tmp/subs.frag
+ else
+ sed "${ac_end}q" $tmp/subs.sed >$tmp/subs.frag
+ fi
+ if test ! -s $tmp/subs.frag; then
+ ac_more_lines=false
+ else
+ # The purpose of the label and of the branching condition is to
+ # speed up the sed processing (if there are no `@' at all, there
+ # is no need to browse any of the substitutions).
+ # These are the two extra sed commands mentioned above.
+ (echo ':t
+ /@[a-zA-Z_][a-zA-Z_0-9]*@/!b' && cat $tmp/subs.frag) >$tmp/subs-$ac_sed_frag.sed
+ if test -z "$ac_sed_cmds"; then
+ ac_sed_cmds="sed -f $tmp/subs-$ac_sed_frag.sed"
+ else
+ ac_sed_cmds="$ac_sed_cmds | sed -f $tmp/subs-$ac_sed_frag.sed"
+ fi
+ ac_sed_frag=`expr $ac_sed_frag + 1`
+ ac_beg=$ac_end
+ ac_end=`expr $ac_end + $ac_max_sed_lines`
+ fi
+ done
+ if test -z "$ac_sed_cmds"; then
+ ac_sed_cmds=cat
+ fi
+fi # test -n "$CONFIG_FILES"
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+for ac_file in : $CONFIG_FILES; do test "x$ac_file" = x: && continue
+ # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in".
+ case $ac_file in
+ - | *:- | *:-:* ) # input from stdin
+ cat >$tmp/stdin
+ ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
+ ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
+ *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
+ ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
+ * ) ac_file_in=$ac_file.in ;;
+ esac
+
+ # Compute @srcdir@, @top_srcdir@, and @INSTALL@ for subdirectories.
+ ac_dir=`(dirname "$ac_file") 2>/dev/null ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ { if $as_mkdir_p; then
+ mkdir -p "$ac_dir"
+ else
+ as_dir="$ac_dir"
+ as_dirs=
+ while test ! -d "$as_dir"; do
+ as_dirs="$as_dir $as_dirs"
+ as_dir=`(dirname "$as_dir") 2>/dev/null ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ done
+ test ! -n "$as_dirs" || mkdir $as_dirs
+ fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5
+echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;}
+ { (exit 1); exit 1; }; }; }
+
+ ac_builddir=.
+
+if test "$ac_dir" != .; then
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A "../" for each directory in $ac_dir_suffix.
+ ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'`
+else
+ ac_dir_suffix= ac_top_builddir=
+fi
+
+case $srcdir in
+ .) # No --srcdir option. We are building in place.
+ ac_srcdir=.
+ if test -z "$ac_top_builddir"; then
+ ac_top_srcdir=.
+ else
+ ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'`
+ fi ;;
+ [\\/]* | ?:[\\/]* ) # Absolute path.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir ;;
+ *) # Relative path.
+ ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_builddir$srcdir ;;
+esac
+# Don't blindly perform a `cd "$ac_dir"/$ac_foo && pwd` since $ac_foo can be
+# absolute.
+ac_abs_builddir=`cd "$ac_dir" && cd $ac_builddir && pwd`
+ac_abs_top_builddir=`cd "$ac_dir" && cd ${ac_top_builddir}. && pwd`
+ac_abs_srcdir=`cd "$ac_dir" && cd $ac_srcdir && pwd`
+ac_abs_top_srcdir=`cd "$ac_dir" && cd $ac_top_srcdir && pwd`
+
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_builddir$INSTALL ;;
+ esac
+
+ if test x"$ac_file" != x-; then
+ { echo "$as_me:$LINENO: creating $ac_file" >&5
+echo "$as_me: creating $ac_file" >&6;}
+ rm -f "$ac_file"
+ fi
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ if test x"$ac_file" = x-; then
+ configure_input=
+ else
+ configure_input="$ac_file. "
+ fi
+ configure_input=$configure_input"Generated from `echo $ac_file_in |
+ sed 's,.*/,,'` by configure."
+
+ # First look for the input files in the build tree, otherwise in the
+ # src tree.
+ ac_file_inputs=`IFS=:
+ for f in $ac_file_in; do
+ case $f in
+ -) echo $tmp/stdin ;;
+ [\\/$]*)
+ # Absolute (can't be DOS-style, as IFS=:)
+ test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
+echo "$as_me: error: cannot find input file: $f" >&2;}
+ { (exit 1); exit 1; }; }
+ echo $f;;
+ *) # Relative
+ if test -f "$f"; then
+ # Build tree
+ echo $f
+ elif test -f "$srcdir/$f"; then
+ # Source tree
+ echo $srcdir/$f
+ else
+ # /dev/null tree
+ { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
+echo "$as_me: error: cannot find input file: $f" >&2;}
+ { (exit 1); exit 1; }; }
+ fi;;
+ esac
+ done` || { (exit 1); exit 1; }
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+ sed "$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s,@configure_input@,$configure_input,;t t
+s,@srcdir@,$ac_srcdir,;t t
+s,@abs_srcdir@,$ac_abs_srcdir,;t t
+s,@top_srcdir@,$ac_top_srcdir,;t t
+s,@abs_top_srcdir@,$ac_abs_top_srcdir,;t t
+s,@builddir@,$ac_builddir,;t t
+s,@abs_builddir@,$ac_abs_builddir,;t t
+s,@top_builddir@,$ac_top_builddir,;t t
+s,@abs_top_builddir@,$ac_abs_top_builddir,;t t
+s,@INSTALL@,$ac_INSTALL,;t t
+" $ac_file_inputs | (eval "$ac_sed_cmds") >$tmp/out
+ rm -f $tmp/stdin
+ if test x"$ac_file" != x-; then
+ mv $tmp/out $ac_file
+ else
+ cat $tmp/out
+ rm -f $tmp/out
+ fi
+
+done
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+
+{ (exit 0); exit 0; }
+_ACEOF
+chmod +x $CONFIG_STATUS
+ac_clean_files=$ac_clean_files_save
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || { (exit 1); exit 1; }
+fi
+
diff --git a/tools/bison++/configure-stamp b/tools/bison++/configure-stamp
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tools/bison++/configure-stamp
diff --git a/tools/bison++/configure.bat b/tools/bison++/configure.bat
new file mode 100644
index 000000000..f92b00ad5
--- /dev/null
+++ b/tools/bison++/configure.bat
@@ -0,0 +1,28 @@
+@echo off
+echo Configuring bison for go32
+rem This batch file assumes a unix-type "sed" program
+
+echo # Makefile generated by "configure.bat"> Makefile
+echo all.dos : bison >> Makefile
+
+if exist config.sed del config.sed
+
+echo "s/@srcdir@/./g ">> config.sed
+echo "s/@CC@/gcc/g ">> config.sed
+echo "s/@INSTALL@//g ">> config.sed
+echo "s/@INSTALL_PROGRAM@//g ">> config.sed
+echo "s/@INSTALL_DATA@//g ">> config.sed
+echo "s/@DEFS@/-DHAVE_STRERROR/g ">> config.sed
+echo "s/@LIBS@//g ">> config.sed
+echo "s/@ALLOCA@//g ">> config.sed
+
+echo "/^bison[ ]*:/,/-o/ { ">> config.sed
+echo " s/ \$(CC)/ >bison.rf/ ">> config.sed
+echo " /-o/ a\ ">> config.sed
+echo " $(CC) @bison.rf ">> config.sed
+echo "} ">> config.sed
+
+sed -e "s/^\"//" -e "s/\"$//" -e "s/[ ]*$//" config.sed > config2.sed
+sed -f config2.sed Makefile.in >> Makefile
+del config.sed
+del config2.sed
diff --git a/tools/bison++/configure.in b/tools/bison++/configure.in
new file mode 100644
index 000000000..8e0d4c9fc
--- /dev/null
+++ b/tools/bison++/configure.in
@@ -0,0 +1,32 @@
+# Process this file with autoconf to produce a configure script.
+PACKAGE=bison++
+FULL-PACKAGE-NAME=PACKAGE
+VERSION=2.21.5
+BUG-REPORT-ADDRESS=alain.coetmeur@caissedesdepots.fr
+AC_INIT(FULL-PACKAGE-NAME, VERSION, BUG-REPORT-ADDRESS)
+AM_INIT_AUTOMAKE(bison++, 2.21.5)
+AC_CONFIG_SRCDIR([bison.cc])
+#AM_CONFIG_HEADER([config.h])
+
+# Checks for programs.
+AC_PROG_AWK
+AC_PROG_CXX
+AC_PROG_CC
+AC_PROG_INSTALL
+AC_PROG_LN_S
+# Checks for libraries.
+
+# Checks for header files.
+AC_HEADER_STDC
+AC_CHECK_HEADERS([alloca.h malloc.h memory.h stddef.h stdlib.h string.h strings.h])
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_TYPE_SIZE_T
+
+# Checks for library functions.
+AC_FUNC_ALLOCA
+AC_FUNC_MALLOC
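+# AC_FUNC_ALLOCA sets ALLOCA to alloca.$ac_objext when no usable alloca is
+# found, and AC_FUNC_MALLOC redefines malloc to rpl_malloc when malloc(0)
+# returns a null pointer (see the corresponding tests in the generated
+# configure script).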
+
+AC_CONFIG_FILES([Makefile])
+AC_OUTPUT
diff --git a/tools/bison++/conflict.cc b/tools/bison++/conflict.cc
new file mode 100644
index 000000000..a8989b776
--- /dev/null
+++ b/tools/bison++/conflict.cc
@@ -0,0 +1,767 @@
+/* Find and resolve or report look-ahead conflicts for bison,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifdef _AIX
+ #pragma alloca
+#endif
+#include <stdio.h>
+#include "system.h"
+#include "machine.h"
+#include "new.h"
+#include "files.h"
+#include "gram.h"
+#include "state.h"
+
+
+
+#ifdef __GNUC__
+#define alloca __builtin_alloca
+
+#elif defined (HAVE_ALLOCA_H)
+#include <alloca.h>
+
+#elif defined( _AIX)
+
+#elif defined( _MSDOS)
+
+#ifndef alloca
+#include <malloc.h>
+#define alloca _alloca
+#endif /* ndef alloca */
+
+#else /* not msdos */
+char *alloca ();
+
+#endif /* msdos ? */
+
+extern char **tags;
+extern int tokensetsize;
+extern char *consistent;
+extern short *accessing_symbol;
+extern shifts **shift_table;
+extern unsigned *LA;
+extern short *LAruleno;
+extern short *lookaheads;
+extern int verboseflag;
+
+void set_conflicts(int);
+void resolve_sr_conflict(int,int);
+void flush_shift(int,int);
+void log_resolution(int,int,int,char*);
+void total_conflicts();
+void count_sr_conflicts(int);
+void count_rr_conflicts(int);
+
+char any_conflicts;
+char *conflicts;
+errs **err_table;
+int expected_conflicts;
+
+
+static unsigned *shiftset;
+static unsigned *lookaheadset;
+static int src_total;
+static int rrc_total;
+static int src_count;
+static int rrc_count;
+
+
+void
+initialize_conflicts()
+{
+ register int i;
+/* register errs *sp; JF unused */
+
+ conflicts = NEW2(nstates, char);
+ shiftset = NEW2(tokensetsize, unsigned);
+ lookaheadset = NEW2(tokensetsize, unsigned);
+
+ err_table = NEW2(nstates, errs *);
+
+ any_conflicts = 0;
+
+ for (i = 0; i < nstates; i++)
+ set_conflicts(i);
+}
+
+
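+/* Record in conflicts[state] whether any conflict remains in that state;
+   rules that carry a precedence are first handed to resolve_sr_conflict. */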
+void
+set_conflicts(int state)
+{
+ register int i;
+ register int k;
+ register shifts *shiftp;
+ register unsigned *fp2;
+ register unsigned *fp3;
+ register unsigned *fp4;
+ register unsigned *fp1;
+ register int symbol;
+
+ if (consistent[state]) return;
+
+ for (i = 0; i < tokensetsize; i++)
+ lookaheadset[i] = 0;
+
+ shiftp = shift_table[state];
+ if (shiftp)
+ {
+ k = shiftp->nshifts;
+ for (i = 0; i < k; i++)
+ {
+ symbol = accessing_symbol[shiftp->internalShifts[i]];
+ if (ISVAR(symbol)) break;
+ SETBIT(lookaheadset, symbol);
+ }
+ }
+
+ k = lookaheads[state + 1];
+ fp4 = lookaheadset + tokensetsize;
+
+ /* loop over all rules which require lookahead in this state */
+ /* first check for shift-reduce conflict, and try to resolve using precedence */
+
+ for (i = lookaheads[state]; i < k; i++)
+ if (rprec[LAruleno[i]])
+ {
+ fp1 = LA + i * tokensetsize;
+ fp2 = fp1;
+ fp3 = lookaheadset;
+
+ while (fp3 < fp4)
+ {
+ if (*fp2++ & *fp3++)
+ {
+ resolve_sr_conflict(state, i);
+ break;
+ }
+ }
+ }
+
+ /* loop over all rules which require lookahead in this state */
+ /* Check for conflicts not resolved above. */
+
+ for (i = lookaheads[state]; i < k; i++)
+ {
+ fp1 = LA + i * tokensetsize;
+ fp2 = fp1;
+ fp3 = lookaheadset;
+
+ while (fp3 < fp4)
+ {
+ if (*fp2++ & *fp3++)
+ {
+ conflicts[state] = 1;
+ any_conflicts = 1;
+ }
+ }
+
+ fp2 = fp1;
+ fp3 = lookaheadset;
+
+ while (fp3 < fp4)
+ *fp3++ |= *fp2++;
+ }
+}
+
+
+
+/* Attempt to resolve shift-reduce conflict for one rule
+by means of precedence declarations.
+It has already been checked that the rule has a precedence.
+A conflict is resolved by modifying the shift or reduce tables
+so that there is no longer a conflict. */
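+
+/* A hypothetical illustration: if %left '+' is declared before %left '*',
+   the token '*' has the higher precedence, so a conflict between reducing
+   by the rule  exp : exp '+' exp  and shifting '*' is resolved as a shift
+   (sprec['*'] exceeds the rule's precedence redprec); equal precedences
+   fall back to the associativity handling below. */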
+
+void
+resolve_sr_conflict(int state, int lookaheadnum)
+{
+ register int i;
+ register int mask;
+ register unsigned *fp1;
+ register unsigned *fp2;
+ register int redprec;
+ /* Extra parens avoid errors on Ultrix 4.3. */
+ errs *errp = (errs *) alloca ((sizeof(errs) + ntokens * sizeof(short)));
+ short *errtokens = errp->internalErrs;
+
+ /* find the rule to reduce by to get precedence of reduction */
+ redprec = rprec[LAruleno[lookaheadnum]];
+
+ mask = 1;
+ fp1 = LA + lookaheadnum * tokensetsize;
+ fp2 = lookaheadset;
+ for (i = 0; i < ntokens; i++)
+ {
+ if ((mask & *fp2 & *fp1) && sprec[i])
+ /* Shift-reduce conflict occurs for token number i
+ and it has a precedence.
+ The precedence of shifting is that of token i. */
+ {
+ if (sprec[i] < redprec)
+ {
+ if (verboseflag) log_resolution(state, lookaheadnum, i, "reduce");
+ *fp2 &= ~mask; /* flush the shift for this token */
+ flush_shift(state, i);
+ }
+ else if (sprec[i] > redprec)
+ {
+ if (verboseflag) log_resolution(state, lookaheadnum, i, "shift");
+ *fp1 &= ~mask; /* flush the reduce for this token */
+ }
+ else
+ {
+ /* Matching precedence levels.
+ For left association, keep only the reduction.
+ For right association, keep only the shift.
+ For nonassociation, keep neither. */
+
+ switch (sassoc[i])
+ {
+
+ case RIGHT_ASSOC:
+ if (verboseflag) log_resolution(state, lookaheadnum, i, "shift");
+ break;
+
+ case LEFT_ASSOC:
+ if (verboseflag) log_resolution(state, lookaheadnum, i, "reduce");
+ break;
+
+ case NON_ASSOC:
+ if (verboseflag) log_resolution(state, lookaheadnum, i, "an error");
+ break;
+ }
+
+ if (sassoc[i] != RIGHT_ASSOC)
+ {
+ *fp2 &= ~mask; /* flush the shift for this token */
+ flush_shift(state, i);
+ }
+ if (sassoc[i] != LEFT_ASSOC)
+ {
+ *fp1 &= ~mask; /* flush the reduce for this token */
+ }
+ if (sassoc[i] == NON_ASSOC)
+ {
+ /* Record an explicit error for this token. */
+ *errtokens++ = i;
+ }
+ }
+ }
+
+ mask <<= 1;
+ if (mask == 0)
+ {
+ mask = 1;
+ fp2++; fp1++;
+ }
+ }
+ errp->nerrs = errtokens - errp->internalErrs;
+ if (errp->nerrs)
+ {
+ /* Some tokens have been explicitly made errors. Allocate
+ a permanent errs structure for this state, to record them. */
+ i = (char *) errtokens - (char *) errp;
+ err_table[state] = (errs *) xmalloc ((unsigned int)i);
+ bcopy (errp, err_table[state], i);
+ }
+ else
+ err_table[state] = 0;
+}
+
+
+
+/* turn off the shift recorded for the specified token in the specified state.
+Used when we resolve a shift-reduce conflict in favor of the reduction. */
+
+void
+flush_shift(int state, int token)
+{
+ register shifts *shiftp;
+ register int k, i;
+/* register unsigned symbol; JF unused */
+
+ shiftp = shift_table[state];
+
+ if (shiftp)
+ {
+ k = shiftp->nshifts;
+ for (i = 0; i < k; i++)
+ {
+ if (shiftp->internalShifts[i] && token == accessing_symbol[shiftp->internalShifts[i]])
+ (shiftp->internalShifts[i]) = 0;
+ }
+ }
+}
+
+
+void
+log_resolution(int state, int LAno, int token, char* resolution)
+{
+ fprintf(foutput,
+ "Conflict in state %d between rule %d and token %s resolved as %s.\n",
+ state, LAruleno[LAno], tags[token], resolution);
+}
+
+
+void
+conflict_log()
+{
+ register int i;
+
+ src_total = 0;
+ rrc_total = 0;
+
+ for (i = 0; i < nstates; i++)
+ {
+ if (conflicts[i])
+ {
+ count_sr_conflicts(i);
+ count_rr_conflicts(i);
+ src_total += src_count;
+ rrc_total += rrc_count;
+ }
+ }
+
+ total_conflicts();
+}
+
+
+void
+verbose_conflict_log()
+{
+ register int i;
+
+ src_total = 0;
+ rrc_total = 0;
+
+ for (i = 0; i < nstates; i++)
+ {
+ if (conflicts[i])
+ {
+ count_sr_conflicts(i);
+ count_rr_conflicts(i);
+ src_total += src_count;
+ rrc_total += rrc_count;
+
+ fprintf(foutput, "State %d contains", i);
+
+ if (src_count == 1)
+ fprintf(foutput, " 1 shift/reduce conflict");
+ else if (src_count > 1)
+ fprintf(foutput, " %d shift/reduce conflicts", src_count);
+
+ if (src_count > 0 && rrc_count > 0)
+ fprintf(foutput, " and");
+
+ if (rrc_count == 1)
+ fprintf(foutput, " 1 reduce/reduce conflict");
+ else if (rrc_count > 1)
+ fprintf(foutput, " %d reduce/reduce conflicts", rrc_count);
+
+ putc('.', foutput);
+ putc('\n', foutput);
+ }
+ }
+
+ total_conflicts();
+}
+
+
+void
+total_conflicts()
+{
+ extern int fixed_outfiles;
+
+ if (src_total == expected_conflicts && rrc_total == 0)
+ return;
+
+ if (fixed_outfiles)
+ {
+ /* If invoked under the name `yacc', use the output format
+ specified by POSIX. */
+ fprintf(stderr, "conflicts: ");
+ if (src_total > 0)
+ fprintf(stderr, " %d shift/reduce", src_total);
+ if (src_total > 0 && rrc_total > 0)
+ fprintf(stderr, ",");
+ if (rrc_total > 0)
+ fprintf(stderr, " %d reduce/reduce", rrc_total);
+ putc('\n', stderr);
+ }
+ else
+ {
+ fprintf(stderr, "%s contains", infile);
+
+ if (src_total == 1)
+ fprintf(stderr, " 1 shift/reduce conflict");
+ else if (src_total > 1)
+ fprintf(stderr, " %d shift/reduce conflicts", src_total);
+
+ if (src_total > 0 && rrc_total > 0)
+ fprintf(stderr, " and");
+
+ if (rrc_total == 1)
+ fprintf(stderr, " 1 reduce/reduce conflict");
+ else if (rrc_total > 1)
+ fprintf(stderr, " %d reduce/reduce conflicts", rrc_total);
+
+ putc('.', stderr);
+ putc('\n', stderr);
+ }
+}
+
+
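+/* Count, in src_count, the tokens on which the given state can both
+   shift and reduce, i.e. its shift/reduce conflicts. */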
+void
+count_sr_conflicts(int state)
+{
+ register int i;
+ register int k;
+ register int mask;
+ register shifts *shiftp;
+ register unsigned *fp1;
+ register unsigned *fp2;
+ register unsigned *fp3;
+ register int symbol;
+
+ src_count = 0;
+
+ shiftp = shift_table[state];
+ if (!shiftp) return;
+
+ for (i = 0; i < tokensetsize; i++)
+ {
+ shiftset[i] = 0;
+ lookaheadset[i] = 0;
+ }
+
+ k = shiftp->nshifts;
+ for (i = 0; i < k; i++)
+ {
+ if (! shiftp->internalShifts[i]) continue;
+ symbol = accessing_symbol[shiftp->internalShifts[i]];
+ if (ISVAR(symbol)) break;
+ SETBIT(shiftset, symbol);
+ }
+
+ k = lookaheads[state + 1];
+ fp3 = lookaheadset + tokensetsize;
+
+ for (i = lookaheads[state]; i < k; i++)
+ {
+ fp1 = LA + i * tokensetsize;
+ fp2 = lookaheadset;
+
+ while (fp2 < fp3)
+ *fp2++ |= *fp1++;
+ }
+
+ fp1 = shiftset;
+ fp2 = lookaheadset;
+
+ while (fp2 < fp3)
+ *fp2++ &= *fp1++;
+
+ mask = 1;
+ fp2 = lookaheadset;
+ for (i = 0; i < ntokens; i++)
+ {
+ if (mask & *fp2)
+ src_count++;
+
+ mask <<= 1;
+ if (mask == 0)
+ {
+ mask = 1;
+ fp2++;
+ }
+ }
+}
+
+
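+/* Count, in rrc_count, the tokens for which two or more of the given
+   state's reductions compete, i.e. its reduce/reduce conflicts. */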
+void
+count_rr_conflicts(int state)
+{
+ register int i;
+ register int j;
+ register int count;
+ register unsigned mask;
+ register unsigned *baseword;
+ register unsigned *wordp;
+ register int m;
+ register int n;
+
+ rrc_count = 0;
+
+ m = lookaheads[state];
+ n = lookaheads[state + 1];
+
+ if (n - m < 2) return;
+
+ mask = 1;
+ baseword = LA + m * tokensetsize;
+ for (i = 0; i < ntokens; i++)
+ {
+ wordp = baseword;
+
+ count = 0;
+ for (j = m; j < n; j++)
+ {
+ if (mask & *wordp)
+ count++;
+
+ wordp += tokensetsize;
+ }
+
+ if (count >= 2) rrc_count++;
+
+ mask <<= 1;
+ if (mask == 0)
+ {
+ mask = 1;
+ baseword++;
+ }
+ }
+}
+
+
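+/* Describe the reduction actions of the given state in the verbose output
+   file: choose a default rule where possible and show, in brackets, the
+   reductions that lose a conflict. */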
+void
+print_reductions(int state)
+{
+ register int i;
+ register int j;
+ register int k;
+ register unsigned *fp1;
+ register unsigned *fp2;
+ register unsigned *fp3;
+ register unsigned *fp4;
+ register int rule;
+ register int symbol;
+ register unsigned mask;
+ register int m;
+ register int n;
+ register int default_LA;
+ register int default_rule;
+ register int cmax;
+ register int count;
+ register shifts *shiftp;
+ register errs *errp;
+ int nodefault = 0;
+
+ for (i = 0; i < tokensetsize; i++)
+ shiftset[i] = 0;
+
+ shiftp = shift_table[state];
+ if (shiftp)
+ {
+ k = shiftp->nshifts;
+ for (i = 0; i < k; i++)
+ {
+ if (! shiftp->internalShifts[i]) continue;
+ symbol = accessing_symbol[shiftp->internalShifts[i]];
+ if (ISVAR(symbol)) break;
+ /* if this state has a shift for the error token,
+ don't use a default rule. */
+ if (symbol == error_token_number) nodefault = 1;
+ SETBIT(shiftset, symbol);
+ }
+ }
+
+ errp = err_table[state];
+ if (errp)
+ {
+ k = errp->nerrs;
+ for (i = 0; i < k; i++)
+ {
+ if (! errp->internalErrs[i]) continue;
+ symbol = errp->internalErrs[i];
+ SETBIT(shiftset, symbol);
+ }
+ }
+
+ m = lookaheads[state];
+ n = lookaheads[state + 1];
+
+ if (n - m == 1 && ! nodefault)
+ {
+ default_rule = LAruleno[m];
+
+ fp1 = LA + m * tokensetsize;
+ fp2 = shiftset;
+ fp3 = lookaheadset;
+ fp4 = lookaheadset + tokensetsize;
+
+ while (fp3 < fp4)
+ *fp3++ = *fp1++ & *fp2++;
+
+ mask = 1;
+ fp3 = lookaheadset;
+
+ for (i = 0; i < ntokens; i++)
+ {
+ if (mask & *fp3)
+ fprintf(foutput, " %-4s\t[reduce using rule %d (%s)]\n",
+ tags[i], default_rule, tags[rlhs[default_rule]]);
+
+ mask <<= 1;
+ if (mask == 0)
+ {
+ mask = 1;
+ fp3++;
+ }
+ }
+
+ fprintf(foutput, " $default\treduce using rule %d (%s)\n\n",
+ default_rule, tags[rlhs[default_rule]]);
+ }
+ else if (n - m >= 1)
+ {
+ cmax = 0;
+ default_LA = -1;
+ fp4 = lookaheadset + tokensetsize;
+
+ if (! nodefault)
+ for (i = m; i < n; i++)
+ {
+ fp1 = LA + i * tokensetsize;
+ fp2 = shiftset;
+ fp3 = lookaheadset;
+
+ while (fp3 < fp4)
+ *fp3++ = *fp1++ & ( ~ (*fp2++));
+
+ count = 0;
+ mask = 1;
+ fp3 = lookaheadset;
+ for (j = 0; j < ntokens; j++)
+ {
+ if (mask & *fp3)
+ count++;
+
+ mask <<= 1;
+ if (mask == 0)
+ {
+ mask = 1;
+ fp3++;
+ }
+ }
+
+ if (count > cmax)
+ {
+ cmax = count;
+ default_LA = i;
+ default_rule = LAruleno[i];
+ }
+
+ fp2 = shiftset;
+ fp3 = lookaheadset;
+
+ while (fp3 < fp4)
+ *fp2++ |= *fp3++;
+ }
+
+ for (i = 0; i < tokensetsize; i++)
+ shiftset[i] = 0;
+
+ if (shiftp)
+ {
+ k = shiftp->nshifts;
+ for (i = 0; i < k; i++)
+ {
+ if (! shiftp->internalShifts[i]) continue;
+ symbol = accessing_symbol[shiftp->internalShifts[i]];
+ if (ISVAR(symbol)) break;
+ SETBIT(shiftset, symbol);
+ }
+ }
+
+ mask = 1;
+ fp1 = LA + m * tokensetsize;
+ fp2 = shiftset;
+ for (i = 0; i < ntokens; i++)
+ {
+ int defaulted = 0;
+
+ if (mask & *fp2)
+ count = 1;
+ else
+ count = 0;
+
+ fp3 = fp1;
+ for (j = m; j < n; j++)
+ {
+ if (mask & *fp3)
+ {
+ if (count == 0)
+ {
+ if (j != default_LA)
+ {
+ rule = LAruleno[j];
+ fprintf(foutput, " %-4s\treduce using rule %d (%s)\n",
+ tags[i], rule, tags[rlhs[rule]]);
+ }
+ else defaulted = 1;
+
+ count++;
+ }
+ else
+ {
+ if (defaulted)
+ {
+ rule = LAruleno[default_LA];
+ fprintf(foutput, " %-4s\treduce using rule %d (%s)\n",
+ tags[i], rule, tags[rlhs[rule]]);
+ defaulted = 0;
+ }
+ rule = LAruleno[j];
+ fprintf(foutput, " %-4s\t[reduce using rule %d (%s)]\n",
+ tags[i], rule, tags[rlhs[rule]]);
+ }
+ }
+
+ fp3 += tokensetsize;
+ }
+
+ mask <<= 1;
+ if (mask == 0)
+ {
+ mask = 1;
+ /* This used to be fp1, but I think fp2 is right
+ because fp2 is where the words are fetched to test with mask
+ in this loop. */
+ fp2++;
+ }
+ }
+
+ if (default_LA >= 0)
+ {
+ fprintf(foutput, " $default\treduce using rule %d (%s)\n",
+ default_rule, tags[rlhs[default_rule]]);
+ }
+
+ putc('\n', foutput);
+ }
+}
+
+
+void
+finalize_conflicts()
+{
+ FREE(conflicts);
+ FREE(shiftset);
+ FREE(lookaheadset);
+}
diff --git a/tools/bison++/derives.cc b/tools/bison++/derives.cc
new file mode 100644
index 000000000..f7dfaf736
--- /dev/null
+++ b/tools/bison++/derives.cc
@@ -0,0 +1,118 @@
+/* Match rules with nonterminals for bison,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* set_derives finds, for each variable (nonterminal), which rules can derive it.
+ It sets up the value of derives so that
+ derives[i - ntokens] points to a vector of rule numbers,
+ terminated with -1. */
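+
+/* For example (a hypothetical grammar): if rules 2 and 5 are the only rules
+   whose left-hand side is the nonterminal exp, then derives[exp - ntokens]
+   points to the vector { 2, 5, -1 }. */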
+
+#include <stdio.h>
+#include "system.h"
+#include "new.h"
+#include "types.h"
+#include "gram.h"
+
+
+short **derives;
+
+void
+set_derives()
+{
+ register int i;
+ register int lhs;
+ register shorts *p;
+ register short *q;
+ register shorts **dset;
+ register shorts *delts;
+
+ dset = NEW2(nvars, shorts *) - ntokens;
+ delts = NEW2(nrules + 1, shorts);
+
+ p = delts;
+ for (i = nrules; i > 0; i--)
+ {
+ lhs = rlhs[i];
+ if (lhs >= 0)
+ {
+ p->next = dset[lhs];
+ p->value = i;
+ dset[lhs] = p;
+ p++;
+ }
+ }
+
+ derives = NEW2(nvars, short *) - ntokens;
+ q = NEW2(nvars + nrules, short);
+
+ for (i = ntokens; i < nsyms; i++)
+ {
+ derives[i] = q;
+ p = dset[i];
+ while (p)
+ {
+ *q++ = p->value;
+ p = p->next;
+ }
+ *q++ = -1;
+ }
+
+#ifdef DEBUG
+ print_derives();
+#endif
+
+ FREE(dset + ntokens);
+ FREE(delts);
+}
+
+void
+free_derives()
+{
+ FREE(derives[ntokens]);
+ FREE(derives + ntokens);
+}
+
+
+
+#ifdef DEBUG
+
+print_derives()
+{
+ register int i;
+ register short *sp;
+
+ extern char **tags;
+
+ printf("\n\n\nDERIVES\n\n");
+
+ for (i = ntokens; i < nsyms; i++)
+ {
+ printf("%s derives", tags[i]);
+ for (sp = derives[i]; *sp > 0; sp++)
+ {
+ printf(" %d", *sp);
+ }
+ putchar('\n');
+ }
+
+ putchar('\n');
+}
+
+#endif
+
diff --git a/tools/bison++/files.cc b/tools/bison++/files.cc
new file mode 100644
index 000000000..04b731519
--- /dev/null
+++ b/tools/bison++/files.cc
@@ -0,0 +1,403 @@
+/* Open and close files for bison,
+ Copyright (C) 1984, 1986, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#ifndef XPFILE
+ #define XPFILE "bison.cc"
+#endif
+#ifndef XPFILE1
+ #define XPFILE1 "bison.hairy"
+#endif
+#ifndef XHFILE
+ #define XHFILE "bison.h"
+#endif
+
+#include <stdio.h>
+#include "system.h"
+#include "files.h"
+#include "new.h"
+#include "gram.h"
+
+FILE *finput = NULL;
+FILE *foutput = NULL;
+FILE *fdefines = NULL;
+FILE *ftable = NULL;
+FILE *fattrs = NULL;
+FILE *fguard = NULL;
+FILE *faction = NULL;
+FILE *fparser = NULL;
+FILE *fbisoncomp = NULL; /* outputs YY_USE_CLASS defs (i.e. bison or bison++ output) */
+
+extern bool bison_compability;
+/* File name specified with -o for the output file, or 0 if no -o. */
+char *spec_outfile;
+
+char *infile;
+char *outfile;
+char *defsfile;
+char *tabfile;
+char *attrsfile;
+char *guardfile;
+char *tmpattrsfile;
+char *tmptabfile;
+char *tmpdefsfile;
+char *tmpbisoncompfile;
+/* AC added */
+char *hskelfile=NULL;
+char *cparserfile=NULL;
+FILE *fhskel=NULL;
+char *parser_name="parse";
+int parser_defined=0;
+int line_fparser=1;
+int line_fhskel=1;
+char *parser_fname="bison.cc";
+char *hskel_fname="bison.h";
+char *header_name=NULL;
+/* AC added end*/
+
+
+
+extern char *mktemp(); /* So the compiler won't complain */
+extern char *getenv();
+extern void perror();
+FILE *tryopen(char*,char*); /* This might be a good idea */
+extern void done(int);
+
+extern char *program_name;
+extern int verboseflag;
+extern int definesflag;
+int fixed_outfiles = 0;
+
+static char *c_suffixes[]=
+ {".tab.c",".tab.cc",".tab.cpp",".tab.cxx",".tab.C",
+ ".c",".cc",".cpp",".cxx",".C",".CPP",".CXX",".CC",(char *)0};
+
+
+
+char*
+stringappend(char* string1, int end1, char* string2)
+{
+ register char *ostring;
+ register char *cp, *cp1;
+ register int i;
+
+ cp = string2; i = 0;
+ while (*cp++) i++;
+
+ ostring = NEW2(i+end1+1, char);
+
+ cp = ostring;
+ cp1 = string1;
+ for (i = 0; i < end1; i++)
+ *cp++ = *cp1++;
+
+ cp1 = string2;
+ while (*cp++ = *cp1++) ;
+
+ return ostring;
+}
+
+
+/* JF this has been hacked to death.  Nowadays it sets up the file names for
+   the output files, and opens the tmp files and the parser. */
+// Cleaned up under the bison_compability run.
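+/* Illustrative note, derived from the code below rather than from the
+   original comments: given `bison++ -d -v parser.y' with no -o or -b,
+   the names come out as
+       tabfile   = "parser.tab.c"      defsfile  = "parser.tab.h"
+       outfile   = "parser.output"     attrsfile = "parser.stype.h"
+       guardfile = "parser.guard.c"
+   -o replaces tabfile verbatim, and -b replaces the "parser" prefix. */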
+void
+openfiles()
+{
+ char *name_base;
+ register char *cp;
+ char *filename;
+ int base_length;
+ int short_base_length;
+
+ char *tmp_base = "/tmp/b.";
+ int tmp_len;
+
+ tmp_len = strlen (tmp_base);
+
+ if (spec_outfile)
+ {
+ /* -o was specified. The precise -o name will be used for ftable.
+ For other output files, remove the ".c" or ".tab.c" suffix. */
+ name_base = spec_outfile;
+ base_length = strlen (name_base);
+ /* SHORT_BASE_LENGTH includes neither ".tab" nor ".c". */
+ char **suffix;
+ for(suffix=c_suffixes;*suffix;suffix++)
+ /* try to detect .c .cpp .tab.c ... options */
+ {
+ if(strlen(name_base)>strlen(*suffix)
+ && strcmp(name_base+base_length-strlen(*suffix),*suffix)==0)
+ {
+ base_length -= strlen(*suffix);
+ break;
+ }
+ };
+ short_base_length=base_length;
+ }
+ else if (spec_file_prefix)
+ {
+ /* -b was specified. Construct names from it. */
+ /* SHORT_BASE_LENGTH includes neither ".tab" nor ".c". */
+ short_base_length = strlen (spec_file_prefix);
+ /* Count room for `.tab'. */
+ base_length = short_base_length + 4;
+ name_base = (char *) xmalloc (base_length + 1);
+ /* Append `.tab'. */
+ strcpy (name_base, spec_file_prefix);
+ strcat (name_base, ".tab");
+ }
+ else
+ {
+ /* -o was not specified; compute output file name from input
+ or use y.tab.c, etc., if -y was specified. */
+
+ if(fixed_outfiles)
+ {
+ name_base = (char*) malloc(sizeof(char)*10);
+ strcpy(name_base,"y.y");
+ }
+ else
+ name_base = infile;
+
+ /* BASE_LENGTH gets length of NAME_BASE, sans ".y" suffix if any. */
+
+ base_length = strlen (name_base);
+ if (!strcmp (name_base + base_length - 2, ".y"))
+ base_length -= 2;
+ short_base_length = base_length;
+
+ name_base = stringappend(name_base, short_base_length, ".tab");
+ base_length = short_base_length + 4;
+ }
+
+ finput = tryopen(infile, "r");
+
+ filename=cparserfile;
+ if(filename==NULL)
+ filename = getenv("BISON_SIMPLE");
+ {
+ if(filename)
+ {
+ parser_fname=(char *)xmalloc(strlen(filename)+1);
+ strcpy(parser_fname,filename);
+ }
+ else
+ {
+ parser_fname=(char *)xmalloc(strlen(PFILE)+1);
+ strcpy(parser_fname,PFILE);
+ }
+ }
+ fparser = tryopen(parser_fname, "r");
+
+ filename=hskelfile;
+ if(filename==NULL)
+ filename = getenv("BISON_SIMPLE_H");
+ {
+ if(filename)
+ {
+ hskel_fname=(char *)xmalloc(strlen(filename)+1);
+ strcpy(hskel_fname,filename);
+ }
+ else
+ {
+ hskel_fname=(char *)xmalloc(strlen(HFILE)+1);
+ strcpy(hskel_fname,HFILE);
+ }
+ }
+
+ fhskel = tryopen(hskel_fname, "r");
+
+ if (verboseflag)
+ {
+ if (spec_name_prefix)
+ outfile = stringappend(name_base, short_base_length, ".out");
+ else
+ outfile = stringappend(name_base, short_base_length, ".output");
+ foutput = tryopen(outfile, "w");
+ }
+
+ faction = tmpfile();
+ fattrs = tmpfile();
+ ftable = tmpfile();
+ fbisoncomp = tmpfile();
+
+ if (definesflag)
+ { if(header_name)
+ defsfile=header_name;
+ else
+ defsfile = stringappend(name_base, base_length, ".h");
+
+ fdefines = tmpfile();
+
+ }
+
+
+ /* These are opened by `done' or `open_extra_files', if at all */
+ if (spec_outfile)
+ tabfile = spec_outfile;
+ else
+ tabfile = stringappend(name_base, base_length, ".c");
+
+ attrsfile = stringappend(name_base, short_base_length, ".stype.h");
+ guardfile = stringappend(name_base, short_base_length, ".guard.c");
+}
+
+
+
+/* open the output files needed only for the semantic parser.
+This is done when %semantic_parser is seen in the declarations section. */
+
+void
+open_extra_files()
+{
+ FILE *ftmp;
+ int c;
+ char *filename, *cp;
+
+ fclose(fparser);
+ filename=cparserfile;
+ if(filename==NULL)  /* no skeleton given on the command line: fall back to the environment */
+   filename = (char *) getenv ("BISON_HAIRY");
+#ifdef _MSDOS
+ /* File doesn't exist in current directory; try in INIT directory. */
+ cp = getenv("INIT");
+ if (filename == 0 && cp != NULL)
+ {FILE *tst;
+ filename = (char *)xmalloc(strlen(cp) + strlen(PFILE1) + 2);
+ strcpy(filename, PFILE1);
+ if((tst=fopen(filename,"r"))!=NULL)
+ {fclose(tst);}
+ else
+ {
+ strcpy(filename, cp);
+ cp = filename + strlen(filename);
+ *cp++ = '/';
+ strcpy(cp, PFILE1);
+ }
+
+ }
+#endif /* MSDOS */
+ {
+
+ if(filename)
+ {
+ parser_fname=(char *)xmalloc(strlen(filename)+1);
+ strcpy(parser_fname,filename);
+ }
+ else
+ {
+ parser_fname=(char *)xmalloc(strlen(PFILE1)+1);
+ strcpy(parser_fname,PFILE1);
+ }
+ }
+ fparser = tryopen(parser_fname, "r");
+
+
+ /* JF change from inline attrs file to separate one */
+ ftmp = tryopen(attrsfile, "w");
+ rewind(fattrs);
+ while((c=getc(fattrs))!=EOF) /* Thank god for buffering */
+ putc(c,ftmp);
+ fclose(fattrs);
+ fattrs=ftmp;
+
+ fguard = tryopen(guardfile, "w");
+
+}
+
+ /* JF to make file opening easier. This func tries to open file
+ NAME with mode MODE, and prints an error message if it fails. */
+FILE *
+tryopen(char* name, char* mode)
+{
+ FILE *ptr;
+
+ ptr = fopen(name, mode);
+ if (ptr == NULL)
+ {
+ fprintf(stderr, "%s: ", program_name);
+ perror(name);
+ done(2);
+ }
+ return ptr;
+}
+
+void
+done(int k)
+{
+ if (faction)
+ fclose(faction);
+
+ if (fattrs)
+ fclose(fattrs);
+
+ if (fguard)
+ fclose(fguard);
+
+ if (finput)
+ fclose(finput);
+
+ if (fparser)
+ fclose(fparser);
+
+ if (foutput)
+ fclose(foutput);
+
+ /* JF write out the output file */
+ if (k == 0 && ftable)
+ {
+ FILE *ftmp;
+ register int c;
+
+ ftmp=tryopen(tabfile, "w");
+/* pre-define the include guard so the generated .tab.h definitions are not read twice */
+ fprintf(ftmp,"#define YY_%s_h_included\n",parser_name);
+ if(bison_compability==false)
+ fprintf(ftmp,"#define YY_USE_CLASS\n");
+ else
+ fprintf(ftmp,"/*#define YY_USE_CLASS \n*/");
+
+ rewind(ftable);
+ while((c=getc(ftable)) != EOF)
+ putc(c,ftmp);
+ fclose(ftmp);
+ fclose(ftable);
+
+
+ if (definesflag)
+ {
+ ftmp = tryopen(defsfile, "w");
+ fprintf(ftmp,"#ifndef YY_%s_h_included\n",parser_name);
+ fprintf(ftmp,"#define YY_%s_h_included\n",parser_name);
+ if(bison_compability==false)
+ fprintf(ftmp,"#define YY_USE_CLASS\n");
+ else
+ fprintf(ftmp,"/*#define YY_USE_CLASS \n*/");
+ fflush(fdefines);
+ rewind(fdefines);
+ while((c=getc(fdefines)) != EOF)
+ putc(c,ftmp);
+ fclose(fdefines);
+ fprintf(ftmp,"#endif\n");
+ fclose(ftmp);
+ }
+ }
+
+}
diff --git a/tools/bison++/files.h b/tools/bison++/files.h
new file mode 100644
index 000000000..522818e4f
--- /dev/null
+++ b/tools/bison++/files.h
@@ -0,0 +1,66 @@
+/* File names and variables for bison,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+
+/* These two should be pathnames for opening the sample parser files.
+ When bison is installed, they should be absolute pathnames.
+ XPFILE1 and XPFILE2 normally come from the Makefile. */
+
+#define PFILE XPFILE /* Simple parser */
+#define PFILE1 XPFILE1 /* Semantic parser */
+
+extern FILE *finput; /* read grammar specifications */
+extern FILE *foutput; /* optionally output messages describing the actions taken */
+extern FILE *fdefines; /* optionally output #define's for token numbers. */
+extern FILE *ftable; /* output the tables and the parser */
+extern FILE *fattrs; /* if semantic parser, output a .h file that defines YYSTYPE */
+ /* and also contains all the %{ ... %} definitions. */
+extern FILE *fguard; /* if semantic parser, output yyguard, containing all the guard code */
+extern FILE *faction; /* output all the action code; precise form depends on which parser */
+extern FILE *fparser; /* read the parser to copy into ftable */
+extern FILE *fbisoncomp; /* outputs YY_USE_CLASS defs (i.e. bison or bison++ output) */
+
+/* File name specified with -o for the output file, or 0 if no -o. */
+extern char *spec_outfile;
+
+extern char *spec_name_prefix; /* for -p, from getargs.cc */
+
+/* File name pfx specified with -b, or 0 if no -b. */
+extern char *spec_file_prefix;
+
+extern char *infile;
+extern char *outfile;
+extern char *defsfile;
+extern char *tabfile;
+extern char *attrsfile;
+extern char *guardfile;
+extern char *actfile;
+
+/* AC addings */
+#define HFILE XHFILE /* header Skeleton */
+extern char *hskelfile; /* -H option : header skeleton filename */
+extern char *cparserfile; /* -S option : parser skeleton filename */
+extern FILE *fhskel;
+extern char *parser_name;
+extern int parser_defined;
+extern int yylsp_needed;
+char *quoted_filename(char*); /* quote filename, especially on DOS */
+
+/* AC added end*/
diff --git a/tools/bison++/getargs.cc b/tools/bison++/getargs.cc
new file mode 100644
index 000000000..ea72c28b0
--- /dev/null
+++ b/tools/bison++/getargs.cc
@@ -0,0 +1,161 @@
+/* Parse command line arguments for bison,
+ Copyright (C) 1984, 1986, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include <stdio.h>
+#include "getopt.h"
+#include "system.h"
+#include "files.h"
+
+int verboseflag;
+int definesflag;
+int debugflag;
+int nolinesflag;
+char *spec_name_prefix; /* for -p. */
+char *spec_file_prefix; /* for -b. */
+extern int fixed_outfiles;/* for -y */
+extern char *header_name;
+extern char *program_name;
+extern char *version_string;
+
+extern void fatal();
+
+struct option longopts[] =
+{
+ {"debug", 0, &debugflag, 1},
+ {"defines", 0, &definesflag, 1},
+ {"file-prefix", 1, 0, 'b'},
+ {"fixed-output-files", 0, &fixed_outfiles, 1},
+  {"name-prefix", 1, 0, 'p'},   /* handled by the 'p' case in getargs() below */
+ {"no-lines", 0, &nolinesflag, 1},
+ {"output-file", 1, 0, 'o'},
+ {"output", 1, 0, 'o'},
+ {"verbose", 0, &verboseflag, 1},
+ {"version", 0, 0, 'V'},
+ {"yacc", 0, &fixed_outfiles, 1},
+ {"skeleton", 1, 0, 'S'},
+ {"headerskeleton", 1, 0, 'H'},
+ {"header-file", 1, 0, 'h'},
+ {"help", 0, 0, 'u'},
+ {"usage", 0, 0, 'u'},
+ {0, 0, 0, 0}
+};
+
+
+void usage (FILE* stream)
+{
+  /* Write to the requested stream: stdout for --help/--usage, stderr for errors. */
+  fprintf (stream, "\
+Usage: %s [-dltvyVu] [-b file-prefix] [-p name-prefix]\n\
+ [-o outfile] [-h headerfile]\n\
+ [-S skeleton] [-H header-skeleton]\n\
+ [--debug] [--defines] [--fixed-output-files] [--no-lines]\n\
+ [--verbose] [--version] [--yacc] [--usage] [--help]\n\
+ [--file-prefix=prefix] [--name-prefix=prefix]\n\
+ [--skeleton=skeletonfile] [--headerskeleton=headerskeletonfile]\n\
+ [--output=outfile] [--header-file=header] grammar-file\n",
+ program_name);
+}
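+
+/* Typical invocation (hypothetical file names, shown only to illustrate how
+   the options above combine):
+
+       bison++ -d -v -o calc.cc -h calc.h calc.y
+
+   writes the parser to calc.cc, the token definitions to calc.h, and, with
+   -v, a readable description of the automaton to calc.output. */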
+
+
+
+void
+getargs(int argc, char** argv)
+{
+ register int c;
+
+ verboseflag = 0;
+ definesflag = 0;
+ debugflag = 0;
+ fixed_outfiles = 0;
+
+ while ((c = getopt_long (argc, argv, "yvdltVuo:b:p:S:H:h:", longopts, (int *)0))
+ != EOF)
+ {
+ switch (c)
+ {
+ case 0:
+ /* Certain long options cause getopt_long to return 0. */
+ break;
+
+ case 'y':
+ fixed_outfiles = 1;
+ break;
+
+ case 'V':
+ printf("%s", version_string);
+ exit(0);
+
+ case 'v':
+ verboseflag = 1;
+ break;
+
+ case 'd':
+ definesflag = 1;
+ break;
+
+ case 'l':
+ nolinesflag = 1;
+ break;
+
+ case 't':
+ debugflag = 1;
+ break;
+
+ case 'o':
+ spec_outfile = optarg;
+ break;
+
+ case 'b':
+ spec_file_prefix = optarg;
+ break;
+
+ case 'p':
+ spec_name_prefix = optarg;
+ break;
+ case 'S':
+ cparserfile = optarg;
+ break;
+ case 'H':
+ hskelfile = optarg;
+ break;
+
+ case 'h':
+ header_name = optarg;
+ break;
+
+ case 'u':
+ usage(stdout);
+ exit (0);
+
+ default:
+ usage(stderr);
+ exit (1);
+ }
+ }
+
+ if (optind == argc)
+ {
+ fprintf(stderr, "%s: no grammar file given\n", program_name);
+ exit(1);
+ }
+ if (optind < argc - 1)
+ fprintf(stderr, "%s: warning: extra arguments ignored\n", program_name);
+
+ infile = argv[optind];
+}
diff --git a/tools/bison++/getopt.cc b/tools/bison++/getopt.cc
new file mode 100644
index 000000000..509e70a03
--- /dev/null
+++ b/tools/bison++/getopt.cc
@@ -0,0 +1,744 @@
+/* Getopt for GNU.
+ NOTE: getopt is now part of the C library, so if you don't know what
+ "Keep this file name-space clean" means, talk to roland@gnu.ai.mit.edu
+ before changing it!
+
+ Copyright (C) 1987, 88, 89, 90, 91, 92, 1993
+ Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifdef HAVE_CONFIG_H
+/* We use <config.h> instead of "config.h" so that a compilation
+ using -I. -I$srcdir will use ./config.h rather than $srcdir/config.h
+ (which it would do because getopt.c was found in $srcdir). */
+#include <config.h>
+#endif
+
+#ifndef __STDC__
+/* This is a separate conditional since some stdc systems
+ reject `defined (const)'. */
+#ifndef const
+#define const
+#endif
+#endif
+
+/* This tells Alpha OSF/1 not to define a getopt prototype in <stdio.h>. */
+#ifndef _NO_PROTO
+#define _NO_PROTO
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+/* Comment out all this code if we are using the GNU C Library, and are not
+ actually compiling the library itself. This code is part of the GNU C
+ Library, but also included in many other GNU distributions. Compiling
+ and linking in this code is a waste when using the GNU C library
+ (especially if it is a shared library). Rather than having every GNU
+ program understand `configure --with-gnu-libc' and omit the object files,
+ it is simpler to just do this in the source for each such file. */
+
+#if defined (_LIBC) || !defined (__GNU_LIBRARY__)
+
+
+/* This needs to come after some library #include
+ to get __GNU_LIBRARY__ defined. */
+#ifdef __GNU_LIBRARY__
+/* Don't include stdlib.h for non-GNU C libraries because some of them
+ contain conflicting prototypes for getopt. */
+#include <stdlib.h>
+#endif /* GNU C library. */
+
+/* If GETOPT_COMPAT is defined, `+' as well as `--' can introduce a
+ long-named option. Because this is not POSIX.2 compliant, it is
+ being phased out. */
+/* #define GETOPT_COMPAT */
+
+/* This version of `getopt' appears to the caller like standard Unix `getopt'
+ but it behaves differently for the user, since it allows the user
+ to intersperse the options with the other arguments.
+
+ As `getopt' works, it permutes the elements of ARGV so that,
+ when it is done, all the options precede everything else. Thus
+ all application programs are extended to handle flexible argument order.
+
+ Setting the environment variable POSIXLY_CORRECT disables permutation.
+ Then the behavior is completely standard.
+
+ GNU application programs can use a third alternative mode in which
+ they can distinguish the relative order of options and other arguments. */
+
+#include "getopt.h"
+
+/* For communication from `getopt' to the caller.
+ When `getopt' finds an option that takes an argument,
+ the argument value is returned here.
+ Also, when `ordering' is RETURN_IN_ORDER,
+ each non-option ARGV-element is returned here. */
+
+char *optarg = 0;
+
+/* Index in ARGV of the next element to be scanned.
+ This is used for communication to and from the caller
+ and for communication between successive calls to `getopt'.
+
+ On entry to `getopt', zero means this is the first call; initialize.
+
+ When `getopt' returns EOF, this is the index of the first of the
+ non-option elements that the caller should itself scan.
+
+ Otherwise, `optind' communicates from one call to the next
+ how much of ARGV has been scanned so far. */
+
+/* XXX 1003.2 says this must be 1 before any call. */
+int optind = 0;
+
+/* The next char to be scanned in the option-element
+ in which the last option character we returned was found.
+ This allows us to pick up the scan where we left off.
+
+ If this is zero, or a null string, it means resume the scan
+ by advancing to the next ARGV-element. */
+
+static char *nextchar;
+
+/* Callers store zero here to inhibit the error message
+ for unrecognized options. */
+
+int opterr = 1;
+
+/* Set to an option character which was unrecognized.
+ This must be initialized on some systems to avoid linking in the
+ system's own getopt implementation. */
+
+int optopt = '?';
+
+/* Describe how to deal with options that follow non-option ARGV-elements.
+
+ If the caller did not specify anything,
+ the default is REQUIRE_ORDER if the environment variable
+ POSIXLY_CORRECT is defined, PERMUTE otherwise.
+
+ REQUIRE_ORDER means don't recognize them as options;
+ stop option processing when the first non-option is seen.
+ This is what Unix does.
+ This mode of operation is selected by either setting the environment
+ variable POSIXLY_CORRECT, or using `+' as the first character
+ of the list of option characters.
+
+ PERMUTE is the default. We permute the contents of ARGV as we scan,
+ so that eventually all the non-options are at the end. This allows options
+ to be given in any order, even with programs that were not written to
+ expect this.
+
+ RETURN_IN_ORDER is an option available to programs that were written
+ to expect options and other ARGV-elements in any order and that care about
+ the ordering of the two. We describe each non-option ARGV-element
+ as if it were the argument of an option with character code 1.
+ Using `-' as the first character of the list of option characters
+ selects this mode of operation.
+
+ The special argument `--' forces an end of option-scanning regardless
+ of the value of `ordering'. In the case of RETURN_IN_ORDER, only
+ `--' can cause `getopt' to return EOF with `optind' != ARGC. */
+
+static enum
+{
+ REQUIRE_ORDER, PERMUTE, RETURN_IN_ORDER
+} ordering;
+
+#ifdef __GNU_LIBRARY__
+/* We want to avoid inclusion of string.h with non-GNU libraries
+ because there are many ways it can cause trouble.
+ On some systems, it contains special magic macros that don't work
+ in GCC. */
+#include <string.h>
+#define my_index strchr
+#else
+
+/* Avoid depending on library functions or files
+ whose names are inconsistent. */
+
+char *getenv ();
+
+static char *
+my_index (const char *str, int chr)
+{
+ while (*str)
+ {
+ if (*str == chr)
+ return (char *) str;
+ str++;
+ }
+ return 0;
+}
+
+/* If using GCC, we can safely declare strlen this way.
+ If not using GCC, it is ok not to declare it.
+ (Supposedly there are some machines where it might get a warning,
+ but changing this conditional to __STDC__ is too risky.) */
+#ifdef __GNUC__
+#ifdef IN_GCC
+#include "gstddef.h"
+#else
+#include <stddef.h>
+#endif
+extern size_t strlen (const char *);
+#endif
+
+#endif /* GNU C library. */
+
+/* Handle permutation of arguments. */
+
+/* Describe the part of ARGV that contains non-options that have
+ been skipped. `first_nonopt' is the index in ARGV of the first of them;
+ `last_nonopt' is the index after the last of them. */
+
+static int first_nonopt;
+static int last_nonopt;
+
+/* Exchange two adjacent subsequences of ARGV.
+ One subsequence is elements [first_nonopt,last_nonopt)
+ which contains all the non-options that have been skipped so far.
+ The other is elements [last_nonopt,optind), which contains all
+ the options processed since those non-options were skipped.
+
+ `first_nonopt' and `last_nonopt' are relocated so that they describe
+ the new indices of the non-options in ARGV after they are moved. */
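+
+/* Worked example (illustrative only): with
+
+       argv = { "prog", "x", "y", "-a", "-b" }
+
+   and first_nonopt = 1, last_nonopt = 3, optind = 5, exchange() rearranges
+   argv into { "prog", "-a", "-b", "x", "y" } and then sets first_nonopt = 3
+   and last_nonopt = 5, so the skipped non-options again sit just before
+   optind. */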
+
+static void
+exchange (char **argv)
+{
+ int bottom = first_nonopt;
+ int middle = last_nonopt;
+ int top = optind;
+ char *tem;
+
+ /* Exchange the shorter segment with the far end of the longer segment.
+ That puts the shorter segment into the right place.
+ It leaves the longer segment in the right place overall,
+ but it consists of two parts that need to be swapped next. */
+
+ while (top > middle && middle > bottom)
+ {
+ if (top - middle > middle - bottom)
+ {
+ /* Bottom segment is the short one. */
+ int len = middle - bottom;
+ register int i;
+
+ /* Swap it with the top part of the top segment. */
+ for (i = 0; i < len; i++)
+ {
+ tem = argv[bottom + i];
+ argv[bottom + i] = argv[top - (middle - bottom) + i];
+ argv[top - (middle - bottom) + i] = tem;
+ }
+ /* Exclude the moved bottom segment from further swapping. */
+ top -= len;
+ }
+ else
+ {
+ /* Top segment is the short one. */
+ int len = top - middle;
+ register int i;
+
+ /* Swap it with the bottom part of the bottom segment. */
+ for (i = 0; i < len; i++)
+ {
+ tem = argv[bottom + i];
+ argv[bottom + i] = argv[middle + i];
+ argv[middle + i] = tem;
+ }
+ /* Exclude the moved top segment from further swapping. */
+ bottom += len;
+ }
+ }
+
+ /* Update records for the slots the non-options now occupy. */
+
+ first_nonopt += (optind - last_nonopt);
+ last_nonopt = optind;
+}
+
+/* Scan elements of ARGV (whose length is ARGC) for option characters
+ given in OPTSTRING.
+
+ If an element of ARGV starts with '-', and is not exactly "-" or "--",
+ then it is an option element. The characters of this element
+ (aside from the initial '-') are option characters. If `getopt'
+ is called repeatedly, it returns successively each of the option characters
+ from each of the option elements.
+
+ If `getopt' finds another option character, it returns that character,
+ updating `optind' and `nextchar' so that the next call to `getopt' can
+ resume the scan with the following option character or ARGV-element.
+
+ If there are no more option characters, `getopt' returns `EOF'.
+ Then `optind' is the index in ARGV of the first ARGV-element
+ that is not an option. (The ARGV-elements have been permuted
+ so that those that are not options now come last.)
+
+ OPTSTRING is a string containing the legitimate option characters.
+ If an option character is seen that is not listed in OPTSTRING,
+ return '?' after printing an error message. If you set `opterr' to
+ zero, the error message is suppressed but we still return '?'.
+
+ If a char in OPTSTRING is followed by a colon, that means it wants an arg,
+ so the following text in the same ARGV-element, or the text of the following
+ ARGV-element, is returned in `optarg'. Two colons mean an option that
+ wants an optional arg; if there is text in the current ARGV-element,
+ it is returned in `optarg', otherwise `optarg' is set to zero.
+
+ If OPTSTRING starts with `-' or `+', it requests different methods of
+ handling the non-option ARGV-elements.
+ See the comments about RETURN_IN_ORDER and REQUIRE_ORDER, above.
+
+ Long-named options begin with `--' instead of `-'.
+ Their names may be abbreviated as long as the abbreviation is unique
+ or is an exact match for some defined option. If they have an
+ argument, it follows the option name in the same ARGV-element, separated
+   from the option name by a `=', or else in the next ARGV-element.
+ When `getopt' finds a long-named option, it returns 0 if that option's
+ `flag' field is nonzero, the value of the option's `val' field
+ if the `flag' field is zero.
+
+ The elements of ARGV aren't really const, because we permute them.
+ But we pretend they're const in the prototype to be compatible
+ with other systems.
+
+ LONGOPTS is a vector of `struct option' terminated by an
+ element containing a name which is zero.
+
+   LONGIND returns the index in LONGOPTS of the long-named option found.
+ It is only valid when a long-named option has been found by the most
+ recent call.
+
+ If LONG_ONLY is nonzero, '-' as well as '--' can introduce
+ long-named options. */
+
+int
+_getopt_internal (int argc, char *const *argv, const char *optstring,
+ const struct option *longopts, int *longind, int long_only)
+{
+ int option_index;
+
+ optarg = 0;
+
+ /* Initialize the internal data when the first call is made.
+ Start processing options with ARGV-element 1 (since ARGV-element 0
+ is the program name); the sequence of previously skipped
+ non-option ARGV-elements is empty. */
+
+ if (optind == 0)
+ {
+ first_nonopt = last_nonopt = optind = 1;
+
+ nextchar = NULL;
+
+ /* Determine how to handle the ordering of options and nonoptions. */
+
+ if (optstring[0] == '-')
+ {
+ ordering = RETURN_IN_ORDER;
+ ++optstring;
+ }
+ else if (optstring[0] == '+')
+ {
+ ordering = REQUIRE_ORDER;
+ ++optstring;
+ }
+ else if (getenv ("POSIXLY_CORRECT") != NULL)
+ ordering = REQUIRE_ORDER;
+ else
+ ordering = PERMUTE;
+ }
+
+ if (nextchar == NULL || *nextchar == '\0')
+ {
+ if (ordering == PERMUTE)
+ {
+ /* If we have just processed some options following some non-options,
+ exchange them so that the options come first. */
+
+ if (first_nonopt != last_nonopt && last_nonopt != optind)
+ exchange ((char **) argv);
+ else if (last_nonopt != optind)
+ first_nonopt = optind;
+
+ /* Now skip any additional non-options
+ and extend the range of non-options previously skipped. */
+
+ while (optind < argc
+ && (argv[optind][0] != '-' || argv[optind][1] == '\0')
+#ifdef GETOPT_COMPAT
+ && (longopts == NULL
+ || argv[optind][0] != '+' || argv[optind][1] == '\0')
+#endif /* GETOPT_COMPAT */
+ )
+ optind++;
+ last_nonopt = optind;
+ }
+
+ /* Special ARGV-element `--' means premature end of options.
+ Skip it like a null option,
+ then exchange with previous non-options as if it were an option,
+ then skip everything else like a non-option. */
+
+ if (optind != argc && !strcmp (argv[optind], "--"))
+ {
+ optind++;
+
+ if (first_nonopt != last_nonopt && last_nonopt != optind)
+ exchange ((char **) argv);
+ else if (first_nonopt == last_nonopt)
+ first_nonopt = optind;
+ last_nonopt = argc;
+
+ optind = argc;
+ }
+
+ /* If we have done all the ARGV-elements, stop the scan
+ and back over any non-options that we skipped and permuted. */
+
+ if (optind == argc)
+ {
+ /* Set the next-arg-index to point at the non-options
+ that we previously skipped, so the caller will digest them. */
+ if (first_nonopt != last_nonopt)
+ optind = first_nonopt;
+ return EOF;
+ }
+
+ /* If we have come to a non-option and did not permute it,
+ either stop the scan or describe it to the caller and pass it by. */
+
+ if ((argv[optind][0] != '-' || argv[optind][1] == '\0')
+#ifdef GETOPT_COMPAT
+ && (longopts == NULL
+ || argv[optind][0] != '+' || argv[optind][1] == '\0')
+#endif /* GETOPT_COMPAT */
+ )
+ {
+ if (ordering == REQUIRE_ORDER)
+ return EOF;
+ optarg = argv[optind++];
+ return 1;
+ }
+
+ /* We have found another option-ARGV-element.
+ Start decoding its characters. */
+
+ nextchar = (argv[optind] + 1
+ + (longopts != NULL && argv[optind][1] == '-'));
+ }
+
+ if (longopts != NULL
+ && ((argv[optind][0] == '-'
+ && (argv[optind][1] == '-' || long_only))
+#ifdef GETOPT_COMPAT
+ || argv[optind][0] == '+'
+#endif /* GETOPT_COMPAT */
+ ))
+ {
+ const struct option *p;
+ char *s = nextchar;
+ int exact = 0;
+ int ambig = 0;
+ const struct option *pfound = NULL;
+ int indfound;
+
+ while (*s && *s != '=')
+ s++;
+
+ /* Test all options for either exact match or abbreviated matches. */
+ for (p = longopts, option_index = 0; p->name;
+ p++, option_index++)
+ if (!strncmp (p->name, nextchar, s - nextchar))
+ {
+ if (s - nextchar == strlen (p->name))
+ {
+ /* Exact match found. */
+ pfound = p;
+ indfound = option_index;
+ exact = 1;
+ break;
+ }
+ else if (pfound == NULL)
+ {
+ /* First nonexact match found. */
+ pfound = p;
+ indfound = option_index;
+ }
+ else
+ /* Second nonexact match found. */
+ ambig = 1;
+ }
+
+ if (ambig && !exact)
+ {
+ if (opterr)
+ fprintf (stderr, "%s: option `%s' is ambiguous\n",
+ argv[0], argv[optind]);
+ nextchar += strlen (nextchar);
+ optind++;
+ return '?';
+ }
+
+ if (pfound != NULL)
+ {
+ option_index = indfound;
+ optind++;
+ if (*s)
+ {
+ /* Don't test has_arg with >, because some C compilers don't
+ allow it to be used on enums. */
+ if (pfound->has_arg)
+ optarg = s + 1;
+ else
+ {
+ if (opterr)
+ {
+ if (argv[optind - 1][1] == '-')
+ /* --option */
+ fprintf (stderr,
+ "%s: option `--%s' doesn't allow an argument\n",
+ argv[0], pfound->name);
+ else
+ /* +option or -option */
+ fprintf (stderr,
+ "%s: option `%c%s' doesn't allow an argument\n",
+ argv[0], argv[optind - 1][0], pfound->name);
+ }
+ nextchar += strlen (nextchar);
+ return '?';
+ }
+ }
+ else if (pfound->has_arg == 1)
+ {
+ if (optind < argc)
+ optarg = argv[optind++];
+ else
+ {
+ if (opterr)
+ fprintf (stderr, "%s: option `%s' requires an argument\n",
+ argv[0], argv[optind - 1]);
+ nextchar += strlen (nextchar);
+ return optstring[0] == ':' ? ':' : '?';
+ }
+ }
+ nextchar += strlen (nextchar);
+ if (longind != NULL)
+ *longind = option_index;
+ if (pfound->flag)
+ {
+ *(pfound->flag) = pfound->val;
+ return 0;
+ }
+ return pfound->val;
+ }
+ /* Can't find it as a long option. If this is not getopt_long_only,
+ or the option starts with '--' or is not a valid short
+ option, then it's an error.
+ Otherwise interpret it as a short option. */
+ if (!long_only || argv[optind][1] == '-'
+#ifdef GETOPT_COMPAT
+ || argv[optind][0] == '+'
+#endif /* GETOPT_COMPAT */
+ || my_index (optstring, *nextchar) == NULL)
+ {
+ if (opterr)
+ {
+ if (argv[optind][1] == '-')
+ /* --option */
+ fprintf (stderr, "%s: unrecognized option `--%s'\n",
+ argv[0], nextchar);
+ else
+ /* +option or -option */
+ fprintf (stderr, "%s: unrecognized option `%c%s'\n",
+ argv[0], argv[optind][0], nextchar);
+ }
+ nextchar = (char *) "";
+ optind++;
+ return '?';
+ }
+ }
+
+ /* Look at and handle the next option-character. */
+
+ {
+ char c = *nextchar++;
+ char *temp = my_index (optstring, c);
+
+ /* Increment `optind' when we start to process its last character. */
+ if (*nextchar == '\0')
+ ++optind;
+
+ if (temp == NULL || c == ':')
+ {
+ if (opterr)
+ {
+#if 0
+ if (c < 040 || c >= 0177)
+ fprintf (stderr, "%s: unrecognized option, character code 0%o\n",
+ argv[0], c);
+ else
+ fprintf (stderr, "%s: unrecognized option `-%c'\n", argv[0], c);
+#else
+ /* 1003.2 specifies the format of this message. */
+ fprintf (stderr, "%s: illegal option -- %c\n", argv[0], c);
+#endif
+ }
+ optopt = c;
+ return '?';
+ }
+ if (temp[1] == ':')
+ {
+ if (temp[2] == ':')
+ {
+ /* This is an option that accepts an argument optionally. */
+ if (*nextchar != '\0')
+ {
+ optarg = nextchar;
+ optind++;
+ }
+ else
+ optarg = 0;
+ nextchar = NULL;
+ }
+ else
+ {
+ /* This is an option that requires an argument. */
+ if (*nextchar != '\0')
+ {
+ optarg = nextchar;
+ /* If we end this ARGV-element by taking the rest as an arg,
+ we must advance to the next element now. */
+ optind++;
+ }
+ else if (optind == argc)
+ {
+ if (opterr)
+ {
+#if 0
+ fprintf (stderr, "%s: option `-%c' requires an argument\n",
+ argv[0], c);
+#else
+ /* 1003.2 specifies the format of this message. */
+ fprintf (stderr, "%s: option requires an argument -- %c\n",
+ argv[0], c);
+#endif
+ }
+ optopt = c;
+ if (optstring[0] == ':')
+ c = ':';
+ else
+ c = '?';
+ }
+ else
+ /* We already incremented `optind' once;
+ increment it again when taking next ARGV-elt as argument. */
+ optarg = argv[optind++];
+ nextchar = NULL;
+ }
+ }
+ return c;
+ }
+}
+
+int
+getopt (int argc, char *const *argv, const char *optstring)
+{
+ return _getopt_internal (argc, argv, optstring,
+ (const struct option *) 0,
+ (int *) 0,
+ 0);
+}
+
+#endif /* _LIBC or not __GNU_LIBRARY__. */
+
+#ifdef TEST
+
+/* Compile with -DTEST to make an executable for use in testing
+ the above definition of `getopt'. */
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ int c;
+ int digit_optind = 0;
+
+ while (1)
+ {
+ int this_option_optind = optind ? optind : 1;
+
+ c = getopt (argc, argv, "abc:d:0123456789");
+ if (c == EOF)
+ break;
+
+ switch (c)
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ if (digit_optind != 0 && digit_optind != this_option_optind)
+ printf ("digits occur in two different argv-elements.\n");
+ digit_optind = this_option_optind;
+ printf ("option %c\n", c);
+ break;
+
+ case 'a':
+ printf ("option a\n");
+ break;
+
+ case 'b':
+ printf ("option b\n");
+ break;
+
+ case 'c':
+ printf ("option c with value `%s'\n", optarg);
+ break;
+
+ case '?':
+ break;
+
+ default:
+ printf ("?? getopt returned character code 0%o ??\n", c);
+ }
+ }
+
+ if (optind < argc)
+ {
+ printf ("non-option ARGV-elements: ");
+ while (optind < argc)
+ printf ("%s ", argv[optind++]);
+ printf ("\n");
+ }
+
+ exit (0);
+}
+
+#endif /* TEST */
diff --git a/tools/bison++/getopt.h b/tools/bison++/getopt.h
new file mode 100644
index 000000000..695f89061
--- /dev/null
+++ b/tools/bison++/getopt.h
@@ -0,0 +1,128 @@
+/* Declarations for getopt.
+ Copyright (C) 1989, 1990, 1991, 1992, 1993 Free Software Foundation, Inc.
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef _GETOPT_H
+#define _GETOPT_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* For communication from `getopt' to the caller.
+ When `getopt' finds an option that takes an argument,
+ the argument value is returned here.
+ Also, when `ordering' is RETURN_IN_ORDER,
+ each non-option ARGV-element is returned here. */
+
+extern char *optarg;
+
+/* Index in ARGV of the next element to be scanned.
+ This is used for communication to and from the caller
+ and for communication between successive calls to `getopt'.
+
+ On entry to `getopt', zero means this is the first call; initialize.
+
+ When `getopt' returns EOF, this is the index of the first of the
+ non-option elements that the caller should itself scan.
+
+ Otherwise, `optind' communicates from one call to the next
+ how much of ARGV has been scanned so far. */
+
+extern int optind;
+
+/* Callers store zero here to inhibit the error message `getopt' prints
+ for unrecognized options. */
+
+extern int opterr;
+
+/* Set to an option character which was unrecognized. */
+
+extern int optopt;
+
+/* Describe the long-named options requested by the application.
+ The LONG_OPTIONS argument to getopt_long or getopt_long_only is a vector
+ of `struct option' terminated by an element containing a name which is
+ zero.
+
+ The field `has_arg' is:
+ no_argument (or 0) if the option does not take an argument,
+ required_argument (or 1) if the option requires an argument,
+ optional_argument (or 2) if the option takes an optional argument.
+
+ If the field `flag' is not NULL, it points to a variable that is set
+ to the value given in the field `val' when the option is found, but
+ left unchanged if the option is not found.
+
+ To have a long-named option do something other than set an `int' to
+ a compiled-in constant, such as set a value from `optarg', set the
+ option's `flag' field to zero and its `val' field to a nonzero
+ value (the equivalent single-letter option character, if there is
+ one). For long options that have a zero `flag' field, `getopt'
+ returns the contents of the `val' field. */
+
+struct option
+{
+#if __STDC__
+ const char *name;
+#else
+ char *name;
+#endif
+ /* has_arg can't be an enum because some compilers complain about
+ type mismatches in all the code that assumes it is an int. */
+ int has_arg;
+ int *flag;
+ int val;
+};
+
+/* Names for the values of the `has_arg' field of `struct option'. */
+
+#define no_argument 0
+#define required_argument 1
+#define optional_argument 2
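+
+/* Illustrative example, not part of this header: a typical longopts vector
+
+     static int verbose_flag;
+     static struct option opts[] =
+     {
+       {"verbose", no_argument,       &verbose_flag, 1  },
+       {"output",  required_argument, 0,             'o'},
+       {0, 0, 0, 0}
+     };
+
+   getopt_long returns 0 and stores 1 in verbose_flag for `--verbose', and
+   returns 'o' with `optarg' pointing at the argument for `--output'. */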
+
+#if __STDC__
+#if defined(__GNU_LIBRARY__)
+/* Many other libraries have conflicting prototypes for getopt, with
+ differences in the consts, in stdlib.h. To avoid compilation
+ errors, only prototype getopt for the GNU C library. */
+extern int getopt (int argc, char *const *argv, const char *shortopts);
+#else /* not __GNU_LIBRARY__ */
+extern int getopt ();
+#endif /* not __GNU_LIBRARY__ */
+extern int getopt_long (int argc, char *const *argv, const char *shortopts,
+ const struct option *longopts, int *longind);
+extern int getopt_long_only (int argc, char *const *argv,
+ const char *shortopts,
+ const struct option *longopts, int *longind);
+
+/* Internal only. Users should not call this directly. */
+extern int _getopt_internal (int argc, char *const *argv,
+ const char *shortopts,
+ const struct option *longopts, int *longind,
+ int long_only);
+#else /* not __STDC__ */
+extern int getopt ();
+extern int getopt_long ();
+extern int getopt_long_only ();
+
+extern int _getopt_internal ();
+#endif /* not __STDC__ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _GETOPT_H */
diff --git a/tools/bison++/getopt1.cc b/tools/bison++/getopt1.cc
new file mode 100644
index 000000000..12d0f4836
--- /dev/null
+++ b/tools/bison++/getopt1.cc
@@ -0,0 +1,175 @@
+/* getopt_long and getopt_long_only entry points for GNU getopt.
+ Copyright (C) 1987, 88, 89, 90, 91, 92, 1993
+ Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifdef HAVE_CONFIG_H
+/* We use <config.h> instead of "config.h" so that a compilation
+ using -I. -I$srcdir will use ./config.h rather than $srcdir/config.h
+ (which it would do because getopt1.c was found in $srcdir). */
+#include <config.h>
+#endif
+
+#include "getopt.h"
+
+#ifndef __STDC__
+/* This is a separate conditional since some stdc systems
+ reject `defined (const)'. */
+#ifndef const
+#define const
+#endif
+#endif
+
+#include <stdio.h>
+
+/* Comment out all this code if we are using the GNU C Library, and are not
+ actually compiling the library itself. This code is part of the GNU C
+ Library, but also included in many other GNU distributions. Compiling
+ and linking in this code is a waste when using the GNU C library
+ (especially if it is a shared library). Rather than having every GNU
+ program understand `configure --with-gnu-libc' and omit the object files,
+ it is simpler to just do this in the source for each such file. */
+
+#if defined (_LIBC) || !defined (__GNU_LIBRARY__)
+
+
+/* This needs to come after some library #include
+ to get __GNU_LIBRARY__ defined. */
+#ifdef __GNU_LIBRARY__
+#include <stdlib.h>
+#else
+char *getenv ();
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+int
+getopt_long (int argc, char *const *argv, const char *options,
+ const struct option *long_options, int *opt_index)
+{
+ return _getopt_internal (argc, argv, options, long_options, opt_index, 0);
+}
+
+/* Like getopt_long, but '-' as well as '--' can indicate a long option.
+ If an option that starts with '-' (not '--') doesn't match a long option,
+ but does match a short option, it is parsed as a short option
+ instead. */
+
+int
+getopt_long_only (int argc, char *const *argv, const char *options,
+ const struct option *long_options, int *opt_index)
+{
+ return _getopt_internal (argc, argv, options, long_options, opt_index, 1);
+}
+
+
+#endif /* _LIBC or not __GNU_LIBRARY__. */
+
+#ifdef TEST
+
+#include <stdio.h>
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ int c;
+ int digit_optind = 0;
+
+ while (1)
+ {
+ int this_option_optind = optind ? optind : 1;
+ int option_index = 0;
+ static struct option long_options[] =
+ {
+ {"add", 1, 0, 0},
+ {"append", 0, 0, 0},
+ {"delete", 1, 0, 0},
+ {"verbose", 0, 0, 0},
+ {"create", 0, 0, 0},
+ {"file", 1, 0, 0},
+ {0, 0, 0, 0}
+ };
+
+ c = getopt_long (argc, argv, "abc:d:0123456789",
+ long_options, &option_index);
+ if (c == EOF)
+ break;
+
+ switch (c)
+ {
+ case 0:
+ printf ("option %s", long_options[option_index].name);
+ if (optarg)
+ printf (" with arg %s", optarg);
+ printf ("\n");
+ break;
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ if (digit_optind != 0 && digit_optind != this_option_optind)
+ printf ("digits occur in two different argv-elements.\n");
+ digit_optind = this_option_optind;
+ printf ("option %c\n", c);
+ break;
+
+ case 'a':
+ printf ("option a\n");
+ break;
+
+ case 'b':
+ printf ("option b\n");
+ break;
+
+ case 'c':
+ printf ("option c with value `%s'\n", optarg);
+ break;
+
+ case 'd':
+ printf ("option d with value `%s'\n", optarg);
+ break;
+
+ case '?':
+ break;
+
+ default:
+ printf ("?? getopt returned character code 0%o ??\n", c);
+ }
+ }
+
+ if (optind < argc)
+ {
+ printf ("non-option ARGV-elements: ");
+ while (optind < argc)
+ printf ("%s ", argv[optind++]);
+ printf ("\n");
+ }
+
+ exit (0);
+}
+
+#endif /* TEST */
diff --git a/tools/bison++/gram.cc b/tools/bison++/gram.cc
new file mode 100644
index 000000000..cc1418d9d
--- /dev/null
+++ b/tools/bison++/gram.cc
@@ -0,0 +1,58 @@
+/* Allocate input grammar variables for bison,
+ Copyright (C) 1984, 1986, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* comments for these variables are in gram.h */
+
+int nitems;
+int nrules;
+int nsyms;
+int ntokens;
+int nvars;
+
+short *ritem;
+short *rlhs;
+short *rrhs;
+short *rprec;
+short *rprecsym;
+short *sprec;
+short *rassoc;
+short *sassoc;
+short *token_translations;
+short *rline;
+
+int start_symbol;
+
+int translations;
+
+int max_user_token_number;
+
+int semantic_parser;
+
+int pure_parser;
+
+int error_token_number;
+
+/* This is to avoid linker problems which occur on VMS when using GCC,
+ when the file in question contains data definitions only. */
+
+void
+dummy()
+{
+}
diff --git a/tools/bison++/gram.h b/tools/bison++/gram.h
new file mode 100644
index 000000000..4c670830e
--- /dev/null
+++ b/tools/bison++/gram.h
@@ -0,0 +1,122 @@
+/* Data definitions for internal representation of bison's input,
+ Copyright (C) 1984, 1986, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* representation of the grammar rules:
+
+ntokens is the number of tokens, and nvars is the number of variables
+(nonterminals). nsyms is the total number, ntokens + nvars.
+
+Each symbol (either token or variable) receives a symbol number.
+Numbers 0 to ntokens-1 are for tokens, and ntokens to nsyms-1 are for
+variables. Symbol number zero is the end-of-input token. This token
+is counted in ntokens.
+
+The rules receive rule numbers 1 to nrules in the order they are written.
+Actions and guards are accessed via the rule number.
+
+The rules themselves are described by three arrays: rrhs, rlhs and
+ritem. rlhs[R] is the symbol number of the left hand side of rule R.
+The right hand side is stored as symbol numbers in a portion of
+ritem. rrhs[R] contains the index in ritem of the beginning of the
+portion for rule R.
+
+If rlhs[R] is -1, the rule has been thrown out by reduce.c
+and should be ignored.
+
+The length of the portion is one greater
+ than the number of symbols in the rule's right hand side.
+The last element in the portion contains minus R, which
+identifies it as the end of a portion and says which rule it is for.
+
+The portions of ritem come in order of increasing rule number and are
+followed by an element which is zero to mark the end. nitems is the
+total length of ritem, not counting the final zero. Each element of
+ritem is called an "item" and its index in ritem is an item number.
+
+Item numbers are used in the finite state machine to represent
+places that parsing can get to.
+
+Precedence levels are recorded in the vectors sprec and rprec.
+sprec records the precedence level of each symbol,
+rprec the precedence level of each rule.
+rprecsym is the symbol-number of the symbol in %prec for this rule (if any).
+
+Precedence levels are assigned in increasing order starting with 1 so
+that numerically higher precedence values mean tighter binding as they
+ought to. Zero as a symbol or rule's precedence means none is
+assigned.
+
+Associativities are recorded similarly in rassoc and sassoc. */
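+
+/* Worked example (illustrative only; the real tables also contain bison's
+   reserved symbols and the added start rule, and may differ in array
+   origin).  Take ntokens = 3 with symbol 0 the end-of-input token, 1 = NUM,
+   2 = '+', one variable exp = 3, and the rules
+
+       1:  exp -> exp '+' exp
+       2:  exp -> NUM
+
+   Then one consistent layout is
+
+       rlhs[1] = rlhs[2] = 3
+       rrhs[1] = 0,   rrhs[2] = 4
+       ritem   = { 3, 2, 3, -1,   1, -2,   0 }
+
+   Each portion ends with minus its rule number, the final 0 terminates
+   ritem, and nitems = 6. */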
+
+
+#define ISTOKEN(s) ((s) < ntokens)
+#define ISVAR(s) ((s) >= ntokens)
+
+
+extern int nitems;
+extern int nrules;
+extern int nsyms;
+extern int ntokens;
+extern int nvars;
+
+extern short *ritem;
+extern short *rlhs;
+extern short *rrhs;
+extern short *rprec;
+extern short *rprecsym;
+extern short *sprec;
+extern short *rassoc;
+extern short *sassoc;
+extern short *rline; /* Source line number of each rule */
+
+extern int start_symbol;
+
+
+/* associativity values in elements of rassoc, sassoc. */
+
+#define RIGHT_ASSOC 1
+#define LEFT_ASSOC 2
+#define NON_ASSOC 3
+
+/* token translation table:
+indexed by a token number as returned by the user's yylex routine,
+it yields the internal token number used by the parser and throughout bison.
+If translations is zero, the translation table is not used because
+the two kinds of token numbers are the same. */
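+
+/* For example (illustrative): after `%token NUM 300', token_translations[300]
+   holds NUM's internal symbol number, while a literal token such as '+' is
+   looked up at index '+' (its character code). */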
+
+extern short *token_translations;
+extern int translations;
+extern int max_user_token_number;
+
+/* semantic_parser is nonzero if the input file says to use the hairy parser
+that provides for semantic error recovery. If it is zero, the yacc-compatible
+simplified parser is used. */
+
+extern int semantic_parser;
+
+/* pure_parser is nonzero if bison should generate a pure, reentrant parser. */
+
+extern int pure_parser;
+
+/* error_token_number is the token number of the error token. */
+
+extern int error_token_number;
+
+
diff --git a/tools/bison++/install-sh b/tools/bison++/install-sh
new file mode 100644
index 000000000..e9de23842
--- /dev/null
+++ b/tools/bison++/install-sh
@@ -0,0 +1,251 @@
+#!/bin/sh
+#
+# install - install a program, script, or datafile
+# This comes from X11R5 (mit/util/scripts/install.sh).
+#
+# Copyright 1991 by the Massachusetts Institute of Technology
+#
+# Permission to use, copy, modify, distribute, and sell this software and its
+# documentation for any purpose is hereby granted without fee, provided that
+# the above copyright notice appear in all copies and that both that
+# copyright notice and this permission notice appear in supporting
+# documentation, and that the name of M.I.T. not be used in advertising or
+# publicity pertaining to distribution of the software without specific,
+# written prior permission. M.I.T. makes no representations about the
+# suitability of this software for any purpose. It is provided "as is"
+# without express or implied warranty.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch. It can only install one file at a time, a restriction
+# shared with many OS's install programs.
+
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+
+
+# put in absolute paths if you don't have them in your path; or use env. vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+transformbasename=""
+transform_arg=""
+instcmd="$mvprog"
+chmodcmd="$chmodprog 0755"
+chowncmd=""
+chgrpcmd=""
+stripcmd=""
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=""
+dst=""
+dir_arg=""
+
+while [ x"$1" != x ]; do
+ case $1 in
+ -c) instcmd="$cpprog"
+ shift
+ continue;;
+
+ -d) dir_arg=true
+ shift
+ continue;;
+
+ -m) chmodcmd="$chmodprog $2"
+ shift
+ shift
+ continue;;
+
+ -o) chowncmd="$chownprog $2"
+ shift
+ shift
+ continue;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift
+ shift
+ continue;;
+
+ -s) stripcmd="$stripprog"
+ shift
+ continue;;
+
+ -t=*) transformarg=`echo $1 | sed 's/-t=//'`
+ shift
+ continue;;
+
+ -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
+ shift
+ continue;;
+
+ *) if [ x"$src" = x ]
+ then
+ src=$1
+ else
+ # this colon is to work around a 386BSD /bin/sh bug
+ :
+ dst=$1
+ fi
+ shift
+ continue;;
+ esac
+done
+
+if [ x"$src" = x ]
+then
+ echo "install: no input file specified"
+ exit 1
+else
+ true
+fi
+
+if [ x"$dir_arg" != x ]; then
+ dst=$src
+ src=""
+
+ if [ -d $dst ]; then
+ instcmd=:
+ chmodcmd=""
+ else
+ instcmd=mkdir
+ fi
+else
+
+# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
+# might cause directories to be created, which would be especially bad
+# if $src (and thus $dsttmp) contains '*'.
+
+ if [ -f $src -o -d $src ]
+ then
+ true
+ else
+ echo "install: $src does not exist"
+ exit 1
+ fi
+
+ if [ x"$dst" = x ]
+ then
+ echo "install: no destination specified"
+ exit 1
+ else
+ true
+ fi
+
+# If destination is a directory, append the input filename; if your system
+# does not like double slashes in filenames, you may need to add some logic
+
+ if [ -d $dst ]
+ then
+ dst="$dst"/`basename $src`
+ else
+ true
+ fi
+fi
+
+## this sed command emulates the dirname command
+dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
+
+# Make sure that the destination directory exists.
+# this part is taken from Noah Friedman's mkinstalldirs script
+
+# Skip lots of stat calls in the usual case.
+if [ ! -d "$dstdir" ]; then
+defaultIFS='
+'
+IFS="${IFS-${defaultIFS}}"
+
+oIFS="${IFS}"
+# Some sh's can't handle IFS=/ for some reason.
+IFS='%'
+set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
+IFS="${oIFS}"
+
+pathcomp=''
+
+while [ $# -ne 0 ] ; do
+ pathcomp="${pathcomp}${1}"
+ shift
+
+ if [ ! -d "${pathcomp}" ] ;
+ then
+ $mkdirprog "${pathcomp}"
+ else
+ true
+ fi
+
+ pathcomp="${pathcomp}/"
+done
+fi
+
+if [ x"$dir_arg" != x ]
+then
+ $doit $instcmd $dst &&
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
+else
+
+# If we're going to rename the final executable, determine the name now.
+
+ if [ x"$transformarg" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ dstfile=`basename $dst $transformbasename |
+ sed $transformarg`$transformbasename
+ fi
+
+# don't allow the sed command to completely eliminate the filename
+
+ if [ x"$dstfile" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ true
+ fi
+
+# Make a temp file name in the proper directory.
+
+ dsttmp=$dstdir/#inst.$$#
+
+# Move or copy the file name to the temp name
+
+ $doit $instcmd $src $dsttmp &&
+
+ trap "rm -f ${dsttmp}" 0 &&
+
+# and set any options; do chmod last to preserve setuid bits
+
+# If any of these fail, we abort the whole thing. If we want to
+# ignore errors from any of these, just make sure not to ignore
+# errors from the above "$doit $instcmd $src $dsttmp" command.
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
+
+# Now rename the file to the real destination.
+
+ $doit $rmcmd -f $dstdir/$dstfile &&
+ $doit $mvcmd $dsttmp $dstdir/$dstfile
+
+fi &&
+
+
+exit 0
diff --git a/tools/bison++/lalr.cc b/tools/bison++/lalr.cc
new file mode 100644
index 000000000..f79f16d98
--- /dev/null
+++ b/tools/bison++/lalr.cc
@@ -0,0 +1,761 @@
+/* Compute look-ahead criteria for bison,
+ Copyright (C) 1984, 1986, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Compute how to make the finite state machine deterministic;
+ find which rules need lookahead in each state, and which lookahead tokens they accept.
+
+lalr(), the entry point, builds these data structures:
+
+goto_map, from_state and to_state
+ record each shift transition which accepts a variable (a nonterminal).
+ngotos is the number of such transitions.
+from_state[t] is the state number which a transition leads from
+and to_state[t] is the state number it leads to.
+All the transitions that accept a particular variable are grouped together and
+goto_map[i - ntokens] is the index in from_state and to_state of the first of them.
+
+consistent[s] is nonzero if no lookahead is needed to decide what to do in state s.
+
+LAruleno is a vector which records the rules that need lookahead in various states.
+The elements of LAruleno that apply to state s are those from
+ lookaheads[s] through lookaheads[s+1]-1.
+Each element of LAruleno is a rule number.
+
+If lr is the length of LAruleno, then a number from 0 to lr-1
+can specify both a rule and a state where the rule might be applied.
+
+LA is an lr by ntokens matrix of bits.
+LA[l, i] is 1 if the rule LAruleno[l] is applicable in the appropriate state
+ when the next token is symbol i.
+If LA[l, i] and LA[l, j] are both 1 for i != j, it is a conflict.
+*/
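
A minimal standalone sketch of the indexing described above, with invented grammar sizes and bit macros modelled on the ones machine.h defines later in this patch (illustration only, not part of the generated parser): row l of LA belongs to rule LAruleno[l], the rows owned by state s run from lookaheads[s] to lookaheads[s+1]-1, and each row is tokensetsize machine words wide.

    #include <cassert>

    #define BITS_PER_WORD 32
    #define WORDSIZE(n)    (((n) + BITS_PER_WORD - 1) / BITS_PER_WORD)
    #define SETBIT(x, i)   ((x)[(i)/BITS_PER_WORD] |= (1u << ((i) % BITS_PER_WORD)))
    #define BITISSET(x, i) (((x)[(i)/BITS_PER_WORD] & (1u << ((i) % BITS_PER_WORD))) != 0)

    int main()
    {
      const int ntokens = 40;                     /* invented grammar size      */
      const int tokensetsize = WORDSIZE(ntokens); /* == 2 words per matrix row  */

      short lookaheads[] = { 0, 0, 2, 2 };        /* 3 states; state 1 owns     */
      short LAruleno[]   = { 4, 7 };              /* rows 0..1, for rules 4, 7  */
      unsigned LA[2 * 2] = { 0 };                 /* 2 rows of 2 words, cleared */

      /* Record that rule 7 may be reduced in state 1 when token 35 is next. */
      int l = lookaheads[1] + 1;                  /* second row of state 1      */
      assert(LAruleno[l] == 7);
      SETBIT(LA + l * tokensetsize, 35);

      assert(BITISSET(LA + l * tokensetsize, 35));
      assert(!BITISSET(LA + l * tokensetsize, 34));
      return 0;
    }
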
+
+#include <stdio.h>
+#include "system.h"
+#include "machine.h"
+#include "types.h"
+#include "state.h"
+#include "new.h"
+#include "gram.h"
+
+
+extern short **derives;
+extern char *nullable;
+
+
+int tokensetsize;
+short *lookaheads;
+short *LAruleno;
+unsigned *LA;
+short *accessing_symbol;
+char *consistent;
+core **state_table;
+shifts **shift_table;
+reductions **reduction_table;
+short *goto_map;
+short *from_state;
+short *to_state;
+
+short **transpose(short**,int);
+void set_state_table();
+void set_accessing_symbol();
+void set_shift_table();
+void set_reduction_table();
+void set_maxrhs();
+void initialize_LA();
+void set_goto_map();
+void initialize_F();
+void build_relations();
+void add_lookback_edge(int,int,int);
+void compute_FOLLOWS();
+void compute_lookaheads();
+void digraph(short**);
+void traverse(int);
+
+extern void toomany(char*);
+extern void berror(char*);
+
+static int infinity;
+static int maxrhs;
+static int ngotos;
+static unsigned *F;
+static short **includes;
+static shorts **lookback;
+static short **R;
+static short *INDEX;
+static short *VERTICES;
+static int top;
+
+
+void
+lalr()
+{
+ tokensetsize = WORDSIZE(ntokens);
+
+ set_state_table();
+ set_accessing_symbol();
+ set_shift_table();
+ set_reduction_table();
+ set_maxrhs();
+ initialize_LA();
+ set_goto_map();
+ initialize_F();
+ build_relations();
+ compute_FOLLOWS();
+ compute_lookaheads();
+}
+
+
+void
+set_state_table()
+{
+ register core *sp;
+
+ state_table = NEW2(nstates, core *);
+
+ for (sp = first_state; sp; sp = sp->next)
+ state_table[sp->number] = sp;
+}
+
+
+void
+set_accessing_symbol()
+{
+ register core *sp;
+
+ accessing_symbol = NEW2(nstates, short);
+
+ for (sp = first_state; sp; sp = sp->next)
+ accessing_symbol[sp->number] = sp->accessing_symbol;
+}
+
+
+void
+set_shift_table()
+{
+ register shifts *sp;
+
+ shift_table = NEW2(nstates, shifts *);
+
+ for (sp = first_shift; sp; sp = sp->next)
+ shift_table[sp->number] = sp;
+}
+
+
+void
+set_reduction_table()
+{
+ register reductions *rp;
+
+ reduction_table = NEW2(nstates, reductions *);
+
+ for (rp = first_reduction; rp; rp = rp->next)
+ reduction_table[rp->number] = rp;
+}
+
+
+void
+set_maxrhs()
+{
+ register short *itemp;
+ register int length;
+ register int max;
+
+ length = 0;
+ max = 0;
+ for (itemp = ritem; *itemp; itemp++)
+ {
+ if (*itemp > 0)
+ {
+ length++;
+ }
+ else
+ {
+ if (length > max) max = length;
+ length = 0;
+ }
+ }
+
+ maxrhs = max;
+}
+
+
+void
+initialize_LA()
+{
+ register int i;
+ register int j;
+ register int count;
+ register reductions *rp;
+ register shifts *sp;
+ register short *np;
+
+ consistent = NEW2(nstates, char);
+ lookaheads = NEW2(nstates + 1, short);
+
+ count = 0;
+ for (i = 0; i < nstates; i++)
+ {
+ register int k;
+
+ lookaheads[i] = count;
+
+ rp = reduction_table[i];
+ sp = shift_table[i];
+ if (rp && (rp->nreds > 1
+ || (sp && ! ISVAR(accessing_symbol[sp->internalShifts[0]]))))
+ count += rp->nreds;
+ else
+ consistent[i] = 1;
+
+ if (sp)
+ for (k = 0; k < sp->nshifts; k++)
+ {
+ if (accessing_symbol[sp->internalShifts[k]] == error_token_number)
+ {
+ consistent[i] = 0;
+ break;
+ }
+ }
+ }
+
+ lookaheads[nstates] = count;
+
+ if (count == 0)
+ {
+ LA = NEW2(1 * tokensetsize, unsigned);
+ LAruleno = NEW2(1, short);
+ lookback = NEW2(1, shorts *);
+ }
+ else
+ {
+ LA = NEW2(count * tokensetsize, unsigned);
+ LAruleno = NEW2(count, short);
+ lookback = NEW2(count, shorts *);
+ }
+
+ np = LAruleno;
+ for (i = 0; i < nstates; i++)
+ {
+ if (!consistent[i])
+ {
+ if (rp = reduction_table[i])
+ for (j = 0; j < rp->nreds; j++)
+ *np++ = rp->rules[j];
+ }
+ }
+}
+
+
+void
+set_goto_map()
+{
+ register shifts *sp;
+ register int i;
+ register int symbol;
+ register int k;
+ register short *temp_map;
+ register int state2;
+ register int state1;
+
+ goto_map = NEW2(nvars + 1, short) - ntokens;
+ temp_map = NEW2(nvars + 1, short) - ntokens;
+
+ ngotos = 0;
+ for (sp = first_shift; sp; sp = sp->next)
+ {
+ for (i = sp->nshifts - 1; i >= 0; i--)
+ {
+ symbol = accessing_symbol[sp->internalShifts[i]];
+
+ if (ISTOKEN(symbol)) break;
+
+ if (ngotos == MAXSHORT)
+ toomany("gotos");
+
+ ngotos++;
+ goto_map[symbol]++;
+ }
+ }
+
+ k = 0;
+ for (i = ntokens; i < nsyms; i++)
+ {
+ temp_map[i] = k;
+ k += goto_map[i];
+ }
+
+ for (i = ntokens; i < nsyms; i++)
+ goto_map[i] = temp_map[i];
+
+ goto_map[nsyms] = ngotos;
+ temp_map[nsyms] = ngotos;
+
+ from_state = NEW2(ngotos, short);
+ to_state = NEW2(ngotos, short);
+
+ for (sp = first_shift; sp; sp = sp->next)
+ {
+ state1 = sp->number;
+ for (i = sp->nshifts - 1; i >= 0; i--)
+ {
+ state2 = sp->internalShifts[i];
+ symbol = accessing_symbol[state2];
+
+ if (ISTOKEN(symbol)) break;
+
+ k = temp_map[symbol]++;
+ from_state[k] = state1;
+ to_state[k] = state2;
+ }
+ }
+
+ FREE(temp_map + ntokens);
+}
+
+
+
+/* Map_goto maps a state/symbol pair into its numeric representation. */
+
+int
+map_goto(int state, int symbol)
+{
+ register int high;
+ register int low;
+ register int middle;
+ register int s;
+
+ low = goto_map[symbol];
+ high = goto_map[symbol + 1] - 1;
+
+ while (low <= high)
+ {
+ middle = (low + high) / 2;
+ s = from_state[middle];
+ if (s == state)
+ return (middle);
+ else if (s < state)
+ low = middle + 1;
+ else
+ high = middle - 1;
+ }
+
+ berror("map_goto");
+/* NOTREACHED */
+ return 0;
+}
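
Worked example of the layout map_goto relies on, with invented numbers and trailing underscores marking local stand-ins for the real arrays: with three tokens and the nonterminals numbered 3 and 4, the slice of from_state/to_state for a nonterminal runs from goto_map[symbol] to goto_map[symbol+1]-1, and within that slice from_state is in increasing state order, so a plain binary search finds the transition.

    #include <cassert>

    static short goto_map_[6]   = { 0, 0, 0, 0, 2, 3 };  /* entries 0..2 unused */
    static short from_state_[3] = { 0, 2, 1 };           /* sorted per symbol   */

    static int map_goto_(int state, int symbol)
    {
      int low = goto_map_[symbol], high = goto_map_[symbol + 1] - 1;
      while (low <= high)
        {
          int middle = (low + high) / 2;
          if (from_state_[middle] == state) return middle;
          if (from_state_[middle] < state) low = middle + 1; else high = middle - 1;
        }
      return -1;                        /* the real code calls berror() instead */
    }

    int main()
    {
      assert(map_goto_(2, 3) == 1);     /* second goto taken on nonterminal 3   */
      assert(map_goto_(1, 4) == 2);     /* only goto taken on nonterminal 4     */
      return 0;
    }
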
+
+
+void
+initialize_F()
+{
+ register int i;
+ register int j;
+ register int k;
+ register shifts *sp;
+ register short *edge;
+ register unsigned *rowp;
+ register short *rp;
+ register short **reads;
+ register int nedges;
+ register int stateno;
+ register int symbol;
+ register int nwords;
+
+ nwords = ngotos * tokensetsize;
+ F = NEW2(nwords, unsigned);
+
+ reads = NEW2(ngotos, short *);
+ edge = NEW2(ngotos + 1, short);
+ nedges = 0;
+
+ rowp = F;
+ for (i = 0; i < ngotos; i++)
+ {
+ stateno = to_state[i];
+ sp = shift_table[stateno];
+
+ if (sp)
+ {
+ k = sp->nshifts;
+
+ for (j = 0; j < k; j++)
+ {
+ symbol = accessing_symbol[sp->internalShifts[j]];
+ if (ISVAR(symbol))
+ break;
+ SETBIT(rowp, symbol);
+ }
+
+ for (; j < k; j++)
+ {
+ symbol = accessing_symbol[sp->internalShifts[j]];
+ if (nullable[symbol])
+ edge[nedges++] = map_goto(stateno, symbol);
+ }
+
+ if (nedges)
+ {
+ reads[i] = rp = NEW2(nedges + 1, short);
+
+ for (j = 0; j < nedges; j++)
+ rp[j] = edge[j];
+
+ rp[nedges] = -1;
+ nedges = 0;
+ }
+ }
+
+ rowp += tokensetsize;
+ }
+
+ digraph(reads);
+
+ for (i = 0; i < ngotos; i++)
+ {
+ if (reads[i])
+ FREE(reads[i]);
+ }
+
+ FREE(reads);
+ FREE(edge);
+}
+
+
+void
+build_relations()
+{
+ register int i;
+ register int j;
+ register int k;
+ register short *rulep;
+ register short *rp;
+ register shifts *sp;
+ register int length;
+ register int nedges;
+ register int done;
+ register int state1;
+ register int stateno;
+ register int symbol1;
+ register int symbol2;
+ register short *shortp;
+ register short *edge;
+ register short *states;
+ register short **new_includes;
+
+ includes = NEW2(ngotos, short *);
+ edge = NEW2(ngotos + 1, short);
+ states = NEW2(maxrhs + 1, short);
+
+ for (i = 0; i < ngotos; i++)
+ {
+ nedges = 0;
+ state1 = from_state[i];
+ symbol1 = accessing_symbol[to_state[i]];
+
+ for (rulep = derives[symbol1]; *rulep > 0; rulep++)
+ {
+ length = 1;
+ states[0] = state1;
+ stateno = state1;
+
+ for (rp = ritem + rrhs[*rulep]; *rp > 0; rp++)
+ {
+ symbol2 = *rp;
+ sp = shift_table[stateno];
+ k = sp->nshifts;
+
+ for (j = 0; j < k; j++)
+ {
+ stateno = sp->internalShifts[j];
+ if (accessing_symbol[stateno] == symbol2) break;
+ }
+
+ states[length++] = stateno;
+ }
+
+ if (!consistent[stateno])
+ add_lookback_edge(stateno, *rulep, i);
+
+ length--;
+ done = 0;
+ while (!done)
+ {
+ done = 1;
+ rp--;
+	      /* JF added rp>=ritem && I hope to god it's right! */
+ if (rp>=ritem && ISVAR(*rp))
+ {
+ stateno = states[--length];
+ edge[nedges++] = map_goto(stateno, *rp);
+ if (nullable[*rp]) done = 0;
+ }
+ }
+ }
+
+ if (nedges)
+ {
+ includes[i] = shortp = NEW2(nedges + 1, short);
+ for (j = 0; j < nedges; j++)
+ shortp[j] = edge[j];
+ shortp[nedges] = -1;
+ }
+ }
+
+ new_includes = transpose(includes, ngotos);
+
+ for (i = 0; i < ngotos; i++)
+ if (includes[i])
+ FREE(includes[i]);
+
+ FREE(includes);
+
+ includes = new_includes;
+
+ FREE(edge);
+ FREE(states);
+}
+
+
+void
+add_lookback_edge(int stateno, int ruleno, int gotono)
+{
+ register int i;
+ register int k;
+ register int found;
+ register shorts *sp;
+
+ i = lookaheads[stateno];
+ k = lookaheads[stateno + 1];
+ found = 0;
+ while (!found && i < k)
+ {
+ if (LAruleno[i] == ruleno)
+ found = 1;
+ else
+ i++;
+ }
+
+ if (found == 0)
+ berror("add_lookback_edge");
+
+ sp = NEW(shorts);
+ sp->next = lookback[i];
+ sp->value = gotono;
+ lookback[i] = sp;
+}
+
+
+
+short **
+transpose(short** R_arg, int n)
+{
+ register short **new_R;
+ register short **temp_R;
+ register short *nedges;
+ register short *sp;
+ register int i;
+ register int k;
+
+ nedges = NEW2(n, short);
+
+ for (i = 0; i < n; i++)
+ {
+ sp = R_arg[i];
+ if (sp)
+ {
+ while (*sp >= 0)
+ nedges[*sp++]++;
+ }
+ }
+
+ new_R = NEW2(n, short *);
+ temp_R = NEW2(n, short *);
+
+ for (i = 0; i < n; i++)
+ {
+ k = nedges[i];
+ if (k > 0)
+ {
+ sp = NEW2(k + 1, short);
+ new_R[i] = sp;
+ temp_R[i] = sp;
+ sp[k] = -1;
+ }
+ }
+
+ FREE(nedges);
+
+ for (i = 0; i < n; i++)
+ {
+ sp = R_arg[i];
+ if (sp)
+ {
+ while (*sp >= 0)
+ *temp_R[*sp++]++ = i;
+ }
+ }
+
+ FREE(temp_R);
+
+ return (new_R);
+}
+
+
+void
+compute_FOLLOWS()
+{
+ register int i;
+
+ digraph(includes);
+
+ for (i = 0; i < ngotos; i++)
+ {
+ if (includes[i]) FREE(includes[i]);
+ }
+
+ FREE(includes);
+}
+
+
+void
+compute_lookaheads()
+{
+ register int i;
+ register int n;
+ register unsigned *fp1;
+ register unsigned *fp2;
+ register unsigned *fp3;
+ register shorts *sp;
+ register unsigned *rowp;
+/* register short *rulep; JF unused */
+/* register int count; JF unused */
+ register shorts *sptmp;/* JF */
+
+ rowp = LA;
+ n = lookaheads[nstates];
+ for (i = 0; i < n; i++)
+ {
+ fp3 = rowp + tokensetsize;
+ for (sp = lookback[i]; sp; sp = sp->next)
+ {
+ fp1 = rowp;
+ fp2 = F + tokensetsize * sp->value;
+ while (fp1 < fp3)
+ *fp1++ |= *fp2++;
+ }
+
+ rowp = fp3;
+ }
+
+ for (i = 0; i < n; i++)
+ {/* JF removed ref to freed storage */
+ for (sp = lookback[i]; sp; sp = sptmp) {
+ sptmp=sp->next;
+ FREE(sp);
+ }
+ }
+
+ FREE(lookback);
+ FREE(F);
+}
+
+
+void
+digraph(short** relation)
+{
+ register int i;
+
+ infinity = ngotos + 2;
+ INDEX = NEW2(ngotos + 1, short);
+ VERTICES = NEW2(ngotos + 1, short);
+ top = 0;
+
+ R = relation;
+
+ for (i = 0; i < ngotos; i++)
+ INDEX[i] = 0;
+
+ for (i = 0; i < ngotos; i++)
+ {
+ if (INDEX[i] == 0 && R[i])
+ traverse(i);
+ }
+
+ FREE(INDEX);
+ FREE(VERTICES);
+}
+
+
+void
+traverse(int i)
+{
+ register unsigned *fp1;
+ register unsigned *fp2;
+ register unsigned *fp3;
+ register int j;
+ register short *rp;
+
+ int height;
+ unsigned *base;
+
+ VERTICES[++top] = i;
+ INDEX[i] = height = top;
+
+ base = F + i * tokensetsize;
+ fp3 = base + tokensetsize;
+
+ rp = R[i];
+ if (rp)
+ {
+ while ((j = *rp++) >= 0)
+ {
+ if (INDEX[j] == 0)
+ traverse(j);
+
+ if (INDEX[i] > INDEX[j])
+ INDEX[i] = INDEX[j];
+
+ fp1 = base;
+ fp2 = F + j * tokensetsize;
+
+ while (fp1 < fp3)
+ *fp1++ |= *fp2++;
+ }
+ }
+
+ if (INDEX[i] == height)
+ {
+ for (;;)
+ {
+ j = VERTICES[top--];
+ INDEX[j] = infinity;
+
+ if (i == j)
+ break;
+
+ fp1 = base;
+ fp2 = F + j * tokensetsize;
+
+ while (fp1 < fp3)
+ *fp2++ = *fp1++;
+ }
+ }
+}
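
digraph() and traverse() above are a Tarjan-style depth-first walk: each vertex ends up with the union of the F rows of everything reachable from it through the relation, and all members of a cycle share one merged row. A standalone sketch of the same algorithm, using sets instead of bit rows and invented data (illustration only, not the code this patch builds):

    #include <algorithm>
    #include <cassert>
    #include <set>
    #include <vector>

    static std::vector<std::vector<int>> R_;   // R_[i]: vertices j with i R j
    static std::vector<std::set<int>>    F_;   // in/out: per-vertex token sets
    static std::vector<int> index_, vertices_;
    static int top_, infinity_;

    static void traverse_(int i)
    {
      vertices_[++top_] = i;
      int height = index_[i] = top_;
      for (int j : R_[i])
        {
          if (index_[j] == 0)
            traverse_(j);
          index_[i] = std::min(index_[i], index_[j]);
          F_[i].insert(F_[j].begin(), F_[j].end());   // F(i) |= F(j)
        }
      if (index_[i] == height)                        // i closes a strongly
        for (;;)                                      // connected component
          {
            int j = vertices_[top_--];
            index_[j] = infinity_;
            if (j == i) break;
            F_[j] = F_[i];                            // members share the result
          }
    }

    int main()
    {
      int n = 3;
      R_ = { {1}, {2}, {} };                          // chain 0 -> 1 -> 2
      F_ = { {10}, {20}, {30} };
      index_.assign(n, 0);
      vertices_.assign(n + 1, 0);
      top_ = 0;
      infinity_ = n + 2;

      for (int i = 0; i < n; i++)
        if (index_[i] == 0)
          traverse_(i);

      assert(F_[0] == (std::set<int>{10, 20, 30}));   // union along the chain
      assert(F_[1] == (std::set<int>{20, 30}));
      return 0;
    }
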
diff --git a/tools/bison++/lex.cc b/tools/bison++/lex.cc
new file mode 100644
index 000000000..8eb5c02a5
--- /dev/null
+++ b/tools/bison++/lex.cc
@@ -0,0 +1,516 @@
+/* Token-reader for Bison's input parser,
+ Copyright (C) 1984, 1986, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/*
+ lex() is the entry point. It is called from reader.c.
+ It returns one of the token-type codes defined in lex.h.
+ When an identifier is seen, the code IDENTIFIER is returned
+ and the name is looked up in the symbol table using symtab.c;
+ symval is set to a pointer to the entry found. */
+
+#include <stdio.h>
+#include <ctype.h>
+#include "system.h"
+#include "files.h"
+#include "symtab.h"
+#include "lex.h"
+#include "new.h"
+
+
+extern int lineno;
+extern int translations;
+
+int parse_percent_token();
+
+extern void fatal(const char*);
+extern void fatals(const char*,void*);
+extern void fatals(const char*,void*,void*);
+extern void fatals(const char*,void*,void*,void*);
+extern void fatals(const char*,void*,void*,void*,void*);
+extern void fatals(const char*,void*,void*,void*,void*,void*);
+
+/* Buffer for storing the current token. */
+char *token_buffer;
+
+/* Allocated size of token_buffer, not including space for terminator. */
+static int maxtoken;
+
+bucket *symval;
+int numval;
+
+static int unlexed; /* these two describe a token to be reread */
+static bucket *unlexed_symval; /* by the next call to lex */
+
+
+void
+init_lex()
+{
+ maxtoken = 100;
+ token_buffer = NEW2 (maxtoken + 1, char);
+ unlexed = -1;
+}
+
+
+static char *
+grow_token_buffer (char* p)
+{
+ int offset = p - token_buffer;
+ maxtoken *= 2;
+ token_buffer = (char *) xrealloc(token_buffer, maxtoken + 1);
+ return token_buffer + offset;
+}
+
+
+int
+skip_white_space()
+{
+ register int c;
+ register int inside;
+
+ c = getc(finput);
+
+ for (;;)
+ {
+ int cplus_comment;
+
+ switch (c)
+ {
+ case '/':
+ c = getc(finput);
+ if (c != '*' && c != '/')
+ fatals("unexpected `/%c' found", (void*) c);
+ cplus_comment = (c == '/');
+
+ c = getc(finput);
+
+ inside = 1;
+ while (inside)
+ {
+ if (!cplus_comment && c == '*')
+ {
+ while (c == '*')
+ c = getc(finput);
+
+ if (c == '/')
+ {
+ inside = 0;
+ c = getc(finput);
+ }
+ }
+ else if (c == '\n')
+ {
+ lineno++;
+ if (cplus_comment)
+ inside = 0;
+ c = getc(finput);
+ }
+ else if (c == EOF)
+ fatal("unterminated comment");
+ else
+ c = getc(finput);
+ }
+
+ break;
+
+ case '\n':
+ lineno++;
+
+ case ' ':
+ case '\t':
+ case '\f':
+ c = getc(finput);
+ break;
+
+ default:
+ return (c);
+ }
+ }
+}
+
+
+void
+unlex(int token)
+{
+ unlexed = token;
+ unlexed_symval = symval;
+}
+
+
+
+int
+lex()
+{
+ register int c;
+ register char *p;
+
+ if (unlexed >= 0)
+ {
+ symval = unlexed_symval;
+ c = unlexed;
+ unlexed = -1;
+ return (c);
+ }
+
+ c = skip_white_space();
+
+ switch (c)
+ {
+ case EOF:
+ return (ENDFILE);
+
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F': case 'G': case 'H': case 'I': case 'J':
+ case 'K': case 'L': case 'M': case 'N': case 'O':
+ case 'P': case 'Q': case 'R': case 'S': case 'T':
+ case 'U': case 'V': case 'W': case 'X': case 'Y':
+ case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f': case 'g': case 'h': case 'i': case 'j':
+ case 'k': case 'l': case 'm': case 'n': case 'o':
+ case 'p': case 'q': case 'r': case 's': case 't':
+ case 'u': case 'v': case 'w': case 'x': case 'y':
+ case 'z':
+ case '.': case '_':
+ p = token_buffer;
+ while (isalnum(c) || c == '_' || c == '.')
+ {
+ if (p == token_buffer + maxtoken)
+ p = grow_token_buffer(p);
+
+ *p++ = c;
+ c = getc(finput);
+ }
+
+ *p = 0;
+ ungetc(c, finput);
+ symval = getsym(token_buffer);
+ return (IDENTIFIER);
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ {
+ numval = 0;
+
+ while (isdigit(c))
+ {
+ numval = numval*10 + c - '0';
+ c = getc(finput);
+ }
+ ungetc(c, finput);
+ return (NUMBER);
+ }
+
+ case '\'':
+ translations = -1;
+
+ /* parse the literal token and compute character code in code */
+
+ c = getc(finput);
+ {
+ register int code = 0;
+
+ if (c == '\\')
+ {
+ c = getc(finput);
+
+ if (c <= '7' && c >= '0')
+ {
+ while (c <= '7' && c >= '0')
+ {
+ code = (code * 8) + (c - '0');
+ c = getc(finput);
+ if (code >= 256 || code < 0)
+ fatals("malformatted literal token `\\%03o'", (void*) code);
+ }
+ }
+ else
+ {
+ if (c == 't')
+ code = '\t';
+ else if (c == 'n')
+ code = '\n';
+ else if (c == 'a')
+ code = '\007';
+ else if (c == 'r')
+ code = '\r';
+ else if (c == 'f')
+ code = '\f';
+ else if (c == 'b')
+ code = '\b';
+ else if (c == 'v')
+ code = 013;
+ else if (c == 'x')
+ {
+ c = getc(finput);
+ while ((c <= '9' && c >= '0')
+ || (c >= 'a' && c <= 'z')
+ || (c >= 'A' && c <= 'Z'))
+ {
+ code *= 16;
+ if (c <= '9' && c >= '0')
+ code += c - '0';
+ else if (c >= 'a' && c <= 'z')
+ code += c - 'a' + 10;
+ else if (c >= 'A' && c <= 'Z')
+ code += c - 'A' + 10;
+ if (code >= 256 || code<0)/* JF this said if(c>=128) */
+ fatals("malformatted literal token `\\x%x'",(void*) code);
+ c = getc(finput);
+ }
+ ungetc(c, finput);
+ }
+ else if (c == '\\')
+ code = '\\';
+ else if (c == '\'')
+ code = '\'';
+ else if (c == '\"') /* JF this is a good idea */
+ code = '\"';
+ else
+ {
+ if (c >= 040 && c <= 0177)
+ fatals ("unknown escape sequence `\\%c'", (void*) c);
+ else
+ fatals ("unknown escape sequence: `\\' followed by char code 0x%x", (void*) c);
+ }
+
+ c = getc(finput);
+ }
+ }
+ else
+ {
+ code = c;
+ c = getc(finput);
+ }
+ if (c != '\'')
+ fatal("multicharacter literal tokens not supported");
+
+ /* now fill token_buffer with the canonical name for this character
+ as a literal token. Do not use what the user typed,
+ so that '\012' and '\n' can be interchangeable. */
+
+ p = token_buffer;
+ *p++ = '\'';
+ if (code == '\\')
+ {
+ *p++ = '\\';
+ *p++ = '\\';
+ }
+ else if (code == '\'')
+ {
+ *p++ = '\\';
+ *p++ = '\'';
+ }
+ else if (code >= 040 && code != 0177)
+ *p++ = code;
+ else if (code == '\t')
+ {
+ *p++ = '\\';
+ *p++ = 't';
+ }
+ else if (code == '\n')
+ {
+ *p++ = '\\';
+ *p++ = 'n';
+ }
+ else if (code == '\r')
+ {
+ *p++ = '\\';
+ *p++ = 'r';
+ }
+ else if (code == '\v')
+ {
+ *p++ = '\\';
+ *p++ = 'v';
+ }
+ else if (code == '\b')
+ {
+ *p++ = '\\';
+ *p++ = 'b';
+ }
+ else if (code == '\f')
+ {
+ *p++ = '\\';
+ *p++ = 'f';
+ }
+ else
+ {
+ *p++ = code / 0100 + '0';
+ *p++ = ((code / 010) & 07) + '0';
+ *p++ = (code & 07) + '0';
+ }
+ *p++ = '\'';
+ *p = 0;
+ symval = getsym(token_buffer);
+ symval->internalClass = STOKEN;
+ if (! symval->user_token_number)
+ symval->user_token_number = code;
+ return (IDENTIFIER);
+ }
+
+ case ',':
+ return (COMMA);
+
+ case ':':
+ return (COLON);
+
+ case ';':
+ return (SEMICOLON);
+
+ case '|':
+ return (BAR);
+
+ case '{':
+ return (LEFT_CURLY);
+
+ case '=':
+ do
+ {
+ c = getc(finput);
+ if (c == '\n') lineno++;
+ }
+ while(c==' ' || c=='\n' || c=='\t');
+
+ if (c == '{')
+ return(LEFT_CURLY);
+ else
+ {
+ ungetc(c, finput);
+ return(ILLEGAL);
+ }
+
+ case '<':
+ p = token_buffer;
+ c = getc(finput);
+ while (c != '>')
+ {
+ if (c == '\n' || c == EOF)
+ fatal("unterminated type name");
+
+ if (p == token_buffer + maxtoken)
+ p = grow_token_buffer(p);
+
+ *p++ = c;
+ c = getc(finput);
+ }
+ *p = 0;
+ return (TYPENAME);
+
+
+ case '%':
+ return (parse_percent_token());
+
+ default:
+ return (ILLEGAL);
+ }
+}
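
The literal-token case above rebuilds the stored name from the character code instead of echoing what the user typed, which is why '\012' and '\n' end up as the same symbol-table entry. A self-contained restatement of just that rebuilding step, with an invented helper name (note that, exactly as in lex() above, the fallback octal form carries no leading backslash):

    #include <cassert>
    #include <string>

    static std::string canonical_literal(int code)
    {
      std::string s = "'";
      if (code == '\\')                       s += "\\\\";
      else if (code == '\'')                  s += "\\'";
      else if (code >= 040 && code != 0177)   s += (char) code;
      else if (code == '\t')                  s += "\\t";
      else if (code == '\n')                  s += "\\n";
      else if (code == '\r')                  s += "\\r";
      else if (code == '\v')                  s += "\\v";
      else if (code == '\b')                  s += "\\b";
      else if (code == '\f')                  s += "\\f";
      else                                    /* three octal digits, as above */
        {
          s += (char) (code / 0100 + '0');
          s += (char) (((code / 010) & 07) + '0');
          s += (char) ((code & 07) + '0');
        }
      return s + "'";
    }

    int main()
    {
      assert(canonical_literal('\n') == "'\\n'");   /* same result for '\012'   */
      assert(canonical_literal('+')  == "'+'");     /* printable char kept as-is */
      assert(canonical_literal(1)    == "'001'");   /* control char goes octal   */
      return 0;
    }
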
+
+
+/* parse a token which starts with %. Assumes the % has already been read and discarded. */
+
+int
+parse_percent_token ()
+{
+ register int c;
+ register char *p;
+
+ p = token_buffer;
+ c = getc(finput);
+
+ switch (c)
+ {
+ case '%':
+ return (TWO_PERCENTS);
+
+ case '{':
+ return (PERCENT_LEFT_CURLY);
+
+ case '<':
+ return (LEFT);
+
+ case '>':
+ return (RIGHT);
+
+ case '2':
+ return (NONASSOC);
+
+ case '0':
+ return (TOKEN);
+
+ case '=':
+ return (PREC);
+ }
+ if (!isalpha(c))
+ return (ILLEGAL);
+
+ while (isalpha(c) || c == '_')
+ {
+ if (p == token_buffer + maxtoken)
+ p = grow_token_buffer(p);
+
+ *p++ = c;
+ c = getc(finput);
+ }
+
+ ungetc(c, finput);
+
+ *p = 0;
+
+ if (strcmp(token_buffer, "token") == 0
+ ||
+ strcmp(token_buffer, "term") == 0)
+ return (TOKEN);
+ else if (strcmp(token_buffer, "nterm") == 0)
+ return (NTERM);
+ else if (strcmp(token_buffer, "type") == 0)
+ return (TYPE);
+ else if (strcmp(token_buffer, "guard") == 0)
+ return (GUARD);
+ else if (strcmp(token_buffer, "union") == 0)
+ return (UNION);
+ else if (strcmp(token_buffer, "expect") == 0)
+ return (EXPECT);
+ else if (strcmp(token_buffer, "start") == 0)
+ return (START);
+ else if (strcmp(token_buffer, "left") == 0)
+ return (LEFT);
+ else if (strcmp(token_buffer, "right") == 0)
+ return (RIGHT);
+ else if (strcmp(token_buffer, "nonassoc") == 0
+ ||
+ strcmp(token_buffer, "binary") == 0)
+ return (NONASSOC);
+ else if (strcmp(token_buffer, "semantic_parser") == 0)
+ return (SEMANTIC_PARSER);
+ else if (strcmp(token_buffer, "pure_parser") == 0)
+ return (PURE_PARSER);
+ else if (strcmp(token_buffer, "prec") == 0)
+ return (PREC);
+ else if (strcmp(token_buffer, "name") == 0)
+ return (PARSER_NAME);
+ else if (strcmp(token_buffer, "define") == 0)
+ return (DEFINE_SYM);
+ else if (strcmp(token_buffer, "header") == 0)
+ { c=getc(finput);
+ if(c=='{')
+ return (PERCENT_LEFT_CURLY_HEADER);
+ else return (ILLEGAL);
+ }
+ else return (ILLEGAL);
+}
diff --git a/tools/bison++/lex.h b/tools/bison++/lex.h
new file mode 100644
index 000000000..70589d13a
--- /dev/null
+++ b/tools/bison++/lex.h
@@ -0,0 +1,52 @@
+/* Token type definitions for bison's input reader,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#define ENDFILE 0
+#define IDENTIFIER 1
+#define COMMA 2
+#define COLON 3
+#define SEMICOLON 4
+#define BAR 5
+#define LEFT_CURLY 6
+#define TWO_PERCENTS 7
+#define PERCENT_LEFT_CURLY 8
+#define TOKEN 9
+#define NTERM 10
+#define GUARD 11
+#define TYPE 12
+#define UNION 13
+#define START 14
+#define LEFT 15
+#define RIGHT 16
+#define NONASSOC 17
+#define PREC 18
+#define SEMANTIC_PARSER 19
+#define PURE_PARSER 20
+#define TYPENAME 21
+#define NUMBER 22
+#define EXPECT 23
+
+#define PERCENT_LEFT_CURLY_HEADER 24
+#define PARSER_NAME 25
+#define DEFINE_SYM 26
+
+#define ILLEGAL 27
+
+#define MAXTOKEN 1024
diff --git a/tools/bison++/lr0.cc b/tools/bison++/lr0.cc
new file mode 100644
index 000000000..ab4c46a09
--- /dev/null
+++ b/tools/bison++/lr0.cc
@@ -0,0 +1,702 @@
+/* Generate the nondeterministic finite state machine for bison,
+ Copyright (C) 1984, 1986, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* See comments in state.h for the data structures that represent it.
+ The entry point is generate_states. */
+
+#include <stdio.h>
+#include "system.h"
+#include "machine.h"
+#include "new.h"
+#include "gram.h"
+#include "state.h"
+
+
+extern char *nullable;
+extern short *itemset;
+extern short *itemsetend;
+
+
+int nstates;
+int final_state;
+core *first_state;
+shifts *first_shift;
+reductions *first_reduction;
+
+int get_state(int);
+core *new_state(int);
+
+void new_itemsets();
+void append_states();
+void initialize_states();
+void save_shifts();
+void save_reductions();
+void augment_automaton();
+void insert_start_shift();
+extern void initialize_closure(int);
+extern void closure(short*,int);
+extern void finalize_closure();
+extern void toomany(char*);
+
+static core *this_state;
+static core *last_state;
+static shifts *last_shift;
+static reductions *last_reduction;
+
+static int nshifts;
+static short *shift_symbol;
+
+static short *redset;
+static short *shiftset;
+
+static short **kernel_base;
+static short **kernel_end;
+static short *kernel_items;
+
+/* hash table for states, to recognize equivalent ones. */
+
+#define STATE_TABLE_SIZE 1009
+static core **state_table;
+
+
+
+void
+allocate_itemsets()
+{
+ register short *itemp;
+ register int symbol;
+ register int i;
+ register int count;
+ register short *symbol_count;
+
+ count = 0;
+ symbol_count = NEW2(nsyms, short);
+
+ itemp = ritem;
+ symbol = *itemp++;
+ while (symbol)
+ {
+ if (symbol > 0)
+ {
+ count++;
+ symbol_count[symbol]++;
+ }
+ symbol = *itemp++;
+ }
+
+ /* see comments before new_itemsets. All the vectors of items
+ live inside kernel_items. The number of active items after
+ some symbol cannot be more than the number of times that symbol
+ appears as an item, which is symbol_count[symbol].
+ We allocate that much space for each symbol. */
+
+ kernel_base = NEW2(nsyms, short *);
+ kernel_items = NEW2(count, short);
+
+ count = 0;
+ for (i = 0; i < nsyms; i++)
+ {
+ kernel_base[i] = kernel_items + count;
+ count += symbol_count[i];
+ }
+
+ shift_symbol = symbol_count;
+ kernel_end = NEW2(nsyms, short *);
+}
+
+
+void
+allocate_storage()
+{
+ allocate_itemsets();
+
+ shiftset = NEW2(nsyms, short);
+ redset = NEW2(nrules + 1, short);
+ state_table = NEW2(STATE_TABLE_SIZE, core *);
+}
+
+
+void
+free_storage()
+{
+ FREE(shift_symbol);
+ FREE(redset);
+ FREE(shiftset);
+ FREE(kernel_base);
+ FREE(kernel_end);
+ FREE(kernel_items);
+ FREE(state_table);
+}
+
+
+
+/* compute the nondeterministic finite state machine (see state.h for details)
+from the grammar. */
+void
+generate_states()
+{
+ allocate_storage();
+ initialize_closure(nitems);
+ initialize_states();
+
+ while (this_state)
+ {
+ /* Set up ruleset and itemset for the transitions out of this state.
+ ruleset gets a 1 bit for each rule that could reduce now.
+ itemset gets a vector of all the items that could be accepted next. */
+ closure(this_state->items, this_state->nitems);
+ /* record the reductions allowed out of this state */
+ save_reductions();
+ /* find the itemsets of the states that shifts can reach */
+ new_itemsets();
+ /* find or create the core structures for those states */
+ append_states();
+
+ /* create the shifts structures for the shifts to those states,
+ now that the state numbers transitioning to are known */
+ if (nshifts > 0)
+ save_shifts();
+
+ /* states are queued when they are created; process them all */
+ this_state = this_state->next;
+ }
+
+ /* discard various storage */
+ finalize_closure();
+ free_storage();
+
+ /* set up initial and final states as parser wants them */
+ augment_automaton();
+}
+
+
+
+/* Find which symbols can be shifted in the current state,
+ and for each one record which items would be active after that shift.
+ Uses the contents of itemset.
+ shift_symbol is set to a vector of the symbols that can be shifted.
+ For each symbol in the grammar, kernel_base[symbol] points to
+ a vector of item numbers activated if that symbol is shifted,
+ and kernel_end[symbol] points after the end of that vector. */
+void
+new_itemsets()
+{
+ register int i;
+ register int shiftcount;
+ register short *isp;
+ register short *ksp;
+ register int symbol;
+
+#ifdef TRACE
+ fprintf(stderr, "Entering new_itemsets\n");
+#endif
+
+ for (i = 0; i < nsyms; i++)
+ kernel_end[i] = NULL;
+
+ shiftcount = 0;
+
+ isp = itemset;
+
+ while (isp < itemsetend)
+ {
+ i = *isp++;
+ symbol = ritem[i];
+ if (symbol > 0)
+ {
+ ksp = kernel_end[symbol];
+
+ if (!ksp)
+ {
+ shift_symbol[shiftcount++] = symbol;
+ ksp = kernel_base[symbol];
+ }
+
+ *ksp++ = i + 1;
+ kernel_end[symbol] = ksp;
+ }
+ }
+
+ nshifts = shiftcount;
+}
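
What new_itemsets() computes, restated as a toy: group the active items by the symbol that follows the dot, and record that shifting that symbol activates each of those items advanced by one position. The sketch below uses invented item numbers and a std::map purely for brevity; the real code fills the preallocated kernel_base/kernel_end vectors instead.

    #include <cassert>
    #include <map>
    #include <vector>

    int main()
    {
      // ritem-style encoding (numbers invented): positive entries are symbols,
      // a rule ends at a negative entry, and an item number is an index here.
      std::vector<short> ritem_   = { 5, 3, 4, -1, 4, 6, -2 };
      std::vector<short> itemset_ = { 1, 4 };      // active items (dot positions)

      std::map<short, std::vector<short>> kernel;  // symbol -> items after shift
      for (short i : itemset_)
        if (ritem_[i] > 0)
          kernel[ritem_[i]].push_back(i + 1);

      assert(kernel[3] == (std::vector<short>{ 2 }));  // shifting 3 activates item 2
      assert(kernel[4] == (std::vector<short>{ 5 }));  // shifting 4 activates item 5
      return 0;
    }
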
+
+
+
+/* Use the information computed by new_itemsets to find the state numbers
+ reached by each shift transition from the current state.
+
+ shiftset is set up as a vector of state numbers of those states. */
+void
+append_states()
+{
+ register int i;
+ register int j;
+ register int symbol;
+
+#ifdef TRACE
+ fprintf(stderr, "Entering append_states\n");
+#endif
+
+ /* first sort shift_symbol into increasing order */
+
+ for (i = 1; i < nshifts; i++)
+ {
+ symbol = shift_symbol[i];
+ j = i;
+ while (j > 0 && shift_symbol[j - 1] > symbol)
+ {
+ shift_symbol[j] = shift_symbol[j - 1];
+ j--;
+ }
+ shift_symbol[j] = symbol;
+ }
+
+ for (i = 0; i < nshifts; i++)
+ {
+ symbol = shift_symbol[i];
+ shiftset[i] = get_state(symbol);
+ }
+}
+
+
+
+/* find the state number for the state we would get to
+(from the current state) by shifting symbol.
+Create a new state if no equivalent one exists already.
+Used by append_states */
+
+int
+get_state(int symbol)
+{
+ register int key;
+ register short *isp1;
+ register short *isp2;
+ register short *iend;
+ register core *sp;
+ register int found;
+
+ int n;
+
+#ifdef TRACE
+ fprintf(stderr, "Entering get_state, symbol = %d\n", symbol);
+#endif
+
+ isp1 = kernel_base[symbol];
+ iend = kernel_end[symbol];
+ n = iend - isp1;
+
+ /* add up the target state's active item numbers to get a hash key */
+ key = 0;
+ while (isp1 < iend)
+ key += *isp1++;
+
+ key = key % STATE_TABLE_SIZE;
+
+ sp = state_table[key];
+
+ if (sp)
+ {
+ found = 0;
+ while (!found)
+ {
+ if (sp->nitems == n)
+ {
+ found = 1;
+ isp1 = kernel_base[symbol];
+ isp2 = sp->items;
+
+ while (found && isp1 < iend)
+ {
+ if (*isp1++ != *isp2++)
+ found = 0;
+ }
+ }
+
+ if (!found)
+ {
+ if (sp->link)
+ {
+ sp = sp->link;
+ }
+ else /* bucket exhausted and no match */
+ {
+ sp = sp->link = new_state(symbol);
+ found = 1;
+ }
+ }
+ }
+ }
+ else /* bucket is empty */
+ {
+ state_table[key] = sp = new_state(symbol);
+ }
+
+ return (sp->number);
+}
+
+
+
+/* subroutine of get_state. create a new state for those items, if necessary. */
+
+core *
+new_state(int symbol)
+{
+ register int n;
+ register core *p;
+ register short *isp1;
+ register short *isp2;
+ register short *iend;
+
+#ifdef TRACE
+ fprintf(stderr, "Entering new_state, symbol = %d\n", symbol);
+#endif
+
+ if (nstates >= MAXSHORT)
+ toomany("states");
+
+ isp1 = kernel_base[symbol];
+ iend = kernel_end[symbol];
+ n = iend - isp1;
+
+ p = (core *) xmalloc((unsigned) (sizeof(core) + (n - 1) * sizeof(short)));
+ p->accessing_symbol = symbol;
+ p->number = nstates;
+ p->nitems = n;
+
+ isp2 = p->items;
+ while (isp1 < iend)
+ *isp2++ = *isp1++;
+
+ last_state->next = p;
+ last_state = p;
+
+ nstates++;
+
+ return (p);
+}
+
+
+void
+initialize_states()
+{
+ register core *p;
+/* register unsigned *rp1; JF unused */
+/* register unsigned *rp2; JF unused */
+/* register unsigned *rend; JF unused */
+
+ p = (core *) xmalloc((unsigned) (sizeof(core) - sizeof(short)));
+ first_state = last_state = this_state = p;
+ nstates = 1;
+}
+
+
+void
+save_shifts()
+{
+ register shifts *p;
+ register short *sp1;
+ register short *sp2;
+ register short *send;
+
+ p = (shifts *) xmalloc((unsigned) (sizeof(shifts) +
+ (nshifts - 1) * sizeof(short)));
+
+ p->number = this_state->number;
+ p->nshifts = nshifts;
+
+ sp1 = shiftset;
+ sp2 = p->internalShifts;
+ send = shiftset + nshifts;
+
+ while (sp1 < send)
+ *sp2++ = *sp1++;
+
+ if (last_shift)
+ {
+ last_shift->next = p;
+ last_shift = p;
+ }
+ else
+ {
+ first_shift = p;
+ last_shift = p;
+ }
+}
+
+
+
+/* find which rules can be used for reduction transitions from the current state
+ and make a reductions structure for the state to record their rule numbers. */
+void
+save_reductions()
+{
+ register short *isp;
+ register short *rp1;
+ register short *rp2;
+ register int item;
+ register int count;
+ register reductions *p;
+
+ short *rend;
+
+ /* find and count the active items that represent ends of rules */
+
+ count = 0;
+ for (isp = itemset; isp < itemsetend; isp++)
+ {
+ item = ritem[*isp];
+ if (item < 0)
+ {
+ redset[count++] = -item;
+ }
+ }
+
+ /* make a reductions structure and copy the data into it. */
+
+ if (count)
+ {
+ p = (reductions *) xmalloc((unsigned) (sizeof(reductions) +
+ (count - 1) * sizeof(short)));
+
+ p->number = this_state->number;
+ p->nreds = count;
+
+ rp1 = redset;
+ rp2 = p->rules;
+ rend = rp1 + count;
+
+ while (rp1 < rend)
+ *rp2++ = *rp1++;
+
+ if (last_reduction)
+ {
+ last_reduction->next = p;
+ last_reduction = p;
+ }
+ else
+ {
+ first_reduction = p;
+ last_reduction = p;
+ }
+ }
+}
+
+
+
+/* Make sure that the initial state has a shift that accepts the
+grammar's start symbol and goes to the next-to-final state,
+which has a shift going to the final state, which has a shift
+to the termination state.
+Create such states and shifts if they don't happen to exist already. */
+void
+augment_automaton()
+{
+ register int i;
+ register int k;
+/* register int found; JF unused */
+ register core *statep;
+ register shifts *sp;
+ register shifts *sp2;
+ register shifts *sp1;
+
+ sp = first_shift;
+
+ if (sp)
+ {
+ if (sp->number == 0)
+ {
+ k = sp->nshifts;
+ statep = first_state->next;
+
+ /* The states reached by shifts from first_state are numbered 1...K.
+ Look for one reached by start_symbol. */
+ while (statep->accessing_symbol < start_symbol
+ && statep->number < k)
+ statep = statep->next;
+
+ if (statep->accessing_symbol == start_symbol)
+ {
+ /* We already have a next-to-final state.
+ Make sure it has a shift to what will be the final state. */
+ k = statep->number;
+
+ while (sp && sp->number < k)
+ {
+ sp1 = sp;
+ sp = sp->next;
+ }
+
+ if (sp && sp->number == k)
+ {
+ sp2 = (shifts *) xmalloc((unsigned) (sizeof(shifts)
+ + sp->nshifts * sizeof(short)));
+ sp2->number = k;
+ sp2->nshifts = sp->nshifts + 1;
+ sp2->internalShifts[0] = nstates;
+ for (i = sp->nshifts; i > 0; i--)
+ sp2->internalShifts[i] = sp->internalShifts[i - 1];
+
+ /* Patch sp2 into the chain of shifts in place of sp,
+ following sp1. */
+ sp2->next = sp->next;
+ sp1->next = sp2;
+ if (sp == last_shift)
+ last_shift = sp2;
+ FREE(sp);
+ }
+ else
+ {
+ sp2 = NEW(shifts);
+ sp2->number = k;
+ sp2->nshifts = 1;
+ sp2->internalShifts[0] = nstates;
+
+ /* Patch sp2 into the chain of shifts between sp1 and sp. */
+ sp2->next = sp;
+ sp1->next = sp2;
+ if (sp == 0)
+ last_shift = sp2;
+ }
+ }
+ else
+ {
+ /* There is no next-to-final state as yet. */
+ /* Add one more shift in first_shift,
+ going to the next-to-final state (yet to be made). */
+ sp = first_shift;
+
+ sp2 = (shifts *) xmalloc(sizeof(shifts)
+ + sp->nshifts * sizeof(short));
+ sp2->nshifts = sp->nshifts + 1;
+
+ /* Stick this shift into the vector at the proper place. */
+ statep = first_state->next;
+ for (k = 0, i = 0; i < sp->nshifts; k++, i++)
+ {
+ if (statep->accessing_symbol > start_symbol && i == k)
+ sp2->internalShifts[k++] = nstates;
+ sp2->internalShifts[k] = sp->internalShifts[i];
+ statep = statep->next;
+ }
+ if (i == k)
+ sp2->internalShifts[k++] = nstates;
+
+ /* Patch sp2 into the chain of shifts
+ in place of sp, at the beginning. */
+ sp2->next = sp->next;
+ first_shift = sp2;
+ if (last_shift == sp)
+ last_shift = sp2;
+
+ FREE(sp);
+
+ /* Create the next-to-final state, with shift to
+ what will be the final state. */
+ insert_start_shift();
+ }
+ }
+ else
+ {
+ /* The initial state didn't even have any shifts.
+ Give it one shift, to the next-to-final state. */
+ sp = NEW(shifts);
+ sp->nshifts = 1;
+ sp->internalShifts[0] = nstates;
+
+ /* Patch sp into the chain of shifts at the beginning. */
+ sp->next = first_shift;
+ first_shift = sp;
+
+ /* Create the next-to-final state, with shift to
+ what will be the final state. */
+ insert_start_shift();
+ }
+ }
+ else
+ {
+ /* There are no shifts for any state.
+ Make one shift, from the initial state to the next-to-final state. */
+
+ sp = NEW(shifts);
+ sp->nshifts = 1;
+ sp->internalShifts[0] = nstates;
+
+ /* Initialize the chain of shifts with sp. */
+ first_shift = sp;
+ last_shift = sp;
+
+ /* Create the next-to-final state, with shift to
+ what will be the final state. */
+ insert_start_shift();
+ }
+
+ /* Make the final state--the one that follows a shift from the
+ next-to-final state.
+ The symbol for that shift is 0 (end-of-file). */
+ statep = (core *) xmalloc((unsigned) (sizeof(core) - sizeof(short)));
+ statep->number = nstates;
+ last_state->next = statep;
+ last_state = statep;
+
+ /* Make the shift from the final state to the termination state. */
+ sp = NEW(shifts);
+ sp->number = nstates++;
+ sp->nshifts = 1;
+ sp->internalShifts[0] = nstates;
+ last_shift->next = sp;
+ last_shift = sp;
+
+ /* Note that the variable `final_state' refers to what we sometimes call
+ the termination state. */
+ final_state = nstates;
+
+ /* Make the termination state. */
+ statep = (core *) xmalloc((unsigned) (sizeof(core) - sizeof(short)));
+ statep->number = nstates++;
+ last_state->next = statep;
+ last_state = statep;
+}
+
+
+/* subroutine of augment_automaton.
+ Create the next-to-final state, to which a shift has already been made in
+ the initial state. */
+void
+insert_start_shift()
+{
+ register core *statep;
+ register shifts *sp;
+
+ statep = (core *) xmalloc((unsigned) (sizeof(core) - sizeof(short)));
+ statep->number = nstates;
+ statep->accessing_symbol = start_symbol;
+
+ last_state->next = statep;
+ last_state = statep;
+
+ /* Make a shift from this state to (what will be) the final state. */
+ sp = NEW(shifts);
+ sp->number = nstates++;
+ sp->nshifts = 1;
+ sp->internalShifts[0] = nstates;
+
+ last_shift->next = sp;
+ last_shift = sp;
+}
diff --git a/tools/bison++/machine.h b/tools/bison++/machine.h
new file mode 100644
index 000000000..573a3fadd
--- /dev/null
+++ b/tools/bison++/machine.h
@@ -0,0 +1,39 @@
+/* Define machine-dependencies for bison,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifdef eta10
+#define MAXSHORT 2147483647
+#define MINSHORT -2147483648
+#else
+#define MAXSHORT 32767
+#define MINSHORT -32768
+#endif
+
+#if defined (_MSDOS) && !defined (__GO32__)
+#define BITS_PER_WORD 16
+#define MAXTABLE 16383
+#else
+#define BITS_PER_WORD 32
+#define MAXTABLE 32767
+#endif
+
+#define WORDSIZE(n) (((n) + BITS_PER_WORD - 1) / BITS_PER_WORD)
+#define SETBIT(x, i) ((x)[(i)/BITS_PER_WORD] |= (1<<((i) % BITS_PER_WORD)))
+#define RESETBIT(x, i) ((x)[(i)/BITS_PER_WORD] &= ~(1<<((i) % BITS_PER_WORD)))
+#define BITISSET(x, i) (((x)[(i)/BITS_PER_WORD] & (1<<((i) % BITS_PER_WORD))) != 0)
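
A quick self-contained exercise of the bitset macros above, assuming the 32-bit branch is the one selected (illustration only, not part of the patch):

    #include <cassert>
    #include "machine.h"

    int main()
    {
      unsigned set[WORDSIZE(100)] = { 0 };   /* 100 bits fit in 4 words of 32 */

      SETBIT(set, 0);
      SETBIT(set, 99);
      RESETBIT(set, 0);

      assert(WORDSIZE(100) == 4);
      assert(!BITISSET(set, 0));
      assert(BITISSET(set, 99));
      return 0;
    }
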
diff --git a/tools/bison++/main.cc b/tools/bison++/main.cc
new file mode 100644
index 000000000..874f2587f
--- /dev/null
+++ b/tools/bison++/main.cc
@@ -0,0 +1,184 @@
+/* Top level entry point of bison,
+ Copyright (C) 1984, 1986, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <stdio.h>
+#include <string>
+using namespace std;
+#include "system.h"
+#include "machine.h" /* JF for MAXSHORT */
+
+bool bison_compability;
+extern int lineno;
+extern int verboseflag;
+
+/* Nonzero means failure has been detected; don't write a parser file. */
+int failure;
+
+/* The name this program was run with, for messages. */
+char *program_name;
+
+extern void getargs(int,char**), openfiles(), reader(), reduce_grammar();
+extern void set_derives(), set_nullable(), generate_states();
+extern void lalr(), initialize_conflicts(), verbose(), terse();
+extern void output(), done(int);
+void fatal(const char*);
+extern int fixed_outfiles;
+
+string getName(string FullPath)
+{
+ int i = FullPath.length()-2;
+ while(i > 0 && FullPath[i] != '/') i--;
+
+ if(i<=0 || i+1>= FullPath.length()-1) return FullPath;
+ else return FullPath.substr(i+1,FullPath.length()-1);
+}
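
getName() keeps only the component after the last '/', so the dispatch in main() below can compare the invocation name against bison++ and yacc. A standalone check with invented paths; the function body is copied verbatim from above under a different name:

    #include <cassert>
    #include <string>
    using namespace std;

    static string getName_(string FullPath)
    {
      int i = FullPath.length()-2;
      while(i > 0 && FullPath[i] != '/') i--;

      if(i<=0 || i+1>= FullPath.length()-1) return FullPath;
      else return FullPath.substr(i+1,FullPath.length()-1);
    }

    int main()
    {
      assert(getName_("/usr/local/bin/bison++") == "bison++");
      assert(getName_("yacc") == "yacc");   /* no slash: returned unchanged */
      return 0;
    }
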
+
+
+/* VMS complained about using `int'. */
+int
+main(int argc, char** argv)
+{
+ program_name = argv[0];
+ failure = 0;
+ lineno = 0;
+ getargs(argc, argv);
+ string onlyName = getName(string(program_name));
+ if(onlyName != "bison++")
+ {
+ bison_compability=true;
+ }
+ else
+ {
+ if(onlyName != "yacc")
+ fixed_outfiles = 1;
+
+ bison_compability=false;
+ }
+
+ openfiles();
+
+ /* read the input. Copy some parts of it to fguard, faction, ftable and fattrs.
+ In file reader.c.
+ The other parts are recorded in the grammar; see gram.h. */
+ reader();
+
+ /* find useless nonterminals and productions and reduce the grammar. In
+ file reduce.c */
+ reduce_grammar();
+
+ /* record other info about the grammar. In files derives and nullable. */
+ set_derives();
+ set_nullable();
+
+ /* convert to nondeterministic finite state machine. In file LR0.
+ See state.h for more info. */
+ generate_states();
+
+ /* make it deterministic. In file lalr. */
+ lalr();
+
+ /* Find and record any conflicts: places where one token of lookahead is not
+ enough to disambiguate the parsing. In file conflicts.
+ Currently this does not do anything to resolve them;
+ the trivial form of conflict resolution that exists is done in output. */
+ initialize_conflicts();
+
+ /* print information about results, if requested. In file print. */
+ if (verboseflag)
+ verbose();
+ else
+ terse();
+
+ /* output the tables and the parser to ftable. In file output. */
+ output();
+ done(failure);
+}
+
+
+/* functions to report errors which prevent a parser from being generated */
+
+
+
+void
+toomany(char* s)
+{
+ char buffer[200];
+
+ /* JF new msg */
+ sprintf(buffer, "limit of %d exceeded, too many %s", MAXSHORT, s);
+ fatal(buffer);
+}
+
+
+void
+berror(char* s)
+{
+ fprintf(stderr, "internal error, %s\n", s);
+ abort();
+}
+
+
+void
+fatal(const char* s)
+{
+ extern char *infile;
+
+ if (infile == 0)
+ fprintf(stderr, "fatal error: %s\n", s);
+ else
+ fprintf(stderr, "\"%s\", line %d: %s\n", infile, lineno, s);
+ done(1);
+}
+
+void
+fatals(const char* fmt,void* x1)
+{
+ char buffer[200];
+
+ sprintf(buffer, fmt, x1);
+ fatal(buffer);
+}
+
+
+void
+fatals(const char* fmt,void* x1,void* x2)
+{
+ char buffer[200];
+
+ sprintf(buffer, fmt, x1,x2);
+ fatal(buffer);
+}
+
+void
+fatals(const char* fmt,void* x1,void* x2,void* x3)
+{
+ char buffer[200];
+
+ sprintf(buffer, fmt, x1,x2,x3);
+ fatal(buffer);
+}
+
+void
+fatals(const char* fmt,void* x1,void* x2,void* x3,void* x4)
+{
+ char buffer[200];
+
+ sprintf(buffer, fmt, x1,x2,x3,x4);
+ fatal(buffer);
+}
diff --git a/tools/bison++/mdate-sh b/tools/bison++/mdate-sh
new file mode 100644
index 000000000..37171f21f
--- /dev/null
+++ b/tools/bison++/mdate-sh
@@ -0,0 +1,92 @@
+#!/bin/sh
+# Get modification time of a file or directory and pretty-print it.
+# Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
+# written by Ulrich Drepper <drepper@gnu.ai.mit.edu>, June 1995
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+# Prevent date giving response in another language.
+LANG=C
+export LANG
+LC_ALL=C
+export LC_ALL
+LC_TIME=C
+export LC_TIME
+
+# Get the extended ls output of the file or directory.
+# On HPUX /bin/sh, "set" interprets "-rw-r--r--" as options, so the "x" below.
+if ls -L /dev/null 1>/dev/null 2>&1; then
+ set - x`ls -L -l -d $1`
+else
+ set - x`ls -l -d $1`
+fi
+# The month is at least the fourth argument
+# (3 shifts here, the next inside the loop).
+shift
+shift
+shift
+
+# Find the month. Next argument is day, followed by the year or time.
+month=
+until test $month
+do
+ shift
+ case $1 in
+ Jan) month=January; nummonth=1;;
+ Feb) month=February; nummonth=2;;
+ Mar) month=March; nummonth=3;;
+ Apr) month=April; nummonth=4;;
+ May) month=May; nummonth=5;;
+ Jun) month=June; nummonth=6;;
+ Jul) month=July; nummonth=7;;
+ Aug) month=August; nummonth=8;;
+ Sep) month=September; nummonth=9;;
+ Oct) month=October; nummonth=10;;
+ Nov) month=November; nummonth=11;;
+ Dec) month=December; nummonth=12;;
+ esac
+done
+
+day=$2
+
+# Here we have to deal with the problem that the ls output gives either
+# the time of day or the year.
+case $3 in
+ *:*) set `date`; eval year=\$$#
+ case $2 in
+ Jan) nummonthtod=1;;
+ Feb) nummonthtod=2;;
+ Mar) nummonthtod=3;;
+ Apr) nummonthtod=4;;
+ May) nummonthtod=5;;
+ Jun) nummonthtod=6;;
+ Jul) nummonthtod=7;;
+ Aug) nummonthtod=8;;
+ Sep) nummonthtod=9;;
+ Oct) nummonthtod=10;;
+ Nov) nummonthtod=11;;
+ Dec) nummonthtod=12;;
+ esac
+ # For the first six month of the year the time notation can also
+ # be used for files modified in the last year.
+ if (expr $nummonth \> $nummonthtod) > /dev/null;
+ then
+ year=`expr $year - 1`
+ fi;;
+ *) year=$3;;
+esac
+
+# The result.
+echo $day $month $year
diff --git a/tools/bison++/missing b/tools/bison++/missing
new file mode 100644
index 000000000..7789652e8
--- /dev/null
+++ b/tools/bison++/missing
@@ -0,0 +1,190 @@
+#! /bin/sh
+# Common stub for a few missing GNU programs while installing.
+# Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+# Franc,ois Pinard <pinard@iro.umontreal.ca>, 1996.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+# 02111-1307, USA.
+
+if test $# -eq 0; then
+ echo 1>&2 "Try \`$0 --help' for more information"
+ exit 1
+fi
+
+case "$1" in
+
+ -h|--h|--he|--hel|--help)
+ echo "\
+$0 [OPTION]... PROGRAM [ARGUMENT]...
+
+Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an
+error status if there is no known handling for PROGRAM.
+
+Options:
+ -h, --help display this help and exit
+ -v, --version output version information and exit
+
+Supported PROGRAM values:
+ aclocal touch file \`aclocal.m4'
+ autoconf touch file \`configure'
+ autoheader touch file \`config.h.in'
+ automake touch all \`Makefile.in' files
+ bison create \`y.tab.[ch]', if possible, from existing .[ch]
+ flex create \`lex.yy.c', if possible, from existing .c
+ lex create \`lex.yy.c', if possible, from existing .c
+ makeinfo touch the output file
+ yacc create \`y.tab.[ch]', if possible, from existing .[ch]"
+ ;;
+
+ -v|--v|--ve|--ver|--vers|--versi|--versio|--version)
+ echo "missing - GNU libit 0.0"
+ ;;
+
+ -*)
+ echo 1>&2 "$0: Unknown \`$1' option"
+ echo 1>&2 "Try \`$0 --help' for more information"
+ exit 1
+ ;;
+
+ aclocal)
+ echo 1>&2 "\
+WARNING: \`$1' is missing on your system. You should only need it if
+ you modified \`acinclude.m4' or \`configure.in'. You might want
+ to install the \`Automake' and \`Perl' packages. Grab them from
+ any GNU archive site."
+ touch aclocal.m4
+ ;;
+
+ autoconf)
+ echo 1>&2 "\
+WARNING: \`$1' is missing on your system. You should only need it if
+ you modified \`configure.in'. You might want to install the
+ \`Autoconf' and \`GNU m4' packages. Grab them from any GNU
+ archive site."
+ touch configure
+ ;;
+
+ autoheader)
+ echo 1>&2 "\
+WARNING: \`$1' is missing on your system. You should only need it if
+ you modified \`acconfig.h' or \`configure.in'. You might want
+ to install the \`Autoconf' and \`GNU m4' packages. Grab them
+ from any GNU archive site."
+ files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' configure.in`
+ test -z "$files" && files="config.h"
+ touch_files=
+ for f in $files; do
+ case "$f" in
+ *:*) touch_files="$touch_files "`echo "$f" |
+ sed -e 's/^[^:]*://' -e 's/:.*//'`;;
+ *) touch_files="$touch_files $f.in";;
+ esac
+ done
+ touch $touch_files
+ ;;
+
+ automake)
+ echo 1>&2 "\
+WARNING: \`$1' is missing on your system. You should only need it if
+ you modified \`Makefile.am', \`acinclude.m4' or \`configure.in'.
+ You might want to install the \`Automake' and \`Perl' packages.
+ Grab them from any GNU archive site."
+ find . -type f -name Makefile.am -print |
+ sed 's/\.am$/.in/' |
+ while read f; do touch "$f"; done
+ ;;
+
+ bison|yacc)
+ echo 1>&2 "\
+WARNING: \`$1' is missing on your system. You should only need it if
+ you modified a \`.y' file. You may need the \`Bison' package
+ in order for those modifications to take effect. You can get
+ \`Bison' from any GNU archive site."
+ rm -f y.tab.c y.tab.h
+ if [ $# -ne 1 ]; then
+ eval LASTARG="\${$#}"
+ case "$LASTARG" in
+ *.y)
+ SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'`
+ if [ -f "$SRCFILE" ]; then
+ cp "$SRCFILE" y.tab.c
+ fi
+ SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'`
+ if [ -f "$SRCFILE" ]; then
+ cp "$SRCFILE" y.tab.h
+ fi
+ ;;
+ esac
+ fi
+ if [ ! -f y.tab.h ]; then
+ echo >y.tab.h
+ fi
+ if [ ! -f y.tab.c ]; then
+ echo 'main() { return 0; }' >y.tab.c
+ fi
+ ;;
+
+ lex|flex)
+ echo 1>&2 "\
+WARNING: \`$1' is missing on your system. You should only need it if
+ you modified a \`.l' file. You may need the \`Flex' package
+ in order for those modifications to take effect. You can get
+ \`Flex' from any GNU archive site."
+ rm -f lex.yy.c
+ if [ $# -ne 1 ]; then
+ eval LASTARG="\${$#}"
+ case "$LASTARG" in
+ *.l)
+ SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'`
+ if [ -f "$SRCFILE" ]; then
+ cp "$SRCFILE" lex.yy.c
+ fi
+ ;;
+ esac
+ fi
+ if [ ! -f lex.yy.c ]; then
+ echo 'main() { return 0; }' >lex.yy.c
+ fi
+ ;;
+
+ makeinfo)
+ echo 1>&2 "\
+WARNING: \`$1' is missing on your system. You should only need it if
+ you modified a \`.texi' or \`.texinfo' file, or any other file
+ indirectly affecting the aspect of the manual. The spurious
+ call might also be the consequence of using a buggy \`make' (AIX,
+ DU, IRIX). You might want to install the \`Texinfo' package or
+ the \`GNU make' package. Grab either from any GNU archive site."
+ file=`echo "$*" | sed -n 's/.*-o \([^ ]*\).*/\1/p'`
+ if test -z "$file"; then
+ file=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'`
+ file=`sed -n '/^@setfilename/ { s/.* \([^ ]*\) *$/\1/; p; q; }' $file`
+ fi
+ touch $file
+ ;;
+
+ *)
+ echo 1>&2 "\
+WARNING: \`$1' is needed, and you do not seem to have it handy on your
+ system. You might have modified some files without having the
+ proper tools for further handling them. Check the \`README' file,
+ it often tells you about the needed prerequirements for installing
+ this package. You may also peek at any GNU archive site, in case
+ some other package would contain this missing \`$1' program."
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/tools/bison++/mkinstalldirs b/tools/bison++/mkinstalldirs
new file mode 100644
index 000000000..0e2937731
--- /dev/null
+++ b/tools/bison++/mkinstalldirs
@@ -0,0 +1,35 @@
+#!/bin/sh
+# Make directory hierarchy.
+# Written by Noah Friedman <friedman@prep.ai.mit.edu>
+# Public domain.
+
+defaultIFS='
+'
+IFS="${IFS-${defaultIFS}}"
+
+errstatus=0
+
+for file in ${1+"$@"} ; do
+ oIFS="${IFS}"
+ # Some sh's can't handle IFS=/ for some reason.
+ IFS='%'
+ set - `echo ${file} | sed -e 's@/@%@g' -e 's@^%@/@'`
+ IFS="${oIFS}"
+
+ pathcomp=''
+
+ for d in ${1+"$@"} ; do
+ pathcomp="${pathcomp}${d}"
+
+ if test ! -d "${pathcomp}"; then
+ echo "mkdir $pathcomp" 1>&2
+ mkdir "${pathcomp}" || errstatus=$?
+ fi
+
+ pathcomp="${pathcomp}/"
+ done
+done
+
+exit $errstatus
+
+# eof
diff --git a/tools/bison++/new.h b/tools/bison++/new.h
new file mode 100644
index 000000000..490a6415a
--- /dev/null
+++ b/tools/bison++/new.h
@@ -0,0 +1,31 @@
+/* Storage allocation interface for bison,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#define NEW(t) ((t *) xmalloc((unsigned) sizeof(t)))
+#define NEW2(n, t) ((t *) xmalloc((unsigned) ((n) * sizeof(t))))
+
+#ifdef __STDC__
+#define FREE(x) (x ? (void) free((char *) (x)) : (void)0)
+#else
+#define FREE(x) ((x) != 0 && (free ((char *) (x)), 0))
+#endif
+
+extern char *xmalloc(unsigned);
+extern char *xrealloc(char*,unsigned);
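+
+/* Illustrative sketch (not part of the original header): NEW and NEW2 are
+   thin wrappers over xmalloc, so a typical table allocation elsewhere in
+   bison looks like
+
+       short *row = NEW2(ntokens, short);
+       ...
+       FREE(row);
+
+   NEW2 allocates space for `ntokens' shorts, and FREE(0) is a harmless
+   no-op; `row' and `ntokens' are placeholder names here. */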
diff --git a/tools/bison++/nullable.cc b/tools/bison++/nullable.cc
new file mode 100644
index 000000000..b85dec610
--- /dev/null
+++ b/tools/bison++/nullable.cc
@@ -0,0 +1,136 @@
+/* Part of the bison parser generator,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* set up nullable, a vector saying which nonterminals can expand into the null string.
+ nullable[i - ntokens] is nonzero if symbol i can do so. */
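+
+/* Worked example (an illustrative sketch, not in the original source):
+   for a grammar such as
+
+       s : a b ;      a : ;      b : a ;
+
+   the first pass over ritem marks `a' nullable (its right hand side is
+   empty) and queues it; draining the queue then decrements the counts of
+   the rules whose right hand sides mention `a', which marks `b' nullable,
+   and in turn `s'. */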
+
+#include <stdio.h>
+#include "system.h"
+#include "types.h"
+#include "gram.h"
+#include "new.h"
+
+
+char *nullable;
+
+
+void
+set_nullable()
+{
+ register short *r;
+ register short *s1;
+ register short *s2;
+ register int ruleno;
+ register int symbol;
+ register shorts *p;
+
+ short *squeue;
+ short *rcount;
+ shorts **rsets;
+ shorts *relts;
+ char any_tokens;
+ short *r1;
+
+#ifdef TRACE
+ fprintf(stderr, "Entering set_nullable");
+#endif
+
+ nullable = NEW2(nvars, char) - ntokens;
+
+ squeue = NEW2(nvars, short);
+ s1 = s2 = squeue;
+
+ rcount = NEW2(nrules + 1, short);
+ rsets = NEW2(nvars, shorts *) - ntokens;
+ /* This is said to be more elements than we actually use.
+ Supposedly nitems - nrules is enough.
+ But why take the risk? */
+ relts = NEW2(nitems + nvars + 1, shorts);
+ p = relts;
+
+ r = ritem;
+ while (*r)
+ {
+ if (*r < 0)
+ {
+ symbol = rlhs[-(*r++)];
+ if (symbol >= 0 && !nullable[symbol])
+ {
+ nullable[symbol] = 1;
+ *s2++ = symbol;
+ }
+ }
+ else
+ {
+ r1 = r;
+ any_tokens = 0;
+ for (symbol = *r++; symbol > 0; symbol = *r++)
+ {
+ if (ISTOKEN(symbol))
+ any_tokens = 1;
+ }
+
+ if (!any_tokens)
+ {
+ ruleno = -symbol;
+ r = r1;
+ for (symbol = *r++; symbol > 0; symbol = *r++)
+ {
+ rcount[ruleno]++;
+ p->next = rsets[symbol];
+ p->value = ruleno;
+ rsets[symbol] = p;
+ p++;
+ }
+ }
+ }
+ }
+
+ while (s1 < s2)
+ {
+ p = rsets[*s1++];
+ while (p)
+ {
+ ruleno = p->value;
+ p = p->next;
+ if (--rcount[ruleno] == 0)
+ {
+ symbol = rlhs[ruleno];
+ if (symbol >= 0 && !nullable[symbol])
+ {
+ nullable[symbol] = 1;
+ *s2++ = symbol;
+ }
+ }
+ }
+ }
+
+ FREE(squeue);
+ FREE(rcount);
+ FREE(rsets + ntokens);
+ FREE(relts);
+}
+
+
+void
+free_nullable()
+{
+ FREE(nullable + ntokens);
+}
diff --git a/tools/bison++/old.c b/tools/bison++/old.c
new file mode 100644
index 000000000..bfaa77ef5
--- /dev/null
+++ b/tools/bison++/old.c
@@ -0,0 +1,6 @@
+/* JF changed to accept/deal with variable args.
+ DO NOT change this to use varargs. It will appear to work
+ but will break on systems that don't have the necessary library
+ functions. This is the ONLY safe way to write such a function. */
+/*VARARGS1*/
+#include <stdio.h>
diff --git a/tools/bison++/output.cc b/tools/bison++/output.cc
new file mode 100644
index 000000000..6753fdac7
--- /dev/null
+++ b/tools/bison++/output.cc
@@ -0,0 +1,1663 @@
+/* Output the generated parsing program for bison,
+ Copyright (C) 1984, 1986, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* functions to output parsing data to various files. Entries are:
+
+ output_headers ()
+
+Output constant strings to the beginning of certain files.
+
+ output_trailers()
+
+Output constant strings to the ends of certain files.
+
+ output ()
+
+Output the parsing tables and the parser code to ftable.
+
+The parser tables consist of these tables.
+Starred ones needed only for the semantic parser.
+
+yytranslate = vector mapping yylex's token numbers into bison's token numbers.
+
+yytname = vector of string-names indexed by bison token number
+
+yyrline = vector of line-numbers of all rules. For yydebug printouts.
+
+yyrhs = vector of items of all rules.
+ This is exactly what ritems contains. For yydebug and for semantic
+ parser.
+
+yyprhs[r] = index in yyrhs of first item for rule r.
+
+yyr1[r] = symbol number of symbol that rule r derives.
+
+yyr2[r] = number of symbols composing right hand side of rule r.
+
+* yystos[s] = the symbol number of the symbol that leads to state s.
+
+yydefact[s] = default rule to reduce with in state s,
+ when yytable doesn't specify something else to do.
+ Zero means the default is an error.
+
+yydefgoto[i] = default state to go to after a reduction of a rule that
+ generates variable ntokens + i, except when yytable
+ specifies something else to do.
+
+yypact[s] = index in yytable of the portion describing state s.
+ The lookahead token's type is used to index that portion
+ to find out what to do.
+
+ If the value in yytable is positive,
+ we shift the token and go to that state.
+
+ If the value is negative, it is minus a rule number to reduce by.
+
+ If the value is zero, the default action from yydefact[s] is used.
+
+yypgoto[i] = the index in yytable of the portion describing
+ what to do after reducing a rule that derives variable i + ntokens.
+ This portion is indexed by the parser state number
+ as of before the text for this nonterminal was read.
+ The value from yytable is the state to go to.
+
+yytable = a vector filled with portions for different uses,
+ found via yypact and yypgoto.
+
+yycheck = a vector indexed in parallel with yytable.
+ It indicates, in a roundabout way, the bounds of the
+ portion you are trying to examine.
+
+ Suppose that the portion of yytable starts at index p
+ and the index to be examined within the portion is i.
+ Then if yycheck[p+i] != i, i is outside the bounds
+ of what is actually allocated, and the default
+ (from yydefact or yydefgoto) should be used.
+ Otherwise, yytable[p+i] should be used.
+
+YYFINAL = the state number of the termination state.
+YYFLAG = most negative short int. Used to flag ??
+YYNTBASE = ntokens.
+
+*/
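+
+/* Illustrative sketch (a restatement of the comment above, not the
+   generated parser itself): the table lookup amounts to
+
+       p = yypact[state];
+       i = p + token;
+       if (i < 0 || i > YYLAST || yycheck[i] != token || yytable[i] == 0)
+         act = yydefact[state];           (fall back to the default)
+       else
+         act = yytable[i];                (positive: shift; negative: reduce)
+
+   where `state', `token', `p', `i' and `act' are placeholder names. */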
+
+#include <stdio.h>
+#include "system.h"
+#include "machine.h"
+#include "new.h"
+#include "files.h"
+#include "gram.h"
+#include "state.h"
+#include "symtab.h"
+
+
+extern bool bison_compability;
+extern int debugflag;
+extern int nolinesflag;
+extern int definesflag;
+char *quoted_filename(char* f);
+extern char **tags;
+extern int tokensetsize;
+extern int final_state;
+extern core **state_table;
+extern shifts **shift_table;
+extern errs **err_table;
+extern reductions **reduction_table;
+extern short *accessing_symbol;
+extern unsigned *LA;
+extern short *LAruleno;
+extern short *lookaheads;
+extern char *consistent;
+extern short *goto_map;
+extern short *from_state;
+extern short *to_state;
+
+extern char* xmalloc(unsigned);
+
+
+void output_token_translations();
+void output_gram();
+void output_stos();
+void output_rule_data();
+void output_defines();
+void output_actions();
+void token_actions();
+void save_row(int);
+void goto_actions();
+void save_column(int,int);
+void sort_actions();
+void pack_table();
+void output_base();
+void output_table();
+void output_check();
+void output_parser();
+void output_program();
+void free_itemset();
+void free_shifts();
+void free_reductions();
+void free_itemsets();
+int action_row(int);
+int default_goto(int);
+int matching_state(int);
+int pack_vector(int);
+
+extern void berror(char*);
+extern void fatal(const char*);
+extern void fatals(const char*,void*);
+extern void fatals(const char*,void*,void*);
+extern void fatals(const char*,void*,void*,void*);
+extern void fatals(const char*,void*,void*,void*,void*);
+extern void fatals(const char*,void*,void*,void*,void*,void*);
+
+static int nvectors;
+static int nentries;
+static short **froms;
+static short **tos;
+static short *tally;
+static short *width;
+static short *actrow;
+static short *state_count;
+static short *order;
+static short *base;
+static short *pos;
+static short *table;
+static short *check;
+static int lowzero;
+static int high;
+
+void output_section(FILE* fin,FILE* fout);
+void output_token_defines_fmt(FILE* file,char* fmt,int notrans);
+extern bucket *errtoken;
+void output_token_defines(FILE* file);
+void output_token_const_def(FILE* file);
+void output_token_const_decl(FILE* file);
+void output_token_enum(FILE* file);
+extern int line_fparser;
+extern int line_fhskel;
+extern char *parser_fname;
+extern char *hskel_fname;
+extern char *version_string;
+
+
+
+
+#define GUARDSTR "\n#include \"%s\"\nextern int yyerror;\n\
+extern int yycost;\nextern char * yymsg;\nextern YYSTYPE yyval;\n\n\
+yyguard(n, yyvsp, yylsp)\nregister int n;\nregister YYSTYPE *yyvsp;\n\
+register YYLTYPE *yylsp;\n\
+{\n yyerror = 0;\nyycost = 0;\n yymsg = 0;\nswitch (n)\n {"
+
+#define ACTSTR "\n#include \"%s\"\nextern YYSTYPE yyval;\
+\nextern int yychar;\
+yyaction(n, yyvsp, yylsp)\nregister int n;\nregister YYSTYPE *yyvsp;\n\
+register YYLTYPE *yylsp;\n{\n switch (n)\n{"
+
+#define ACTSTR_SIMPLE "\n switch (yyn) {\n"
+
+void
+output_before_read()
+{
+ fprintf(ftable, "\n/* A Bison++ parser, made from %s */\n\n", infile);
+ fprintf(ftable, " /* with Bison++ version %s */\n\n", version_string);
+ /* Redefine certain symbols if -p was specified. */
+ if (spec_name_prefix)
+ {
+ fprintf(ftable, "#define yyparse %sparse\n",spec_name_prefix);
+ fprintf(ftable, "#define yylex %slex\n",spec_name_prefix);
+ fprintf(ftable, "#define yyerror %serror\n",spec_name_prefix);
+ fprintf(ftable, "#define yylval %slval\n",spec_name_prefix);
+ fprintf(ftable, "#define yychar %schar\n",spec_name_prefix);
+ fprintf(ftable, "#define yydebug %sdebug\n",spec_name_prefix);
+ }
+ if(bison_compability==false)
+ fprintf(ftable,"#define YY_USE_CLASS\n");
+ output_section(fparser,ftable);
+ if(definesflag) output_section(fhskel,fdefines);
+};
+
+void
+output_headers()
+{
+ if(bison_compability==false)
+ {
+ fprintf(fbisoncomp,"#define YY_USE_CLASS\n");
+ }
+ else
+ {
+ fprintf(fbisoncomp,"/*#define YY_USE_CLASS \n*/");
+ }
+ output_section(fhskel,fbisoncomp);
+ if(definesflag) output_section(fhskel,fdefines);
+ output_section(fparser,ftable);
+ if (pure_parser)
+ {
+ fprintf(ftable, "#define YY_%s_PURE 1\n",parser_name);
+ if(definesflag) fprintf(fdefines, "#define YY_%s_PURE 1\n\n",parser_name);
+ }
+ /* start writing the guard and action files, if they are needed. */
+ if (semantic_parser)
+ fprintf(fguard, GUARDSTR, attrsfile);
+ fprintf(faction, (semantic_parser ? ACTSTR : ACTSTR_SIMPLE), attrsfile);
+ if(definesflag) output_section(fhskel,fdefines);
+ output_section(fparser,ftable);
+
+
+ /* Rename certain symbols if -p was specified. */
+ if (spec_name_prefix)
+ {
+ fprintf(ftable, "#define YY_%s_PARSE %sparse\n",
+ parser_name, spec_name_prefix);
+ fprintf(ftable, "#define YY_%s_LEX %slex\n",
+ parser_name, spec_name_prefix);
+ fprintf(ftable, "#define YY_%s_ERROR %serror\n",
+ parser_name, spec_name_prefix);
+ fprintf(ftable, "#define YY_%s_LVAL %slval\n",
+ parser_name, spec_name_prefix);
+ fprintf(ftable, "#define YY_%s_CHAR %schar\n",
+ parser_name, spec_name_prefix);
+ fprintf(ftable, "#define YY_%s_DEBUG %sdebug\n",
+ parser_name, spec_name_prefix);
+ }
+ if (spec_name_prefix && definesflag)
+ {
+ fprintf(fdefines, "#define YY_%s_PARSE %sparse\n",
+ parser_name, spec_name_prefix);
+ fprintf(fdefines, "#define YY_%s_LEX %slex\n",
+ parser_name, spec_name_prefix);
+ fprintf(fdefines, "#define YY_%s_ERROR %serror\n",
+ parser_name, spec_name_prefix);
+ fprintf(fdefines, "#define YY_%s_LVAL %slval\n",
+ parser_name, spec_name_prefix);
+ fprintf(fdefines, "#define YY_%s_CHAR %schar\n",
+ parser_name, spec_name_prefix);
+ fprintf(fdefines, "#define YY_%s_DEBUG %sdebug\n",
+ parser_name, spec_name_prefix);
+ }
+
+}
+
+
+void
+output_trailers()
+{
+ if(definesflag) output_section(fhskel,fdefines);
+ output_section(fparser,ftable);
+ /* output the definition of YYLTYPE into the fattrs and fdefines files. */
+ if(debugflag)
+ {fprintf(ftable,
+ "#define YY_%s_DEBUG %d\n"
+ ,parser_name,!!debugflag);
+ if (definesflag)
+ fprintf(fdefines,
+ "#define YY_%s_DEBUG %d\n",
+ parser_name,!!debugflag);
+ }
+ if(definesflag) output_section(fhskel,fdefines);
+ output_section(fparser,ftable);
+ /* Now we know whether we need the line-number stack.
+ If we do, write its type into the .tab.h file. */
+ if (yylsp_needed)
+ {
+ /* fattrs winds up in the .tab.c file, before bison.simple. */
+ fprintf(ftable, "#define YYLSP_%s_NEEDED\n",parser_name);
+
+ if (definesflag)
+ {
+ fprintf(fdefines,
+ "#define YY_%s_LSP_NEEDED\n",
+ parser_name);
+ }
+ }
+ if (semantic_parser)
+ {
+ fprintf(fguard, "\n }\n}\n");
+ fprintf(faction, "\n }\n}\n");
+ }
+ else
+ fprintf(faction, "\n}\n");
+}
+
+
+void
+output()
+{
+ int c;
+
+ if (!semantic_parser) /* JF Put out other stuff */
+ {
+ rewind(fattrs);
+ while ((c=getc(fattrs))!=EOF)
+ putc(c,ftable);
+ }
+
+ if (semantic_parser)
+ fprintf(ftable, "#include \"%s\"\n", attrsfile);
+
+
+ free_itemsets();
+ output_defines();
+ output_token_translations();
+/* if (semantic_parser) */
+ /* This is now unconditional because debugging printouts can use it. */
+ output_gram();
+ FREE(ritem);
+ if (semantic_parser)
+ output_stos();
+ output_rule_data();
+ output_actions();
+ output_parser();
+ output_program();
+}
+
+
+void
+output_token_translations()
+{
+ register int i, j;
+/* register short *sp; JF unused */
+
+ if (translations)
+ {
+ fprintf(ftable,
+ "\n#define YYTRANSLATE(x) ((unsigned)(x) <= %d ? yytranslate[x] : %d)\n",
+ max_user_token_number, nsyms);
+
+ if (ntokens < 127) /* play it very safe; check maximum element value. */
+ fprintf(ftable, "\nstatic const char yytranslate[] = { 0");
+ else
+ fprintf(ftable, "\nstatic const short yytranslate[] = { 0");
+
+ j = 10;
+ for (i = 1; i <= max_user_token_number; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ fprintf(ftable, "%6d", token_translations[i]);
+ }
+
+ fprintf(ftable, "\n};\n");
+ }
+ else
+ {
+ fprintf(ftable, "\n#define YYTRANSLATE(x) (x)\n");
+ }
+}
+
+
+void
+output_gram()
+{
+ register int i;
+ register int j;
+ register short *sp;
+
+ /* With the ordinary parser,
+ yyprhs and yyrhs are needed only for yydebug. */
+ if (!semantic_parser)
+ fprintf(ftable, "\n#if YY_%s_DEBUG != 0",parser_name);
+
+ fprintf(ftable, "\nstatic const short yyprhs[] = { 0");
+
+ j = 10;
+ for (i = 1; i <= nrules; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ fprintf(ftable, "%6d", rrhs[i]);
+ }
+
+ fprintf(ftable, "\n};\n");
+
+ fprintf(ftable, "\nstatic const short yyrhs[] = {%6d", ritem[0]);
+
+ j = 10;
+ for (sp = ritem + 1; *sp; sp++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ if (*sp > 0)
+ fprintf(ftable, "%6d", *sp);
+ else
+ fprintf(ftable, " 0");
+ }
+
+ fprintf(ftable, "\n};\n");
+ if(!semantic_parser)
+ fprintf(ftable, "\n#endif\n");
+
+}
+
+
+void
+output_stos()
+{
+ register int i;
+ register int j;
+
+ fprintf(ftable, "\nstatic const short yystos[] = { 0");
+
+ j = 10;
+ for (i = 1; i < nstates; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ fprintf(ftable, "%6d", accessing_symbol[i]);
+ }
+
+ fprintf(ftable, "\n};\n");
+}
+
+
+void
+output_rule_data()
+{
+ register int i;
+ register int j;
+
+ fprintf(ftable,
+ "\n#if (YY_%s_DEBUG != 0) || defined(YY_%s_ERROR_VERBOSE) \nstatic const short yyrline[] = { 0",parser_name,parser_name);
+
+ j = 10;
+ for (i = 1; i <= nrules; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ fprintf(ftable, "%6d", rline[i]);
+ }
+
+ /* Output the table of symbol names. */
+
+ fprintf(ftable,
+ "\n};\n\nstatic const char * const yytname[] = { \"%s\"",
+ tags[0]);
+
+ j = strlen (tags[0]) + 44;
+ for (i = 1; i <= nsyms; i++)
+ {
+ register char *p;
+ putc(',', ftable);
+ j++;
+
+ if (j > 75)
+ {
+ putc('\n', ftable);
+ j = 0;
+ }
+
+ putc ('\"', ftable);
+ j++;
+
+ for (p = tags[i]; p && *p; p++)
+ {
+ if (*p == '"' || *p == '\\')
+ {
+ fprintf(ftable, "\\%c", *p);
+ j += 2;
+ }
+ else if (*p == '\n')
+ {
+ fprintf(ftable, "\\n");
+ j += 2;
+ }
+ else if (*p == '\t')
+ {
+ fprintf(ftable, "\\t");
+ j += 2;
+ }
+ else if (*p == '\b')
+ {
+ fprintf(ftable, "\\b");
+ j += 2;
+ }
+ else if (*p < 040 || *p >= 0177)
+ {
+ fprintf(ftable, "\\%03o", *p);
+ j += 4;
+ }
+ else
+ {
+ putc(*p, ftable);
+ j++;
+ }
+ }
+
+ putc ('\"', ftable);
+ j++;
+ }
+
+ fprintf(ftable, "\n};\n#endif\n\nstatic const short yyr1[] = { 0");
+
+ j = 10;
+ for (i = 1; i <= nrules; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ fprintf(ftable, "%6d", rlhs[i]);
+ }
+
+ FREE(rlhs + 1);
+
+ fprintf(ftable, "\n};\n\nstatic const short yyr2[] = { 0");
+
+ j = 10;
+ for (i = 1; i < nrules; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ fprintf(ftable, "%6d", rrhs[i + 1] - rrhs[i] - 1);
+ }
+
+ putc(',', ftable);
+ if (j >= 10)
+ putc('\n', ftable);
+
+ fprintf(ftable, "%6d\n};\n", nitems - rrhs[nrules] - 1);
+ FREE(rrhs + 1);
+}
+
+
+void
+output_defines()
+{
+ fprintf(ftable, "\n\n#define\tYYFINAL\t\t%d\n", final_state);
+ fprintf(ftable, "#define\tYYFLAG\t\t%d\n", MINSHORT);
+ fprintf(ftable, "#define\tYYNTBASE\t%d\n", ntokens);
+}
+
+
+
+/* compute and output yydefact, yydefgoto, yypact, yypgoto, yytable and yycheck. */
+
+void
+output_actions()
+{
+ nvectors = nstates + nvars;
+
+ froms = NEW2(nvectors, short *);
+ tos = NEW2(nvectors, short *);
+ tally = NEW2(nvectors, short);
+ width = NEW2(nvectors, short);
+
+ token_actions();
+ free_shifts();
+ free_reductions();
+ FREE(lookaheads);
+ FREE(LA);
+ FREE(LAruleno);
+ FREE(accessing_symbol);
+
+ goto_actions();
+ FREE(goto_map + ntokens);
+ FREE(from_state);
+ FREE(to_state);
+
+ sort_actions();
+ pack_table();
+ output_base();
+ output_table();
+ output_check();
+}
+
+
+
+/* figure out the actions for the specified state, indexed by lookahead token type.
+
+ The yydefact table is output now. The detailed info
+ is saved for putting into yytable later. */
+
+void
+token_actions()
+{
+ register int i;
+ register int j;
+ register int k;
+
+ actrow = NEW2(ntokens, short);
+
+ k = action_row(0);
+ fprintf(ftable, "\nstatic const short yydefact[] = {%6d", k);
+ save_row(0);
+
+ j = 10;
+ for (i = 1; i < nstates; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ k = action_row(i);
+ fprintf(ftable, "%6d", k);
+ save_row(i);
+ }
+
+ fprintf(ftable, "\n};\n");
+ FREE(actrow);
+}
+
+
+
+/* Decide what to do for each type of token if seen as the lookahead token in the specified state.
+   The value returned is used as the default action (yydefact) for the state.
+   In addition, actrow is filled with what to do for each kind of token,
+   indexed by symbol number, with zero meaning do the default action.
+   The value MINSHORT, a very negative number, means this situation
+   is an error. The parser recognizes this value specially.
+
+   This is where conflicts are resolved. The loop over lookahead rules
+   considers lower-numbered rules last, and the last rule considered that
+   accepts a token gets to handle it. */
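+
+/* Illustrative example: if in some state one rule can reduce on `)' and
+   `;' while another can reduce only on `,', the first rule wins the
+   count, becomes the state's yydefact entry, and its two explicit actrow
+   entries are cleared to zero ("use the default"); the entry for `,'
+   keeps its explicit reduction. */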
+
+int
+action_row(int state)
+{
+ register int i;
+ register int j;
+ register int k;
+ register int m;
+ register int n;
+ register int count;
+ register int default_rule;
+ register int nreds;
+ register int max;
+ register int rule;
+ register int shift_state;
+ register int symbol;
+ register unsigned mask;
+ register unsigned *wordp;
+ register reductions *redp;
+ register shifts *shiftp;
+ register errs *errp;
+ int nodefault = 0; /* set nonzero to inhibit having any default reduction */
+
+ for (i = 0; i < ntokens; i++)
+ actrow[i] = 0;
+
+ default_rule = 0;
+ nreds = 0;
+ redp = reduction_table[state];
+
+ if (redp)
+ {
+ nreds = redp->nreds;
+
+ if (nreds >= 1)
+ {
+ /* loop over all the rules available here which require lookahead */
+ m = lookaheads[state];
+ n = lookaheads[state + 1];
+
+ for (i = n - 1; i >= m; i--)
+ {
+ rule = - LAruleno[i];
+ wordp = LA + i * tokensetsize;
+ mask = 1;
+
+ /* and find each token which the rule finds acceptable to come next */
+ for (j = 0; j < ntokens; j++)
+ {
+ /* and record this rule as the rule to use if that token follows. */
+ if (mask & *wordp)
+ actrow[j] = rule;
+
+ mask <<= 1;
+ if (mask == 0)
+ {
+ mask = 1;
+ wordp++;
+ }
+ }
+ }
+ }
+ }
+
+ shiftp = shift_table[state];
+
+ /* now see which tokens are allowed for shifts in this state.
+ For them, record the shift as the thing to do. So shift is preferred to reduce. */
+
+ if (shiftp)
+ {
+ k = shiftp->nshifts;
+
+ for (i = 0; i < k; i++)
+ {
+ shift_state = shiftp->internalShifts[i];
+ if (! shift_state) continue;
+
+ symbol = accessing_symbol[shift_state];
+
+ if (ISVAR(symbol))
+ break;
+
+ actrow[symbol] = shift_state;
+
+ /* do not use any default reduction if there is a shift for error */
+
+ if (symbol == error_token_number) nodefault = 1;
+ }
+ }
+
+ errp = err_table[state];
+
+ /* See which tokens are an explicit error in this state
+ (due to %nonassoc). For them, record MINSHORT as the action. */
+
+ if (errp)
+ {
+ k = errp->nerrs;
+
+ for (i = 0; i < k; i++)
+ {
+ symbol = errp->internalErrs[i];
+ actrow[symbol] = MINSHORT;
+ }
+ }
+
+ /* now find the most common reduction and make it the default action for this state. */
+
+ if (nreds >= 1 && ! nodefault)
+ {
+ if (consistent[state])
+ default_rule = redp->rules[0];
+ else
+ {
+ max = 0;
+ for (i = m; i < n; i++)
+ {
+ count = 0;
+ rule = - LAruleno[i];
+
+ for (j = 0; j < ntokens; j++)
+ {
+ if (actrow[j] == rule)
+ count++;
+ }
+
+ if (count > max)
+ {
+ max = count;
+ default_rule = rule;
+ }
+ }
+
+ /* actions which match the default are replaced with zero,
+ which means "use the default" */
+
+ if (max > 0)
+ {
+ for (j = 0; j < ntokens; j++)
+ {
+ if (actrow[j] == default_rule)
+ actrow[j] = 0;
+ }
+
+ default_rule = - default_rule;
+ }
+ }
+ }
+
+  /* If we have no default rule, the default is an error.
+ So replace any action which says "error" with "use default". */
+
+ if (default_rule == 0)
+ for (j = 0; j < ntokens; j++)
+ {
+ if (actrow[j] == MINSHORT)
+ actrow[j] = 0;
+ }
+
+ return (default_rule);
+}
+
+
+void
+save_row(int state)
+{
+ register int i;
+ register int count;
+ register short *sp;
+ register short *sp1;
+ register short *sp2;
+
+ count = 0;
+ for (i = 0; i < ntokens; i++)
+ {
+ if (actrow[i] != 0)
+ count++;
+ }
+
+ if (count == 0)
+ return;
+
+ froms[state] = sp1 = sp = NEW2(count, short);
+ tos[state] = sp2 = NEW2(count, short);
+
+ for (i = 0; i < ntokens; i++)
+ {
+ if (actrow[i] != 0)
+ {
+ *sp1++ = i;
+ *sp2++ = actrow[i];
+ }
+ }
+
+ tally[state] = count;
+ width[state] = sp1[-1] - sp[0] + 1;
+}
+
+
+
+/* figure out what to do after reducing with each rule,
+ depending on the saved state from before the beginning
+ of parsing the data that matched this rule.
+
+ The yydefgoto table is output now. The detailed info
+ is saved for putting into yytable later. */
+
+void
+goto_actions()
+{
+ register int i;
+ register int j;
+ register int k;
+
+ state_count = NEW2(nstates, short);
+
+ k = default_goto(ntokens);
+ fprintf(ftable, "\nstatic const short yydefgoto[] = {%6d", k);
+ save_column(ntokens, k);
+
+ j = 10;
+ for (i = ntokens + 1; i < nsyms; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ k = default_goto(i);
+ fprintf(ftable, "%6d", k);
+ save_column(i, k);
+ }
+
+ fprintf(ftable, "\n};\n");
+ FREE(state_count);
+}
+
+
+
+int
+default_goto(int symbol)
+{
+ register int i;
+ register int m;
+ register int n;
+ register int default_state;
+ register int max;
+
+ m = goto_map[symbol];
+ n = goto_map[symbol + 1];
+
+ if (m == n)
+ return (-1);
+
+ for (i = 0; i < nstates; i++)
+ state_count[i] = 0;
+
+ for (i = m; i < n; i++)
+ state_count[to_state[i]]++;
+
+ max = 0;
+ default_state = -1;
+
+ for (i = 0; i < nstates; i++)
+ {
+ if (state_count[i] > max)
+ {
+ max = state_count[i];
+ default_state = i;
+ }
+ }
+
+ return (default_state);
+}
+
+
+void
+save_column(int symbol, int default_state)
+{
+ register int i;
+ register int m;
+ register int n;
+ register short *sp;
+ register short *sp1;
+ register short *sp2;
+ register int count;
+ register int symno;
+
+ m = goto_map[symbol];
+ n = goto_map[symbol + 1];
+
+ count = 0;
+ for (i = m; i < n; i++)
+ {
+ if (to_state[i] != default_state)
+ count++;
+ }
+
+ if (count == 0)
+ return;
+
+ symno = symbol - ntokens + nstates;
+
+ froms[symno] = sp1 = sp = NEW2(count, short);
+ tos[symno] = sp2 = NEW2(count, short);
+
+ for (i = m; i < n; i++)
+ {
+ if (to_state[i] != default_state)
+ {
+ *sp1++ = from_state[i];
+ *sp2++ = to_state[i];
+ }
+ }
+
+ tally[symno] = count;
+ width[symno] = sp1[-1] - sp[0] + 1;
+}
+
+
+
+/* the next few functions decide how to pack
+ the actions and gotos information into yytable. */
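+
+/* Sketch of the packing scheme: each state's action row (token -> action)
+   and each nonterminal's goto column (state -> state) is a sparse vector
+   of pairs.  pack_vector() slides such a vector to an offset j in yytable
+   so that none of its occupied slots collides with entries already
+   placed, records the original index in yycheck at the same positions,
+   and returns j, which becomes the state's yypact (or the nonterminal's
+   yypgoto) base.  sort_actions() orders the vectors widest and fullest
+   first so they interleave tightly; matching_state() lets identical
+   action rows share one base. */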
+
+void
+sort_actions()
+{
+ register int i;
+ register int j;
+ register int k;
+ register int t;
+ register int w;
+
+ order = NEW2(nvectors, short);
+ nentries = 0;
+
+ for (i = 0; i < nvectors; i++)
+ {
+ if (tally[i] > 0)
+ {
+ t = tally[i];
+ w = width[i];
+ j = nentries - 1;
+
+ while (j >= 0 && (width[order[j]] < w))
+ j--;
+
+ while (j >= 0 && (width[order[j]] == w) && (tally[order[j]] < t))
+ j--;
+
+ for (k = nentries - 1; k > j; k--)
+ order[k + 1] = order[k];
+
+ order[j + 1] = i;
+ nentries++;
+ }
+ }
+}
+
+
+void
+pack_table()
+{
+ register int i;
+ register int place;
+ register int state;
+
+ base = NEW2(nvectors, short);
+ pos = NEW2(nentries, short);
+ table = NEW2(MAXTABLE, short);
+ check = NEW2(MAXTABLE, short);
+
+ lowzero = 0;
+ high = 0;
+
+ for (i = 0; i < nvectors; i++)
+ base[i] = MINSHORT;
+
+ for (i = 0; i < MAXTABLE; i++)
+ check[i] = -1;
+
+ for (i = 0; i < nentries; i++)
+ {
+ state = matching_state(i);
+
+ if (state < 0)
+ place = pack_vector(i);
+ else
+ place = base[state];
+
+ pos[i] = place;
+ base[order[i]] = place;
+ }
+
+ for (i = 0; i < nvectors; i++)
+ {
+ if (froms[i])
+ FREE(froms[i]);
+ if (tos[i])
+ FREE(tos[i]);
+ }
+
+ FREE(froms);
+ FREE(tos);
+ FREE(pos);
+}
+
+
+
+int
+matching_state(int vector)
+{
+ register int i;
+ register int j;
+ register int k;
+ register int t;
+ register int w;
+ register int match;
+ register int prev;
+
+ i = order[vector];
+ if (i >= nstates)
+ return (-1);
+
+ t = tally[i];
+ w = width[i];
+
+ for (prev = vector - 1; prev >= 0; prev--)
+ {
+ j = order[prev];
+ if (width[j] != w || tally[j] != t)
+ return (-1);
+
+ match = 1;
+ for (k = 0; match && k < t; k++)
+ {
+ if (tos[j][k] != tos[i][k] || froms[j][k] != froms[i][k])
+ match = 0;
+ }
+
+ if (match)
+ return (j);
+ }
+
+ return (-1);
+}
+
+
+
+int
+pack_vector(int vector)
+{
+ register int i;
+ register int j;
+ register int k;
+ register int t;
+ register int loc;
+ register int ok;
+ register short *from;
+ register short *to;
+
+ i = order[vector];
+ t = tally[i];
+
+ if (t == 0)
+ berror("pack_vector");
+
+ from = froms[i];
+ to = tos[i];
+
+ for (j = lowzero - from[0]; j < MAXTABLE; j++)
+ {
+ ok = 1;
+
+ for (k = 0; ok && k < t; k++)
+ {
+ loc = j + from[k];
+ if (loc > MAXTABLE)
+ fatals("maximum table size (%d) exceeded",(void*) MAXTABLE);
+
+ if (table[loc] != 0)
+ ok = 0;
+ }
+
+ for (k = 0; ok && k < vector; k++)
+ {
+ if (pos[k] == j)
+ ok = 0;
+ }
+
+ if (ok)
+ {
+ for (k = 0; k < t; k++)
+ {
+ loc = j + from[k];
+ table[loc] = to[k];
+ check[loc] = from[k];
+ }
+
+ while (table[lowzero] != 0)
+ lowzero++;
+
+ if (loc > high)
+ high = loc;
+
+ return (j);
+ }
+ }
+
+ berror("pack_vector");
+ return 0; /* JF keep lint happy */
+}
+
+
+
+/* the following functions output yytable, yycheck
+   and the vectors whose elements index the starts of the portions */
+
+void
+output_base()
+{
+ register int i;
+ register int j;
+
+ fprintf(ftable, "\nstatic const short yypact[] = {%6d", base[0]);
+
+ j = 10;
+ for (i = 1; i < nstates; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ fprintf(ftable, "%6d", base[i]);
+ }
+
+ fprintf(ftable, "\n};\n\nstatic const short yypgoto[] = {%6d", base[nstates]);
+
+ j = 10;
+ for (i = nstates + 1; i < nvectors; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ fprintf(ftable, "%6d", base[i]);
+ }
+
+ fprintf(ftable, "\n};\n");
+ FREE(base);
+}
+
+
+void
+output_table()
+{
+ register int i;
+ register int j;
+
+ fprintf(ftable, "\n\n#define\tYYLAST\t\t%d\n\n", high);
+ fprintf(ftable, "\nstatic const short yytable[] = {%6d", table[0]);
+
+ j = 10;
+ for (i = 1; i <= high; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ fprintf(ftable, "%6d", table[i]);
+ }
+
+ fprintf(ftable, "\n};\n");
+ FREE(table);
+}
+
+
+void
+output_check()
+{
+ register int i;
+ register int j;
+
+ fprintf(ftable, "\nstatic const short yycheck[] = {%6d", check[0]);
+
+ j = 10;
+ for (i = 1; i <= high; i++)
+ {
+ putc(',', ftable);
+
+ if (j >= 10)
+ {
+ putc('\n', ftable);
+ j = 1;
+ }
+ else
+ {
+ j++;
+ }
+
+ fprintf(ftable, "%6d", check[i]);
+ }
+
+ fprintf(ftable, "\n};\n");
+ FREE(check);
+}
+
+
+
+/* copy the parser code into the ftable file at the end. */
+
+void
+output_parser()
+{
+ register int c;
+ output_section(fparser,ftable);
+ rewind(faction);
+ for(c=getc(faction);c!=EOF;c=getc(faction))
+ putc(c,ftable);
+ output_section(fparser,ftable);
+}
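+
+/* Copy one section of the skeleton file `fin' to `fout': characters are
+   copied until a `$' (the section separator) is reached, every `@' is
+   replaced by the parser name, and #line directives referring to the
+   skeleton are emitted unless line directives are suppressed. */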
+void
+output_section(FILE* fin,FILE* fout)
+{
+ register int c;
+ int dummy;
+ int *pcounter=&dummy;
+ char *fil_name;
+ fil_name="?";
+ if(fin==fparser)
+ {pcounter=&line_fparser;fil_name=parser_fname;}
+ else if(fin==fhskel)
+ {pcounter=&line_fhskel;fil_name=hskel_fname;}
+ /* Loop over lines in the standard parser file. */
+ if (!nolinesflag)
+ fprintf(fout, "\n#line %d \"%s\"\n", (*pcounter), quoted_filename(fil_name));
+
+ while (1)
+ {
+
+
+ /* now write out the line... */
+ for ( c = getc(fin); c != '\n' && c != EOF; c = getc(fin))
+ {if (c == '$')
+ {
+ if (!nolinesflag)
+ {//something is wrong "\n/* #line %d \"%s\" */\n#line @\n",
+ fprintf(fout, "\n #line %d \"%s\"\n", (*pcounter), quoted_filename(fil_name));
+ }
+ return;
+ }
+ else if(c=='@')
+ {fprintf(fout,"%s",parser_name);
+ }
+ else
+ putc(c, fout);
+ }
+ if (c == EOF)
+ break;
+ else if(c=='\n') (*pcounter)++;
+ putc(c, fout);
+ }
+}
+
+
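+/* Copy the remainder of the grammar file (after the second `%%') to
+   ftable, watching for `%header{ ... %}' blocks whose contents are also
+   written to the defines file when one is requested.  Strings, character
+   constants, comments and backslash escapes are tracked so the markers
+   are not recognized inside them. */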
+void
+output_program()
+{
+ register int c;
+ extern int lineno;
+ int is_escaped=0,is_commented=0;
+ char quoted='\0',last='\0';
+ int len_match=0,i;
+ char *match_open="%header{";
+ char *match_close="%}";
+ char *match_wait=match_open;
+ if (!nolinesflag)
+ fprintf(ftable, "#line %d \"%s\"\n", lineno, quoted_filename(infile));
+
+
+ for (c = getc(finput);c != EOF;last=c,c = getc(finput))
+ {
+ if(!match_wait[len_match])
+ {if(match_wait==match_open)
+ {match_wait=match_close;
+ if (!nolinesflag && definesflag)
+ fprintf(fdefines, "\n#line %d \"%s\"\n", lineno, quoted_filename(infile));
+
+ }
+ else
+ {match_wait=match_open;}
+ len_match=0;
+ }
+ else if(c!=match_wait[len_match] || is_escaped || is_commented || quoted)
+ {for(i=0;i<len_match;i++)
+ {if(match_wait==match_close && definesflag)
+ putc(match_wait[i],fdefines);
+ putc(match_wait[i],ftable);}
+ len_match=0;
+ };
+ if(c==match_wait[len_match] && !is_escaped && !is_commented && !quoted)
+ {len_match++;}
+ else
+ {if(match_wait==match_close && definesflag)
+ putc(c,fdefines);
+ putc(c,ftable);
+ }
+ if(c=='\n') lineno++;
+ if(is_escaped)
+ {is_escaped=0;}
+ else if(c=='\\')
+ {is_escaped=1;}
+ else if(is_commented==1)
+ {if(last=='*' && c=='/')
+ is_commented=0;}
+ else if(is_commented==2)
+ {if(c=='\n')
+ is_commented=0;}
+ else if((c=='"'|| c== '\''))
+ {if(!quoted) quoted=c;
+ else if(quoted==c) quoted='\0';
+ }
+ else if(quoted) {}
+ else if(last=='/' && c=='*') is_commented=1;
+ else if(last=='/' && c=='/') is_commented=2;
+
+
+ }
+}
+
+
+void
+free_itemsets()
+{
+ register core *cp,*cptmp;
+
+ FREE(state_table);
+
+ for (cp = first_state; cp; cp = cptmp) {
+ cptmp=cp->next;
+ FREE(cp);
+ }
+}
+
+
+void
+free_shifts()
+{
+  register shifts *sp,*sptmp;/* JF dereferenced freed ptr */
+
+ FREE(shift_table);
+
+ for (sp = first_shift; sp; sp = sptmp) {
+ sptmp=sp->next;
+ FREE(sp);
+ }
+}
+
+
+void
+free_reductions()
+{
+ register reductions *rp,*rptmp;/* JF fixed freed ptr */
+
+ FREE(reduction_table);
+
+ for (rp = first_reduction; rp; rp = rptmp) {
+ rptmp=rp->next;
+ FREE(rp);
+ }
+}
+void output_token_defines();
+void output_token_const_def();
+void output_token_const_decl();
+
+void output_about_token()
+{ register int i;
+
+ output_section(fparser,ftable);
+ output_token_defines(ftable);
+ output_section(fparser,ftable);
+ output_token_const_decl(ftable);
+ output_section(fparser,ftable); /* new section */
+ output_token_enum(ftable); /* enum */
+ output_section(fparser,ftable);
+ output_token_const_def(ftable);
+ output_section(fparser,ftable);
+ if (definesflag)
+ {
+ output_section(fhskel,fdefines);
+ output_token_defines(fdefines);
+ output_section(fhskel,fdefines);
+ output_token_const_decl(fdefines);
+ output_section(fhskel,fdefines); /* new section */
+ output_token_enum(fdefines); /* enum */
+ output_section(fhskel,fdefines);
+ if (semantic_parser)
+ for (i = ntokens; i < nsyms; i++)
+ {
+ /* don't make these for dummy nonterminals made by gensym. */
+ if (*tags[i] != '@')
+ fprintf(fdefines, "#define\tNT%s\t%d\n", tags[i], i);
+ }
+ }
+
+};
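+
+/* The helpers below all drive output_token_defines_fmt() with a different
+   printf format; for a token FOO with user number 258 they emit,
+   respectively:
+
+       #define FOO 258                         (output_token_defines)
+       static const int FOO;                   (output_token_const_decl)
+       const int YY_<name>_CLASS::FOO=258;     (output_token_const_def)
+       ,FOO=258                                (output_token_enum)
+
+   FOO, 258 and <name> are placeholders. */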
+void output_token_defines(FILE* file)
+{output_token_defines_fmt(file,"#define\t%s\t%d\n",0);
+ if (semantic_parser)
+ output_token_defines_fmt(file,"#define\tT%s\t%d\n",1);
+};
+void output_token_const_def(FILE* file)
+{char line[256];
+ sprintf(line,"const int YY_%s_CLASS::%%s=%%d;\n",parser_name);
+ output_token_defines_fmt(file,line,0);
+ sprintf(line,"const int YY_%s_CLASS::T%%s=%%d;\n",parser_name);
+ if (semantic_parser)
+ output_token_defines_fmt(file,line,1);
+};
+void output_token_const_decl(FILE* file)
+{char line[256];
+ output_token_defines_fmt(file,"static const int %s;\n",0);
+ if (semantic_parser)
+ output_token_defines_fmt(file,"static const int T%s;\n",1);
+};
+/* create a list like
+ ,FIRST_TOKEN=256
+ ,SECOND_TOKEN=257
+*/
+void output_token_enum(FILE* file)
+{
+ output_token_defines_fmt(file,"\t,%s=%d\n",0);
+ if (semantic_parser) /* just for compatibility with semantic parser */
+ output_token_defines_fmt(file,"\t,T%s=%d\n",1);
+};
+
+
+void
+output_token_defines_fmt(FILE* file,char* fmt,int notrans)
+{
+ bucket *bp;
+
+ for (bp = firstsymbol; bp; bp = bp->next)
+ {
+ if (bp->value >= ntokens) continue;
+
+ /* For named tokens, but not literal ones, define the name. */
+ /* The value is the user token number. */
+
+ if ('\'' != *tags[bp->value] && bp != errtoken)
+ {
+ register char *cp = tags[bp->value];
+ register char c;
+
+ /* Don't #define nonliteral tokens whose names contain periods. */
+
+ while ((c = *cp++) && c != '.');
+ if (!c)
+ {
+ fprintf(file, fmt, tags[bp->value],
+ (translations && !notrans ? bp->user_token_number : bp->value));
+ }
+ }
+ }
+
+ putc('\n', file);
+}
+
+
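+/* Return `f' with every backslash doubled so that the name can be placed
+   inside a C string literal (for #line directives on DOS-style paths).
+   The result lives in a static buffer that is reallocated as needed. */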
+char *quoted_filename(char* f)
+{
+ static char *buffer=NULL;
+ static int buff_size=0;
+ char *p;
+ if(buff_size<strlen(f)*2+1)
+ {
+ if(buffer !=NULL ) free(buffer);
+      buff_size=strlen(f)*2+1; /* remember the size so the buffer is actually reused */
+      buffer=xmalloc(buff_size);
+ }
+ for(p=buffer;*f;f++)
+ {if(*f=='\\')
+ *p++ ='\\';
+ *p++ =*f;
+ }
+ *p++='\0';
+ return buffer;
+}
diff --git a/tools/bison++/print.cc b/tools/bison++/print.cc
new file mode 100644
index 000000000..ef045195a
--- /dev/null
+++ b/tools/bison++/print.cc
@@ -0,0 +1,369 @@
+/* Print information on generated parser, for bison,
+ Copyright (C) 1984, 1986, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include <stdio.h>
+#include "system.h"
+#include "machine.h"
+#include "new.h"
+#include "files.h"
+#include "gram.h"
+#include "state.h"
+
+
+extern char **tags;
+extern int nstates;
+extern short *accessing_symbol;
+extern core **state_table;
+extern shifts **shift_table;
+extern errs **err_table;
+extern reductions **reduction_table;
+extern char *consistent;
+extern char any_conflicts;
+extern char *conflicts;
+extern int final_state;
+
+extern void conflict_log();
+extern void verbose_conflict_log();
+extern void print_reductions(int);
+
+void print_token(int,int);
+void print_state(int);
+void print_core(int);
+void print_actions(int);
+void print_grammar();
+
+void
+terse()
+{
+ if (any_conflicts)
+ {
+ conflict_log();
+ }
+}
+
+
+void
+verbose()
+{
+ register int i;
+
+ if (any_conflicts)
+ verbose_conflict_log();
+
+ print_grammar();
+
+ for (i = 0; i < nstates; i++)
+ {
+ print_state(i);
+ }
+}
+
+
+void
+print_token(int extnum, int token)
+{
+ fprintf(foutput, " type %d is %s\n", extnum, tags[token]);
+}
+
+
+void
+print_state(int state)
+{
+ fprintf(foutput, "\n\nstate %d\n\n", state);
+ print_core(state);
+ print_actions(state);
+}
+
+
+void
+print_core(int state)
+{
+ register int i;
+ register int k;
+ register int rule;
+ register core *statep;
+ register short *sp;
+ register short *sp1;
+
+ statep = state_table[state];
+ k = statep->nitems;
+
+ if (k == 0) return;
+
+ for (i = 0; i < k; i++)
+ {
+ sp1 = sp = ritem + statep->items[i];
+
+ while (*sp > 0)
+ sp++;
+
+ rule = -(*sp);
+ fprintf(foutput, " %s -> ", tags[rlhs[rule]]);
+
+ for (sp = ritem + rrhs[rule]; sp < sp1; sp++)
+ {
+ fprintf(foutput, "%s ", tags[*sp]);
+ }
+
+ putc('.', foutput);
+
+ while (*sp > 0)
+ {
+ fprintf(foutput, " %s", tags[*sp]);
+ sp++;
+ }
+
+ fprintf (foutput, " (rule %d)", rule);
+ putc('\n', foutput);
+ }
+
+ putc('\n', foutput);
+}
+
+
+void
+print_actions(int state)
+{
+ register int i;
+ register int k;
+ register int state1;
+ register int symbol;
+ register shifts *shiftp;
+ register errs *errp;
+ register reductions *redp;
+ register int rule;
+
+ shiftp = shift_table[state];
+ redp = reduction_table[state];
+ errp = err_table[state];
+
+ if (!shiftp && !redp)
+ {
+ if (final_state == state)
+ fprintf(foutput, " $default\taccept\n");
+ else
+ fprintf(foutput, " NO ACTIONS\n");
+ return;
+ }
+
+ if (shiftp)
+ {
+ k = shiftp->nshifts;
+
+ for (i = 0; i < k; i++)
+ {
+ if (! shiftp->internalShifts[i]) continue;
+ state1 = shiftp->internalShifts[i];
+ symbol = accessing_symbol[state1];
+ /* The following line used to be turned off. */
+ if (ISVAR(symbol)) break;
+ if (symbol==0) /* I.e. strcmp(tags[symbol],"$")==0 */
+ fprintf(foutput, " $ \tgo to state %d\n", state1);
+ else
+ fprintf(foutput, " %-4s\tshift, and go to state %d\n",
+ tags[symbol], state1);
+ }
+
+ if (i > 0)
+ putc('\n', foutput);
+ }
+ else
+ {
+ i = 0;
+ k = 0;
+ }
+
+ if (errp)
+ {
+ int j, nerrs;
+
+ nerrs = errp->nerrs;
+
+ for (j = 0; j < nerrs; j++)
+ {
+ if (! errp->internalErrs[j]) continue;
+ symbol = errp->internalErrs[j];
+ fprintf(foutput, " %-4s\terror (nonassociative)\n", tags[symbol]);
+ }
+
+ if (j > 0)
+ putc('\n', foutput);
+ }
+
+ if (consistent[state] && redp)
+ {
+ rule = redp->rules[0];
+ symbol = rlhs[rule];
+ fprintf(foutput, " $default\treduce using rule %d (%s)\n\n",
+ rule, tags[symbol]);
+ }
+ else if (redp)
+ {
+ print_reductions(state);
+ }
+
+ if (i < k)
+ {
+ for (; i < k; i++)
+ {
+ if (! shiftp->internalShifts[i]) continue;
+ state1 = shiftp->internalShifts[i];
+ symbol = accessing_symbol[state1];
+ fprintf(foutput, " %-4s\tgo to state %d\n", tags[symbol], state1);
+ }
+
+ putc('\n', foutput);
+ }
+}
+
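+/* END_TEST(end): if appending `buffer' to the current output line would
+   run past column `end', flush the buffer on its own line and reset the
+   column counter; the trailing `else' absorbs the semicolon at the call
+   site, so the macro behaves as a single statement. */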
+#define END_TEST(end) \
+ if (column + strlen(buffer) > (end)) \
+ { fprintf (foutput, "%s\n ", buffer); column = 3; buffer[0] = 0; } \
+ else
+
+void
+print_grammar()
+{
+ int i, j;
+ short* rule;
+ char buffer[90];
+ int column = 0;
+
+ /* rule # : LHS -> RHS */
+ fputs("\nGrammar\n", foutput);
+ for (i = 1; i <= nrules; i++)
+ /* Don't print rules disabled in reduce_grammar_tables. */
+ if (rlhs[i] >= 0)
+ {
+ fprintf(foutput, "rule %-4d %s ->", i, tags[rlhs[i]]);
+ rule = &ritem[rrhs[i]];
+ if (*rule > 0)
+ while (*rule > 0)
+ fprintf(foutput, " %s", tags[*rule++]);
+ else
+ fputs (" /* empty */", foutput);
+ putc('\n', foutput);
+ }
+
+ /* TERMINAL (type #) : rule #s terminal is on RHS */
+ fputs("\nTerminals, with rules where they appear\n\n", foutput);
+ fprintf(foutput, "%s (-1)\n", tags[0]);
+ if (translations)
+ {
+ for (i = 0; i <= max_user_token_number; i++)
+ if (token_translations[i] != 2)
+ {
+ buffer[0] = 0;
+ column = strlen (tags[token_translations[i]]);
+ fprintf(foutput, "%s", tags[token_translations[i]]);
+ END_TEST (50);
+ sprintf (buffer, " (%d)", i);
+
+ for (j = 1; j <= nrules; j++)
+ {
+ for (rule = &ritem[rrhs[j]]; *rule > 0; rule++)
+ if (*rule == token_translations[i])
+ {
+ END_TEST (65);
+ sprintf (buffer + strlen(buffer), " %d", j);
+ break;
+ }
+ }
+ fprintf (foutput, "%s\n", buffer);
+ }
+ }
+ else
+ for (i = 1; i < ntokens; i++)
+ {
+ buffer[0] = 0;
+ column = strlen (tags[i]);
+ fprintf(foutput, "%s", tags[i]);
+ END_TEST (50);
+ sprintf (buffer, " (%d)", i);
+
+ for (j = 1; j <= nrules; j++)
+ {
+ for (rule = &ritem[rrhs[j]]; *rule > 0; rule++)
+ if (*rule == i)
+ {
+ END_TEST (65);
+ sprintf (buffer + strlen(buffer), " %d", j);
+ break;
+ }
+ }
+ fprintf (foutput, "%s\n", buffer);
+ }
+
+ fputs("\nNonterminals, with rules where they appear\n\n", foutput);
+ for (i = ntokens; i <= nsyms - 1; i++)
+ {
+ int left_count = 0, right_count = 0;
+
+ for (j = 1; j <= nrules; j++)
+ {
+ if (rlhs[j] == i)
+ left_count++;
+ for (rule = &ritem[rrhs[j]]; *rule > 0; rule++)
+ if (*rule == i)
+ {
+ right_count++;
+ break;
+ }
+ }
+
+ buffer[0] = 0;
+ fprintf(foutput, "%s", tags[i]);
+ column = strlen (tags[i]);
+ sprintf (buffer, " (%d)", i);
+ END_TEST (0);
+
+ if (left_count > 0)
+ {
+ END_TEST (50);
+ sprintf (buffer + strlen(buffer), " on left:");
+
+ for (j = 1; j <= nrules; j++)
+ {
+ END_TEST (65);
+ if (rlhs[j] == i)
+ sprintf (buffer + strlen(buffer), " %d", j);
+ }
+ }
+
+ if (right_count > 0)
+ {
+ if (left_count > 0)
+ sprintf (buffer + strlen(buffer), ",");
+ END_TEST (50);
+ sprintf (buffer + strlen(buffer), " on right:");
+ for (j = 1; j <= nrules; j++)
+ {
+ for (rule = &ritem[rrhs[j]]; *rule > 0; rule++)
+ if (*rule == i)
+ {
+ END_TEST (65);
+ sprintf (buffer + strlen(buffer), " %d", j);
+ break;
+ }
+ }
+ }
+ fprintf (foutput, "%s\n", buffer);
+ }
+}
diff --git a/tools/bison++/reader.cc b/tools/bison++/reader.cc
new file mode 100644
index 000000000..dd35aebe0
--- /dev/null
+++ b/tools/bison++/reader.cc
@@ -0,0 +1,1912 @@
+/* Input parser for bison
+ Copyright (C) 1984, 1986, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* read in the grammar specification and record it in the format described in gram.h.
+ All guards are copied into the fguard file and all actions into faction,
+ in each case forming the body of a C function (yyguard or yyaction)
+ which contains a switch statement to decide which guard or action to execute.
+
+The entry point is reader(). */
+
+#include <stdio.h>
+#include <ctype.h>
+#include "system.h"
+#include "files.h"
+#include "new.h"
+#include "symtab.h"
+#include "lex.h"
+#include "gram.h"
+#include "machine.h"
+
+extern bool bison_compability;
+
+/* Number of slots allocated (but not necessarily used yet) in `rline' */
+int rline_allocated;
+
+extern char *program_name;
+extern int definesflag;
+extern int nolinesflag;
+extern bucket *symval;
+extern int numval;
+extern int failure;
+extern int expected_conflicts;
+extern char *token_buffer;
+
+
+
+extern void init_lex();
+extern void tabinit();
+extern void output_headers();
+extern void output_trailers();
+extern void free_symtab();
+extern void open_extra_files();
+extern void fatal(const char*);
+extern void fatals(const char*,void*);
+extern void fatals(const char*,void*,void*);
+extern void fatals(const char*,void*,void*,void*);
+extern void fatals(const char*,void*,void*,void*,void*);
+extern void fatals(const char*,void*,void*,void*,void*,void*);
+
+extern void unlex(int);
+extern void done(int);
+
+extern int skip_white_space();
+extern int parse_percent_token();
+extern int lex();
+
+void read_declarations();
+void copy_definition();
+void parse_token_decl(int,int);
+void parse_start_decl();
+void parse_type_decl();
+void parse_assoc_decl(int);
+void parse_union_decl();
+void parse_expect_decl();
+void readgram();
+void record_rule_line();
+void packsymbols();
+void output_token_defines();
+void packgram();
+int read_signed_integer(FILE*);
+int get_type();
+
+typedef
+ struct symbol_list
+ {
+ struct symbol_list *next;
+ bucket *sym;
+ bucket *ruleprec;
+ }
+ symbol_list;
+
+void copy_action(symbol_list*,int);
+
+
+int lineno;
+symbol_list *grammar;
+int start_flag;
+bucket *startval;
+char **tags;
+
+/* Nonzero if components of semantic values are used, implying
+ they must be unions. */
+static int value_components_used;
+
+static int typed; /* nonzero if %union has been seen. */
+
+static int lastprec; /* incremented for each %left, %right or %nonassoc seen */
+
+static int gensym_count; /* incremented for each generated symbol */
+
+bucket *errtoken;
+
+/* Nonzero if any action or guard uses the @n construct. */
+int yylsp_needed;
+
+extern char *version_string;
+
+extern void output_before_read();
+extern void output_about_token();
+void set_parser_name(char*);
+void cputc(int);
+void hputc(int);
+void copy_header_definition();
+void parse_name_declaration();
+void parse_define();
+void read_a_name();
+
+extern FILE *finput;
+extern int lineno;
+
+
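+/* Copy a `%{ ... %}' (or `%header{ ... %}') block, handing each character
+   to `do_put', which routes it to ftable alone or to both ftable and
+   fdefines.  Strings, character constants and comments are scanned so
+   that a `%}' inside them does not end the block; the closing `%}' itself
+   is not copied. */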
+void
+copy_a_definition (void (*do_put)(int))
+{
+ register int c;
+ register int match;
+ register int ended;
+ register int after_percent; /* -1 while reading a character if prev char was % */
+ int cplus_comment;
+
+ after_percent = 0;
+
+ c = getc(finput);
+
+ for (;;)
+ {
+ switch (c)
+ {
+ case '\n':
+ (*do_put)(c);
+ lineno++;
+ break;
+
+ case '%':
+ after_percent = -1;
+ break;
+
+ case '\'':
+ case '"':
+ match = c;
+ (*do_put)(c);
+ c = getc(finput);
+
+ while (c != match)
+ {
+ if (c == EOF || c == '\n')
+ fatal("unterminated string");
+
+ (*do_put)(c);
+
+ if (c == '\\')
+ {
+ c = getc(finput);
+ if (c == EOF)
+ fatal("unterminated string");
+ (*do_put)(c);
+ if (c == '\n')
+ lineno++;
+ }
+
+ c = getc(finput);
+ }
+
+ (*do_put)(c);
+ break;
+
+ case '/':
+ (*do_put)(c);
+ c = getc(finput);
+ if (c != '*' && c != '/')
+ continue;
+
+ cplus_comment = (c == '/');
+ (*do_put)(c);
+ c = getc(finput);
+
+ ended = 0;
+ while (!ended)
+ {
+ if (!cplus_comment && c == '*')
+ {
+ while (c == '*')
+ {
+ (*do_put)(c);
+ c = getc(finput);
+ }
+
+ if (c == '/')
+ {
+ (*do_put)(c);
+ ended = 1;
+ }
+ }
+ else if (c == '\n')
+ {
+ lineno++;
+ (*do_put)(c);
+ if (cplus_comment)
+ ended = 1;
+ else
+ c = getc(finput);
+ }
+ //else if (c == EOF)
+ //fatal("unterminated comment in `%{' definition");
+ else
+ {
+ (*do_put)(c);
+ c = getc(finput);
+ }
+ }
+
+ break;
+
+ case EOF:
+ //fatal("unterminated `%{' definition");
+
+ default:
+ (*do_put)(c);
+ }
+
+ c = getc(finput);
+
+ if (after_percent)
+ {
+ if (c == '}')
+ return;
+ (*do_put)('%');
+ }
+ after_percent = 0;
+
+ }
+
+}
+
+
+
+
+void
+reader()
+{
+ start_flag = 0;
+ startval = NULL; /* start symbol not specified yet. */
+
+#if 0
+ translations = 0; /* initially assume token number translation not needed. */
+#endif
+ /* Nowadays translations is always set to 1,
+ since we give `error' a user-token-number
+ to satisfy the Posix demand for YYERRCODE==256. */
+ translations = 1;
+
+ nsyms = 1;
+ nvars = 0;
+ nrules = 0;
+ nitems = 0;
+ rline_allocated = 10;
+ rline = NEW2(rline_allocated, short);
+
+ typed = 0;
+ lastprec = 0;
+
+ gensym_count = 0;
+
+ semantic_parser = 0;
+ pure_parser = 0;
+ yylsp_needed = 0;
+
+ grammar = NULL;
+
+ init_lex();
+ lineno = 1;
+
+ /* initialize the symbol table. */
+ tabinit();
+ /* construct the error token */
+ errtoken = getsym("error");
+ errtoken->internalClass = STOKEN;
+ errtoken->user_token_number = 256; /* Value specified by posix. */
+ /* construct a token that represents all undefined literal tokens. */
+ /* it is always token number 2. */
+ getsym("$illegal.")->internalClass = STOKEN;
+ /* Read the declaration section. Copy %{ ... %} groups to ftable and fdefines file.
+ Also notice any %token, %left, etc. found there. */
+ output_before_read();
+
+ read_declarations();
+ output_headers();
+ /* read in the grammar, build grammar in list form. write out guards and actions. */
+ readgram();
+ /* write closing delimiters for actions and guards. */
+ output_trailers();
+ /* assign the symbols their symbol numbers.
+ Write #defines for the token symbols into fdefines if requested. */
+ packsymbols();
+ /* convert the grammar into the format described in gram.h. */
+ packgram();
+ /* free the symbol table data structure
+ since symbols are now all referred to by symbol number. */
+ free_symtab();
+}
+
+
+
+/* read from finput until %% is seen. Discard the %%.
+Handle any % declarations,
+and copy the contents of any %{ ... %} groups to ftable. */
+
+void
+read_declarations ()
+{
+ register int c;
+ register int tok;
+
+ for (;;)
+ {
+ c = skip_white_space();
+
+ if (c == '%')
+ {
+ tok = parse_percent_token();
+
+ switch (tok)
+ {
+ case TWO_PERCENTS:
+ return;
+
+ case PERCENT_LEFT_CURLY:
+ copy_definition();
+ break;
+ case PERCENT_LEFT_CURLY_HEADER:
+ copy_header_definition();
+ break;
+
+ case TOKEN:
+ parse_token_decl (STOKEN, SNTERM);
+ break;
+
+ case NTERM:
+ parse_token_decl (SNTERM, STOKEN);
+ break;
+
+ case TYPE:
+ parse_type_decl();
+ break;
+
+ case START:
+ parse_start_decl();
+ break;
+
+ case UNION:
+ parse_union_decl();
+ break;
+
+ case EXPECT:
+ parse_expect_decl();
+ break;
+
+ case LEFT:
+ parse_assoc_decl(LEFT_ASSOC);
+ break;
+
+ case RIGHT:
+ parse_assoc_decl(RIGHT_ASSOC);
+ break;
+
+ case NONASSOC:
+ parse_assoc_decl(NON_ASSOC);
+ break;
+
+ case SEMANTIC_PARSER:
+ if (semantic_parser == 0)
+ {
+ semantic_parser = 1;
+ open_extra_files();
+ fprintf(stderr,
+		      "%%semantic_parser is no longer supported in this version of bison++; the generated parser will be wrong.  Use classic bison, use a simple parser, or adapt this version to the semantic parser.\n");
+ }
+ break;
+
+ case PURE_PARSER:
+ pure_parser = 1;
+ break;
+
+
+ case PARSER_NAME:
+ parse_name_declaration();
+ break;
+
+ case DEFINE_SYM:
+ parse_define();
+ break;
+
+
+ default:
+ fatal("junk after `%%' in definition section");
+ }
+ }
+ else if (c == EOF)
+ fatal("no input grammar");
+ else if (c >= 040 && c <= 0177)
+ fatals ("unknown character `%c' in declaration section", (void*) c);
+ else
+ fatals ("unknown character with code 0x%x in declaration section", (void*) c);
+ }
+}
+
+
+/* copy the contents of a %{ ... %} into the definitions file.
+The %{ has already been read. Return after reading the %}. */
+
+void
+copy_definition ()
+{
+ if (!nolinesflag)
+ fprintf(ftable, "#line %d \"%s\"\n", lineno, quoted_filename(infile));
+
+ copy_a_definition (cputc);
+}
+
+void
+copy_header_definition ()
+{
+ if (!nolinesflag)
+ {fprintf(ftable, "#line %d \"%s\"\n", lineno, quoted_filename(infile));
+ if(definesflag)
+ fprintf(fdefines, "#line %d \"%s\"\n", lineno, quoted_filename(infile));
+ }
+ copy_a_definition (hputc);
+}
+void
+hputc(int c)
+{
+ putc(c,ftable);
+ if(definesflag) putc(c,fdefines);
+}
+
+void
+cputc(int c)
+{
+ putc(c,ftable);
+}
+
+
+
+
+/* parse what comes after %token or %nterm.
+For %token, what_is is STOKEN and what_is_not is SNTERM.
+For %nterm, the arguments are reversed. */
+
+void
+parse_token_decl (int what_is, int what_is_not)
+{
+ register int token = 0;
+ register int prev;
+ char *internalTypename = 0;
+ int k;
+
+/* start_lineno = lineno; JF */
+
+ for (;;)
+ {
+ if(ungetc(skip_white_space(), finput) == '%')
+ return;
+
+/* if (lineno != start_lineno)
+ return; JF */
+
+ /* we have not passed a newline, so the token now starting is in this declaration */
+ prev = token;
+
+ token = lex();
+ if (token == COMMA)
+ continue;
+ if (token == TYPENAME)
+ {
+ k = strlen(token_buffer);
+ internalTypename = NEW2(k + 1, char);
+ strcpy(internalTypename, token_buffer);
+ value_components_used = 1;
+ }
+ else if (token == IDENTIFIER)
+ {
+ int oldclass = symval->internalClass;
+
+ if (symval->internalClass == what_is_not)
+ fatals("symbol %s redefined", (void*) symval->tag);
+ symval->internalClass = what_is;
+ if (what_is == SNTERM && oldclass != SNTERM)
+ symval->value = nvars++;
+
+ if (internalTypename)
+ {
+ if (symval->type_name == NULL)
+ symval->type_name = internalTypename;
+ else
+ fatals("type redeclaration for %s",(void*) symval->tag);
+ }
+ }
+ else if (prev == IDENTIFIER && token == NUMBER)
+ {
+ symval->user_token_number = numval;
+ translations = 1;
+ }
+ else
+ fatal("invalid text in %token or %nterm declaration");
+ }
+
+}
+
+
+
+/* parse what comes after %start */
+
+void
+parse_start_decl ()
+{
+ if (start_flag)
+ fatal("multiple %start declarations");
+ start_flag = 1;
+ if (lex() != IDENTIFIER)
+ fatal("invalid %start declaration");
+ startval = symval;
+}
+
+
+
+/* read in a %type declaration and record its information for get_type_name to access */
+
+void
+parse_type_decl ()
+{
+ register int k;
+ register char *name;
+/* register int start_lineno; JF */
+
+ if (lex() != TYPENAME)
+ fatal("ill-formed %type declaration");
+
+ k = strlen(token_buffer);
+ name = NEW2(k + 1, char);
+ strcpy(name, token_buffer);
+
+/* start_lineno = lineno; */
+
+ for (;;)
+ {
+ register int t;
+
+ if(ungetc(skip_white_space(), finput) == '%')
+ return;
+
+/* if (lineno != start_lineno)
+ return; JF */
+
+ /* we have not passed a newline, so the token now starting is in this declaration */
+
+ t = lex();
+
+ switch (t)
+ {
+
+ case COMMA:
+ case SEMICOLON:
+ break;
+
+ case IDENTIFIER:
+ if (symval->type_name == NULL)
+ symval->type_name = name;
+ else
+ fatals("type redeclaration for %s", (void*) symval->tag);
+
+ break;
+
+ default:
+ fatal("invalid %type declaration");
+ }
+ }
+}
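+
+/* For illustration (names invented): `%type <expr> exp stmt' is read as
+   TYPENAME ("expr") followed by two IDENTIFIERs, so both exp and stmt
+   get "expr" recorded as their type_name for later lookup by
+   get_type_name(). */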
+
+
+
+/* read in a %left, %right or %nonassoc declaration and record its information. */
+/* assoc is either LEFT_ASSOC, RIGHT_ASSOC or NON_ASSOC. */
+
+void
+parse_assoc_decl (int assoc)
+{
+ register int k;
+ register char *name = NULL;
+/* register int start_lineno; JF */
+ register int prev = 0; /* JF added = 0 to keep lint happy */
+
+ lastprec++; /* Assign a new precedence level, never 0. */
+
+/* start_lineno = lineno; */
+
+ for (;;)
+ {
+ register int t;
+
+ if(ungetc(skip_white_space(), finput) == '%')
+ return;
+
+ /* if (lineno != start_lineno)
+ return; JF */
+
+ /* we have not passed a newline, so the token now starting is in this declaration */
+
+ t = lex();
+
+ switch (t)
+ {
+
+ case TYPENAME:
+ k = strlen(token_buffer);
+ name = NEW2(k + 1, char);
+ strcpy(name, token_buffer);
+ break;
+
+ case COMMA:
+ break;
+
+ case IDENTIFIER:
+ if (symval->prec != 0)
+ fatals("redefining precedence of %s", (void*) symval->tag);
+ symval->prec = lastprec;
+ symval->assoc = assoc;
+ if (symval->internalClass == SNTERM)
+ fatals("symbol %s redefined", (void*) symval->tag);
+ symval->internalClass = STOKEN;
+ if (name)
+ { /* record the type, if one is specified */
+ if (symval->type_name == NULL)
+ symval->type_name = name;
+ else
+ fatals("type redeclaration for %s", (void*) symval->tag);
+ }
+ break;
+
+ case NUMBER:
+ if (prev == IDENTIFIER)
+ {
+ symval->user_token_number = numval;
+ translations = 1;
+ }
+ else
+ fatal("invalid text in association declaration");
+ break;
+
+ case SEMICOLON:
+ return;
+
+ default:
+ fatal("malformatted association declaration");
+ }
+
+ prev = t;
+
+ }
+}
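+
+/* For illustration (names invented): a line such as
+
+       %left <op> PLUS MINUS
+
+   runs the loop above with assoc == LEFT_ASSOC; PLUS and MINUS both get
+   the same freshly incremented `lastprec' level, LEFT_ASSOC
+   associativity, class STOKEN and the type name "op". */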
+
+
+
+/* copy the union declaration into ftable (and fdefines),
+ where it is made into the
+ definition of YYSTYPE, the type of elements of the parser value stack. */
+
+void
+parse_union_decl()
+{
+ register int c;
+ register int count;
+ register int in_comment;
+ int cplus_comment;
+
+ if (typed)
+ fatal("multiple %union declarations");
+
+ typed = 1;
+
+ if (!nolinesflag)
+ fprintf(ftable, "\n#line %d \"%s\"\n", lineno, quoted_filename(infile));
+ else
+ fprintf(ftable, "\n");
+ fprintf(ftable, "typedef union");
+ if (definesflag)
+ {
+ if (!nolinesflag)
+ fprintf(fdefines, "\n#line %d \"%s\"\n", lineno, quoted_filename(infile));
+ else
+ fprintf(fdefines, "\n");
+ fprintf(fdefines, "typedef union");
+ }
+
+
+ count = 0;
+ in_comment = 0;
+
+ c = getc(finput);
+
+ while (c != EOF)
+ {
+ hputc(c);
+ switch (c)
+ {
+ case '\n':
+ lineno++;
+ break;
+
+ case '/':
+ c = getc(finput);
+ if (c != '*' && c != '/')
+ ungetc(c, finput);
+ else
+ {
+ hputc(c);
+ cplus_comment = (c == '/');
+ in_comment = 1;
+ c = getc(finput);
+ while (in_comment)
+ {
+ hputc(c);
+ if (c == '\n')
+ {
+ lineno++;
+ if (cplus_comment)
+ {
+ in_comment = 0;
+ break;
+ }
+ }
+ if (c == EOF)
+ fatal("unterminated comment");
+
+ if (!cplus_comment && c == '*')
+ {
+ c = getc(finput);
+ if (c == '/')
+ {
+ hputc('/');
+ in_comment = 0;
+ }
+ }
+ else
+ c = getc(finput);
+ }
+ }
+ break;
+
+
+ case '{':
+ count++;
+ break;
+
+ case '}':
+ if (count == 0)
+ fatal ("unmatched close-brace (`}')");
+ count--;
+ if (count == 0)
+ {
+ set_parser_name(NULL); /* if undef, use default */
+ fprintf(ftable,
+ " yy_%s_stype;\n#define YY_%s_STYPE yy_%s_stype\n",
+ parser_name,parser_name,parser_name);
+ if(bison_compability==true)
+ {
+ fprintf(ftable,
+ "#ifndef YY_USE_CLASS\n#define YYSTYPE yy_%s_stype\n#endif\n",parser_name);
+
+ }
+ if (definesflag)
+ {
+ fprintf(fdefines,
+ " yy_%s_stype;\n#define YY_%s_STYPE yy_%s_stype\n",
+ parser_name,parser_name,parser_name);
+ if(bison_compability==true)
+ {
+ fprintf(fdefines,
+ "#ifndef YY_USE_CLASS\n#define YYSTYPE yy_%s_stype\n#endif\n",parser_name);
+
+ }
+ }
+ /* JF don't choke on trailing semi */
+ c=skip_white_space();
+ if(c!=';') ungetc(c,finput);
+ return;
+ }
+ }
+
+ c = getc(finput);
+ }
+}
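+
+/* For illustration (member names invented, exact output depends on the
+   flags in force): a declaration like
+
+       %union { int ival; char *sval; }
+
+   is copied through hputc() and closed off above so that ftable (and
+   fdefines, when definesflag is set) ends up with roughly
+
+       typedef union { int ival; char *sval; } yy_<name>_stype;
+       #define YY_<name>_STYPE yy_<name>_stype
+
+   where <name> is the parser name, plus a YYSTYPE #define guarded by
+   YY_USE_CLASS when bison compatibility is enabled. */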
+
+/* parse the declaration %expect N which says to expect N
+ shift-reduce conflicts. */
+
+void
+parse_expect_decl()
+{
+ register int c;
+ register int count;
+ char buffer[20];
+
+ c = getc(finput);
+ while (c == ' ' || c == '\t')
+ c = getc(finput);
+
+ count = 0;
+ while (c >= '0' && c <= '9')
+ {
+      if (count < (int) sizeof(buffer) - 1) /* leave room for the terminating 0 */
+ buffer[count++] = c;
+ c = getc(finput);
+ }
+ buffer[count] = 0;
+
+ ungetc (c, finput);
+
+ expected_conflicts = atoi (buffer);
+}
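+
+/* For illustration: `%expect 1' makes the digit loop above collect "1"
+   into `buffer', so expected_conflicts becomes 1, the number of
+   shift-reduce conflicts the grammar declares it expects. */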
+
+/* that's all of parsing the declaration section */
+
+/* Get the data type (alternative in the union) of the value for symbol n in rule rule. */
+
+char *
+get_type_name(int n, symbol_list* rule)
+{
+ static char *msg = "invalid $ value";
+
+ register int i;
+ register symbol_list *rp;
+
+ if (n < 0)
+ fatal(msg);
+
+ rp = rule;
+ i = 0;
+
+ while (i < n)
+ {
+ rp = rp->next;
+ if (rp == NULL || rp->sym == NULL)
+ fatal(msg);
+ i++;
+ }
+
+ return (rp->sym->type_name);
+}
+
+
+
+/* after %guard is seen in the input file,
+copy the actual guard into the guards file.
+If the guard is followed by an action, copy that into the actions file.
+stack_offset is the number of values in the current rule so far,
+which says where to find $0 with respect to the top of the stack,
+for the simple parser in which the stack is not popped until after the guard is run. */
+
+void
+copy_guard(symbol_list* rule, int stack_offset)
+{
+ register int c;
+ register int n;
+ register int count;
+ register int match;
+ register int ended;
+ register char *type_name;
+ int brace_flag = 0;
+ int cplus_comment;
+
+ /* offset is always 0 if parser has already popped the stack pointer */
+ if (semantic_parser) stack_offset = 0;
+
+ fprintf(fguard, "\ncase %d:\n", nrules);
+ if (!nolinesflag)
+ fprintf(fguard, "#line %d \"%s\"\n", lineno, quoted_filename(infile));
+ putc('{', fguard);
+
+ count = 0;
+ c = getc(finput);
+
+ while (brace_flag ? (count > 0) : (c != ';'))
+ {
+ switch (c)
+ {
+ case '\n':
+ putc(c, fguard);
+ lineno++;
+ break;
+
+ case '{':
+ putc(c, fguard);
+ brace_flag = 1;
+ count++;
+ break;
+
+ case '}':
+ putc(c, fguard);
+ if (count > 0)
+ count--;
+ else
+ fatal("unmatched right brace ('}')");
+ break;
+
+ case '\'':
+ case '"':
+ match = c;
+ putc(c, fguard);
+ c = getc(finput);
+
+ while (c != match)
+ {
+ if (c == EOF || c == '\n')
+ fatal("unterminated string");
+
+ putc(c, fguard);
+
+ if (c == '\\')
+ {
+ c = getc(finput);
+ if (c == EOF)
+ fatal("unterminated string");
+ putc(c, fguard);
+ if (c == '\n')
+ lineno++;
+ }
+
+ c = getc(finput);
+ }
+
+ putc(c, fguard);
+ break;
+
+ case '/':
+ putc(c, fguard);
+ c = getc(finput);
+ if (c != '*' && c != '/')
+ continue;
+
+ cplus_comment = (c == '/');
+ putc(c, fguard);
+ c = getc(finput);
+
+ ended = 0;
+ while (!ended)
+ {
+ if (!cplus_comment && c == '*')
+ {
+ while (c == '*')
+ {
+ putc(c, fguard);
+ c = getc(finput);
+ }
+
+ if (c == '/')
+ {
+ putc(c, fguard);
+ ended = 1;
+ }
+ }
+ else if (c == '\n')
+ {
+ lineno++;
+ putc(c, fguard);
+ if (cplus_comment)
+ ended = 1;
+ else
+ c = getc(finput);
+ }
+ else if (c == EOF)
+ fatal("unterminated comment");
+ else
+ {
+ putc(c, fguard);
+ c = getc(finput);
+ }
+ }
+
+ break;
+
+ case '$':
+ c = getc(finput);
+ type_name = NULL;
+
+ if (c == '<')
+ {
+ register char *cp = token_buffer;
+
+ while ((c = getc(finput)) != '>' && c > 0)
+ *cp++ = c;
+ *cp = 0;
+ type_name = token_buffer;
+
+ c = getc(finput);
+ }
+
+ if (c == '$')
+ {
+ fprintf(fguard, "yyval");
+ if (!type_name) type_name = rule->sym->type_name;
+ if (type_name)
+ fprintf(fguard, ".%s", type_name);
+ if(!type_name && typed) /* JF */
+ fprintf(stderr,"%s:%d: warning: $$ of '%s' has no declared type.\n",infile,lineno,rule->sym->tag);
+ }
+
+ else if (isdigit(c) || c == '-')
+ {
+ ungetc (c, finput);
+ n = read_signed_integer(finput);
+ c = getc(finput);
+
+ if (!type_name && n > 0)
+ type_name = get_type_name(n, rule);
+
+ fprintf(fguard, "yyvsp[%d]", n - stack_offset);
+ if (type_name)
+ fprintf(fguard, ".%s", type_name);
+ if(!type_name && typed) /* JF */
+ fprintf(stderr,"%s:%d: warning: $%d of '%s' has no declared type.\n",infile,lineno,n,rule->sym->tag);
+ continue;
+ }
+ else
+ fatals("$%c is invalid",(void*) c); /* JF changed style */
+
+ break;
+
+ case '@':
+ c = getc(finput);
+ if (isdigit(c) || c == '-')
+ {
+ ungetc (c, finput);
+ n = read_signed_integer(finput);
+ c = getc(finput);
+ }
+ else
+ fatals("@%c is invalid",(void*) c); /* JF changed style */
+
+ fprintf(fguard, "yylsp[%d]", n - stack_offset);
+ yylsp_needed = 1;
+
+ continue;
+
+ case EOF:
+ fatal("unterminated %guard clause");
+
+ default:
+ putc(c, fguard);
+ }
+
+ if (c != '}' || count != 0)
+ c = getc(finput);
+ }
+
+ c = skip_white_space();
+
+ fprintf(fguard, ";\n break;}");
+ if (c == '{')
+ copy_action(rule, stack_offset);
+ else if (c == '=')
+ {
+ c = getc(finput);
+ if (c == '{')
+ copy_action(rule, stack_offset);
+ }
+ else
+ ungetc(c, finput);
+}
+
+
+
+/* Assuming that a { has just been seen, copy everything up to the matching }
+into the actions file.
+stack_offset is the number of values in the current rule so far,
+which says where to find $0 with respect to the top of the stack. */
+
+void
+copy_action(symbol_list* rule, int stack_offset)
+{
+ register int c;
+ register int n;
+ register int count;
+ register int match;
+ register int ended;
+ register char *type_name;
+ int cplus_comment;
+
+ /* offset is always 0 if parser has already popped the stack pointer */
+ if (semantic_parser) stack_offset = 0;
+
+ fprintf(faction, "\ncase %d:\n", nrules);
+ if (!nolinesflag)
+ fprintf(faction, "#line %d \"%s\"\n", lineno, quoted_filename(infile));
+ putc('{', faction);
+
+ count = 1;
+ c = getc(finput);
+
+ while (count > 0)
+ {
+ while (c != '}')
+ {
+ switch (c)
+ {
+ case '\n':
+ putc(c, faction);
+ lineno++;
+ break;
+
+ case '{':
+ putc(c, faction);
+ count++;
+ break;
+
+ case '\'':
+ case '"':
+ match = c;
+ putc(c, faction);
+ c = getc(finput);
+
+ while (c != match)
+ {
+ if (c == EOF || c == '\n')
+ fatal("unterminated string");
+
+ putc(c, faction);
+
+ if (c == '\\')
+ {
+ c = getc(finput);
+ if (c == EOF)
+ fatal("unterminated string");
+ putc(c, faction);
+ if (c == '\n')
+ lineno++;
+ }
+
+ c = getc(finput);
+ }
+
+ putc(c, faction);
+ break;
+
+ case '/':
+ putc(c, faction);
+ c = getc(finput);
+ if (c != '*' && c != '/')
+ continue;
+
+ cplus_comment = (c == '/');
+ putc(c, faction);
+ c = getc(finput);
+
+ ended = 0;
+ while (!ended)
+ {
+ if (!cplus_comment && c == '*')
+ {
+ while (c == '*')
+ {
+ putc(c, faction);
+ c = getc(finput);
+ }
+
+ if (c == '/')
+ {
+ putc(c, faction);
+ ended = 1;
+ }
+ }
+ else if (c == '\n')
+ {
+ lineno++;
+ putc(c, faction);
+ if (cplus_comment)
+ ended = 1;
+ else
+ c = getc(finput);
+ }
+ else if (c == EOF)
+ fatal("unterminated comment");
+ else
+ {
+ putc(c, faction);
+ c = getc(finput);
+ }
+ }
+
+ break;
+
+ case '$':
+ c = getc(finput);
+ type_name = NULL;
+
+ if (c == '<')
+ {
+ register char *cp = token_buffer;
+
+ while ((c = getc(finput)) != '>' && c > 0)
+ *cp++ = c;
+ *cp = 0;
+ type_name = token_buffer;
+ value_components_used = 1;
+
+ c = getc(finput);
+ }
+ if (c == '$')
+ {
+ fprintf(faction, "yyval");
+ if (!type_name) type_name = get_type_name(0, rule);
+ if (type_name)
+ fprintf(faction, ".%s", type_name);
+ if(!type_name && typed) /* JF */
+ fprintf(stderr,"%s:%d: warning: $$ of '%s' has no declared type.\n",infile,lineno,rule->sym->tag);
+ }
+ else if (isdigit(c) || c == '-')
+ {
+ ungetc (c, finput);
+ n = read_signed_integer(finput);
+ c = getc(finput);
+
+ if (!type_name && n > 0)
+ type_name = get_type_name(n, rule);
+
+ fprintf(faction, "yyvsp[%d]", n - stack_offset);
+ if (type_name)
+ fprintf(faction, ".%s", type_name);
+ if(!type_name && typed) /* JF */
+ fprintf(stderr,"%s:%d: warning: $%d of '%s' has no declared type.\n",infile,lineno,n,rule->sym->tag);
+ continue;
+ }
+ else
+ fatals("$%c is invalid",(void*) c); /* JF changed format */
+
+ break;
+
+ case '@':
+ c = getc(finput);
+ if (isdigit(c) || c == '-')
+ {
+ ungetc (c, finput);
+ n = read_signed_integer(finput);
+ c = getc(finput);
+ }
+ else
+ fatal("invalid @-construct");
+
+ fprintf(faction, "yylsp[%d]", n - stack_offset);
+ yylsp_needed = 1;
+
+ continue;
+
+ case EOF:
+ fatal("unmatched '{'");
+
+ default:
+ putc(c, faction);
+ }
+
+ c = getc(finput);
+ }
+
+ /* above loop exits when c is '}' */
+
+ if (--count)
+ {
+ putc(c, faction);
+ c = getc(finput);
+ }
+ }
+
+ fprintf(faction, ";\n break;}");
+}
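+
+/* For illustration of the $/@ rewriting above (the rule itself is
+   invented): with
+
+       exp : exp '+' exp { $$ = $1 + $3; }
+
+   copy_action is entered with stack_offset == 3, so the action body is
+   emitted roughly as
+
+       yyval = yyvsp[-2] + yyvsp[0];
+
+   a `.member' suffix is appended when a %union member type is known for
+   the symbol, and an `@n' reference would come out as yylsp[n - 3]. */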
+
+
+
+/* generate a dummy symbol, a nonterminal,
+whose name cannot conflict with the user's names. */
+
+bucket *
+gensym()
+{
+ register bucket *sym;
+
+ sprintf (token_buffer, "@%d", ++gensym_count);
+ sym = getsym(token_buffer);
+ sym->internalClass = SNTERM;
+ sym->value = nvars++;
+ return (sym);
+}
+
+/* Parse the input grammar into one symbol_list structure.
+Each rule is represented by a sequence of symbols: the left hand side
+followed by the contents of the right hand side, followed by a null pointer
+instead of a symbol to terminate the rule.
+The next symbol is the lhs of the following rule.
+
+All guards and actions are copied out to the appropriate files,
+labelled by the rule number they apply to. */
+
+void
+readgram()
+{
+ register int t;
+ register bucket *lhs;
+ register symbol_list *p;
+ register symbol_list *p1;
+ register bucket *bp;
+
+ symbol_list *crule; /* points to first symbol_list of current rule. */
+ /* its symbol is the lhs of the rule. */
+ symbol_list *crule1; /* points to the symbol_list preceding crule. */
+
+ p1 = NULL;
+
+ t = lex();
+
+ while (t != TWO_PERCENTS && t != ENDFILE)
+ {
+ if (t == IDENTIFIER || t == BAR)
+ {
+ register int actionflag = 0;
+ int rulelength = 0; /* number of symbols in rhs of this rule so far */
+ int xactions = 0; /* JF for error checking */
+ bucket *first_rhs = 0;
+
+ if (t == IDENTIFIER)
+ {
+ lhs = symval;
+
+ t = lex();
+ if (t != COLON)
+ fatal("ill-formed rule");
+ }
+
+ if (nrules == 0)
+ {
+ if (t == BAR)
+ fatal("grammar starts with vertical bar");
+
+ if (!start_flag)
+ startval = lhs;
+ }
+
+ /* start a new rule and record its lhs. */
+
+ nrules++;
+ nitems++;
+
+ record_rule_line ();
+
+ p = NEW(symbol_list);
+ p->sym = lhs;
+
+ crule1 = p1;
+ if (p1)
+ p1->next = p;
+ else
+ grammar = p;
+
+ p1 = p;
+ crule = p;
+
+ /* mark the rule's lhs as a nonterminal if not already so. */
+
+ if (lhs->internalClass == SUNKNOWN)
+ {
+ lhs->internalClass = SNTERM;
+ lhs->value = nvars;
+ nvars++;
+ }
+ else if (lhs->internalClass == STOKEN)
+ fatals("rule given for %s, which is a token", (void*) lhs->tag);
+
+ /* read the rhs of the rule. */
+
+ for (;;)
+ {
+ t = lex();
+
+ if (! (t == IDENTIFIER || t == LEFT_CURLY)) break;
+
+ /* If next token is an identifier, see if a colon follows it.
+ If one does, exit this rule now. */
+ if (t == IDENTIFIER)
+ {
+ register bucket *ssave;
+ register int t1;
+
+ ssave = symval;
+ t1 = lex();
+ unlex(t1);
+ symval = ssave;
+ if (t1 == COLON) break;
+
+ if(!first_rhs) /* JF */
+ first_rhs = symval;
+ /* Not followed by colon =>
+ process as part of this rule's rhs. */
+ }
+
+ /* If we just passed an action, that action was in the middle
+ of a rule, so make a dummy rule to reduce it to a
+ non-terminal. */
+ if (actionflag)
+ {
+ register bucket *sdummy;
+
+ /* Since the action was written out with this rule's */
+          /* number, we must give the new rule this number */
+ /* by inserting the new rule before it. */
+
+ /* Make a dummy nonterminal, a gensym. */
+ sdummy = gensym();
+
+ /* Make a new rule, whose body is empty,
+ before the current one, so that the action
+ just read can belong to it. */
+ nrules++;
+ nitems++;
+ record_rule_line ();
+ p = NEW(symbol_list);
+ if (crule1)
+ crule1->next = p;
+ else grammar = p;
+ p->sym = sdummy;
+ crule1 = NEW(symbol_list);
+ p->next = crule1;
+ crule1->next = crule;
+
+ /* insert the dummy generated by that rule into this rule. */
+ nitems++;
+ p = NEW(symbol_list);
+ p->sym = sdummy;
+ p1->next = p;
+ p1 = p;
+
+ actionflag = 0;
+ }
+
+ if (t == IDENTIFIER)
+ {
+ nitems++;
+ p = NEW(symbol_list);
+ p->sym = symval;
+ p1->next = p;
+ p1 = p;
+ }
+ else /* handle an action. */
+ {
+ copy_action(crule, rulelength);
+ actionflag = 1;
+ xactions++; /* JF */
+ }
+ rulelength++;
+ }
+
+ /* Put an empty link in the list to mark the end of this rule */
+ p = NEW(symbol_list);
+ p1->next = p;
+ p1 = p;
+
+ if (t == PREC)
+ {
+ t = lex();
+ crule->ruleprec = symval;
+ t = lex();
+ }
+ if (t == GUARD)
+ {
+ if (! semantic_parser)
+ fatal("%guard present but %semantic_parser not specified");
+
+ copy_guard(crule, rulelength);
+ t = lex();
+ }
+ else if (t == LEFT_CURLY)
+ {
+ if (actionflag) fatal("two actions at end of one rule");
+ copy_action(crule, rulelength);
+ t = lex();
+ }
+ /* If $$ is being set in default way,
+ warn if any type mismatch. */
+ else if (!xactions && first_rhs && lhs->type_name != first_rhs->type_name)
+ {
+ if (lhs->type_name == 0 || first_rhs->type_name == 0
+ || strcmp(lhs->type_name,first_rhs->type_name))
+ fprintf(stderr, "%s:%d: warning: type clash ('%s' '%s') on default action\n",
+ infile,
+ lineno,
+ lhs->type_name ? lhs->type_name : "",
+ first_rhs->type_name ? first_rhs->type_name : "");
+ }
+ /* Warn if there is no default for $$ but we need one. */
+ else if (!xactions && !first_rhs && lhs->type_name != 0)
+ fprintf(stderr,
+ "%s:%d: warning: empty rule for typed nonterminal, and no action\n",
+ infile,
+ lineno);
+ if (t == SEMICOLON)
+ t = lex();
+ }
+ /* these things can appear as alternatives to rules. */
+ else if (t == TOKEN)
+ {
+ parse_token_decl(STOKEN, SNTERM);
+ t = lex();
+ }
+ else if (t == NTERM)
+ {
+ parse_token_decl(SNTERM, STOKEN);
+ t = lex();
+ }
+ else if (t == TYPE)
+ {
+ t = get_type();
+ }
+ else if (t == UNION)
+ {
+ parse_union_decl();
+ t = lex();
+ }
+ else if (t == EXPECT)
+ {
+ parse_expect_decl();
+ t = lex();
+ }
+ else if (t == START)
+ {
+ parse_start_decl();
+ t = lex();
+ }
+ else
+ fatal("invalid input");
+ }
+ set_parser_name(NULL); /* if undef, use default */
+
+ if (nsyms > MAXSHORT)
+ fatals("too many symbols (tokens plus nonterminals); maximum %d",
+ (void*) MAXSHORT);
+ if (nrules == 0)
+ fatal("no input grammar");
+
+ if (typed == 0 /* JF put out same default YYSTYPE as YACC does */
+ && !value_components_used)
+ {
+ /* We used to use `unsigned long' as YYSTYPE on MSDOS,
+ but it seems better to be consistent.
+ Most programs should declare their own type anyway. */
+ fprintf(ftable, "\
+#ifndef YY_USE_CLASS\n\
+# ifndef YYSTYPE\n\
+# define YYSTYPE int\n\
+# define YYSTYPE_IS_TRIVIAL 1\n\
+# endif\n\
+#endif\n");
+ if (definesflag)
+ fprintf(fdefines, "\
+\
+#ifndef YY_USE_CLASS\n\
+# ifndef YYSTYPE\n\
+# define YYSTYPE int\n\
+# define YYSTYPE_IS_TRIVIAL 1\n\
+# endif\n\
+#endif\n");
+ }
+
+ /* Report any undefined symbols and consider them nonterminals. */
+
+ for (bp = firstsymbol; bp; bp = bp->next)
+ if (bp->internalClass == SUNKNOWN)
+ {
+ fprintf(stderr, "symbol %s used, not defined as token, and no rules for it\n",
+ bp->tag);
+ failure = 1;
+ bp->internalClass = SNTERM;
+ bp->value = nvars++;
+ }
+
+ ntokens = nsyms - nvars;
+}
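+
+/* For illustration of the mid-rule action handling above (rule and
+   action invented): a rule such as
+
+       a : b { setup(); } c ;
+
+   is split, using a gensym() dummy like @1, into an empty rule
+
+       @1 :  { setup(); } ;
+
+   inserted before the current rule, plus `a : b @1 c ;'.  Inserting the
+   dummy rule first keeps the action attached to the rule number it was
+   already written out under. */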
+
+
+void
+record_rule_line ()
+{
+ /* Record each rule's source line number in rline table. */
+
+ if (nrules >= rline_allocated)
+ {
+ rline_allocated = nrules * 2;
+ rline = (short *) xrealloc (( char*) rline,
+ rline_allocated * sizeof (short));
+ }
+ rline[nrules] = lineno;
+}
+
+
+/* read in a %type declaration and record its information for get_type_name to access */
+
+int
+get_type()
+{
+ register int k;
+ register int t;
+ register char *name;
+
+ t = lex();
+
+ if (t != TYPENAME)
+ fatal("ill-formed %type declaration");
+
+ k = strlen(token_buffer);
+ name = NEW2(k + 1, char);
+ strcpy(name, token_buffer);
+
+ for (;;)
+ {
+ t = lex();
+
+ switch (t)
+ {
+ case SEMICOLON:
+ return (lex());
+
+ case COMMA:
+ break;
+
+ case IDENTIFIER:
+ if (symval->type_name == NULL)
+ symval->type_name = name;
+ else
+ fatals("type redeclaration for %s", (void*) symval->tag);
+
+ break;
+
+ default:
+ return (t);
+ }
+ }
+}
+
+
+
+/* assign symbol numbers, and write definition of token names into fdefines.
+Set up vectors tags and sprec of names and precedences of symbols. */
+
+void
+packsymbols()
+{
+ register bucket *bp;
+ register int tokno = 1;
+ register int i;
+ register int last_user_token_number;
+
+ /* int lossage = 0; JF set but not used */
+
+ tags = NEW2(nsyms + 1, char *);
+ tags[0] = "$";
+
+ sprec = NEW2(nsyms, short);
+ sassoc = NEW2(nsyms, short);
+
+ max_user_token_number = 256;
+ last_user_token_number = 256;
+
+ for (bp = firstsymbol; bp; bp = bp->next)
+ {
+ if (bp->internalClass == SNTERM)
+ {
+ bp->value += ntokens;
+ }
+ else
+ {
+ if (translations && !(bp->user_token_number))
+ bp->user_token_number = ++last_user_token_number;
+ if (bp->user_token_number > max_user_token_number)
+ max_user_token_number = bp->user_token_number;
+ bp->value = tokno++;
+ }
+
+ tags[bp->value] = bp->tag;
+ sprec[bp->value] = bp->prec;
+ sassoc[bp->value] = bp->assoc;
+
+ }
+
+ if (translations)
+ {
+ register int i;
+
+ token_translations = NEW2(max_user_token_number+1, short);
+
+ /* initialize all entries for literal tokens to 2,
+ the internal token number for $illegal., which represents all invalid inputs. */
+ for (i = 0; i <= max_user_token_number; i++)
+ token_translations[i] = 2;
+ }
+
+ for (bp = firstsymbol; bp; bp = bp->next)
+ {
+ if (bp->value >= ntokens) continue;
+ if (translations)
+ {
+ if (token_translations[bp->user_token_number] != 2)
+ {
+ /* JF made this a call to fatals() */
+ fatals( "tokens %s and %s both assigned number %d",
+ (void*) tags[token_translations[bp->user_token_number]],
+ (void*) bp->tag,
+ (void*) bp->user_token_number);
+ }
+ token_translations[bp->user_token_number] = bp->value;
+ }
+ }
+
+ error_token_number = errtoken->value;
+
+
+ if (startval->internalClass == SUNKNOWN)
+ fatals("the start symbol %s is undefined", (void*) startval->tag);
+ else if (startval->internalClass == STOKEN)
+ fatals("the start symbol %s is a token", (void*) startval->tag);
+
+ start_symbol = startval->value;
+ output_about_token();
+}
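+
+/* For illustration (numbers invented): if NUM was declared with
+   `%token NUM 301' and receives internal value 4 above, then
+   token_translations[301] == 4, while every user token number that was
+   never assigned keeps the value 2, the internal number of $illegal. */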
+
+
+
+
+/* convert the rules into the representation using rrhs, rlhs and ritems. */
+
+void
+packgram()
+{
+ register int itemno;
+ register int ruleno;
+ register symbol_list *p;
+/* register bucket *bp; JF unused */
+
+ bucket *ruleprec;
+
+ ritem = NEW2(nitems + 1, short);
+ rlhs = NEW2(nrules, short) - 1;
+ rrhs = NEW2(nrules, short) - 1;
+ rprec = NEW2(nrules, short) - 1;
+ rprecsym = NEW2(nrules, short) - 1;
+ rassoc = NEW2(nrules, short) - 1;
+
+ itemno = 0;
+ ruleno = 1;
+
+ p = grammar;
+ while (p)
+ {
+ rlhs[ruleno] = p->sym->value;
+ rrhs[ruleno] = itemno;
+ ruleprec = p->ruleprec;
+
+ p = p->next;
+ while (p && p->sym)
+ {
+ ritem[itemno++] = p->sym->value;
+ /* A rule gets by default the precedence and associativity
+ of the last token in it. */
+ if (p->sym->internalClass == STOKEN)
+ {
+ rprec[ruleno] = p->sym->prec;
+ rassoc[ruleno] = p->sym->assoc;
+ }
+ if (p) p = p->next;
+ }
+
+ /* If this rule has a %prec,
+ the specified symbol's precedence replaces the default. */
+ if (ruleprec)
+ {
+ rprec[ruleno] = ruleprec->prec;
+ rassoc[ruleno] = ruleprec->assoc;
+ rprecsym[ruleno] = ruleprec->value;
+ }
+
+ ritem[itemno++] = -ruleno;
+ ruleno++;
+
+ if (p) p = p->next;
+ }
+
+ ritem[itemno] = 0;
+}
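+
+/* For illustration (grammar invented): for the two rules
+
+       exp : exp PLUS exp ;
+       exp : NUM ;
+
+   ritem holds the rhs symbol numbers of rule 1 followed by -1, then the
+   rhs of rule 2 followed by -2, then a terminating 0; rlhs[r] is the lhs
+   symbol number of rule r and rrhs[r] the index in ritem where its rhs
+   starts.  rprec/rassoc come from the last token of each rhs (PLUS, NUM)
+   unless a %prec overrides them. */
+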
+/* Read a signed integer from STREAM and return its value. */
+
+int
+read_signed_integer (FILE* stream)
+{
+ register int c = getc(stream);
+ register int sign = 1;
+ register int n;
+
+ if (c == '-')
+ {
+ c = getc(stream);
+ sign = -1;
+ }
+ n = 0;
+ while (isdigit(c))
+ {
+ n = 10*n + (c - '0');
+ c = getc(stream);
+ }
+
+ ungetc(c, stream);
+
+ return n * sign;
+}
+
+void set_parser_name(char* n)
+{
+ if(n) /* define */
+ {if(parser_defined) /* redef */
+ fatals("parser name already defined as \"%s\"and redefined to \"%s\"\n",(void*) parser_name,(void*) n);
+ else
+ parser_defined++;
+ parser_name=(char *)xmalloc(strlen(n)+1);
+ strcpy(parser_name,n);
+ }
+ else /* use only */
+ {
+ if(!parser_defined) /* first use, default */
+ {parser_defined++;
+ fprintf(stderr,"%s:%d parser name defined to default :\"%s\"\n"
+ ,infile,lineno,parser_name);
+ }
+ else /* next use ok*/;
+ }
+}
+
+void read_a_name(char* buf,int len)
+{int c,l;
+ for(c = skip_white_space(),l=0;
+ (isalnum(c) || c == '_');
+ c=getc(finput),l++)
+ if(l<len) buf[l]=c;
+ if(l>=len)
+ {buf[len-1]=0;
+ fprintf(stderr,"%s:%d name too long, truncated to :\"%s\"\n"
+ ,infile,lineno,buf);
+ }
+ else
+ buf[l]=0;
+
+ ungetc(c, finput);
+
+}
+
+void
+parse_name_declaration()
+{char name[65];
+ read_a_name(name,sizeof(name));
+ set_parser_name(name);
+}
+void
+parse_define()
+{char name[65];
+ int c;
+ int after_backslash;
+ read_a_name(name,sizeof(name));
+ set_parser_name(NULL);
+ fprintf(ftable,"#define YY_%s_%s ",parser_name,name);
+ if (definesflag)
+ fprintf(fdefines,"#define YY_%s_%s ",parser_name,name);
+ for(after_backslash=0,c=getc(finput);
+ (after_backslash || c!='\n');
+ c=getc(finput))
+ {after_backslash=(c=='\\');
+ if(c=='\n') lineno++;
+ if(c==EOF)
+ {fatal("unexpected EOF in %define");}
+ hputc(c);
+ }
+ hputc('\n');lineno++;
+}
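+
+/* For illustration (macro name invented): `%define DEBUG 1' comes out as
+
+       #define YY_<name>_DEBUG  1
+
+   in ftable (and fdefines when definesflag is set), where <name> is the
+   parser name; the rest of the source line is copied verbatim, and a
+   trailing backslash continues it onto the next line. */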
diff --git a/tools/bison++/reduce.cc b/tools/bison++/reduce.cc
new file mode 100644
index 000000000..7b0954af1
--- /dev/null
+++ b/tools/bison++/reduce.cc
@@ -0,0 +1,593 @@
+/* Grammar reduction for Bison.
+ Copyright (C) 1988, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/*
+ * Reduce the grammar: Find and eliminate unreachable terminals,
+ * nonterminals, and productions. David S. Bakin.
+ */
+
+/*
+ * Don't eliminate unreachable terminals: They may be used by the user's
+ * parser.
+ */
+
+#include <stdio.h>
+#include "system.h"
+#include "files.h"
+#include "gram.h"
+#include "machine.h"
+#include "new.h"
+
+
+extern char **tags; /* reader.c */
+extern int verboseflag; /* getargs.c */
+static int statisticsflag; /* XXXXXXX */
+
+#ifndef TRUE
+#define TRUE (1)
+#define FALSE (0)
+#endif
+
+typedef unsigned *BSet;
+typedef short *rule;
+
+
+/*
+ * N is the set of all nonterminals which are not useless.  P is the set of
+ * all rules which have no useless nonterminals in their RHS.  V is the set
+ * of all accessible symbols.
+ */
+
+static BSet N, P, V, V1;
+
+static int nuseful_productions, nuseless_productions,
+ nuseful_nonterminals, nuseless_nonterminals;
+
+
+static void useless_nonterminals();
+static void inaccessable_symbols();
+static void reduce_grammar_tables();
+static void print_results();
+static void print_notices();
+void dump_grammar();
+
+extern void fatals(const char*,void*);
+extern void fatals(const char*,void*,void*);
+extern void fatals(const char*,void*,void*,void*);
+extern void fatals(const char*,void*,void*,void*,void*);
+extern void fatals(const char*,void*,void*,void*,void*,void*);
+
+bool
+bits_equal (BSet L, BSet R, int n)
+{
+ int i;
+
+ for (i = n - 1; i >= 0; i--)
+ if (L[i] != R[i])
+ return FALSE;
+ return TRUE;
+}
+
+
+int
+nbits (unsigned i)
+{
+ int count = 0;
+
+ while (i != 0) {
+ i ^= (i & -i);
+ ++count;
+ }
+ return count;
+}
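+
+/* The loop above strips the lowest set bit on each pass (i & -i isolates
+   it, the xor clears it), so e.g. nbits(0x2C) sees 101100 binary and
+   returns 3. */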
+
+
+int
+bits_size (BSet S, int n)
+{
+ int i, count = 0;
+
+ for (i = n - 1; i >= 0; i--)
+ count += nbits(S[i]);
+ return count;
+}
+
+void
+reduce_grammar ()
+{
+ bool reduced;
+
+ /* Allocate the global sets used to compute the reduced grammar */
+
+ N = NEW2(WORDSIZE(nvars), unsigned);
+ P = NEW2(WORDSIZE(nrules + 1), unsigned);
+ V = NEW2(WORDSIZE(nsyms), unsigned);
+ V1 = NEW2(WORDSIZE(nsyms), unsigned);
+
+ useless_nonterminals();
+ inaccessable_symbols();
+
+ reduced = (bool) (nuseless_nonterminals + nuseless_productions > 0);
+
+ if (verboseflag)
+ print_results();
+
+ if (reduced == FALSE)
+ goto done_reducing;
+
+ print_notices();
+
+ if (!BITISSET(N, start_symbol - ntokens))
+ fatals("Start symbol %s does not derive any sentence.",
+ tags[start_symbol]);
+
+ reduce_grammar_tables();
+ /* if (verboseflag) {
+ fprintf(foutput, "REDUCED GRAMMAR\n\n");
+ dump_grammar();
+ }
+ */
+
+ /**/ statisticsflag = FALSE; /* someday getopts should handle this */
+ if (statisticsflag == TRUE)
+ fprintf(stderr,
+ "reduced %s defines %d terminal%s, %d nonterminal%s\
+, and %d production%s.\n", infile,
+ ntokens, (ntokens == 1 ? "" : "s"),
+ nvars, (nvars == 1 ? "" : "s"),
+ nrules, (nrules == 1 ? "" : "s"));
+
+ done_reducing:
+
+ /* Free the global sets used to compute the reduced grammar */
+
+ FREE(N);
+ FREE(V);
+ FREE(P);
+
+}
+
+/*
+ * Another way to do this would be with a set for each production and then do
+ * subset tests against N, but even for the C grammar the whole reducing
+ * process takes only 2 seconds on my 8MHz AT.
+ */
+
+static bool
+useful_production (int i, BSet N)
+{
+ rule r;
+ short n;
+
+ /*
+ * A production is useful if all of the nonterminals in its RHS
+ * appear in the set of useful nonterminals.
+ */
+
+ for (r = &ritem[rrhs[i]]; *r > 0; r++)
+ if (ISVAR(n = *r))
+ if (!BITISSET(N, n - ntokens))
+ return FALSE;
+ return TRUE;
+}
+
+
+/* Remember that rules are 1-origin, symbols are 0-origin. */
+
+static void
+useless_nonterminals ()
+{
+ BSet Np, Ns;
+ int i, n;
+
+ /*
+ * N is set as built. Np is set being built this iteration. P is set
+ * of all productions which have a RHS all in N.
+ */
+
+ Np = NEW2(WORDSIZE(nvars), unsigned);
+
+ /*
+ * The set being computed is a set of nonterminals which can derive
+ * the empty string or strings consisting of all terminals. At each
+ * iteration a nonterminal is added to the set if there is a
+ * production with that nonterminal as its LHS for which all the
+ * nonterminals in its RHS are already in the set. Iterate until the
+ * set being computed remains unchanged. Any nonterminals not in the
+ * set at that point are useless in that they will never be used in
+ * deriving a sentence of the language.
+ *
+ * This iteration doesn't use any special traversal over the
+ * productions. A set is kept of all productions for which all the
+   * nonterminals in the RHS are in the useful set.  Only productions not in
+ * this set are scanned on each iteration. At the end, this set is
+ * saved to be used when finding useful productions: only productions
+ * in this set will appear in the final grammar.
+ */
+
+ n = 0;
+ while (1)
+ {
+ for (i = WORDSIZE(nvars) - 1; i >= 0; i--)
+ Np[i] = N[i];
+ for (i = 1; i <= nrules; i++)
+ {
+ if (!BITISSET(P, i))
+ {
+ if (useful_production(i, N))
+ {
+ SETBIT(Np, rlhs[i] - ntokens);
+ SETBIT(P, i);
+ }
+ }
+ }
+ if (bits_equal(N, Np, WORDSIZE(nvars)))
+ break;
+ Ns = Np;
+ Np = N;
+ N = Ns;
+ }
+ FREE(N);
+ N = Np;
+}
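+
+/* For illustration (grammar invented): given
+
+       S : A ;      A : 'a' ;      B : B 'b' ;
+
+   A enters N on the first pass (its rhs has no nonterminals at all),
+   S on the second (its rhs nonterminal A is now in N), but B never can,
+   since every B production keeps B itself on the right; B is what gets
+   reported as a useless nonterminal. */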
+
+static void
+inaccessable_symbols ()
+{
+ BSet Vp, Vs, Pp;
+ int i, n;
+ short t;
+ rule r;
+
+ /*
+ * Find out which productions are reachable and which symbols are
+ * used. Starting with an empty set of productions and a set of
+ * symbols which only has the start symbol in it, iterate over all
+ * productions until the set of productions remains unchanged for an
+ * iteration. For each production which has a LHS in the set of
+ * reachable symbols, add the production to the set of reachable
+ * productions, and add all of the nonterminals in the RHS of the
+ * production to the set of reachable symbols.
+ *
+ * Consider only the (partially) reduced grammar which has only
+ * nonterminals in N and productions in P.
+ *
+ * The result is the set P of productions in the reduced grammar, and
+ * the set V of symbols in the reduced grammar.
+ *
+ * Although this algorithm also computes the set of terminals which are
+ * reachable, no terminal will be deleted from the grammar. Some
+ * terminals might not be in the grammar but might be generated by
+ * semantic routines, and so the user might want them available with
+ * specified numbers. (Is this true?) However, the nonreachable
+ * terminals are printed (if running in verbose mode) so that the user
+ * can know.
+ */
+
+ Vp = NEW2(WORDSIZE(nsyms), unsigned);
+ Pp = NEW2(WORDSIZE(nrules + 1), unsigned);
+
+ /* If the start symbol isn't useful, then nothing will be useful. */
+ if (!BITISSET(N, start_symbol - ntokens))
+ goto end_iteration;
+
+ SETBIT(V, start_symbol);
+
+ n = 0;
+ while (1)
+ {
+ for (i = WORDSIZE(nsyms) - 1; i >= 0; i--)
+ Vp[i] = V[i];
+ for (i = 1; i <= nrules; i++)
+ {
+ if (!BITISSET(Pp, i) && BITISSET(P, i) &&
+ BITISSET(V, rlhs[i]))
+ {
+ for (r = &ritem[rrhs[i]]; *r >= 0; r++)
+ {
+ if (ISTOKEN(t = *r)
+ || BITISSET(N, t - ntokens))
+ {
+ SETBIT(Vp, t);
+ }
+ }
+ SETBIT(Pp, i);
+ }
+ }
+ if (bits_equal(V, Vp, WORDSIZE(nsyms)))
+ {
+ break;
+ }
+ Vs = Vp;
+ Vp = V;
+ V = Vs;
+ }
+ end_iteration:
+
+ FREE(V);
+ V = Vp;
+
+ /* Tokens 0, 1, and 2 are internal to Bison. Consider them useful. */
+ SETBIT(V, 0); /* end-of-input token */
+ SETBIT(V, 1); /* error token */
+ SETBIT(V, 2); /* illegal token */
+
+ FREE(P);
+ P = Pp;
+
+ nuseful_productions = bits_size(P, WORDSIZE(nrules + 1));
+ nuseless_productions = nrules - nuseful_productions;
+
+ nuseful_nonterminals = 0;
+ for (i = ntokens; i < nsyms; i++)
+ if (BITISSET(V, i))
+ nuseful_nonterminals++;
+ nuseless_nonterminals = nvars - nuseful_nonterminals;
+
+ /* A token that was used in %prec should not be warned about. */
+ for (i = 1; i < nrules; i++)
+ if (rprecsym[i] != 0)
+ SETBIT(V1, rprecsym[i]);
+}
+
+static void
+reduce_grammar_tables ()
+{
+/* This is turned off because we would need to change the numbers
+ in the case statements in the actions file. */
+#if 0
+ /* remove useless productions */
+ if (nuseless_productions > 0)
+ {
+ short np, pn, ni, pi;
+
+ np = 0;
+ ni = 0;
+ for (pn = 1; pn <= nrules; pn++)
+ {
+ if (BITISSET(P, pn))
+ {
+ np++;
+ if (pn != np)
+ {
+ rlhs[np] = rlhs[pn];
+ rline[np] = rline[pn];
+ rprec[np] = rprec[pn];
+ rassoc[np] = rassoc[pn];
+ rrhs[np] = rrhs[pn];
+ if (rrhs[np] != ni)
+ {
+ pi = rrhs[np];
+ rrhs[np] = ni;
+ while (ritem[pi] >= 0)
+ ritem[ni++] = ritem[pi++];
+ ritem[ni++] = -np;
+ }
+ } else {
+ while (ritem[ni++] >= 0);
+ }
+ }
+ }
+ ritem[ni] = 0;
+ nrules -= nuseless_productions;
+ nitems = ni;
+
+ /*
+ * Is it worth it to reduce the amount of memory for the
+ * grammar? Probably not.
+ */
+
+ }
+#endif /* 0 */
+ /* Disable useless productions,
+ since they may contain useless nonterms
+ that would get mapped below to -1 and confuse everyone. */
+ if (nuseless_productions > 0)
+ {
+ int pn;
+
+ for (pn = 1; pn <= nrules; pn++)
+ {
+ if (!BITISSET(P, pn))
+ {
+ rlhs[pn] = -1;
+ }
+ }
+ }
+
+ /* remove useless symbols */
+ if (nuseless_nonterminals > 0)
+ {
+
+ int i, n;
+/* short j; JF unused */
+ short *nontermmap;
+ rule r;
+
+ /*
+ * create a map of nonterminal number to new nonterminal
+ * number. -1 in the map means it was useless and is being
+ * eliminated.
+ */
+
+ nontermmap = NEW2(nvars, short) - ntokens;
+ for (i = ntokens; i < nsyms; i++)
+ nontermmap[i] = -1;
+
+ n = ntokens;
+ for (i = ntokens; i < nsyms; i++)
+ if (BITISSET(V, i))
+ nontermmap[i] = n++;
+
+ /* Shuffle elements of tables indexed by symbol number. */
+
+ for (i = ntokens; i < nsyms; i++)
+ {
+ n = nontermmap[i];
+ if (n >= 0)
+ {
+ sassoc[n] = sassoc[i];
+ sprec[n] = sprec[i];
+ tags[n] = tags[i];
+ } else {
+ free(tags[i]);
+ }
+ }
+
+ /* Replace all symbol numbers in valid data structures. */
+
+ for (i = 1; i <= nrules; i++)
+ {
+ /* Ignore the rules disabled above. */
+ if (rlhs[i] >= 0)
+ rlhs[i] = nontermmap[rlhs[i]];
+ if (ISVAR (rprecsym[i]))
+ /* Can this happen? */
+ rprecsym[i] = nontermmap[rprecsym[i]];
+ }
+
+ for (r = ritem; *r; r++)
+ if (ISVAR(*r))
+ *r = nontermmap[*r];
+
+ start_symbol = nontermmap[start_symbol];
+
+ nsyms -= nuseless_nonterminals;
+ nvars -= nuseless_nonterminals;
+
+ free(&nontermmap[ntokens]);
+ }
+}
+
+static void
+print_results ()
+{
+ int i;
+/* short j; JF unused */
+ rule r;
+ bool b;
+
+ if (nuseless_nonterminals > 0)
+ {
+ fprintf(foutput, "Useless nonterminals:\n\n");
+ for (i = ntokens; i < nsyms; i++)
+ if (!BITISSET(V, i))
+ fprintf(foutput, " %s\n", tags[i]);
+ }
+ b = FALSE;
+ for (i = 0; i < ntokens; i++)
+ {
+ if (!BITISSET(V, i) && !BITISSET(V1, i))
+ {
+ if (!b)
+ {
+ fprintf(foutput, "\n\nTerminals which are not used:\n\n");
+ b = TRUE;
+ }
+ fprintf(foutput, " %s\n", tags[i]);
+ }
+ }
+
+ if (nuseless_productions > 0)
+ {
+ fprintf(foutput, "\n\nUseless rules:\n\n");
+ for (i = 1; i <= nrules; i++)
+ {
+ if (!BITISSET(P, i))
+ {
+ fprintf(foutput, "#%-4d ", i);
+ fprintf(foutput, "%s :\t", tags[rlhs[i]]);
+ for (r = &ritem[rrhs[i]]; *r >= 0; r++)
+ {
+ fprintf(foutput, " %s", tags[*r]);
+ }
+ fprintf(foutput, ";\n");
+ }
+ }
+ }
+ if (nuseless_nonterminals > 0 || nuseless_productions > 0 || b)
+ fprintf(foutput, "\n\n");
+}
+
+void
+dump_grammar ()
+{
+ int i;
+ rule r;
+
+ fprintf(foutput,
+ "ntokens = %d, nvars = %d, nsyms = %d, nrules = %d, nitems = %d\n\n",
+ ntokens, nvars, nsyms, nrules, nitems);
+ fprintf(foutput, "Variables\n---------\n\n");
+ fprintf(foutput, "Value Sprec Sassoc Tag\n");
+ for (i = ntokens; i < nsyms; i++)
+ fprintf(foutput, "%5d %5d %5d %s\n",
+ i, sprec[i], sassoc[i], tags[i]);
+ fprintf(foutput, "\n\n");
+ fprintf(foutput, "Rules\n-----\n\n");
+ for (i = 1; i <= nrules; i++)
+ {
+ fprintf(foutput, "%-5d(%5d%5d)%5d : (@%-5d)",
+ i, rprec[i], rassoc[i], rlhs[i], rrhs[i]);
+ for (r = &ritem[rrhs[i]]; *r > 0; r++)
+ fprintf(foutput, "%5d", *r);
+ fprintf(foutput, " [%d]\n", -(*r));
+ }
+ fprintf(foutput, "\n\n");
+ fprintf(foutput, "Rules interpreted\n-----------------\n\n");
+ for (i = 1; i <= nrules; i++)
+ {
+ fprintf(foutput, "%-5d %s :", i, tags[rlhs[i]]);
+ for (r = &ritem[rrhs[i]]; *r > 0; r++)
+ fprintf(foutput, " %s", tags[*r]);
+ fprintf(foutput, "\n");
+ }
+ fprintf(foutput, "\n\n");
+}
+
+
+static void
+print_notices ()
+{
+ extern int fixed_outfiles;
+
+ if (fixed_outfiles && nuseless_productions)
+ fprintf(stderr, "%d rules never reduced\n", nuseless_productions);
+
+ fprintf(stderr, "%s contains ", infile);
+
+ if (nuseless_nonterminals > 0)
+ {
+ fprintf(stderr, "%d useless nonterminal%s",
+ nuseless_nonterminals,
+ (nuseless_nonterminals == 1 ? "" : "s"));
+ }
+ if (nuseless_nonterminals > 0 && nuseless_productions > 0)
+ fprintf(stderr, " and ");
+
+ if (nuseless_productions > 0)
+ {
+ fprintf(stderr, "%d useless rule%s",
+ nuseless_productions,
+ (nuseless_productions == 1 ? "" : "s"));
+ }
+ fprintf(stderr, ".\n");
+ fflush(stderr);
+}
diff --git a/tools/bison++/smart-install b/tools/bison++/smart-install
new file mode 100644
index 000000000..5bfddeff8
--- /dev/null
+++ b/tools/bison++/smart-install
@@ -0,0 +1,250 @@
+#! /bin/sh
+# @(#) smart-install.sh ShScript env 1.5 94/09/13 14:03:30 (/u/icdc/rdt/tools/mgmake/SCCS/s.smart-install.sh)
+#
+# File name : smart-install.sh
+# Title     : |title|
+# Author: coetmeur
+# Created   : 01 Sep 94
+#
+# Description :
+# Reference document : |doc|
+# Purpose : |purpose|
+#
+#
+#
+# History :
+# |date| coetmeur |purpose|
+#
+mycmdname=smart-install
+
+usage() {
+echo "usage: $mycmdname [<options>] [<file1> <file2>| <files...> <directory>/"
+echo " <file1> : source file"
+echo " <file2> : destination file"
+echo " <files...> : possibly empty list of source files"
+echo " <directory>/ : destination directory with '/' appended"
+echo "trace options: "
+echo " -trace : activate trace"
+echo " -traceonly : activate trace only, disactivate actions"
+echo " -echo : echo actions"
+echo " -quiet : reduce comment messages"
+echo " -usage : show this usage"
+echo "user options: "
+echo " -compare : don't copy if files are equals"
+echo " -used : return error code 1 if nothing is done"
+echo " -preserve : preserve date and flags in copy"
+echo " -apply <command>: apply a command on each file"
+echo " -owner <value> : change owner"
+echo " -group <value> : change group"
+echo " -mode <value> : change protection mode"
+echo " -rename <sed-subst-patterns> : change name of destination "
+echo " in destination directory. "
+echo " <sed-s-pattern> : pattern and replace parts "
+echo " of sed(1) 's' command. can add subdirectory."
+echo "examples: "
+echo " $mycmdname *.h -compare ../include/"
+echo " $mycmdname ../include/"
+echo " $mycmdname -preserve -group kmem -mode 2555 a.out ../bin/"
+echo " $mycmdname -mode 444 -apply 'ranlib -t' *.a ../lib/"
+echo " $mycmdname -rename '#^\\(.*\\)\\.\\([1-9l-p]\\)\\([a-z]*\\)\\.man\$#man\\2/\\1.\\2\\3#' \\"
+echo " *.man ../doc/man/"
+}
+
+badparameters() {
+echo "$mycmdname: $*" >&2
+usage
+exit 2
+}
+
+traceandshow() {
+echo " $@"
+"$@"
+}
+
+doit=""
+traceit=":"
+commentit="echo"
+dest=""
+sources=""
+compareopt=""
+modeopt=""
+groupopt=""
+owneropt=""
+destdir=""
+destisdir=""
+builddirlist=""
+preserveopt=""
+applyopt=""
+donestatus=0
+
+
+while [ "$1" ] ; do
+ case "$1" in
+ -mode)
+ if [ "$modeopt" -a "$modeopt" != "$2" ] ;
+ then badparameters "Ambiguous $1 options"; fi
+ modeopt="$2"
+ shift
+ ;;
+ -owner)
+ if [ "$owneropt" -a "$owneropt" != "$2" ] ;
+ then badparameters "Ambiguous $1 options"; fi
+ owneropt="$2"
+ shift
+ ;;
+ -group)
+ if [ "$groupopt" -a "$groupopt" != "$2" ] ;
+ then badparameters "Ambiguous $1 options"; fi
+ groupopt="$2"
+ shift
+ ;;
+ -apply)
+ if [ "$applyopt" -a "$applyopt" != "$2" ] ;
+ then badparameters "Ambiguous $1 options"; fi
+ applyopt="$2"
+ shift
+ ;;
+ -rename)
+ if [ "$renameopt" -a "$renameopt" != "$2" ] ;
+ then badparameters "Ambiguous $1 options"; fi
+ renameopt="$2"
+ shift
+ ;;
+ -traceonly)
+ doit="echo"
+ traceit="echo"
+ ;;
+ -trace)
+ doit="traceandshow"
+ traceit="echo"
+ ;;
+ -echo)
+ doit="traceandshow"
+ traceit=":"
+ ;;
+ -quiet)
+ commentit=":"
+ doit=""
+ traceit=":"
+ ;;
+ -preserve)
+ preserveopt="true"
+ ;;
+ -used)
+ donestatus=1
+ ;;
+ -compare)
+ compareopt="true"
+ ;;
+ -usage)
+ usage
+ exit 0
+ ;;
+ -*)
+ badparameters Unknown flag "$1"
+ ;;
+ */) sources="$sources $dest"
+ destdir=`dirname "$1."`
+ dest=""
+ destisdir="1$destisdir"
+ ;;
+ *)
+ sources="$sources $dest"
+ dest="$1"
+ ;;
+ esac
+ shift
+done
+set - $sources
+if [ 0$destisdir -gt 1 \
+ -o "$destisdir" -a "$dest" \
+ -o ! "$destisdir" -a "$renameopt" \
+ -o ! "$destisdir" -a $# -ne 1 ] ; then
+ badparameters Bad parameters
+fi
+if [ "$dest" ] ; then destdir=`dirname $dest` ; fi
+if [ "$preserveopt" ] ; then cpcmd="cp -p" ; else cpcmd="cp" ; fi
+
+
+$traceit " 0=<$0>"
+$traceit " mycmdname=<$mycmdname>"
+$traceit " modeopt=<$modeopt>"
+$traceit " owneropt=<$owneropt>"
+$traceit " groupopt=<$groupopt>"
+$traceit " compareopt=<$compareopt>"
+$traceit " preserveopt=<$preserveopt>"
+$traceit " applyopt=<$applyopt>"
+$traceit " cpcmd=<$cpcmd>"
+$traceit " sources=<$sources>"
+$traceit " dest=<$dest>"
+$traceit " destisdir=<$destisdir>"
+$traceit " destdir=<$destdir>"
+
+
+while [ "$1" ] ; do
+ sourcefile="$1"
+ shift
+ sourcename=`basename "$sourcefile"`
+ $traceit " sourcefile=<$sourcefile>"
+ $traceit " sourcename=<$sourcename>"
+ if [ ! -r $sourcefile ] ; then
+ echo "cannot read '$sourcefile'" >&2
+ exit 3
+ fi
+ if [ "$destisdir" ] ; then
+ if [ "$renameopt" ] ; then
+ if destsubname=`echo $sourcename | sed -e "s$renameopt"` &&
+ [ "$destsubname" ] ;
+ then
+ dest="$destdir/$destsubname"
+ destsubdir=`dirname $dest`
+ destname=`basename $dest`
+ $traceit " destsubname=<$destsubname>"
+ else
+ badparameters "Cannot rename '$sourcename' with: sed -e 's$renameopt'"
+ fi
+ else
+ destname="$sourcename"
+ destsubdir="$destdir"
+ dest="$destdir/$destname"
+ fi
+ else
+ destname=`basename $dest`
+ destsubdir=`dirname $dest`
+ fi
+ $traceit " dest=<$dest>"
+ $traceit " destname=<$destname>"
+ $traceit " destsubdir=<$destsubdir>"
+ builddirlist=""
+ subdir="$destsubdir"
+ while [ ! -d "$subdir" ] ; do
+ builddirlist="$subdir $builddirlist"
+ subdir=`dirname "$subdir"`
+ done
+ $traceit " builddirlist=<$builddirlist>"
+ [ ! "$builddirlist" ] || $doit mkdir $builddirlist || exit 3 ;
+
+ if [ "$compareopt" -a -r "$dest" ] && cmp -s $sourcefile $dest ; then
+ # $commentit "${mycmdname}: $sourcefile == $dest"
+ continue;
+ fi
+ donestatus=0
+ desttmp="$destsubdir/#$$#$destname"
+ $traceit " desttmp=<$desttmp>"
+ $commentit "${mycmdname}: $sourcefile -> $dest"
+ if $doit $cpcmd "$sourcefile" "$desttmp" &&
+ trap "$doit rm -f $desttmp" 0 &&
+ if [ "$applyopt" -a ! -w "$desttmp" ] ;
+ then $doit chmod u+w "$desttmp" 2>/dev/null || true ; fi &&
+ if [ "$applyopt" ] ; then $doit $applyopt "$desttmp" ; fi &&
+ if [ "$owneropt" ] ; then $doit chown $owneropt "$desttmp" ; fi &&
+ if [ "$groupopt" ] ; then $doit chgrp $groupopt "$desttmp" ; fi &&
+ if [ "$modeopt" ] ; then $doit chmod $modeopt "$desttmp" ; fi &&
+ $doit mv -f "$desttmp" "$dest" &&
+ trap 0 ; then : ;
+ else
+ echo "problem installing $sourcefile into $dest" >&2
+ exit 3; fi
+done
+
+exit $donestatus
diff --git a/tools/bison++/stamp-vti b/tools/bison++/stamp-vti
new file mode 100644
index 000000000..879d70c7b
--- /dev/null
+++ b/tools/bison++/stamp-vti
@@ -0,0 +1,3 @@
+@set UPDATED 17 February 2002
+@set EDITION 2.21.5
+@set VERSION 2.21.5
diff --git a/tools/bison++/state.h b/tools/bison++/state.h
new file mode 100644
index 000000000..00fb13c1b
--- /dev/null
+++ b/tools/bison++/state.h
@@ -0,0 +1,137 @@
+/* Type definitions for nondeterministic finite state machine for bison,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* These type definitions are used to represent a nondeterministic
+ finite state machine that parses the specified grammar.
+ This information is generated by the function generate_states
+ in the file LR0.
+
+Each state of the machine is described by a set of items --
+particular positions in particular rules -- that are the possible
+places where parsing could continue when the machine is in this state.
+The symbols at these items are the allowable inputs that can follow now.
+
+A core represents one state. States are numbered in the number field.
+When generate_states is finished, the starting state is state 0
+and nstates is the number of states. (A transition to a state
+whose state number is nstates indicates termination.) All the cores
+are chained together and first_state points to the first one (state 0).
+
+For each state there is a particular symbol which must have been the
+last thing accepted to reach that state. It is the accessing_symbol
+of the core.
+
+Each core contains a vector of nitems items which are the indices
+in the ritems vector of the items that are selected in this state.
+
+The link field is used for chaining buckets that hash states by
+their itemsets. This is for recognizing equivalent states and
+combining them when the states are generated.
+
+The two types of transitions are shifts (push the lookahead token
+and read another) and reductions (combine the last n things on the
+stack via a rule, replace them with the symbol that the rule derives,
+and leave the lookahead token alone). When the states are generated,
+these transitions are represented in two other lists.
+
+Each shifts structure describes the possible shift transitions out
+of one state, the state whose number is in the number field.
+The shifts structures are linked through next and first_shift points to them.
+Each contains a vector of numbers of the states that shift transitions
+can go to. The accessing_symbol fields of those states' cores say what kind
+of input leads to them.
+
+A shift to state zero should be ignored. Conflict resolution
+deletes shifts by changing them to zero.
+
+Each reductions structure describes the possible reductions at the state
+whose number is in the number field. The data is a list of nreds rules,
+represented by their rule numbers. first_reduction points to the list
+of these structures.
+
+Conflict resolution can decide that certain tokens in certain
+states should explicitly be errors (for implementing %nonassoc).
+For each state, the tokens that are errors for this reason
+are recorded in an errs structure, which has the state number
+in its number field. The rest of the errs structure is full
+of token numbers.
+
+There is at least one shift transition present in state zero.
+It leads to a next-to-final state whose accessing_symbol is
+the grammar's start symbol. The next-to-final state has one shift
+to the final state, whose accessing_symbol is zero (end of input).
+The final state has one shift, which goes to the termination state
+(whose number is nstates-1).
+The reason for the extra state at the end is to placate the parser's
+strategy of making all decisions one token ahead of its actions. */
+
+
+typedef
+ struct core
+ {
+ struct core *next;
+ struct core *link;
+ short number;
+ short accessing_symbol;
+ short nitems;
+ short items[1];
+ }
+ core;
+
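+/* The items[1] member above is the usual trailing-array idiom: a core
+   holding n item indices is carved out with extra space, roughly
+
+       core *p = (core *) xmalloc (sizeof (core) + (n - 1) * sizeof (short));
+
+   (a sketch only; the real allocation is done by the LR(0) state
+   construction, not in this header). */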
+
+
+typedef
+ struct shifts
+ {
+ struct shifts *next;
+ short number;
+ short nshifts;
+ short internalShifts[1];
+ }
+ shifts;
+
+
+
+typedef
+ struct errs
+ {
+ short nerrs;
+ short internalErrs[1];
+ }
+ errs;
+
+
+
+typedef
+ struct reductions
+ {
+ struct reductions *next;
+ short number;
+ short nreds;
+ short rules[1];
+ }
+ reductions;
+
+
+
+extern int nstates;
+extern core *first_state;
+extern shifts *first_shift;
+extern reductions *first_reduction;
diff --git a/tools/bison++/symtab.cc b/tools/bison++/symtab.cc
new file mode 100644
index 000000000..33d3f8b5a
--- /dev/null
+++ b/tools/bison++/symtab.cc
@@ -0,0 +1,147 @@
+/* Symbol table manager for Bison,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include <stdio.h>
+#include "system.h"
+#include "new.h"
+#include "symtab.h"
+#include "gram.h"
+
+
+bucket **symtab;
+bucket *firstsymbol;
+bucket *lastsymbol;
+
+
+
+int
+hash(char* key)
+{
+ register char *cp;
+ register int k;
+
+ cp = key;
+ k = 0;
+ while (*cp)
+ k = ((k << 1) ^ (*cp++)) & 0x3fff;
+
+ return (k % TABSIZE);
+}
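+
+/* The hash folds each character in with a shift and xor, keeps 14 bits,
+   and reduces modulo TABSIZE (1009).  For example, for the key "ab":
+   k = (0<<1)^'a' = 97, then k = (97<<1)^'b' = 160, and 160 % 1009 = 160,
+   so "ab" lands in bucket 160. */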
+
+
+
+char *
+copys(char* s)
+{
+ register int i;
+ register char *cp;
+ register char *result;
+
+ i = 1;
+ for (cp = s; *cp; cp++)
+ i++;
+
+ result = xmalloc((unsigned int)i);
+ strcpy(result, s);
+ return (result);
+}
+
+
+void
+tabinit()
+{
+/* register int i; JF unused */
+
+ symtab = NEW2(TABSIZE, bucket *);
+
+ firstsymbol = NULL;
+ lastsymbol = NULL;
+}
+
+
+bucket *
+getsym(char* key)
+{
+ register int hashval;
+ register bucket *bp;
+ register int found;
+
+ hashval = hash(key);
+ bp = symtab[hashval];
+
+ found = 0;
+ while (bp != NULL && found == 0)
+ {
+ if (strcmp(key, bp->tag) == 0)
+ found = 1;
+ else
+ bp = bp->link;
+ }
+
+ if (found == 0)
+ {
+ nsyms++;
+
+ bp = NEW(bucket);
+ bp->link = symtab[hashval];
+ bp->next = NULL;
+ bp->tag = copys(key);
+ bp->internalClass = SUNKNOWN;
+
+ if (firstsymbol == NULL)
+ {
+ firstsymbol = bp;
+ lastsymbol = bp;
+ }
+ else
+ {
+ lastsymbol->next = bp;
+ lastsymbol = bp;
+ }
+
+ symtab[hashval] = bp;
+ }
+
+ return (bp);
+}
+
+
+void
+free_symtab()
+{
+ register int i;
+ register bucket *bp,*bptmp;/* JF don't use ptr after free */
+
+ for (i = 0; i < TABSIZE; i++)
+ {
+ bp = symtab[i];
+ while (bp)
+ {
+ bptmp = bp->link;
+#if 0 /* This causes crashes because one string can appear more than once. */
+ if (bp->type_name)
+ FREE(bp->type_name);
+#endif
+ FREE(bp);
+ bp = bptmp;
+ }
+ }
+ FREE(symtab);
+}
diff --git a/tools/bison++/symtab.h b/tools/bison++/symtab.h
new file mode 100644
index 000000000..bd8c514ce
--- /dev/null
+++ b/tools/bison++/symtab.h
@@ -0,0 +1,50 @@
+/* Definitions for symtab.c and callers, part of bison,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#define TABSIZE 1009
+
+
+/* symbol classes */
+
+#define SUNKNOWN 0
+#define STOKEN 1
+#define SNTERM 2
+
+
+typedef
+ struct bucket
+ {
+ struct bucket *link;
+ struct bucket *next;
+ char *tag;
+ char *type_name;
+ short value;
+ short prec;
+ short assoc;
+ short user_token_number;
+ char internalClass;
+ }
+ bucket;
+
+
+extern bucket **symtab;
+extern bucket *firstsymbol;
+
+extern bucket *getsym(char*);
diff --git a/tools/bison++/system.h b/tools/bison++/system.h
new file mode 100644
index 000000000..607e558f5
--- /dev/null
+++ b/tools/bison++/system.h
@@ -0,0 +1,34 @@
+#ifdef MSDOS
+#ifndef _MSDOS
+#define _MSDOS
+#endif
+#endif
+
+#if defined(HAVE_STDLIB_H) || defined(_MSDOS)
+#include <stdlib.h>
+#endif
+
+#if (defined(VMS) || defined(MSDOS)) && !defined(HAVE_STRING_H)
+#define HAVE_STRING_H 1
+#endif
+
+#ifdef _MSDOS
+#include <io.h>
+
+#define strlwr _strlwr
+#define strupr _strupr
+#define unlink _unlink
+#define mktemp _mktemp
+#endif /* MSDOS */
+
+#if defined(STDC_HEADERS) || defined(HAVE_STRING_H)
+#include <string.h>
+/* An ANSI string.h and pre-ANSI memory.h might conflict. */
+#if !defined(STDC_HEADERS) && defined(HAVE_MEMORY_H)
+#include <memory.h>
+#endif /* not STDC_HEADERS and HAVE_MEMORY_H */
+#define bcopy(src, dst, num) memcpy((dst), (src), (num))
+#else /* not STDC_HEADERS and not HAVE_STRING_H */
+#include <strings.h>
+/* memory.h and strings.h conflict on some systems. */
+#endif /* not STDC_HEADERS and not HAVE_STRING_H */
diff --git a/tools/bison++/texinfo.tex b/tools/bison++/texinfo.tex
new file mode 100644
index 000000000..3aae14df0
--- /dev/null
+++ b/tools/bison++/texinfo.tex
@@ -0,0 +1,4041 @@
+%% TeX macros to handle texinfo files
+
+% Copyright (C) 1985, 86, 88, 90, 91, 92, 1993 Free Software Foundation, Inc.
+
+%This texinfo.tex file is free software; you can redistribute it and/or
+%modify it under the terms of the GNU General Public License as
+%published by the Free Software Foundation; either version 2, or (at
+%your option) any later version.
+
+%This texinfo.tex file is distributed in the hope that it will be
+%useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+%of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+%General Public License for more details.
+
+%You should have received a copy of the GNU General Public License
+%along with this texinfo.tex file; see the file COPYING. If not, write
+%to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+%USA.
+
+
+%In other words, you are welcome to use, share and improve this program.
+%You are forbidden to forbid anyone else to use, share and improve
+%what you give them. Help stamp out software-hoarding!
+
+\def\texinfoversion{2.112}
+\message{Loading texinfo package [Version \texinfoversion]:}
+
+% Print the version number if in a .fmt file.
+\everyjob{\message{[Texinfo version \texinfoversion]}\message{}}
+
+% Save some parts of plain tex whose names we will redefine.
+
+\let\ptexlbrace=\{
+\let\ptexrbrace=\}
+\let\ptexdots=\dots
+\let\ptexdot=\.
+\let\ptexstar=\*
+\let\ptexend=\end
+\let\ptexbullet=\bullet
+\let\ptexb=\b
+\let\ptexc=\c
+\let\ptexi=\i
+\let\ptext=\t
+\let\ptexl=\l
+\let\ptexL=\L
+
+\def\tie{\penalty 10000\ } % Save plain tex definition of ~.
+
+\message{Basics,}
+\chardef\other=12
+
+% If this character appears in an error message or help string, it
+% starts a new line in the output.
+\newlinechar = `^^J
+
+% Ignore a token.
+%
+\def\gobble#1{}
+
+\hyphenation{ap-pen-dix}
+\hyphenation{mini-buf-fer mini-buf-fers}
+\hyphenation{eshell}
+
+% Margin to add to right of even pages, to left of odd pages.
+\newdimen \bindingoffset \bindingoffset=0pt
+\newdimen \normaloffset \normaloffset=\hoffset
+\newdimen\pagewidth \newdimen\pageheight
+\pagewidth=\hsize \pageheight=\vsize
+
+% Sometimes it is convenient to have everything in the transcript file
+% and nothing on the terminal. We don't just call \tracingall here,
+% since that produces some useless output on the terminal.
+%
+\def\gloggingall{\begingroup \globaldefs = 1 \loggingall \endgroup}%
+\def\loggingall{\tracingcommands2 \tracingstats2
+ \tracingpages1 \tracingoutput1 \tracinglostchars1
+ \tracingmacros2 \tracingparagraphs1 \tracingrestores1
+ \showboxbreadth\maxdimen\showboxdepth\maxdimen
+}%
+
+%---------------------Begin change-----------------------
+%
+%%%% For @cropmarks command.
+% Dimensions to add cropmarks at corners.  Added by P. A. MacKay, 12 Nov. 1986
+%
+\newdimen\cornerlong \newdimen\cornerthick
+\newdimen \topandbottommargin
+\newdimen \outerhsize \newdimen \outervsize
+\cornerlong=1pc\cornerthick=.3pt % These set size of cropmarks
+\outerhsize=7in
+%\outervsize=9.5in
+% Alternative @smallbook page size is 9.25in
+\outervsize=9.25in
+\topandbottommargin=.75in
+%
+%---------------------End change-----------------------
+
+% \onepageout takes a vbox as an argument. Note that \pagecontents
+% does insertions itself, but you have to call it yourself.
+\chardef\PAGE=255 \output={\onepageout{\pagecontents\PAGE}}
+\def\onepageout#1{\hoffset=\normaloffset
+\ifodd\pageno \advance\hoffset by \bindingoffset
+\else \advance\hoffset by -\bindingoffset\fi
+{\escapechar=`\\\relax % makes sure backslash is used in output files.
+\shipout\vbox{{\let\hsize=\pagewidth \makeheadline} \pagebody{#1}%
+{\let\hsize=\pagewidth \makefootline}}}%
+\advancepageno \ifnum\outputpenalty>-20000 \else\dosupereject\fi}
+
+%%%% For @cropmarks command %%%%
+
+% Here is a modification of the main output routine for Near East Publications
+% This provides right-angle cropmarks at all four corners.
+% The contents of the page are centerlined into the cropmarks,
+% and any desired binding offset is added as an \hskip on either
+% side of the centerlined box. (P. A. MacKay, 12 November, 1986)
+%
+\def\croppageout#1{\hoffset=0pt % make sure this doesn't mess things up
+{\escapechar=`\\\relax % makes sure backslash is used in output files.
+ \shipout
+ \vbox to \outervsize{\hsize=\outerhsize
+ \vbox{\line{\ewtop\hfill\ewtop}}
+ \nointerlineskip
+ \line{\vbox{\moveleft\cornerthick\nstop}
+ \hfill
+ \vbox{\moveright\cornerthick\nstop}}
+ \vskip \topandbottommargin
+ \centerline{\ifodd\pageno\hskip\bindingoffset\fi
+ \vbox{
+ {\let\hsize=\pagewidth \makeheadline}
+ \pagebody{#1}
+ {\let\hsize=\pagewidth \makefootline}}
+ \ifodd\pageno\else\hskip\bindingoffset\fi}
+ \vskip \topandbottommargin plus1fill minus1fill
+ \boxmaxdepth\cornerthick
+ \line{\vbox{\moveleft\cornerthick\nsbot}
+ \hfill
+ \vbox{\moveright\cornerthick\nsbot}}
+ \nointerlineskip
+ \vbox{\line{\ewbot\hfill\ewbot}}
+ }}
+ \advancepageno
+ \ifnum\outputpenalty>-20000 \else\dosupereject\fi}
+%
+% Do @cropmarks to get crop marks
+\def\cropmarks{\let\onepageout=\croppageout }
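+% In practice (illustrative): once a manual says `@cropmarks', every page
+% shipped out from then on goes through \croppageout instead of \onepageout.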
+
+\def\pagebody#1{\vbox to\pageheight{\boxmaxdepth=\maxdepth #1}}
+{\catcode`\@ =11
+\gdef\pagecontents#1{\ifvoid\topins\else\unvbox\topins\fi
+\dimen@=\dp#1 \unvbox#1
+\ifvoid\footins\else\vskip\skip\footins\footnoterule \unvbox\footins\fi
+\ifr@ggedbottom \kern-\dimen@ \vfil \fi}
+}
+
+%
+% Here are the rules for the cropmarks. Note that they are
+% offset so that the space between them is truly \outerhsize or \outervsize
+% (P. A. MacKay, 12 November, 1986)
+%
+\def\ewtop{\vrule height\cornerthick depth0pt width\cornerlong}
+\def\nstop{\vbox
+ {\hrule height\cornerthick depth\cornerlong width\cornerthick}}
+\def\ewbot{\vrule height0pt depth\cornerthick width\cornerlong}
+\def\nsbot{\vbox
+ {\hrule height\cornerlong depth\cornerthick width\cornerthick}}
+
+% Parse an argument, then pass it to #1. The argument is the rest of
+% the input line (except we remove a trailing comment). #1 should be a
+% macro which expects an ordinary undelimited TeX argument.
+%
+\def\parsearg#1{%
+ \let\next = #1%
+ \begingroup
+ \obeylines
+ \futurelet\temp\parseargx
+}
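+% Illustrative pattern (\foo and \foozzz are hypothetical names): a command
+% that takes the rest of the source line is wired up as
+%   \def\foozzz#1{\message{got: #1}}
+%   \def\foo{\parsearg\foozzz}   % `@foo some text' calls \foozzz{some text}
+% which is the pattern used below for @center, @settitle, @need, and others.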
+
+% If the next token is an obeyed space (from an @example environment or
+% the like), remove it and recurse. Otherwise, we're done.
+\def\parseargx{%
+ % \obeyedspace is defined far below, after the definition of \sepspaces.
+ \ifx\obeyedspace\temp
+ \expandafter\parseargdiscardspace
+ \else
+ \expandafter\parseargline
+ \fi
+}
+
+% Remove a single space (as the delimiter token to the macro call).
+{\obeyspaces %
+ \gdef\parseargdiscardspace {\futurelet\temp\parseargx}}
+
+{\obeylines %
+ \gdef\parseargline#1^^M{%
+ \endgroup % End of the group started in \parsearg.
+ %
+ % First remove any @c comment, then any @comment.
+ % Result of each macro is put in \toks0.
+ \argremovec #1\c\relax %
+ \expandafter\argremovecomment \the\toks0 \comment\relax %
+ %
+ % Call the caller's macro, saved as \next in \parsearg.
+ \expandafter\next\expandafter{\the\toks0}%
+ }%
+}
+
+% Since all \c{,omment} does is throw away the argument, we can let TeX
+% do that for us. The \relax here is matched by the \relax in the call
+% in \parseargline; it could be more or less anything; its purpose is
+% just to delimit the argument to the \c.
+\def\argremovec#1\c#2\relax{\toks0 = {#1}}
+\def\argremovecomment#1\comment#2\relax{\toks0 = {#1}}
+
+% \argremovec{,omment} might leave us with trailing spaces, though; e.g.,
+% @end itemize @c foo
+% will have two active spaces as part of the argument with the
+% `itemize'. Here we remove all active spaces from #1, and assign the
+% result to \toks0.
+%
+% This loses if there are any *other* active characters besides spaces
+% in the argument -- _ ^ +, for example -- since they get expanded.
+% Fortunately, Texinfo does not define any such commands. (If it ever
+% does, the catcode of the characters in question will have to be changed
+% here.) But this means we cannot call \removeactivespaces as part of
+% \argremovec{,omment}, since @c uses \parsearg, and thus the argument
+% that \parsearg gets might well have any character at all in it.
+%
+\def\removeactivespaces#1{%
+ \begingroup
+ \ignoreactivespaces
+ \edef\temp{#1}%
+ \global\toks0 = \expandafter{\temp}%
+ \endgroup
+}
+
+% Change the active space to expand to nothing.
+%
+\begingroup
+ \obeyspaces
+ \gdef\ignoreactivespaces{\obeyspaces\let =\empty}
+\endgroup
+
+
+\def\flushcr{\ifx\par\lisppar \def\next##1{}\else \let\next=\relax \fi \next}
+
+%% These are used to keep @begin/@end levels from running away
+%% Call \inENV within environments (after a \begingroup)
+\newif\ifENV \ENVfalse \def\inENV{\ifENV\relax\else\ENVtrue\fi}
+\def\ENVcheck{%
+\ifENV\errmessage{Still within an environment. Type Return to continue.}
+\endgroup\fi} % This is not perfect, but it should reduce lossage
+
+% @begin foo is the same as @foo, for now.
+\newhelp\EMsimple{Type <Return> to continue.}
+
+\outer\def\begin{\parsearg\beginxxx}
+
+\def\beginxxx #1{%
+\expandafter\ifx\csname #1\endcsname\relax
+{\errhelp=\EMsimple \errmessage{Undefined command @begin #1}}\else
+\csname #1\endcsname\fi}
+
+% @end foo executes the definition of \Efoo.
+%
+\def\end{\parsearg\endxxx}
+\def\endxxx #1{%
+ \removeactivespaces{#1}%
+ \edef\endthing{\the\toks0}%
+ %
+ \expandafter\ifx\csname E\endthing\endcsname\relax
+ \expandafter\ifx\csname \endthing\endcsname\relax
+ % There's no \foo, i.e., no ``environment'' foo.
+ \errhelp = \EMsimple
+ \errmessage{Undefined command `@end \endthing'}%
+ \else
+ \unmatchedenderror\endthing
+ \fi
+ \else
+ % Everything's ok; the right environment has been started.
+ \csname E\endthing\endcsname
+ \fi
+}
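+% For example (illustrative): `@end quotation' runs \Equotation, which the
+% matching @quotation environment is expected to have defined beforehand.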
+
+% There is an environment #1, but it hasn't been started. Give an error.
+%
+\def\unmatchedenderror#1{%
+ \errhelp = \EMsimple
+ \errmessage{This `@end #1' doesn't have a matching `@#1'}%
+}
+
+% Define the control sequence \E#1 to give an unmatched @end error.
+%
+\def\defineunmatchedend#1{%
+ \expandafter\def\csname E#1\endcsname{\unmatchedenderror{#1}}%
+}
+
+
+% Single-spacing is done by various environments (specifically, in
+% \nonfillstart and \quotations).
+\newskip\singlespaceskip \singlespaceskip = \baselineskip
+\def\singlespace{%
+% Why was this kern here? It messes up equalizing space above and below
+% environments. --karl, 6may93
+%{\advance \baselineskip by -\singlespaceskip
+%\kern \baselineskip}%
+\baselineskip=\singlespaceskip
+}
+
+%% Simple single-character @ commands
+
+% @@ prints an @
+% Kludge this until the fonts are right (grr).
+\def\@{{\tt \char '100}}
+
+% This is turned off because it was never documented
+% and you can use @w{...} around a quote to suppress ligatures.
+%% Define @` and @' to be the same as ` and '
+%% but suppressing ligatures.
+%\def\`{{`}}
+%\def\'{{'}}
+
+% Used to generate quoted braces.
+
+\def\mylbrace {{\tt \char '173}}
+\def\myrbrace {{\tt \char '175}}
+\let\{=\mylbrace
+\let\}=\myrbrace
+
+% @: forces normal size whitespace following.
+\def\:{\spacefactor=1000 }
+
+% @* forces a line break.
+\def\*{\hfil\break\hbox{}\ignorespaces}
+
+% @. is an end-of-sentence period.
+\def\.{.\spacefactor=3000 }
+
+% @w prevents a word break. Without the \leavevmode, @w at the
+% beginning of a paragraph, when TeX is still in vertical mode, would
+% produce a whole line of output instead of starting the paragraph.
+\def\w#1{\leavevmode\hbox{#1}}
+
+% @group ... @end group forces ... to be all on one page, by enclosing
+% it in a TeX vbox. We use \vtop instead of \vbox to construct the box
+% to keep its height that of a normal line. According to the rules for
+% \topskip (p.114 of the TeXbook), the glue inserted is
+% max (\topskip - \ht (first item), 0). If that height is large,
+% therefore, no glue is inserted, and the space between the headline and
+% the text is small, which looks bad.
+%
+\def\group{\begingroup
+ \ifnum\catcode13=\active \else
+ \errhelp = \groupinvalidhelp
+ \errmessage{@group invalid in context where filling is enabled}%
+ \fi
+ %
+ % The \vtop we start below produces a box with normal height and large
+ % depth; thus, TeX puts \baselineskip glue before it, and (when the
+ % next line of text is done) \lineskip glue after it. (See p.82 of
+ % the TeXbook.) Thus, space below is not quite equal to space
+ % above. But it's pretty close.
+ \def\Egroup{%
+ \egroup % End the \vtop.
+ \endgroup % End the \group.
+ }%
+ %
+ \vtop\bgroup
+ % We have to put a strut on the last line in case the @group is in
+ % the midst of an example, rather than completely enclosing it.
+ % Otherwise, the interline space between the last line of the group
+ % and the first line afterwards is too small. But we can't put the
+ % strut in \Egroup, since there it would be on a line by itself.
+ % Hence this just inserts a strut at the beginning of each line.
+ \everypar = {\strut}%
+ %
+ % Since we have a strut on every line, we don't need any of TeX's
+ % normal interline spacing.
+ \offinterlineskip
+ %
+ % OK, but now we have to do something about blank
+ % lines in the input in @example-like environments, which normally
+ % just turn into \lisppar, which will insert no space now that we've
+ % turned off the interline space. Simplest is to make them be an
+ % empty paragraph.
+ \ifx\par\lisppar
+ \edef\par{\leavevmode \par}%
+ %
+ % Reset ^^M's definition to new definition of \par.
+ \obeylines
+ \fi
+ %
+ % We do @comment here in case we are called inside an environment,
+ % such as @example, where each end-of-line in the input causes an
+ % end-of-line in the output. We don't want the end-of-line after
+ % the `@group' to put extra space in the output. Since @group
+ % should appear on a line by itself (according to the Texinfo
+ % manual), we don't worry about eating any user text.
+ \comment
+}
+%
+% TeX puts in an \escapechar (i.e., `@') at the beginning of the help
+% message, so this ends up printing `@group can only ...'.
+%
+\newhelp\groupinvalidhelp{%
+group can only be used in environments such as @example,^^J%
+where each line of input produces a line of output.}
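+% Typical use (illustrative): wrapping a short @example in
+% `@group ... @end group' keeps all of its lines together on one page.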
+
+% @need space-in-mils
+% forces a page break if there is not space-in-mils remaining.
+
+\newdimen\mil \mil=0.001in
+
+\def\need{\parsearg\needx}
+
+% Old definition--didn't work.
+%\def\needx #1{\par %
+%% This method tries to make TeX break the page naturally
+%% if the depth of the box does not fit.
+%{\baselineskip=0pt%
+%\vtop to #1\mil{\vfil}\kern -#1\mil\penalty 10000
+%\prevdepth=-1000pt
+%}}
+
+\def\needx#1{%
+ % Go into vertical mode, so we don't make a big box in the middle of a
+ % paragraph.
+ \par
+ %
+ % Don't add any leading before our big empty box, but allow a page
+ % break, since the best break might be right here.
+ \allowbreak
+ \nointerlineskip
+ \vtop to #1\mil{\vfil}%
+ %
+ % TeX does not even consider page breaks if a penalty added to the
+ % main vertical list is 10000 or more. But in order to see if the
+ % empty box we just added fits on the page, we must make it consider
+ % page breaks. On the other hand, we don't want to actually break the
+ % page after the empty box. So we use a penalty of 9999.
+ %
+ % There is an extremely small chance that TeX will actually break the
+ % page at this \penalty, if there are no other feasible breakpoints in
+ % sight. (If the user is using lots of big @group commands, which
+ % almost-but-not-quite fill up a page, TeX will have a hard time doing
+ % good page breaking, for example.) However, I could not construct an
+ % example where a page broke at this \penalty; if it happens in a real
+ % document, then we can reconsider our strategy.
+ \penalty9999
+ %
+ % Back up by the size of the box, whether we did a page break or not.
+ \kern -#1\mil
+ %
+ % Do not allow a page break right after this kern.
+ \nobreak
+}
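+% For instance (illustrative): since \mil is 0.001in, `@need 800' requests
+% at least 0.8in of free space and forces a page break if less remains.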
+
+% @br forces paragraph break
+
+\let\br = \par
+
+% @dots{} output some dots
+
+\def\dots{$\ldots$}
+
+% @page forces the start of a new page
+
+\def\page{\par\vfill\supereject}
+
+% @exdent text....
+% outputs text on separate line in roman font, starting at standard page margin
+
+% This records the amount of indent in the innermost environment.
+% That's how much \exdent should take out.
+\newskip\exdentamount
+
+% This defn is used inside fill environments such as @defun.
+\def\exdent{\parsearg\exdentyyy}
+\def\exdentyyy #1{{\hfil\break\hbox{\kern -\exdentamount{\rm#1}}\hfil\break}}
+
+% This defn is used inside nofill environments such as @example.
+\def\nofillexdent{\parsearg\nofillexdentyyy}
+\def\nofillexdentyyy #1{{\advance \leftskip by -\exdentamount
+\leftline{\hskip\leftskip{\rm#1}}}}
+
+%\hbox{{\rm#1}}\hfil\break}}
+
+% @include file insert text of that file as input.
+
+\def\include{\parsearg\includezzz}
+%Use \input\thisfile to avoid blank after \input, which may be an active
+%char (in which case the blank would become the \input argument).
+%The grouping keeps the value of \thisfile correct even when @include
+%is nested.
+\def\includezzz #1{\begingroup
+\def\thisfile{#1}\input\thisfile
+\endgroup}
+
+\def\thisfile{}
+
+% @center line outputs that line, centered
+
+\def\center{\parsearg\centerzzz}
+\def\centerzzz #1{{\advance\hsize by -\leftskip
+\advance\hsize by -\rightskip
+\centerline{#1}}}
+
+% @sp n outputs n lines of vertical space
+
+\def\sp{\parsearg\spxxx}
+\def\spxxx #1{\par \vskip #1\baselineskip}
+
+% @comment ...line which is ignored...
+% @c is the same as @comment
+% @ignore ... @end ignore is another way to write a comment
+
+\def\comment{\catcode 64=\other \catcode 123=\other \catcode 125=\other%
+\parsearg \commentxxx}
+
+\def\commentxxx #1{\catcode 64=0 \catcode 123=1 \catcode 125=2 }
+
+\let\c=\comment
+
+% Prevent errors for section commands.
+% Used in @ignore and in failing conditionals.
+\def\ignoresections{%
+\let\chapter=\relax
+\let\unnumbered=\relax
+\let\top=\relax
+\let\unnumberedsec=\relax
+\let\unnumberedsection=\relax
+\let\unnumberedsubsec=\relax
+\let\unnumberedsubsection=\relax
+\let\unnumberedsubsubsec=\relax
+\let\unnumberedsubsubsection=\relax
+\let\section=\relax
+\let\subsec=\relax
+\let\subsubsec=\relax
+\let\subsection=\relax
+\let\subsubsection=\relax
+\let\appendix=\relax
+\let\appendixsec=\relax
+\let\appendixsection=\relax
+\let\appendixsubsec=\relax
+\let\appendixsubsection=\relax
+\let\appendixsubsubsec=\relax
+\let\appendixsubsubsection=\relax
+\let\contents=\relax
+\let\smallbook=\relax
+\let\titlepage=\relax
+}
+
+% Used in nested conditionals, where we have to parse the Texinfo source
+% and so want to turn off most commands, in case they are used
+% incorrectly.
+%
+\def\ignoremorecommands{%
+ \let\defcv = \relax
+ \let\deffn = \relax
+ \let\deffnx = \relax
+ \let\defindex = \relax
+ \let\defivar = \relax
+ \let\defmac = \relax
+ \let\defmethod = \relax
+ \let\defop = \relax
+ \let\defopt = \relax
+ \let\defspec = \relax
+ \let\deftp = \relax
+ \let\deftypefn = \relax
+ \let\deftypefun = \relax
+ \let\deftypevar = \relax
+ \let\deftypevr = \relax
+ \let\defun = \relax
+ \let\defvar = \relax
+ \let\defvr = \relax
+ \let\ref = \relax
+ \let\xref = \relax
+ \let\printindex = \relax
+ \let\pxref = \relax
+ \let\settitle = \relax
+ \let\include = \relax
+ \let\lowersections = \relax
+ \let\down = \relax
+ \let\raisesections = \relax
+ \let\up = \relax
+ \let\set = \relax
+ \let\clear = \relax
+}
+
+% Ignore @ignore ... @end ignore.
+%
+\def\ignore{\doignore{ignore}}
+
+% Also ignore @ifinfo, @menu, and @direntry text.
+%
+\def\ifinfo{\doignore{ifinfo}}
+\def\menu{\doignore{menu}}
+\def\direntry{\doignore{direntry}}
+
+% Ignore text until a line `@end #1'.
+%
+\def\doignore#1{\begingroup
+ % Don't complain about control sequences we have declared \outer.
+ \ignoresections
+ %
+ % Define a command to swallow text until we reach `@end #1'.
+ \long\def\doignoretext##1\end #1{\enddoignore}%
+ %
+ % Make sure that spaces turn into tokens that match what \doignoretext wants.
+ \catcode32 = 10
+ %
+ % And now expand that command.
+ \doignoretext
+}
+
+% What we do to finish off ignored text.
+%
+\def\enddoignore{\endgroup\ignorespaces}%
+
+\newif\ifwarnedobs\warnedobsfalse
+\def\obstexwarn{%
+ \ifwarnedobs\relax\else
+ % We need to warn folks that they may have trouble with TeX 3.0.
+ % This uses \immediate\write16 rather than \message to get newlines.
+ \immediate\write16{}
+ \immediate\write16{***WARNING*** for users of Unix TeX 3.0!}
+ \immediate\write16{This manual trips a bug in TeX version 3.0 (tex hangs).}
+ \immediate\write16{If you are running another version of TeX, relax.}
+ \immediate\write16{If you are running Unix TeX 3.0, kill this TeX process.}
+ \immediate\write16{ Then upgrade your TeX installation if you can.}
+ \immediate\write16{If you are stuck with version 3.0, run the}
+ \immediate\write16{ script ``tex3patch'' from the Texinfo distribution}
+ \immediate\write16{ to use a workaround.}
+ \immediate\write16{}
+ \warnedobstrue
+ \fi
+}
+
+% **In TeX 3.0, setting text in \nullfont hangs tex. For a
+% workaround (which requires the file ``dummy.tfm'' to be installed),
+% uncomment the following line:
+%%%%%\font\nullfont=dummy\let\obstexwarn=\relax
+
+% Ignore text, except that we keep track of conditional commands for
+% purposes of nesting, up to an `@end #1' command.
+%
+\def\nestedignore#1{%
+ \obstexwarn
+ % We must actually expand the ignored text to look for the @end
+ % command, so that nested ignore constructs work. Thus, we put the
+ % text into a \vbox and then do nothing with the result. To minimize
+  % the chance of memory overflow, we follow the approach outlined on
+ % page 401 of the TeXbook: make the current font be a dummy font.
+ %
+ \setbox0 = \vbox\bgroup
+ % Don't complain about control sequences we have declared \outer.
+ \ignoresections
+ %
+ % Define `@end #1' to end the box, which will in turn undefine the
+ % @end command again.
+ \expandafter\def\csname E#1\endcsname{\egroup\ignorespaces}%
+ %
+ % We are going to be parsing Texinfo commands. Most cause no
+ % trouble when they are used incorrectly, but some commands do
+ % complicated argument parsing or otherwise get confused, so we
+ % undefine them.
+ %
+ % We can't do anything about stray @-signs, unfortunately;
+ % they'll produce `undefined control sequence' errors.
+ \ignoremorecommands
+ %
+ % Set the current font to be \nullfont, a TeX primitive, and define
+ % all the font commands to also use \nullfont. We don't use
+ % dummy.tfm, as suggested in the TeXbook, because not all sites
+ % might have that installed. Therefore, math mode will still
+ % produce output, but that should be an extremely small amount of
+ % stuff compared to the main input.
+ %
+ \nullfont
+ \let\tenrm = \nullfont \let\tenit = \nullfont \let\tensl = \nullfont
+ \let\tenbf = \nullfont \let\tentt = \nullfont \let\smallcaps = \nullfont
+ \let\tensf = \nullfont
+ % Similarly for index fonts (mostly for their use in
+ % smallexample)
+ \let\indrm = \nullfont \let\indit = \nullfont \let\indsl = \nullfont
+ \let\indbf = \nullfont \let\indtt = \nullfont \let\indsc = \nullfont
+ \let\indsf = \nullfont
+ %
+ % Don't complain when characters are missing from the fonts.
+ \tracinglostchars = 0
+ %
+ % Don't bother to do space factor calculations.
+ \frenchspacing
+ %
+ % Don't report underfull hboxes.
+ \hbadness = 10000
+ %
+ % Do minimal line-breaking.
+ \pretolerance = 10000
+ %
+ % Do not execute instructions in @tex
+ \def\tex{\doignore{tex}}
+}
+
+% @set VAR sets the variable VAR to an empty value.
+% @set VAR REST-OF-LINE sets VAR to the value REST-OF-LINE.
+%
+% Since we want to separate VAR from REST-OF-LINE (which might be
+% empty), we can't just use \parsearg; we have to insert a space of our
+% own to delimit the rest of the line, and then take it out again if we
+% didn't need it.
+%
+\def\set{\parsearg\setxxx}
+\def\setxxx#1{\setyyy#1 \endsetyyy}
+\def\setyyy#1 #2\endsetyyy{%
+ \def\temp{#2}%
+ \ifx\temp\empty \global\expandafter\let\csname SET#1\endcsname = \empty
+ \else \setzzz{#1}#2\endsetzzz % Remove the trailing space \setxxx inserted.
+ \fi
+}
+\def\setzzz#1#2 \endsetzzz{\expandafter\xdef\csname SET#1\endcsname{#2}}
+
+% @clear VAR clears (i.e., unsets) the variable VAR.
+%
+\def\clear{\parsearg\clearxxx}
+\def\clearxxx#1{\global\expandafter\let\csname SET#1\endcsname=\relax}
+
+% @value{foo} gets the text saved in variable foo.
+%
+\def\value#1{\expandafter
+ \ifx\csname SET#1\endcsname\relax
+ {\{No value for ``#1''\}}
+ \else \csname SET#1\endcsname \fi}
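+% Putting these together (illustrative): after `@set EDITION 2.0', writing
+% `@value{EDITION}' prints `2.0'; after `@clear EDITION', @value{EDITION}
+% prints the `No value' text above and an @ifset EDITION block is skipped.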
+
+% @ifset VAR ... @end ifset reads the `...' iff VAR has been defined
+% with @set.
+%
+\def\ifset{\parsearg\ifsetxxx}
+\def\ifsetxxx #1{%
+ \expandafter\ifx\csname SET#1\endcsname\relax
+ \expandafter\ifsetfail
+ \else
+ \expandafter\ifsetsucceed
+ \fi
+}
+\def\ifsetsucceed{\conditionalsucceed{ifset}}
+\def\ifsetfail{\nestedignore{ifset}}
+\defineunmatchedend{ifset}
+
+% @ifclear VAR ... @end ifclear reads the `...' iff VAR has never been
+% defined with @set, or has been undefined with @clear.
+%
+\def\ifclear{\parsearg\ifclearxxx}
+\def\ifclearxxx #1{%
+ \expandafter\ifx\csname SET#1\endcsname\relax
+ \expandafter\ifclearsucceed
+ \else
+ \expandafter\ifclearfail
+ \fi
+}
+\def\ifclearsucceed{\conditionalsucceed{ifclear}}
+\def\ifclearfail{\nestedignore{ifclear}}
+\defineunmatchedend{ifclear}
+
+% @iftex always succeeds; we read the text following it, through `@end
+% iftex'.  But `@end iftex' should be valid only after an @iftex.
+%
+\def\iftex{\conditionalsucceed{iftex}}
+\defineunmatchedend{iftex}
+
+% We can't just start a group at @iftex (for example) and end it
+% at @end iftex, since then @set commands inside the conditional have no
+% effect (they'd get reverted at the end of the group). So we must
+% define \Eiftex to redefine itself to be its previous value. (We can't
+% just define it to fail again with an ``unmatched end'' error, since
+% the @ifset might be nested.)
+%
+\def\conditionalsucceed#1{%
+ \edef\temp{%
+ % Remember the current value of \E#1.
+ \let\nece{prevE#1} = \nece{E#1}%
+ %
+ % At the `@end #1', redefine \E#1 to be its previous value.
+ \def\nece{E#1}{\let\nece{E#1} = \nece{prevE#1}}%
+ }%
+ \temp
+}
+
+% We need to expand lots of \csname's, but we don't want to expand the
+% control sequences after we've constructed them.
+%
+\def\nece#1{\expandafter\noexpand\csname#1\endcsname}
+
+% @asis just yields its argument. Used with @table, for example.
+%
+\def\asis#1{#1}
+
+% @math means output in math mode.
+% We don't use $'s directly in the definition of \math because control
+% sequences like \math are expanded when the toc file is written. Then,
+% we read the toc file back, the $'s will be normal characters (as they
+% should be, according to the definition of Texinfo). So we must use a
+% control sequence to switch into and out of math mode.
+%
+% This isn't quite enough for @math to work properly in indices, but it
+% seems unlikely it will ever be needed there.
+%
+\let\implicitmath = $
+\def\math#1{\implicitmath #1\implicitmath}
+
+% @bullet and @minus need the same treatment as @math, just above.
+\def\bullet{\implicitmath\ptexbullet\implicitmath}
+\def\minus{\implicitmath-\implicitmath}
+
+\def\node{\ENVcheck\parsearg\nodezzz}
+\def\nodezzz#1{\nodexxx [#1,]}
+\def\nodexxx[#1,#2]{\gdef\lastnode{#1}}
+\let\nwnode=\node
+\let\lastnode=\relax
+
+\def\donoderef{\ifx\lastnode\relax\else
+\expandafter\expandafter\expandafter\setref{\lastnode}\fi
+\let\lastnode=\relax}
+
+\def\unnumbnoderef{\ifx\lastnode\relax\else
+\expandafter\expandafter\expandafter\unnumbsetref{\lastnode}\fi
+\let\lastnode=\relax}
+
+\def\appendixnoderef{\ifx\lastnode\relax\else
+\expandafter\expandafter\expandafter\appendixsetref{\lastnode}\fi
+\let\lastnode=\relax}
+
+\let\refill=\relax
+
+% @setfilename is done at the beginning of every texinfo file.
+% So open here the files we need to have open while reading the input.
+% This makes it possible to make a .fmt file for texinfo.
+\def\setfilename{%
+ \readauxfile
+ \opencontents
+ \openindices
+ \fixbackslash % Turn off hack to swallow `\input texinfo'.
+ \global\let\setfilename=\comment % Ignore extra @setfilename cmds.
+ \comment % Ignore the actual filename.
+}
+
+\outer\def\bye{\pagealignmacro\tracingstats=1\ptexend}
+
+\def\inforef #1{\inforefzzz #1,,,,**}
+\def\inforefzzz #1,#2,#3,#4**{See Info file \file{\ignorespaces #3{}},
+ node \samp{\ignorespaces#1{}}}
+
+\message{fonts,}
+
+% Font-change commands.
+
+% Texinfo supports the sans serif font style, which plain TeX does not.
+% So we set up a \sf analogous to plain's \rm, etc.
+\newfam\sffam
+\def\sf{\fam=\sffam \tensf}
+\let\li = \sf % Sometimes we call it \li, not \sf.
+
+%% Try out Computer Modern fonts at \magstephalf
+\let\mainmagstep=\magstephalf
+
+\ifx\bigger\relax
+\let\mainmagstep=\magstep1
+\font\textrm=cmr12
+\font\texttt=cmtt12
+\else
+\font\textrm=cmr10 scaled \mainmagstep
+\font\texttt=cmtt10 scaled \mainmagstep
+\fi
+% Instead of cmb10, you may want to use cmbx10.
+% cmbx10 is a prettier font on its own, but cmb10
+% looks better when embedded in a line with cmr10.
+\font\textbf=cmb10 scaled \mainmagstep
+\font\textit=cmti10 scaled \mainmagstep
+\font\textsl=cmsl10 scaled \mainmagstep
+\font\textsf=cmss10 scaled \mainmagstep
+\font\textsc=cmcsc10 scaled \mainmagstep
+\font\texti=cmmi10 scaled \mainmagstep
+\font\textsy=cmsy10 scaled \mainmagstep
+
+% A few fonts for @defun, etc.
+\font\defbf=cmbx10 scaled \magstep1 %was 1314
+\font\deftt=cmtt10 scaled \magstep1
+\def\df{\let\tentt=\deftt \let\tenbf = \defbf \bf}
+
+% Fonts for indices and small examples.
+% We actually use the slanted font rather than the italic,
+% because texinfo normally uses the slanted fonts for that.
+% Do not make many font distinctions in general in the index, since they
+% aren't very useful.
+\font\ninett=cmtt9
+\font\indrm=cmr9
+\font\indit=cmsl9
+\let\indsl=\indit
+\let\indtt=\ninett
+\let\indsf=\indrm
+\let\indbf=\indrm
+\let\indsc=\indrm
+\font\indi=cmmi9
+\font\indsy=cmsy9
+
+% Fonts for headings
+\font\chaprm=cmbx12 scaled \magstep2
+\font\chapit=cmti12 scaled \magstep2
+\font\chapsl=cmsl12 scaled \magstep2
+\font\chaptt=cmtt12 scaled \magstep2
+\font\chapsf=cmss12 scaled \magstep2
+\let\chapbf=\chaprm
+\font\chapsc=cmcsc10 scaled\magstep3
+\font\chapi=cmmi12 scaled \magstep2
+\font\chapsy=cmsy10 scaled \magstep3
+
+\font\secrm=cmbx12 scaled \magstep1
+\font\secit=cmti12 scaled \magstep1
+\font\secsl=cmsl12 scaled \magstep1
+\font\sectt=cmtt12 scaled \magstep1
+\font\secsf=cmss12 scaled \magstep1
+\font\secbf=cmbx12 scaled \magstep1
+\font\secsc=cmcsc10 scaled\magstep2
+\font\seci=cmmi12 scaled \magstep1
+\font\secsy=cmsy10 scaled \magstep2
+
+% \font\ssecrm=cmbx10 scaled \magstep1 % This size and font looked bad.
+% \font\ssecit=cmti10 scaled \magstep1 % The letters were too crowded.
+% \font\ssecsl=cmsl10 scaled \magstep1
+% \font\ssectt=cmtt10 scaled \magstep1
+% \font\ssecsf=cmss10 scaled \magstep1
+
+%\font\ssecrm=cmb10 scaled 1315 % Note the use of cmb rather than cmbx.
+%\font\ssecit=cmti10 scaled 1315 % Also, the size is a little larger than
+%\font\ssecsl=cmsl10 scaled 1315 % being scaled magstep1.
+%\font\ssectt=cmtt10 scaled 1315
+%\font\ssecsf=cmss10 scaled 1315
+
+%\let\ssecbf=\ssecrm
+
+\font\ssecrm=cmbx12 scaled \magstephalf
+\font\ssecit=cmti12 scaled \magstephalf
+\font\ssecsl=cmsl12 scaled \magstephalf
+\font\ssectt=cmtt12 scaled \magstephalf
+\font\ssecsf=cmss12 scaled \magstephalf
+\font\ssecbf=cmbx12 scaled \magstephalf
+\font\ssecsc=cmcsc10 scaled \magstep1
+\font\sseci=cmmi12 scaled \magstephalf
+\font\ssecsy=cmsy10 scaled \magstep1
+% The smallcaps and symbol fonts should actually be scaled \magstep1.5,
+% but that is not a standard magnification.
+
+% Fonts for title page:
+\font\titlerm = cmbx12 scaled \magstep3
+\let\authorrm = \secrm
+
+% In order for the font changes to affect most math symbols and letters,
+% we have to define the \textfont of the standard families. Since
+% texinfo doesn't allow for producing subscripts and superscripts, we
+% don't bother to reset \scriptfont and \scriptscriptfont (which would
+% also require loading a lot more fonts).
+%
+\def\resetmathfonts{%
+ \textfont0 = \tenrm \textfont1 = \teni \textfont2 = \tensy
+ \textfont\itfam = \tenit \textfont\slfam = \tensl \textfont\bffam = \tenbf
+ \textfont\ttfam = \tentt \textfont\sffam = \tensf
+}
+
+
+% The font-changing commands redefine the meanings of \tenSTYLE, instead
+% of just \STYLE. We do this so that font changes will continue to work
+% in math mode, where it is the current \fam that is relevant in most
+% cases, not the current font.  Plain TeX does, for example,
+% \def\bf{\fam=\bffam \tenbf}.  By redefining \tenbf, we obviate the need
+% to redefine \bf itself.
+\def\textfonts{%
+ \let\tenrm=\textrm \let\tenit=\textit \let\tensl=\textsl
+ \let\tenbf=\textbf \let\tentt=\texttt \let\smallcaps=\textsc
+ \let\tensf=\textsf \let\teni=\texti \let\tensy=\textsy
+ \resetmathfonts}
+\def\chapfonts{%
+ \let\tenrm=\chaprm \let\tenit=\chapit \let\tensl=\chapsl
+ \let\tenbf=\chapbf \let\tentt=\chaptt \let\smallcaps=\chapsc
+ \let\tensf=\chapsf \let\teni=\chapi \let\tensy=\chapsy
+ \resetmathfonts}
+\def\secfonts{%
+ \let\tenrm=\secrm \let\tenit=\secit \let\tensl=\secsl
+ \let\tenbf=\secbf \let\tentt=\sectt \let\smallcaps=\secsc
+ \let\tensf=\secsf \let\teni=\seci \let\tensy=\secsy
+ \resetmathfonts}
+\def\subsecfonts{%
+ \let\tenrm=\ssecrm \let\tenit=\ssecit \let\tensl=\ssecsl
+ \let\tenbf=\ssecbf \let\tentt=\ssectt \let\smallcaps=\ssecsc
+ \let\tensf=\ssecsf \let\teni=\sseci \let\tensy=\ssecsy
+ \resetmathfonts}
+\def\indexfonts{%
+ \let\tenrm=\indrm \let\tenit=\indit \let\tensl=\indsl
+ \let\tenbf=\indbf \let\tentt=\indtt \let\smallcaps=\indsc
+ \let\tensf=\indsf \let\teni=\indi \let\tensy=\indsy
+ \resetmathfonts}
+
+% Set up the default fonts, so we can use them for creating boxes.
+%
+\textfonts
+
+% Count depth in font-changes, for error checks
+\newcount\fontdepth \fontdepth=0
+
+% Fonts for short table of contents.
+\font\shortcontrm=cmr12
+\font\shortcontbf=cmbx12
+\font\shortcontsl=cmsl12
+
+%% Add scribe-like font environments, plus @l for inline lisp (usually sans
+%% serif) and @ii for TeX italic
+
+% \smartitalic{ARG} outputs arg in italics, followed by an italic correction
+% unless the following character is such as not to need one.
+\def\smartitalicx{\ifx\next,\else\ifx\next-\else\ifx\next.\else\/\fi\fi\fi}
+\def\smartitalic#1{{\sl #1}\futurelet\next\smartitalicx}
+
+\let\i=\smartitalic
+\let\var=\smartitalic
+\let\dfn=\smartitalic
+\let\emph=\smartitalic
+\let\cite=\smartitalic
+
+\def\b#1{{\bf #1}}
+\let\strong=\b
+
+% We can't just use \exhyphenpenalty, because that only has effect at
+% the end of a paragraph. Restore normal hyphenation at the end of the
+% group within which \nohyphenation is presumably called.
+%
+\def\nohyphenation{\hyphenchar\font = -1 \aftergroup\restorehyphenation}
+\def\restorehyphenation{\hyphenchar\font = `- }
+
+\def\t#1{%
+ {\tt \nohyphenation \rawbackslash \frenchspacing #1}%
+ \null
+}
+\let\ttfont = \t
+%\def\samp #1{`{\tt \rawbackslash \frenchspacing #1}'\null}
+\def\samp #1{`\tclose{#1}'\null}
+\def\key #1{{\tt \nohyphenation \uppercase{#1}}\null}
+\def\ctrl #1{{\tt \rawbackslash \hat}#1}
+
+\let\file=\samp
+
+% @code is a modification of @t,
+% which makes spaces the same size as normal in the surrounding text.
+\def\tclose#1{%
+ {%
+ % Change normal interword space to be same as for the current font.
+ \spaceskip = \fontdimen2\font
+ %
+ % Switch to typewriter.
+ \tt
+ %
+ % But `\ ' produces the large typewriter interword space.
+ \def\ {{\spaceskip = 0pt{} }}%
+ %
+ % Turn off hyphenation.
+ \nohyphenation
+ %
+ \rawbackslash
+ \frenchspacing
+ #1%
+ }%
+ \null
+}
+
+% We *must* turn on hyphenation at `-' and `_' in \code.
+% Otherwise, it is too hard to avoid overfull hboxes
+% in the Emacs manual, the Library manual, etc.
+
+% Unfortunately, TeX uses one parameter (\hyphenchar) to control
+% both hyphenation at - and hyphenation within words.
+% We must therefore turn them both off (\tclose does that)
+% and arrange explicitly to hyphenate at a dash.
+% -- rms.
+{
+\catcode `\-=\active
+\catcode `\_=\active
+\global\def\code{\begingroup \catcode `\-=\active \let-\codedash \let_\codeunder \codex}
+}
+\def\codedash{-\discretionary{}{}{}}
+\def\codeunder{\normalunderscore\discretionary{}{}{}}
+\def\codex #1{\tclose{#1}\endgroup}
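+% Net effect (illustrative): in `@code{foo-bar_baz}' TeX may break the line
+% after the `-' or the `_' (via the \discretionary calls above) but will not
+% hyphenate inside the words themselves.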
+
+%\let\exp=\tclose %Was temporary
+
+% @kbd is like @code, except that if the argument is just one @key command,
+% then @kbd has no effect.
+
+\def\xkey{\key}
+\def\kbdfoo#1#2#3\par{\def\one{#1}\def\three{#3}\def\threex{??}%
+\ifx\one\xkey\ifx\threex\three \key{#2}%
+\else\tclose{\look}\fi
+\else\tclose{\look}\fi}
+
+% Typeset a dimension, e.g., `in' or `pt'. The only reason for the
+% argument is to make the input look right: @dmn{pt} instead of
+% @dmn{}pt.
+%
+\def\dmn#1{\thinspace #1}
+
+\def\kbd#1{\def\look{#1}\expandafter\kbdfoo\look??\par}
+
+\def\l#1{{\li #1}\null} %
+
+\def\r#1{{\rm #1}} % roman font
+% Use of \lowercase was suggested.
+\def\sc#1{{\smallcaps#1}} % smallcaps font
+\def\ii#1{{\it #1}} % italic font
+
+\message{page headings,}
+
+\newskip\titlepagetopglue \titlepagetopglue = 1.5in
+\newskip\titlepagebottomglue \titlepagebottomglue = 2pc
+
+% First the title page. Must do @settitle before @titlepage.
+\def\titlefont#1{{\titlerm #1}}
+
+\newif\ifseenauthor
+\newif\iffinishedtitlepage
+
+\def\shorttitlepage{\parsearg\shorttitlepagezzz}
+\def\shorttitlepagezzz #1{\begingroup\hbox{}\vskip 1.5in \chaprm \centerline{#1}%
+ \endgroup\page\hbox{}\page}
+
+\def\titlepage{\begingroup \parindent=0pt \textfonts
+ \let\subtitlerm=\tenrm
+% I deinstalled the following change because \cmr12 is undefined.
+% This change was not in the ChangeLog anyway. --rms.
+% \let\subtitlerm=\cmr12
+ \def\subtitlefont{\subtitlerm \normalbaselineskip = 13pt \normalbaselines}%
+ %
+ \def\authorfont{\authorrm \normalbaselineskip = 16pt \normalbaselines}%
+ %
+ % Leave some space at the very top of the page.
+ \vglue\titlepagetopglue
+ %
+ % Now you can print the title using @title.
+ \def\title{\parsearg\titlezzz}%
+ \def\titlezzz##1{\leftline{\titlefont{##1}}
+ % print a rule at the page bottom also.
+ \finishedtitlepagefalse
+ \vskip4pt \hrule height 4pt width \hsize \vskip4pt}%
+ % No rule at page bottom unless we print one at the top with @title.
+ \finishedtitlepagetrue
+ %
+ % Now you can put text using @subtitle.
+ \def\subtitle{\parsearg\subtitlezzz}%
+ \def\subtitlezzz##1{{\subtitlefont \rightline{##1}}}%
+ %
+ % @author should come last, but may come many times.
+ \def\author{\parsearg\authorzzz}%
+ \def\authorzzz##1{\ifseenauthor\else\vskip 0pt plus 1filll\seenauthortrue\fi
+ {\authorfont \leftline{##1}}}%
+ %
+ % Most title ``pages'' are actually two pages long, with space
+ % at the top of the second. We don't want the ragged left on the second.
+ \let\oldpage = \page
+ \def\page{%
+ \iffinishedtitlepage\else
+ \finishtitlepage
+ \fi
+ \oldpage
+ \let\page = \oldpage
+ \hbox{}}%
+% \def\page{\oldpage \hbox{}}
+}
+
+\def\Etitlepage{%
+ \iffinishedtitlepage\else
+ \finishtitlepage
+ \fi
+ % It is important to do the page break before ending the group,
+ % because the headline and footline are only empty inside the group.
+ % If we use the new definition of \page, we always get a blank page
+ % after the title page, which we certainly don't want.
+ \oldpage
+ \endgroup
+ \HEADINGSon
+}
+
+\def\finishtitlepage{%
+ \vskip4pt \hrule height 2pt width \hsize
+ \vskip\titlepagebottomglue
+ \finishedtitlepagetrue
+}
+
+%%% Set up page headings and footings.
+
+\let\thispage=\folio
+
+\newtoks \evenheadline % Token sequence for heading line of even pages
+\newtoks \oddheadline % Token sequence for heading line of odd pages
+\newtoks \evenfootline % Token sequence for footing line of even pages
+\newtoks \oddfootline % Token sequence for footing line of odd pages
+
+% Now make TeX use those variables
+\headline={{\textfonts\rm \ifodd\pageno \the\oddheadline
+ \else \the\evenheadline \fi}}
+\footline={{\textfonts\rm \ifodd\pageno \the\oddfootline
+ \else \the\evenfootline \fi}\HEADINGShook}
+\let\HEADINGShook=\relax
+
+% Commands to set those variables.
+% For example, this is what @headings on does
+% @evenheading @thistitle|@thispage|@thischapter
+% @oddheading @thischapter|@thispage|@thistitle
+% @evenfooting @thisfile||
+% @oddfooting ||@thisfile
+
+\def\evenheading{\parsearg\evenheadingxxx}
+\def\oddheading{\parsearg\oddheadingxxx}
+\def\everyheading{\parsearg\everyheadingxxx}
+
+\def\evenfooting{\parsearg\evenfootingxxx}
+\def\oddfooting{\parsearg\oddfootingxxx}
+\def\everyfooting{\parsearg\everyfootingxxx}
+
+{\catcode`\@=0 %
+
+\gdef\evenheadingxxx #1{\evenheadingyyy #1@|@|@|@|\finish}
+\gdef\evenheadingyyy #1@|#2@|#3@|#4\finish{%
+\global\evenheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
+
+\gdef\oddheadingxxx #1{\oddheadingyyy #1@|@|@|@|\finish}
+\gdef\oddheadingyyy #1@|#2@|#3@|#4\finish{%
+\global\oddheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
+
+\gdef\everyheadingxxx #1{\everyheadingyyy #1@|@|@|@|\finish}
+\gdef\everyheadingyyy #1@|#2@|#3@|#4\finish{%
+\global\evenheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}}
+\global\oddheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
+
+\gdef\evenfootingxxx #1{\evenfootingyyy #1@|@|@|@|\finish}
+\gdef\evenfootingyyy #1@|#2@|#3@|#4\finish{%
+\global\evenfootline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
+
+\gdef\oddfootingxxx #1{\oddfootingyyy #1@|@|@|@|\finish}
+\gdef\oddfootingyyy #1@|#2@|#3@|#4\finish{%
+\global\oddfootline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
+
+\gdef\everyfootingxxx #1{\everyfootingyyy #1@|@|@|@|\finish}
+\gdef\everyfootingyyy #1@|#2@|#3@|#4\finish{%
+\global\evenfootline={\rlap{\centerline{#2}}\line{#1\hfil#3}}
+\global\oddfootline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
+%
+}% unbind the catcode of @.
+
+% @headings double turns headings on for double-sided printing.
+% @headings single turns headings on for single-sided printing.
+% @headings off turns them off.
+% @headings on same as @headings double, retained for compatibility.
+% @headings after turns on double-sided headings after this page.
+% @headings doubleafter turns on double-sided headings after this page.
+% @headings singleafter turns on single-sided headings after this page.
+% By default, they are off.
+
+\def\headings #1 {\csname HEADINGS#1\endcsname}
+
+\def\HEADINGSoff{
+\global\evenheadline={\hfil} \global\evenfootline={\hfil}
+\global\oddheadline={\hfil} \global\oddfootline={\hfil}}
+\HEADINGSoff
+% When we turn headings on, set the page number to 1.
+% For double-sided printing, put current file name in lower left corner,
+% chapter name on inside top of right hand pages, document
+% title on inside top of left hand pages, and page numbers on outside top
+% edge of all pages.
+\def\HEADINGSdouble{
+%\pagealignmacro
+\global\pageno=1
+\global\evenfootline={\hfil}
+\global\oddfootline={\hfil}
+\global\evenheadline={\line{\folio\hfil\thistitle}}
+\global\oddheadline={\line{\thischapter\hfil\folio}}
+}
+% For single-sided printing, chapter title goes across top left of page,
+% page number on top right.
+\def\HEADINGSsingle{
+%\pagealignmacro
+\global\pageno=1
+\global\evenfootline={\hfil}
+\global\oddfootline={\hfil}
+\global\evenheadline={\line{\thischapter\hfil\folio}}
+\global\oddheadline={\line{\thischapter\hfil\folio}}
+}
+\def\HEADINGSon{\HEADINGSdouble}
+
+\def\HEADINGSafter{\let\HEADINGShook=\HEADINGSdoublex}
+\let\HEADINGSdoubleafter=\HEADINGSafter
+\def\HEADINGSdoublex{%
+\global\evenfootline={\hfil}
+\global\oddfootline={\hfil}
+\global\evenheadline={\line{\folio\hfil\thistitle}}
+\global\oddheadline={\line{\thischapter\hfil\folio}}
+}
+
+\def\HEADINGSsingleafter{\let\HEADINGShook=\HEADINGSsinglex}
+\def\HEADINGSsinglex{%
+\global\evenfootline={\hfil}
+\global\oddfootline={\hfil}
+\global\evenheadline={\line{\thischapter\hfil\folio}}
+\global\oddheadline={\line{\thischapter\hfil\folio}}
+}
+
+% Subroutines used in generating headings
+% Produces Day Month Year style of output.
+\def\today{\number\day\space
+\ifcase\month\or
+January\or February\or March\or April\or May\or June\or
+July\or August\or September\or October\or November\or December\fi
+\space\number\year}
+
+% Use this if you want the Month Day, Year style of output.
+%\def\today{\ifcase\month\or
+%January\or February\or March\or April\or May\or June\or
+%July\or August\or September\or October\or November\or December\fi
+%\space\number\day, \number\year}
+
+% @settitle line... specifies the title of the document, for headings
+% It generates no output of its own
+
+\def\thistitle{No Title}
+\def\settitle{\parsearg\settitlezzz}
+\def\settitlezzz #1{\gdef\thistitle{#1}}
+
+\message{tables,}
+
+% @tabs -- simple alignment
+
+% These don't work. For one thing, \+ is defined as outer.
+% So these macros cannot even be defined.
+
+%\def\tabs{\parsearg\tabszzz}
+%\def\tabszzz #1{\settabs\+#1\cr}
+%\def\tabline{\parsearg\tablinezzz}
+%\def\tablinezzz #1{\+#1\cr}
+%\def\&{&}
+
+% Tables -- @table, @ftable, @vtable, @item(x), @kitem(x), @xitem(x).
+
+% default indentation of table text
+\newdimen\tableindent \tableindent=.8in
+% default indentation of @itemize and @enumerate text
+\newdimen\itemindent \itemindent=.3in
+% margin between end of table item and start of table text.
+\newdimen\itemmargin \itemmargin=.1in
+
+% used internally for \itemindent minus \itemmargin
+\newdimen\itemmax
+
+% Note @table, @ftable, and @vtable define @item, @itemx, etc., with
+% these defs.
+% They also define \itemindex
+% to index the item name in whatever manner is desired (perhaps none).
+
+\newif\ifitemxneedsnegativevskip
+
+\def\itemxpar{\par\ifitemxneedsnegativevskip\vskip-\parskip\nobreak\fi}
+
+\def\internalBitem{\smallbreak \parsearg\itemzzz}
+\def\internalBitemx{\itemxpar \parsearg\itemzzz}
+
+\def\internalBxitem "#1"{\def\xitemsubtopix{#1} \smallbreak \parsearg\xitemzzz}
+\def\internalBxitemx "#1"{\def\xitemsubtopix{#1} \itemxpar \parsearg\xitemzzz}
+
+\def\internalBkitem{\smallbreak \parsearg\kitemzzz}
+\def\internalBkitemx{\itemxpar \parsearg\kitemzzz}
+
+\def\kitemzzz #1{\dosubind {kw}{\code{#1}}{for {\bf \lastfunction}}%
+ \itemzzz {#1}}
+
+\def\xitemzzz #1{\dosubind {kw}{\code{#1}}{for {\bf \xitemsubtopic}}%
+ \itemzzz {#1}}
+
+\def\itemzzz #1{\begingroup %
+ \advance\hsize by -\rightskip
+ \advance\hsize by -\tableindent
+ \setbox0=\hbox{\itemfont{#1}}%
+ \itemindex{#1}%
+ \nobreak % This prevents a break before @itemx.
+ %
+ % Be sure we are not still in the middle of a paragraph.
+ %{\parskip = 0in
+ %\par
+ %}%
+ %
+ % If the item text does not fit in the space we have, put it on a line
+ % by itself, and do not allow a page break either before or after that
+ % line. We do not start a paragraph here because then if the next
+ % command is, e.g., @kindex, the whatsit would get put into the
+ % horizontal list on a line by itself, resulting in extra blank space.
+ \ifdim \wd0>\itemmax
+ %
+ % Make this a paragraph so we get the \parskip glue and wrapping,
+ % but leave it ragged-right.
+ \begingroup
+ \advance\leftskip by-\tableindent
+ \advance\hsize by\tableindent
+ \advance\rightskip by0pt plus1fil
+ \leavevmode\unhbox0\par
+ \endgroup
+ %
+ % We're going to be starting a paragraph, but we don't want the
+ % \parskip glue -- logically it's part of the @item we just started.
+ \nobreak \vskip-\parskip
+ %
+ % Stop a page break at the \parskip glue coming up. Unfortunately
+ % we can't prevent a possible page break at the following
+ % \baselineskip glue.
+ \nobreak
+ \endgroup
+ \itemxneedsnegativevskipfalse
+ \else
+ % The item text fits into the space. Start a paragraph, so that the
+ % following text (if any) will end up on the same line. Since that
+ % text will be indented by \tableindent, we make the item text be in
+ % a zero-width box.
+ \noindent
+ \rlap{\hskip -\tableindent\box0}\ignorespaces%
+ \endgroup%
+ \itemxneedsnegativevskiptrue%
+ \fi
+}
+
+\def\item{\errmessage{@item while not in a table}}
+\def\itemx{\errmessage{@itemx while not in a table}}
+\def\kitem{\errmessage{@kitem while not in a table}}
+\def\kitemx{\errmessage{@kitemx while not in a table}}
+\def\xitem{\errmessage{@xitem while not in a table}}
+\def\xitemx{\errmessage{@xitemx while not in a table}}
+
+%% Contains a kludge to get @end[description] to work
+\def\description{\tablez{\dontindex}{1}{}{}{}{}}
+
+\def\table{\begingroup\inENV\obeylines\obeyspaces\tablex}
+{\obeylines\obeyspaces%
+\gdef\tablex #1^^M{%
+\tabley\dontindex#1 \endtabley}}
+
+\def\ftable{\begingroup\inENV\obeylines\obeyspaces\ftablex}
+{\obeylines\obeyspaces%
+\gdef\ftablex #1^^M{%
+\tabley\fnitemindex#1 \endtabley
+\def\Eftable{\endgraf\afterenvbreak\endgroup}%
+\let\Etable=\relax}}
+
+\def\vtable{\begingroup\inENV\obeylines\obeyspaces\vtablex}
+{\obeylines\obeyspaces%
+\gdef\vtablex #1^^M{%
+\tabley\vritemindex#1 \endtabley
+\def\Evtable{\endgraf\afterenvbreak\endgroup}%
+\let\Etable=\relax}}
+
+\def\dontindex #1{}
+\def\fnitemindex #1{\doind {fn}{\code{#1}}}%
+\def\vritemindex #1{\doind {vr}{\code{#1}}}%
+
+{\obeyspaces %
+\gdef\tabley#1#2 #3 #4 #5 #6 #7\endtabley{\endgroup%
+\tablez{#1}{#2}{#3}{#4}{#5}{#6}}}
+
+\def\tablez #1#2#3#4#5#6{%
+\aboveenvbreak %
+\begingroup %
+\def\Edescription{\Etable}% Necessary kludge.
+\let\itemindex=#1%
+\ifnum 0#3>0 \advance \leftskip by #3\mil \fi %
+\ifnum 0#4>0 \tableindent=#4\mil \fi %
+\ifnum 0#5>0 \advance \rightskip by #5\mil \fi %
+\def\itemfont{#2}%
+\itemmax=\tableindent %
+\advance \itemmax by -\itemmargin %
+\advance \leftskip by \tableindent %
+\exdentamount=\tableindent
+\parindent = 0pt
+\parskip = \smallskipamount
+\ifdim \parskip=0pt \parskip=2pt \fi%
+\def\Etable{\endgraf\afterenvbreak\endgroup}%
+\let\item = \internalBitem %
+\let\itemx = \internalBitemx %
+\let\kitem = \internalBkitem %
+\let\kitemx = \internalBkitemx %
+\let\xitem = \internalBxitem %
+\let\xitemx = \internalBxitemx %
+}
+
+% This is the counter used by @enumerate, which is really @itemize
+
+\newcount \itemno
+
+\def\itemize{\parsearg\itemizezzz}
+
+\def\itemizezzz #1{%
+  \begingroup % ended by the @end itemize
+ \itemizey {#1}{\Eitemize}
+}
+
+\def\itemizey #1#2{%
+\aboveenvbreak %
+\itemmax=\itemindent %
+\advance \itemmax by -\itemmargin %
+\advance \leftskip by \itemindent %
+\exdentamount=\itemindent
+\parindent = 0pt %
+\parskip = \smallskipamount %
+\ifdim \parskip=0pt \parskip=2pt \fi%
+\def#2{\endgraf\afterenvbreak\endgroup}%
+\def\itemcontents{#1}%
+\let\item=\itemizeitem}
+
+% Set sfcode to normal for the chars that usually have another value.
+% These are `.?!:;,'
+\def\frenchspacing{\sfcode46=1000 \sfcode63=1000 \sfcode33=1000
+ \sfcode58=1000 \sfcode59=1000 \sfcode44=1000 }
+
+% \splitoff TOKENS\endmark defines \first to be the first token in
+% TOKENS, and \rest to be the remainder.
+%
+\def\splitoff#1#2\endmark{\def\first{#1}\def\rest{#2}}%
+
+% Allow an optional argument of an uppercase letter, lowercase letter,
+% or number, to specify the first label in the enumerated list. No
+% argument is the same as `1'.
+%
+\def\enumerate{\parsearg\enumeratezzz}
+\def\enumeratezzz #1{\enumeratey #1 \endenumeratey}
+\def\enumeratey #1 #2\endenumeratey{%
+ \begingroup % ended by the @end enumerate
+ %
+ % If we were given no argument, pretend we were given `1'.
+ \def\thearg{#1}%
+ \ifx\thearg\empty \def\thearg{1}\fi
+ %
+ % Detect if the argument is a single token. If so, it might be a
+ % letter. Otherwise, the only valid thing it can be is a number.
+ % (We will always have one token, because of the test we just made.
+ % This is a good thing, since \splitoff doesn't work given nothing at
+ % all -- the first parameter is undelimited.)
+ \expandafter\splitoff\thearg\endmark
+ \ifx\rest\empty
+ % Only one token in the argument. It could still be anything.
+ % A ``lowercase letter'' is one whose \lccode is nonzero.
+ % An ``uppercase letter'' is one whose \lccode is both nonzero, and
+ % not equal to itself.
+ % Otherwise, we assume it's a number.
+ %
+ % We need the \relax at the end of the \ifnum lines to stop TeX from
+ % continuing to look for a <number>.
+ %
+ \ifnum\lccode\expandafter`\thearg=0\relax
+ \numericenumerate % a number (we hope)
+ \else
+ % It's a letter.
+ \ifnum\lccode\expandafter`\thearg=\expandafter`\thearg\relax
+ \lowercaseenumerate % lowercase letter
+ \else
+ \uppercaseenumerate % uppercase letter
+ \fi
+ \fi
+ \else
+ % Multiple tokens in the argument. We hope it's a number.
+ \numericenumerate
+ \fi
+}
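+% For instance (illustrative): `@enumerate' labels items 1, 2, 3, ...;
+% `@enumerate 5' starts at 5; `@enumerate c' yields c, d, e, ...; and
+% `@enumerate C' yields C, D, E, ...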
+
+% An @enumerate whose labels are integers. The starting integer is
+% given in \thearg.
+%
+\def\numericenumerate{%
+ \itemno = \thearg
+ \startenumeration{\the\itemno}%
+}
+
+% The starting (lowercase) letter is in \thearg.
+\def\lowercaseenumerate{%
+ \itemno = \expandafter`\thearg
+ \startenumeration{%
+ % Be sure we're not beyond the end of the alphabet.
+ \ifnum\itemno=0
+ \errmessage{No more lowercase letters in @enumerate; get a bigger
+ alphabet}%
+ \fi
+ \char\lccode\itemno
+ }%
+}
+
+% The starting (uppercase) letter is in \thearg.
+\def\uppercaseenumerate{%
+ \itemno = \expandafter`\thearg
+ \startenumeration{%
+ % Be sure we're not beyond the end of the alphabet.
+ \ifnum\itemno=0
+ \errmessage{No more uppercase letters in @enumerate; get a bigger
+ alphabet}
+ \fi
+ \char\uccode\itemno
+ }%
+}
+
+% Call itemizey, adding a period to the first argument and supplying the
+% common last two arguments. Also subtract one from the initial value in
+% \itemno, since @item increments \itemno.
+%
+\def\startenumeration#1{%
+ \advance\itemno by -1
+ \itemizey{#1.}\Eenumerate\flushcr
+}
+
+% @alphaenumerate and @capsenumerate are abbreviations for giving an arg
+% to @enumerate.
+%
+\def\alphaenumerate{\enumerate{a}}
+\def\capsenumerate{\enumerate{A}}
+\def\Ealphaenumerate{\Eenumerate}
+\def\Ecapsenumerate{\Eenumerate}
+
+% Definition of @item while inside @itemize.
+
+\def\itemizeitem{%
+\advance\itemno by 1
+{\let\par=\endgraf \smallbreak}%
+\ifhmode \errmessage{In hmode at itemizeitem}\fi
+{\parskip=0in \hskip 0pt
+\hbox to 0pt{\hss \itemcontents\hskip \itemmargin}%
+\vadjust{\penalty 1200}}%
+\flushcr}
+
+\message{indexing,}
+% Index generation facilities
+
+% Define \newwrite to be identical to plain tex's \newwrite
+% except not \outer, so it can be used within \newindex.
+{\catcode`\@=11
+\gdef\newwrite{\alloc@7\write\chardef\sixt@@n}}
+
+% \newindex {foo} defines an index named foo.
+% It automatically defines \fooindex such that
+% \fooindex ...rest of line... puts an entry in the index foo.
+% It also defines \fooindfile to be the number of the output channel for
+% the file that accumulates this index. The file's extension is foo.
+% The name of an index should be no more than 2 characters long
+% for the sake of VMS.
+
+\def\newindex #1{
+\expandafter\newwrite \csname#1indfile\endcsname% Define number for output file
+\openout \csname#1indfile\endcsname \jobname.#1 % Open the file
+\expandafter\xdef\csname#1index\endcsname{% % Define \xxxindex
+\noexpand\doindex {#1}}
+}
+
+% @defindex foo == \newindex{foo}
+
+\def\defindex{\parsearg\newindex}
+
+% Define @defcodeindex, like @defindex except put all entries in @code.
+
+\def\newcodeindex #1{
+\expandafter\newwrite \csname#1indfile\endcsname% Define number for output file
+\openout \csname#1indfile\endcsname \jobname.#1 % Open the file
+\expandafter\xdef\csname#1index\endcsname{% % Define \xxxindex
+\noexpand\docodeindex {#1}}
+}
+
+\def\defcodeindex{\parsearg\newcodeindex}
+
+% @synindex foo bar makes index foo feed into index bar.
+% Do this instead of @defindex foo if you don't want it as a separate index.
+\def\synindex #1 #2 {%
+\expandafter\let\expandafter\synindexfoo\expandafter=\csname#2indfile\endcsname
+\expandafter\let\csname#1indfile\endcsname=\synindexfoo
+\expandafter\xdef\csname#1index\endcsname{% % Define \xxxindex
+\noexpand\doindex {#2}}%
+}
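+% For example (illustrative): `@synindex vr cp' sends variable-index entries
+% into the concept index file, so only one combined index gets printed.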
+
+% @syncodeindex foo bar similar, but put all entries made for index foo
+% inside @code.
+\def\syncodeindex #1 #2 {%
+\expandafter\let\expandafter\synindexfoo\expandafter=\csname#2indfile\endcsname
+\expandafter\let\csname#1indfile\endcsname=\synindexfoo
+\expandafter\xdef\csname#1index\endcsname{% % Define \xxxindex
+\noexpand\docodeindex {#2}}%
+}
+
+% Define \doindex, the driver for all \fooindex macros.
+% Argument #1 is generated by the calling \fooindex macro,
+% and it is "foo", the name of the index.
+
+% \doindex just uses \parsearg; it calls \doind for the actual work.
+% This is because \doind is more useful to call from other macros.
+
+% There is also \dosubind {index}{topic}{subtopic}
+% which makes an entry in a two-level index such as the operation index.
+
+\def\doindex#1{\edef\indexname{#1}\parsearg\singleindexer}
+\def\singleindexer #1{\doind{\indexname}{#1}}
+
+% like the previous two, but they put @code around the argument.
+\def\docodeindex#1{\edef\indexname{#1}\parsearg\singlecodeindexer}
+\def\singlecodeindexer #1{\doind{\indexname}{\code{#1}}}
+
+\def\indexdummies{%
+\def\_{{\realbackslash _}}%
+\def\w{\realbackslash w }%
+\def\bf{\realbackslash bf }%
+\def\rm{\realbackslash rm }%
+\def\sl{\realbackslash sl }%
+\def\sf{\realbackslash sf}%
+\def\tt{\realbackslash tt}%
+\def\gtr{\realbackslash gtr}%
+\def\less{\realbackslash less}%
+\def\hat{\realbackslash hat}%
+\def\char{\realbackslash char}%
+\def\TeX{\realbackslash TeX}%
+\def\dots{\realbackslash dots }%
+\def\copyright{\realbackslash copyright }%
+\def\tclose##1{\realbackslash tclose {##1}}%
+\def\code##1{\realbackslash code {##1}}%
+\def\samp##1{\realbackslash samp {##1}}%
+\def\t##1{\realbackslash r {##1}}%
+\def\r##1{\realbackslash r {##1}}%
+\def\i##1{\realbackslash i {##1}}%
+\def\b##1{\realbackslash b {##1}}%
+\def\cite##1{\realbackslash cite {##1}}%
+\def\key##1{\realbackslash key {##1}}%
+\def\file##1{\realbackslash file {##1}}%
+\def\var##1{\realbackslash var {##1}}%
+\def\kbd##1{\realbackslash kbd {##1}}%
+\def\dfn##1{\realbackslash dfn {##1}}%
+\def\emph##1{\realbackslash emph {##1}}%
+}
+
+% \indexnofonts no-ops all font-change commands.
+% This is used when outputting the strings to sort the index by.
+\def\indexdummyfont#1{#1}
+\def\indexdummytex{TeX}
+\def\indexdummydots{...}
+
+\def\indexnofonts{%
+\let\w=\indexdummyfont
+\let\t=\indexdummyfont
+\let\r=\indexdummyfont
+\let\i=\indexdummyfont
+\let\b=\indexdummyfont
+\let\emph=\indexdummyfont
+\let\strong=\indexdummyfont
+\let\cite=\indexdummyfont
+\let\sc=\indexdummyfont
+%Don't no-op \tt, since it isn't a user-level command
+% and is used in the definitions of the active chars like <, >, |...
+%\let\tt=\indexdummyfont
+\let\tclose=\indexdummyfont
+\let\code=\indexdummyfont
+\let\file=\indexdummyfont
+\let\samp=\indexdummyfont
+\let\kbd=\indexdummyfont
+\let\key=\indexdummyfont
+\let\var=\indexdummyfont
+\let\TeX=\indexdummytex
+\let\dots=\indexdummydots
+}
+
+% To define \realbackslash, we must make \ not be an escape.
+% We must first make another character (@) an escape
+% so we do not become unable to do a definition.
+
+{\catcode`\@=0 \catcode`\\=\other
+@gdef@realbackslash{\}}
+
+\let\indexbackslash=0 %overridden during \printindex.
+
+\def\doind #1#2{%
+{\count10=\lastpenalty %
+{\indexdummies % Must do this here, since \bf, etc expand at this stage
+\escapechar=`\\%
+{\let\folio=0% Expand all macros now EXCEPT \folio
+\def\rawbackslashxx{\indexbackslash}% \indexbackslash isn't defined now
+% so it will be output as is; and it will print as backslash in the index.
+%
+% Now process the index-string once, with all font commands turned off,
+% to get the string to sort the index by.
+{\indexnofonts
+\xdef\temp1{#2}%
+}%
+% Now produce the complete index entry. We process the index-string again,
+% this time with font commands expanded, to get what to print in the index.
+\edef\temp{%
+\write \csname#1indfile\endcsname{%
+\realbackslash entry {\temp1}{\folio}{#2}}}%
+\temp }%
+}\penalty\count10}}
+
+\def\dosubind #1#2#3{%
+{\count10=\lastpenalty %
+{\indexdummies % Must do this here, since \bf, etc expand at this stage
+\escapechar=`\\%
+{\let\folio=0%
+\def\rawbackslashxx{\indexbackslash}%
+%
+% Now process the index-string once, with all font commands turned off,
+% to get the string to sort the index by.
+{\indexnofonts
+\xdef\temp1{#2 #3}%
+}%
+% Now produce the complete index entry. We process the index-string again,
+% this time with font commands expanded, to get what to print in the index.
+\edef\temp{%
+\write \csname#1indfile\endcsname{%
+\realbackslash entry {\temp1}{\folio}{#2}{#3}}}%
+\temp }%
+}\penalty\count10}}
+
+% The index entry written in the file actually looks like
+% \entry {sortstring}{page}{topic}
+% or
+% \entry {sortstring}{page}{topic}{subtopic}
+% The texindex program reads in these files and writes files
+% containing these kinds of lines:
+% \initial {c}
+% before the first topic whose initial is c
+% \entry {topic}{pagelist}
+% for a topic that is used without subtopics
+% \primary {topic}
+% for the beginning of a topic that is used with subtopics
+% \secondary {subtopic}{pagelist}
+% for each subtopic.
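+% As a concrete (hypothetical) illustration, a raw index file might
+% contain a line such as
+%   \entry {parser}{12}{parser}
+% and after sorting, texindex would produce something like
+%   \initial {p}
+%   \entry {parser}{12, 14}
+% which is what \printindex below reads back in.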
+
+% Define the user-accessible indexing commands
+% @findex, @vindex, @kindex, @cindex.
+
+\def\findex {\fnindex}
+\def\kindex {\kyindex}
+\def\cindex {\cpindex}
+\def\vindex {\vrindex}
+\def\tindex {\tpindex}
+\def\pindex {\pgindex}
+
+\def\cindexsub {\begingroup\obeylines\cindexsub}
+{\obeylines %
+\gdef\cindexsub "#1" #2^^M{\endgroup %
+\dosubind{cp}{#2}{#1}}}
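+% Typical (illustrative) uses in a Texinfo source:
+%   @cindex grammar rules
+%   @findex yyparse
+%   @vindex yylval
+% Each writes an \entry line into the corresponding raw index file.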
+
+% Define the macros used in formatting output of the sorted index material.
+
+% This is what you call to cause a particular index to get printed.
+% Write
+% @unnumbered Function Index
+% @printindex fn
+
+\def\printindex{\parsearg\doprintindex}
+
+\def\doprintindex#1{%
+ \tex
+ \dobreak \chapheadingskip {10000}
+ \catcode`\%=\other\catcode`\&=\other\catcode`\#=\other
+ \catcode`\$=\other\catcode`\_=\other
+ \catcode`\~=\other
+ %
+ % The following don't help, since the chars were translated
+ % when the raw index was written, and their fonts were discarded
+ % due to \indexnofonts.
+ %\catcode`\"=\active
+ %\catcode`\^=\active
+ %\catcode`\_=\active
+ %\catcode`\|=\active
+ %\catcode`\<=\active
+ %\catcode`\>=\active
+ % %
+ \def\indexbackslash{\rawbackslashxx}
+ \indexfonts\rm \tolerance=9500 \advance\baselineskip -1pt
+ \begindoublecolumns
+ %
+ % See if the index file exists and is nonempty.
+ \openin 1 \jobname.#1s
+ \ifeof 1
+ % \enddoublecolumns gets confused if there is no text in the index,
+ % and it loses the chapter title and the aux file entries for the
+ % index. The easiest way to prevent this problem is to make sure
+ % there is some text.
+ (Index is nonexistent)
+ \else
+ %
+ % If the index file exists but is empty, then \openin leaves \ifeof
+ % false. We have to make TeX try to read something from the file, so
+ % it can discover if there is anything in it.
+ \read 1 to \temp
+ \ifeof 1
+ (Index is empty)
+ \else
+ \input \jobname.#1s
+ \fi
+ \fi
+ \closein 1
+ \enddoublecolumns
+ \Etex
+}
+
+% These macros are used by the sorted index file itself.
+% Change them to control the appearance of the index.
+
+% Same as \bigskipamount except no shrink.
+% \balancecolumns gets confused if there is any shrink.
+\newskip\initialskipamount \initialskipamount 12pt plus4pt
+
+\def\initial #1{%
+{\let\tentt=\sectt \let\tt=\sectt \let\sf=\sectt
+\ifdim\lastskip<\initialskipamount
+\removelastskip \penalty-200 \vskip \initialskipamount\fi
+\line{\secbf#1\hfill}\kern 2pt\penalty10000}}
+
+% This typesets a paragraph consisting of #1, dot leaders, and then #2
+% flush to the right margin. It is used for index and table of contents
+% entries. The paragraph is indented by \leftskip.
+%
+\def\entry #1#2{\begingroup
+ %
+ % Start a new paragraph if necessary, so our assignments below can't
+ % affect previous text.
+ \par
+ %
+ % Do not fill out the last line with white space.
+ \parfillskip = 0in
+ %
+ % No extra space above this paragraph.
+ \parskip = 0in
+ %
+ % Do not prefer a separate line ending with a hyphen to fewer lines.
+ \finalhyphendemerits = 0
+ %
+ % \hangindent is only relevant when the entry text and page number
+ % don't both fit on one line. In that case, bob suggests starting the
+ % dots pretty far over on the line. Unfortunately, a large
+ % indentation looks wrong when the entry text itself is broken across
+ % lines. So we use a small indentation and put up with long leaders.
+ %
+ % \hangafter is reset to 1 (which is the value we want) at the start
+ % of each paragraph, so we need not do anything with that.
+ \hangindent=2em
+ %
+ % When the entry text needs to be broken, just fill out the first line
+ % with blank space.
+ \rightskip = 0pt plus1fil
+ %
+ % Start a ``paragraph'' for the index entry so the line breaking
+ % parameters we've set above will have an effect.
+ \noindent
+ %
+ % Insert the text of the index entry. TeX will do line-breaking on it.
+ #1%
+ % If there are no page numbers, don't output a line of dots.
+ \def\tempa{#2}
+ \def\tempb{}
+ \ifx\tempa\tempb\ \else
+ %
+ % If we must, put the page number on a line of its own, and fill out
+ % this line with blank space. (The \hfil is overwhelmed with the
+ % fill leaders glue in \indexdotfill if the page number does fit.)
+ \hfil\penalty50
+ \null\nobreak\indexdotfill % Have leaders before the page number.
+ %
+ % The `\ ' here is removed by the implicit \unskip that TeX does as
+ % part of (the primitive) \par. Without it, a spurious underfull
+ % \hbox ensues.
+ \ #2% The page number ends the paragraph.
+ \fi%
+ \par
+\endgroup}
+
+% Like \dotfill except takes at least 1 em.
+\def\indexdotfill{\cleaders
+ \hbox{$\mathsurround=0pt \mkern1.5mu . \mkern1.5mu$}\hskip 1em plus 1fill}
+
+\def\primary #1{\line{#1\hfil}}
+
+\newskip\secondaryindent \secondaryindent=0.5cm
+
+\def\secondary #1#2{
+{\parfillskip=0in \parskip=0in
+\hangindent =1in \hangafter=1
+\noindent\hskip\secondaryindent\hbox{#1}\indexdotfill #2\par
+}}
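+% Thus a sorted index file produced by texindex might (for example)
+% contain
+%   \initial {g}
+%   \primary {grammar}
+%   \secondary {rules}{10, 14}
+%   \entry {group}{22}
+% and the macros above typeset those lines directly.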
+
+%% Define two-column mode, which is used in indexes.
+%% Adapted from the TeXbook, page 416.
+\catcode `\@=11
+
+\newbox\partialpage
+
+\newdimen\doublecolumnhsize
+
+\def\begindoublecolumns{\begingroup
+ % Grab any single-column material above us.
+ \output = {\global\setbox\partialpage
+ =\vbox{\unvbox255\kern -\topskip \kern \baselineskip}}%
+ \eject
+ %
+ % Now switch to the double-column output routine.
+ \output={\doublecolumnout}%
+ %
+ % Change the page size parameters. We could do this once outside this
+ % routine, in each of @smallbook, @afourpaper, and the default 8.5x11
+ % format, but then we repeat the same computation. Repeating a couple
+ % of assignments once per index is clearly meaningless for the
+ % execution time, so we may as well do it once.
+ %
+ % First we halve the line length, less a little for the gutter between
+ % the columns. We compute the gutter based on the line length, so it
+ % changes automatically with the paper format. The magic constant
+ % below is chosen so that the gutter has the same value (well, +- <
+ % 1pt) as it did when we hard-coded it.
+ %
+  % We put the result in a separate register, \doublecolumnhsize, so we
+ % can restore it in \pagesofar, after \hsize itself has (potentially)
+ % been clobbered.
+ %
+ \doublecolumnhsize = \hsize
+ \advance\doublecolumnhsize by -.04154\hsize
+ \divide\doublecolumnhsize by 2
+ \hsize = \doublecolumnhsize
+ %
+ % Double the \vsize as well. (We don't need a separate register here,
+ % since nobody clobbers \vsize.)
+ \vsize = 2\vsize
+ \doublecolumnpagegoal
+}
+
+\def\enddoublecolumns{\eject \endgroup \pagegoal=\vsize \unvbox\partialpage}
+
+\def\doublecolumnsplit{\splittopskip=\topskip \splitmaxdepth=\maxdepth
+ \global\dimen@=\pageheight \global\advance\dimen@ by-\ht\partialpage
+ \global\setbox1=\vsplit255 to\dimen@ \global\setbox0=\vbox{\unvbox1}
+ \global\setbox3=\vsplit255 to\dimen@ \global\setbox2=\vbox{\unvbox3}
+ \ifdim\ht0>\dimen@ \setbox255=\vbox{\unvbox0\unvbox2} \global\setbox255=\copy5 \fi
+ \ifdim\ht2>\dimen@ \setbox255=\vbox{\unvbox0\unvbox2} \global\setbox255=\copy5 \fi
+}
+\def\doublecolumnpagegoal{%
+ \dimen@=\vsize \advance\dimen@ by-2\ht\partialpage \global\pagegoal=\dimen@
+}
+\def\pagesofar{\unvbox\partialpage %
+ \hsize=\doublecolumnhsize % have to restore this since output routine
+ \wd0=\hsize \wd2=\hsize \hbox to\pagewidth{\box0\hfil\box2}}
+\def\doublecolumnout{%
+ \setbox5=\copy255
+ {\vbadness=10000 \doublecolumnsplit}
+ \ifvbox255
+ \setbox0=\vtop to\dimen@{\unvbox0}
+ \setbox2=\vtop to\dimen@{\unvbox2}
+ \onepageout\pagesofar \unvbox255 \penalty\outputpenalty
+ \else
+ \setbox0=\vbox{\unvbox5}
+ \ifvbox0
+ \dimen@=\ht0 \advance\dimen@ by\topskip \advance\dimen@ by-\baselineskip
+ \divide\dimen@ by2 \splittopskip=\topskip \splitmaxdepth=\maxdepth
+ {\vbadness=10000
+ \loop \global\setbox5=\copy0
+ \setbox1=\vsplit5 to\dimen@
+ \setbox3=\vsplit5 to\dimen@
+ \ifvbox5 \global\advance\dimen@ by1pt \repeat
+ \setbox0=\vbox to\dimen@{\unvbox1}
+ \setbox2=\vbox to\dimen@{\unvbox3}
+ \global\setbox\partialpage=\vbox{\pagesofar}
+ \doublecolumnpagegoal
+ }
+ \fi
+ \fi
+}
+
+\catcode `\@=\other
+\message{sectioning,}
+% Define chapters, sections, etc.
+
+\newcount \chapno
+\newcount \secno \secno=0
+\newcount \subsecno \subsecno=0
+\newcount \subsubsecno \subsubsecno=0
+
+% This counter is funny since it counts through charcodes of letters A, B, ...
+\newcount \appendixno \appendixno = `\@
+\def\appendixletter{\char\the\appendixno}
+
+\newwrite \contentsfile
+% This is called from \setfilename.
+\def\opencontents{\openout \contentsfile = \jobname.toc}
+
+% Each @chapter defines this as the name of the chapter.
+% Page headings and footings can use it.  @section does likewise.
+
+\def\thischapter{} \def\thissection{}
+\def\seccheck#1{\ifnum \pageno<0 %
+\errmessage{@#1 not allowed after generating table of contents}\fi
+%
+}
+
+\def\chapternofonts{%
+\let\rawbackslash=\relax%
+\let\frenchspacing=\relax%
+\def\result{\realbackslash result}
+\def\equiv{\realbackslash equiv}
+\def\expansion{\realbackslash expansion}
+\def\print{\realbackslash print}
+\def\TeX{\realbackslash TeX}
+\def\dots{\realbackslash dots}
+\def\copyright{\realbackslash copyright}
+\def\tt{\realbackslash tt}
+\def\bf{\realbackslash bf }
+\def\w{\realbackslash w}
+\def\less{\realbackslash less}
+\def\gtr{\realbackslash gtr}
+\def\hat{\realbackslash hat}
+\def\char{\realbackslash char}
+\def\tclose##1{\realbackslash tclose {##1}}
+\def\code##1{\realbackslash code {##1}}
+\def\samp##1{\realbackslash samp {##1}}
+\def\r##1{\realbackslash r {##1}}
+\def\b##1{\realbackslash b {##1}}
+\def\key##1{\realbackslash key {##1}}
+\def\file##1{\realbackslash file {##1}}
+\def\kbd##1{\realbackslash kbd {##1}}
+% These are redefined because @smartitalic wouldn't work inside xdef.
+\def\i##1{\realbackslash i {##1}}
+\def\cite##1{\realbackslash cite {##1}}
+\def\var##1{\realbackslash var {##1}}
+\def\emph##1{\realbackslash emph {##1}}
+\def\dfn##1{\realbackslash dfn {##1}}
+}
+
+\newcount\absseclevel % used to calculate proper heading level
+\newcount\secbase\secbase=0 % @raise/lowersections modify this count
+
+% @raisesections: treat @section as chapter, @subsection as section, etc.
+\def\raisesections{\global\advance\secbase by -1}
+\let\up=\raisesections % original BFox name
+
+% @lowersections: treat @chapter as section, @section as subsection, etc.
+\def\lowersections{\global\advance\secbase by 1}
+\let\down=\lowersections % original BFox name
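+% Illustrative use: to include a chapter's source file as a section of
+% a larger manual one might write
+%   @lowersections
+%   @include bison-chapter.texi
+%   @raisesections
+% (the file name here is only an example).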
+
+% Choose a numbered-heading macro
+% #1 is heading level if unmodified by @raisesections or @lowersections
+% #2 is text for heading
+\def\numhead#1#2{\absseclevel=\secbase\advance\absseclevel by #1
+\ifcase\absseclevel
+ \chapterzzz{#2}
+\or
+ \seczzz{#2}
+\or
+ \numberedsubseczzz{#2}
+\or
+ \numberedsubsubseczzz{#2}
+\else
+ \ifnum \absseclevel<0
+ \chapterzzz{#2}
+ \else
+ \numberedsubsubseczzz{#2}
+ \fi
+\fi
+}
+
+% like \numhead, but chooses appendix heading levels
+\def\apphead#1#2{\absseclevel=\secbase\advance\absseclevel by #1
+\ifcase\absseclevel
+ \appendixzzz{#2}
+\or
+ \appendixsectionzzz{#2}
+\or
+ \appendixsubseczzz{#2}
+\or
+ \appendixsubsubseczzz{#2}
+\else
+ \ifnum \absseclevel<0
+ \appendixzzz{#2}
+ \else
+ \appendixsubsubseczzz{#2}
+ \fi
+\fi
+}
+
+% like \numhead, but chooses numberless heading levels
+\def\unnmhead#1#2{\absseclevel=\secbase\advance\absseclevel by #1
+\ifcase\absseclevel
+ \unnumberedzzz{#2}
+\or
+ \unnumberedseczzz{#2}
+\or
+ \unnumberedsubseczzz{#2}
+\or
+ \unnumberedsubsubseczzz{#2}
+\else
+ \ifnum \absseclevel<0
+ \unnumberedzzz{#2}
+ \else
+ \unnumberedsubsubseczzz{#2}
+ \fi
+\fi
+}
+
+
+\def\thischaptername{No Chapter Title}
+\outer\def\chapter{\parsearg\chapteryyy}
+\def\chapteryyy #1{\numhead0{#1}} % normally numhead0 calls chapterzzz
+\def\chapterzzz #1{\seccheck{chapter}%
+\secno=0 \subsecno=0 \subsubsecno=0
+\global\advance \chapno by 1 \message{Chapter \the\chapno}%
+\chapmacro {#1}{\the\chapno}%
+\gdef\thissection{#1}%
+\gdef\thischaptername{#1}%
+% We don't substitute the actual chapter name into \thischapter
+% because we don't want its macros evaluated now.
+\xdef\thischapter{Chapter \the\chapno: \noexpand\thischaptername}%
+{\chapternofonts%
+\edef\temp{{\realbackslash chapentry {#1}{\the\chapno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\donoderef %
+\global\let\section = \numberedsec
+\global\let\subsection = \numberedsubsec
+\global\let\subsubsection = \numberedsubsubsec
+}}
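+% For example (hypothetical title, numbers, and page), the line written
+% to the .toc file by the code above has the form
+%   \chapentry {Grammar Files}{3}{27}
+% which \contents reads back and hands to \dochapentry below.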
+
+\outer\def\appendix{\parsearg\appendixyyy}
+\def\appendixyyy #1{\apphead0{#1}} % normally apphead0 calls appendixzzz
+\def\appendixzzz #1{\seccheck{appendix}%
+\secno=0 \subsecno=0 \subsubsecno=0
+\global\advance \appendixno by 1 \message{Appendix \appendixletter}%
+\chapmacro {#1}{Appendix \appendixletter}%
+\gdef\thissection{#1}%
+\gdef\thischaptername{#1}%
+\xdef\thischapter{Appendix \appendixletter: \noexpand\thischaptername}%
+{\chapternofonts%
+\edef\temp{{\realbackslash chapentry
+ {#1}{Appendix \appendixletter}{\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\appendixnoderef %
+\global\let\section = \appendixsec
+\global\let\subsection = \appendixsubsec
+\global\let\subsubsection = \appendixsubsubsec
+}}
+
+\outer\def\top{\parsearg\unnumberedyyy}
+\outer\def\unnumbered{\parsearg\unnumberedyyy}
+\def\unnumberedyyy #1{\unnmhead0{#1}} % normally unnmhead0 calls unnumberedzzz
+\def\unnumberedzzz #1{\seccheck{unnumbered}%
+\secno=0 \subsecno=0 \subsubsecno=0
+%
+% This used to be simply \message{#1}, but TeX fully expands the
+% argument to \message. Therefore, if #1 contained @-commands, TeX
+% expanded them. For example, in `@unnumbered The @cite{Book}', TeX
+% expanded @cite (which turns out to cause errors because \cite is meant
+% to be executed, not expanded).
+%
+% Anyway, we don't want the fully-expanded definition of @cite to appear
+% as a result of the \message, we just want `@cite' itself. We use
+% \the<toks register> to achieve this: TeX expands \the<toks> only once,
+% simply yielding the contents of the <toks register>.
+\toks0 = {#1}\message{(\the\toks0)}%
+%
+\unnumbchapmacro {#1}%
+\gdef\thischapter{#1}\gdef\thissection{#1}%
+{\chapternofonts%
+\edef\temp{{\realbackslash unnumbchapentry {#1}{\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\unnumbnoderef %
+\global\let\section = \unnumberedsec
+\global\let\subsection = \unnumberedsubsec
+\global\let\subsubsection = \unnumberedsubsubsec
+}}
+
+\outer\def\numberedsec{\parsearg\secyyy}
+\def\secyyy #1{\numhead1{#1}} % normally calls seczzz
+\def\seczzz #1{\seccheck{section}%
+\subsecno=0 \subsubsecno=0 \global\advance \secno by 1 %
+\gdef\thissection{#1}\secheading {#1}{\the\chapno}{\the\secno}%
+{\chapternofonts%
+\edef\temp{{\realbackslash secentry %
+{#1}{\the\chapno}{\the\secno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\donoderef %
+\penalty 10000 %
+}}
+
+\outer\def\appendixsection{\parsearg\appendixsecyyy}
+\outer\def\appendixsec{\parsearg\appendixsecyyy}
+\def\appendixsecyyy #1{\apphead1{#1}} % normally calls appendixsectionzzz
+\def\appendixsectionzzz #1{\seccheck{appendixsection}%
+\subsecno=0 \subsubsecno=0 \global\advance \secno by 1 %
+\gdef\thissection{#1}\secheading {#1}{\appendixletter}{\the\secno}%
+{\chapternofonts%
+\edef\temp{{\realbackslash secentry %
+{#1}{\appendixletter}{\the\secno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\appendixnoderef %
+\penalty 10000 %
+}}
+
+\outer\def\unnumberedsec{\parsearg\unnumberedsecyyy}
+\def\unnumberedsecyyy #1{\unnmhead1{#1}} % normally calls unnumberedseczzz
+\def\unnumberedseczzz #1{\seccheck{unnumberedsec}%
+\plainsecheading {#1}\gdef\thissection{#1}%
+{\chapternofonts%
+\edef\temp{{\realbackslash unnumbsecentry{#1}{\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\unnumbnoderef %
+\penalty 10000 %
+}}
+
+\outer\def\numberedsubsec{\parsearg\numberedsubsecyyy}
+\def\numberedsubsecyyy #1{\numhead2{#1}} % normally calls numberedsubseczzz
+\def\numberedsubseczzz #1{\seccheck{subsection}%
+\gdef\thissection{#1}\subsubsecno=0 \global\advance \subsecno by 1 %
+\subsecheading {#1}{\the\chapno}{\the\secno}{\the\subsecno}%
+{\chapternofonts%
+\edef\temp{{\realbackslash subsecentry %
+{#1}{\the\chapno}{\the\secno}{\the\subsecno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\donoderef %
+\penalty 10000 %
+}}
+
+\outer\def\appendixsubsec{\parsearg\appendixsubsecyyy}
+\def\appendixsubsecyyy #1{\apphead2{#1}} % normally calls appendixsubseczzz
+\def\appendixsubseczzz #1{\seccheck{appendixsubsec}%
+\gdef\thissection{#1}\subsubsecno=0 \global\advance \subsecno by 1 %
+\subsecheading {#1}{\appendixletter}{\the\secno}{\the\subsecno}%
+{\chapternofonts%
+\edef\temp{{\realbackslash subsecentry %
+{#1}{\appendixletter}{\the\secno}{\the\subsecno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\appendixnoderef %
+\penalty 10000 %
+}}
+
+\outer\def\unnumberedsubsec{\parsearg\unnumberedsubsecyyy}
+\def\unnumberedsubsecyyy #1{\unnmhead2{#1}} %normally calls unnumberedsubseczzz
+\def\unnumberedsubseczzz #1{\seccheck{unnumberedsubsec}%
+\plainsecheading {#1}\gdef\thissection{#1}%
+{\chapternofonts%
+\edef\temp{{\realbackslash unnumbsubsecentry{#1}{\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\unnumbnoderef %
+\penalty 10000 %
+}}
+
+\outer\def\numberedsubsubsec{\parsearg\numberedsubsubsecyyy}
+\def\numberedsubsubsecyyy #1{\numhead3{#1}} % normally numberedsubsubseczzz
+\def\numberedsubsubseczzz #1{\seccheck{subsubsection}%
+\gdef\thissection{#1}\global\advance \subsubsecno by 1 %
+\subsubsecheading {#1}
+ {\the\chapno}{\the\secno}{\the\subsecno}{\the\subsubsecno}%
+{\chapternofonts%
+\edef\temp{{\realbackslash subsubsecentry %
+ {#1}
+ {\the\chapno}{\the\secno}{\the\subsecno}{\the\subsubsecno}
+ {\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\donoderef %
+\penalty 10000 %
+}}
+
+\outer\def\appendixsubsubsec{\parsearg\appendixsubsubsecyyy}
+\def\appendixsubsubsecyyy #1{\apphead3{#1}} % normally appendixsubsubseczzz
+\def\appendixsubsubseczzz #1{\seccheck{appendixsubsubsec}%
+\gdef\thissection{#1}\global\advance \subsubsecno by 1 %
+\subsubsecheading {#1}
+ {\appendixletter}{\the\secno}{\the\subsecno}{\the\subsubsecno}%
+{\chapternofonts%
+\edef\temp{{\realbackslash subsubsecentry{#1}%
+ {\appendixletter}
+ {\the\secno}{\the\subsecno}{\the\subsubsecno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\appendixnoderef %
+\penalty 10000 %
+}}
+
+\outer\def\unnumberedsubsubsec{\parsearg\unnumberedsubsubsecyyy}
+\def\unnumberedsubsubsecyyy #1{\unnmhead3{#1}} %normally unnumberedsubsubseczzz
+\def\unnumberedsubsubseczzz #1{\seccheck{unnumberedsubsubsec}%
+\plainsecheading {#1}\gdef\thissection{#1}%
+{\chapternofonts%
+\edef\temp{{\realbackslash unnumbsubsubsecentry{#1}{\noexpand\folio}}}%
+\escapechar=`\\%
+\write \contentsfile \temp %
+\unnumbnoderef %
+\penalty 10000 %
+}}
+
+% These are variants which are not "outer", so they can appear in @ifinfo.
+% Actually, they should now be obsolete; ordinary section commands should work.
+\def\infotop{\parsearg\unnumberedzzz}
+\def\infounnumbered{\parsearg\unnumberedzzz}
+\def\infounnumberedsec{\parsearg\unnumberedseczzz}
+\def\infounnumberedsubsec{\parsearg\unnumberedsubseczzz}
+\def\infounnumberedsubsubsec{\parsearg\unnumberedsubsubseczzz}
+
+\def\infoappendix{\parsearg\appendixzzz}
+\def\infoappendixsec{\parsearg\appendixsectionzzz}
+\def\infoappendixsubsec{\parsearg\appendixsubseczzz}
+\def\infoappendixsubsubsec{\parsearg\appendixsubsubseczzz}
+
+\def\infochapter{\parsearg\chapterzzz}
+\def\infosection{\parsearg\seczzz}
+\def\infosubsection{\parsearg\numberedsubseczzz}
+\def\infosubsubsection{\parsearg\numberedsubsubseczzz}
+
+% These macros control what the section commands do, according
+% to what kind of chapter we are in (ordinary, appendix, or unnumbered).
+% Define them by default for a numbered chapter.
+\global\let\section = \numberedsec
+\global\let\subsection = \numberedsubsec
+\global\let\subsubsection = \numberedsubsubsec
+
+% Define @majorheading, @heading and @subheading
+
+% NOTE on use of \vbox for chapter headings, section headings, and
+% such:
+% 1) We use \vbox rather than the earlier \line to permit
+% overlong headings to fold.
+% 2) \hyphenpenalty is set to 10000 because hyphenation in a
+% heading is obnoxious; this forbids it.
+% 3) Likewise, headings look best if no \parindent is used, and
+% if justification is not attempted. Hence \raggedright.
+
+
+\def\majorheading{\parsearg\majorheadingzzz}
+\def\majorheadingzzz #1{%
+{\advance\chapheadingskip by 10pt \chapbreak }%
+{\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt\raggedright
+ \rm #1\hfill}}\bigskip \par\penalty 200}
+
+\def\chapheading{\parsearg\chapheadingzzz}
+\def\chapheadingzzz #1{\chapbreak %
+{\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt\raggedright
+ \rm #1\hfill}}\bigskip \par\penalty 200}
+
+\def\heading{\parsearg\secheadingi}
+
+\def\subheading{\parsearg\subsecheadingi}
+
+\def\subsubheading{\parsearg\subsubsecheadingi}
+
+% These macros generate a chapter, section, etc. heading only
+% (including whitespace, linebreaking, etc. around it),
+% given all the information in convenient, parsed form.
+
+%%% Args are the skip and penalty (usually negative)
+\def\dobreak#1#2{\par\ifdim\lastskip<#1\removelastskip\penalty#2\vskip#1\fi}
+
+\def\setchapterstyle #1 {\csname CHAPF#1\endcsname}
+
+%%% Define plain chapter starts, and page on/off switching for it
+% Parameter controlling skip before chapter headings (if needed)
+
+\newskip \chapheadingskip \chapheadingskip = 30pt plus 8pt minus 4pt
+
+\def\chapbreak{\dobreak \chapheadingskip {-4000}}
+\def\chappager{\par\vfill\supereject}
+\def\chapoddpage{\chappager \ifodd\pageno \else \hbox to 0pt{} \chappager\fi}
+
+\def\setchapternewpage #1 {\csname CHAPPAG#1\endcsname}
+
+\def\CHAPPAGoff{
+\global\let\pchapsepmacro=\chapbreak
+\global\let\pagealignmacro=\chappager}
+
+\def\CHAPPAGon{
+\global\let\pchapsepmacro=\chappager
+\global\let\pagealignmacro=\chappager
+\global\def\HEADINGSon{\HEADINGSsingle}}
+
+\def\CHAPPAGodd{
+\global\let\pchapsepmacro=\chapoddpage
+\global\let\pagealignmacro=\chapoddpage
+\global\def\HEADINGSon{\HEADINGSdouble}}
+
+\CHAPPAGon
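+% Illustrative @setchapternewpage usage in a Texinfo source:
+%   @setchapternewpage odd
+% starts each chapter on an odd-numbered page (selecting \CHAPPAGodd
+% above), while @setchapternewpage off just leaves \chapbreak glue
+% between chapters.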
+
+\def\CHAPFplain{
+\global\let\chapmacro=\chfplain
+\global\let\unnumbchapmacro=\unnchfplain}
+
+\def\chfplain #1#2{%
+ \pchapsepmacro
+ {%
+ \chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt\raggedright
+ \rm #2\enspace #1}%
+ }%
+ \bigskip
+ \penalty5000
+}
+
+\def\unnchfplain #1{%
+\pchapsepmacro %
+{\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt\raggedright
+ \rm #1\hfill}}\bigskip \par\penalty 10000 %
+}
+\CHAPFplain % The default
+
+\def\unnchfopen #1{%
+\chapoddpage {\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt\raggedright
+ \rm #1\hfill}}\bigskip \par\penalty 10000 %
+}
+
+\def\chfopen #1#2{\chapoddpage {\chapfonts
+\vbox to 3in{\vfil \hbox to\hsize{\hfil #2} \hbox to\hsize{\hfil #1} \vfil}}%
+\par\penalty 5000 %
+}
+
+\def\CHAPFopen{
+\global\let\chapmacro=\chfopen
+\global\let\unnumbchapmacro=\unnchfopen}
+
+% Parameter controlling skip before section headings.
+
+\newskip \subsecheadingskip \subsecheadingskip = 17pt plus 8pt minus 4pt
+\def\subsecheadingbreak{\dobreak \subsecheadingskip {-500}}
+
+\newskip \secheadingskip \secheadingskip = 21pt plus 8pt minus 4pt
+\def\secheadingbreak{\dobreak \secheadingskip {-1000}}
+
+% @paragraphindent is defined for the Info formatting commands only.
+\let\paragraphindent=\comment
+
+% Section fonts are the base font at magstep2, which produces
+% a size a bit more than 14 points in the default situation.
+
+\def\secheading #1#2#3{\secheadingi {#2.#3\enspace #1}}
+\def\plainsecheading #1{\secheadingi {#1}}
+\def\secheadingi #1{{\advance \secheadingskip by \parskip %
+\secheadingbreak}%
+{\secfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt\raggedright
+ \rm #1\hfill}}%
+\ifdim \parskip<10pt \kern 10pt\kern -\parskip\fi \penalty 10000 }
+
+
+% Subsection fonts are the base font at magstep1,
+% which produces a size of 12 points.
+
+\def\subsecheading #1#2#3#4{\subsecheadingi {#2.#3.#4\enspace #1}}
+\def\subsecheadingi #1{{\advance \subsecheadingskip by \parskip %
+\subsecheadingbreak}%
+{\subsecfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt\raggedright
+ \rm #1\hfill}}%
+\ifdim \parskip<10pt \kern 10pt\kern -\parskip\fi \penalty 10000 }
+
+\def\subsubsecfonts{\subsecfonts} % Maybe this should change:
+ % Perhaps make sssec fonts scaled
+ % magstep half
+\def\subsubsecheading #1#2#3#4#5{\subsubsecheadingi {#2.#3.#4.#5\enspace #1}}
+\def\subsubsecheadingi #1{{\advance \subsecheadingskip by \parskip %
+\subsecheadingbreak}%
+{\subsubsecfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt\raggedright
+ \rm #1\hfill}}%
+\ifdim \parskip<10pt \kern 10pt\kern -\parskip\fi \penalty 10000}
+
+
+\message{toc printing,}
+
+% Finish up the main text and prepare to read what we've written
+% to \contentsfile.
+
+\newskip\contentsrightmargin \contentsrightmargin=1in
+\def\startcontents#1{%
+ \pagealignmacro
+ \immediate\closeout \contentsfile
+ \ifnum \pageno>0
+ \pageno = -1 % Request roman numbered pages.
+ \fi
+ % Don't need to put `Contents' or `Short Contents' in the headline.
+ % It is abundantly clear what they are.
+ \unnumbchapmacro{#1}\def\thischapter{}%
+ \begingroup % Set up to handle contents files properly.
+ \catcode`\\=0 \catcode`\{=1 \catcode`\}=2 \catcode`\@=11
+ \raggedbottom % Worry more about breakpoints than the bottom.
+ \advance\hsize by -\contentsrightmargin % Don't use the full line length.
+}
+
+
+% Normal (long) toc.
+\outer\def\contents{%
+ \startcontents{Table of Contents}%
+ \input \jobname.toc
+ \endgroup
+ \vfill \eject
+}
+
+% And just the chapters.
+\outer\def\summarycontents{%
+ \startcontents{Short Contents}%
+ %
+ \let\chapentry = \shortchapentry
+ \let\unnumbchapentry = \shortunnumberedentry
+ % We want a true roman here for the page numbers.
+ \secfonts
+ \let\rm=\shortcontrm \let\bf=\shortcontbf \let\sl=\shortcontsl
+ \rm
+ \advance\baselineskip by 1pt % Open it up a little.
+ \def\secentry ##1##2##3##4{}
+ \def\unnumbsecentry ##1##2{}
+ \def\subsecentry ##1##2##3##4##5{}
+ \def\unnumbsubsecentry ##1##2{}
+ \def\subsubsecentry ##1##2##3##4##5##6{}
+ \def\unnumbsubsubsecentry ##1##2{}
+ \input \jobname.toc
+ \endgroup
+ \vfill \eject
+}
+\let\shortcontents = \summarycontents
+
+% These macros generate individual entries in the table of contents.
+% The first argument is the chapter or section name.
+% The last argument is the page number.
+% The arguments in between are the chapter number, section number, ...
+
+% Chapter-level things, for both the long and short contents.
+\def\chapentry#1#2#3{\dochapentry{#2\labelspace#1}{#3}}
+
+% See comments in \dochapentry re vbox and related settings
+\def\shortchapentry#1#2#3{%
+ \tocentry{\shortchaplabel{#2}\labelspace #1}{\doshortpageno{#3}}%
+}
+
+% Typeset the label for a chapter or appendix for the short contents.
+% The arg is, e.g. `Appendix A' for an appendix, or `3' for a chapter.
+% We could simplify the code here by writing out an \appendixentry
+% command in the toc file for appendices, instead of using \chapentry
+% for both, but it doesn't seem worth it.
+\setbox0 = \hbox{\shortcontrm Appendix }
+\newdimen\shortappendixwidth \shortappendixwidth = \wd0
+
+\def\shortchaplabel#1{%
+ % We typeset #1 in a box of constant width, regardless of the text of
+ % #1, so the chapter titles will come out aligned.
+ \setbox0 = \hbox{#1}%
+ \dimen0 = \ifdim\wd0 > \shortappendixwidth \shortappendixwidth \else 0pt \fi
+ %
+ % This space should be plenty, since a single number is .5em, and the
+ % widest letter (M) is 1em, at least in the Computer Modern fonts.
+ % (This space doesn't include the extra space that gets added after
+ % the label; that gets put in in \shortchapentry above.)
+ \advance\dimen0 by 1.1em
+ \hbox to \dimen0{#1\hfil}%
+}
+
+\def\unnumbchapentry#1#2{\dochapentry{#1}{#2}}
+\def\shortunnumberedentry#1#2{\tocentry{#1}{\doshortpageno{#2}}}
+
+% Sections.
+\def\secentry#1#2#3#4{\dosecentry{#2.#3\labelspace#1}{#4}}
+\def\unnumbsecentry#1#2{\dosecentry{#1}{#2}}
+
+% Subsections.
+\def\subsecentry#1#2#3#4#5{\dosubsecentry{#2.#3.#4\labelspace#1}{#5}}
+\def\unnumbsubsecentry#1#2{\dosubsecentry{#1}{#2}}
+
+% And subsubsections.
+\def\subsubsecentry#1#2#3#4#5#6{%
+ \dosubsubsecentry{#2.#3.#4.#5\labelspace#1}{#6}}
+\def\unnumbsubsubsecentry#1#2{\dosubsubsecentry{#1}{#2}}
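+% Continuing the hypothetical example above, the .toc lines
+%   \chapentry {Grammar Files}{3}{27}
+%   \secentry {Outline of a Grammar}{3}{1}{28}
+% are typeset by \dochapentry and \dosecentry respectively, with the
+% chapter/section numbers glued to the title via \labelspace.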
+
+
+% This parameter controls the indentation of the various levels.
+\newdimen\tocindent \tocindent = 3pc
+
+% Now for the actual typesetting. In all these, #1 is the text and #2 is the
+% page number.
+%
+% If the toc has to be broken over pages, we would want to be at chapters
+% if at all possible; hence the \penalty.
+\def\dochapentry#1#2{%
+ \penalty-300 \vskip\baselineskip
+ \begingroup
+ \chapentryfonts
+ \tocentry{#1}{\dopageno{#2}}%
+ \endgroup
+ \nobreak\vskip .25\baselineskip
+}
+
+\def\dosecentry#1#2{\begingroup
+ \secentryfonts \leftskip=\tocindent
+ \tocentry{#1}{\dopageno{#2}}%
+\endgroup}
+
+\def\dosubsecentry#1#2{\begingroup
+ \subsecentryfonts \leftskip=2\tocindent
+ \tocentry{#1}{\dopageno{#2}}%
+\endgroup}
+
+\def\dosubsubsecentry#1#2{\begingroup
+ \subsubsecentryfonts \leftskip=3\tocindent
+ \tocentry{#1}{\dopageno{#2}}%
+\endgroup}
+
+% Final typesetting of a toc entry; we use the same \entry macro as for
+% the index entries, but we want to suppress hyphenation here. (We
+% can't do that in the \entry macro, since index entries might consist
+% of hyphenated-identifiers-that-do-not-fit-on-a-line-and-nothing-else.)
+%
+\def\tocentry#1#2{\begingroup
+ \hyphenpenalty = 10000
+ \entry{#1}{#2}%
+\endgroup}
+
+% Space between chapter (or whatever) number and the title.
+\def\labelspace{\hskip1em \relax}
+
+\def\dopageno#1{{\rm #1}}
+\def\doshortpageno#1{{\rm #1}}
+
+\def\chapentryfonts{\secfonts \rm}
+\def\secentryfonts{\textfonts}
+\let\subsecentryfonts = \textfonts
+\let\subsubsecentryfonts = \textfonts
+
+
+\message{environments,}
+
+% Since these characters are used in examples, it should be an even number of
+% \tt widths. Each \tt character is 1en, so two makes it 1em.
+% Furthermore, these definitions must come after we define our fonts.
+\newbox\dblarrowbox \newbox\longdblarrowbox
+\newbox\pushcharbox \newbox\bullbox
+\newbox\equivbox \newbox\errorbox
+
+\let\ptexequiv = \equiv
+
+%{\tentt
+%\global\setbox\dblarrowbox = \hbox to 1em{\hfil$\Rightarrow$\hfil}
+%\global\setbox\longdblarrowbox = \hbox to 1em{\hfil$\mapsto$\hfil}
+%\global\setbox\pushcharbox = \hbox to 1em{\hfil$\dashv$\hfil}
+%\global\setbox\equivbox = \hbox to 1em{\hfil$\ptexequiv$\hfil}
+% Adapted from the manmac format (p.420 of TeXbook)
+%\global\setbox\bullbox = \hbox to 1em{\kern.15em\vrule height .75ex width .85ex
+% depth .1ex\hfil}
+%}
+
+\def\point{$\star$}
+
+\def\result{\leavevmode\raise.15ex\hbox to 1em{\hfil$\Rightarrow$\hfil}}
+\def\expansion{\leavevmode\raise.1ex\hbox to 1em{\hfil$\mapsto$\hfil}}
+\def\print{\leavevmode\lower.1ex\hbox to 1em{\hfil$\dashv$\hfil}}
+
+\def\equiv{\leavevmode\lower.1ex\hbox to 1em{\hfil$\ptexequiv$\hfil}}
+
+% Adapted from the TeXbook's \boxit.
+{\tentt \global\dimen0 = 3em}% Width of the box.
+\dimen2 = .55pt % Thickness of rules
+% The text. (`r' is open on the right, `e' somewhat less so on the left.)
+\setbox0 = \hbox{\kern-.75pt \tensf error\kern-1.5pt}
+
+\global\setbox\errorbox=\hbox to \dimen0{\hfil
+ \hsize = \dimen0 \advance\hsize by -5.8pt % Space to left+right.
+ \advance\hsize by -2\dimen2 % Rules.
+ \vbox{
+ \hrule height\dimen2
+ \hbox{\vrule width\dimen2 \kern3pt % Space to left of text.
+ \vtop{\kern2.4pt \box0 \kern2.4pt}% Space above/below.
+ \kern3pt\vrule width\dimen2}% Space to right.
+ \hrule height\dimen2}
+ \hfil}
+
+% The @error{} command.
+\def\error{\leavevmode\lower.7ex\copy\errorbox}
+
+% @tex ... @end tex escapes into raw TeX temporarily.
+% One exception: @ is still an escape character, so that @end tex works.
+% But \@ or @@ will get a plain tex @ character.
+
+\def\tex{\begingroup
+\catcode `\\=0 \catcode `\{=1 \catcode `\}=2
+\catcode `\$=3 \catcode `\&=4 \catcode `\#=6
+\catcode `\^=7 \catcode `\_=8 \catcode `\~=13 \let~=\tie
+\catcode `\%=14
+\catcode 43=12
+\catcode`\"=12
+\catcode`\==12
+\catcode`\|=12
+\catcode`\<=12
+\catcode`\>=12
+\escapechar=`\\
+%
+\let\{=\ptexlbrace
+\let\}=\ptexrbrace
+\let\.=\ptexdot
+\let\*=\ptexstar
+\let\dots=\ptexdots
+\def\@{@}%
+\let\bullet=\ptexbullet
+\let\b=\ptexb \let\c=\ptexc \let\i=\ptexi \let\t=\ptext \let\l=\ptexl
+\let\L=\ptexL
+%
+\let\Etex=\endgroup}
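+% Illustrative use in a Texinfo source (the formula is arbitrary):
+%   @tex
+%   $$e^{i\pi} + 1 = 0$$
+%   @end tex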
+
+% Define @lisp ... @endlisp.
+% @lisp does a \begingroup so it can rebind things,
+% including the definition of @endlisp (which normally is erroneous).
+
+% Amount to narrow the margins by for @lisp.
+\newskip\lispnarrowing \lispnarrowing=0.4in
+
+% This is the definition that ^^M gets inside @lisp, @example, and other
+% such environments. \null is better than a space, since it doesn't
+% have any width.
+\def\lisppar{\null\endgraf}
+
+% Make each space character in the input produce a normal interword
+% space in the output. Don't allow a line break at this space, as this
+% is used only in environments like @example, where each line of input
+% should produce a line of output anyway.
+%
+{\obeyspaces %
+\gdef\sepspaces{\obeyspaces\let =\tie}}
+
+% Define \obeyedspace to be our active space, whatever it is. This is
+% for use in \parsearg.
+{\sepspaces %
+\global\let\obeyedspace= }
+
+% This space is always present above and below environments.
+\newskip\envskipamount \envskipamount = 0pt
+
+% Make spacing above and below the environment symmetrical.  We use
+% \parskip here to help in doing that, since in @example-like environments
+% \parskip is reset to zero; thus the \afterenvbreak inserts no space -- but
+% the start of the next paragraph will insert \parskip.
+%
+\def\aboveenvbreak{{\advance\envskipamount by \parskip
+\endgraf \ifdim\lastskip<\envskipamount
+\removelastskip \penalty-50 \vskip\envskipamount \fi}}
+
+\let\afterenvbreak = \aboveenvbreak
+
+% \nonarrowing is a flag. If "set", @lisp etc don't narrow margins.
+\let\nonarrowing=\relax
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% \cartouche: draw rectangle w/rounded corners around argument
+\font\circle=lcircle10
+\newdimen\circthick
+\newdimen\cartouter\newdimen\cartinner
+\newskip\normbskip\newskip\normpskip\newskip\normlskip
+\circthick=\fontdimen8\circle
+%
+\def\ctl{{\circle\char'013\hskip -6pt}}% 6pt from pl file: 1/2charwidth
+\def\ctr{{\hskip 6pt\circle\char'010}}
+\def\cbl{{\circle\char'012\hskip -6pt}}
+\def\cbr{{\hskip 6pt\circle\char'011}}
+\def\carttop{\hbox to \cartouter{\hskip\lskip
+ \ctl\leaders\hrule height\circthick\hfil\ctr
+ \hskip\rskip}}
+\def\cartbot{\hbox to \cartouter{\hskip\lskip
+ \cbl\leaders\hrule height\circthick\hfil\cbr
+ \hskip\rskip}}
+%
+\newskip\lskip\newskip\rskip
+
+\long\def\cartouche{%
+\begingroup
+ \lskip=\leftskip \rskip=\rightskip
+ \leftskip=0pt\rightskip=0pt %we want these *outside*.
+ \cartinner=\hsize \advance\cartinner by-\lskip
+ \advance\cartinner by-\rskip
+ \cartouter=\hsize
+ \advance\cartouter by 18pt % allow for 3pt kerns on either
+% side, and for 6pt waste from
+% each corner char
+ \normbskip=\baselineskip \normpskip=\parskip \normlskip=\lineskip
+ % Flag to tell @lisp, etc., not to narrow margin.
+ \let\nonarrowing=\comment
+ \vbox\bgroup
+ \baselineskip=0pt\parskip=0pt\lineskip=0pt
+ \carttop
+ \hbox\bgroup
+ \hskip\lskip
+ \vrule\kern3pt
+ \vbox\bgroup
+ \hsize=\cartinner
+ \kern3pt
+ \begingroup
+ \baselineskip=\normbskip
+ \lineskip=\normlskip
+ \parskip=\normpskip
+ \vskip -\parskip
+\def\Ecartouche{%
+ \endgroup
+ \kern3pt
+ \egroup
+ \kern3pt\vrule
+ \hskip\rskip
+ \egroup
+ \cartbot
+ \egroup
+\endgroup
+}}
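+% Illustrative use, drawing a rounded box around an example
+% (the body shown is arbitrary):
+%   @cartouche
+%   @example
+%   yylex ()
+%   @end example
+%   @end cartouche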
+
+
+% This macro is called at the beginning of all the @example variants,
+% inside a group.
+\def\nonfillstart{%
+ \aboveenvbreak
+ \inENV % This group ends at the end of the body
+ \hfuzz = 12pt % Don't be fussy
+ \sepspaces % Make spaces be word-separators rather than space tokens.
+ \singlespace
+ \let\par = \lisppar % don't ignore blank lines
+ \obeylines % each line of input is a line of output
+ \parskip = 0pt
+ \parindent = 0pt
+ \emergencystretch = 0pt % don't try to avoid overfull boxes
+ % @cartouche defines \nonarrowing to inhibit narrowing
+ % at next level down.
+ \ifx\nonarrowing\relax
+ \advance \leftskip by \lispnarrowing
+ \exdentamount=\lispnarrowing
+ \let\exdent=\nofillexdent
+ \let\nonarrowing=\relax
+ \fi
+}
+
+% To end an @example-like environment, we first end the paragraph
+% (via \afterenvbreak's vertical glue), and then the group. That way we
+% keep the zero \parskip that the environments set -- \parskip glue
+% will be inserted at the beginning of the next paragraph in the
+% document, after the environment.
+%
+\def\nonfillfinish{\afterenvbreak\endgroup}%
+
+% This macro is the common body for @lisp; @example, @smallexample,
+% and @smalllisp below reuse it.
+\def\lisp{\begingroup
+ \nonfillstart
+ \let\Elisp = \nonfillfinish
+ \tt
+ \rawbackslash % have \ input char produce \ char from current font
+ \gobble
+}
+
+% Define the \E... control sequence only if we are inside the
+% environment, so the error checking in \end will work.
+%
+% We must call \lisp last in the definition, since it reads the
+% return following the @example (or whatever) command.
+%
+\def\example{\begingroup \def\Eexample{\nonfillfinish\endgroup}\lisp}
+\def\smallexample{\begingroup \def\Esmallexample{\nonfillfinish\endgroup}\lisp}
+\def\smalllisp{\begingroup \def\Esmalllisp{\nonfillfinish\endgroup}\lisp}
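+% Illustrative use of these environments in a Texinfo source
+% (the contents are arbitrary):
+%   @example
+%   bison -d grammar.y
+%   @end example
+% Each input line becomes one output line, set in typewriter type with
+% the left margin narrowed by \lispnarrowing.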
+
+% @smallexample and @smalllisp. This is not used unless the @smallbook
+% command is given. Originally contributed by Pavel@xerox.
+%
+\def\smalllispx{\begingroup
+ \nonfillstart
+ \let\Esmalllisp = \nonfillfinish
+ \let\Esmallexample = \nonfillfinish
+ %
+ % Smaller interline space and fonts for small examples.
+ \baselineskip 10pt
+ \indexfonts \tt
+ \rawbackslash % output the \ character from the current font
+ \gobble
+}
+
+% This is @display; same as @lisp except use roman font.
+%
+\def\display{\begingroup
+ \nonfillstart
+ \let\Edisplay = \nonfillfinish
+ \gobble
+}
+
+% This is @format; same as @display except don't narrow margins.
+%
+\def\format{\begingroup
+ \let\nonarrowing = t
+ \nonfillstart
+ \let\Eformat = \nonfillfinish
+ \gobble
+}
+
+% @flushleft (same as @format) and @flushright.
+%
+\def\flushleft{\begingroup
+ \let\nonarrowing = t
+ \nonfillstart
+ \let\Eflushleft = \nonfillfinish
+ \gobble
+}
+\def\flushright{\begingroup
+ \let\nonarrowing = t
+ \nonfillstart
+ \let\Eflushright = \nonfillfinish
+ \advance\leftskip by 0pt plus 1fill
+ \gobble}
+
+% @quotation does normal linebreaking and narrows the margins.
+%
+\def\quotation{%
+\begingroup\inENV %This group ends at the end of the @quotation body
+{\parskip=0pt % because we will skip by \parskip too, later
+\aboveenvbreak}%
+\singlespace
+\parindent=0pt
+\let\Equotation = \nonfillfinish
+% @cartouche defines \nonarrowing to inhibit narrowing
+% at next level down.
+\ifx\nonarrowing\relax
+\advance \leftskip by \lispnarrowing
+\advance \rightskip by \lispnarrowing
+\exdentamount=\lispnarrowing
+\let\nonarrowing=\relax
+\fi}
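+% Illustrative use (the text is arbitrary):
+%   @quotation
+%   This text is filled normally, but both margins are narrowed.
+%   @end quotation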
+
+\message{defuns,}
+% Define formatter for defuns
+% First, allow user to change definition object font (\df) internally
+\def\setdeffont #1 {\csname DEF#1\endcsname}
+
+\newskip\defbodyindent \defbodyindent=.4in
+\newskip\defargsindent \defargsindent=50pt
+\newskip\deftypemargin \deftypemargin=12pt
+\newskip\deflastargmargin \deflastargmargin=18pt
+
+\newcount\parencount
+% define \functionparens, which makes ( and ) and & do special things.
+% \functionparens affects the group it is contained in.
+\def\activeparens{%
+\catcode`\(=\active \catcode`\)=\active \catcode`\&=\active
+\catcode`\[=\active \catcode`\]=\active}
+
+% Make control sequences which act like normal parenthesis chars.
+\let\lparen = ( \let\rparen = )
+
+{\activeparens % Now, smart parens don't turn on until &foo (see \amprm)
+
+% Be sure that we always have a definition for `(', etc. For example,
+% if the fn name has parens in it, \boldbrax will not be in effect yet,
+% so TeX would otherwise complain about undefined control sequence.
+\global\let(=\lparen \global\let)=\rparen
+\global\let[=\lbrack \global\let]=\rbrack
+
+\gdef\functionparens{\boldbrax\let&=\amprm\parencount=0 }
+\gdef\boldbrax{\let(=\opnr\let)=\clnr\let[=\lbrb\let]=\rbrb}
+
+% Definitions of (, ) and & used in args for functions.
+% This is the definition of ( outside of all parentheses.
+\gdef\oprm#1 {{\rm\char`\(}#1 \bf \let(=\opnested %
+\global\advance\parencount by 1 }
+%
+% This is the definition of ( when already inside a level of parens.
+\gdef\opnested{\char`\(\global\advance\parencount by 1 }
+%
+\gdef\clrm{% Print a paren in roman if it is taking us back to depth of 0.
+% also in that case restore the outer-level definition of (.
+\ifnum \parencount=1 {\rm \char `\)}\sl \let(=\oprm \else \char `\) \fi
+\global\advance \parencount by -1 }
+% If we encounter &foo, then turn on ()-hacking afterwards
+\gdef\amprm#1 {{\rm\&#1}\let(=\oprm \let)=\clrm\ }
+%
+\gdef\normalparens{\boldbrax\let&=\ampnr}
+} % End of definition inside \activeparens
+%% These parens (in \boldbrax) actually are a little bolder than the
+%% contained text. This is especially needed for [ and ]
+\def\opnr{{\sf\char`\(}} \def\clnr{{\sf\char`\)}} \def\ampnr{\&}
+\def\lbrb{{\bf\char`\[}} \def\rbrb{{\bf\char`\]}}
+
+% First, defname, which formats the header line itself.
+% #1 should be the function name.
+% #2 should be the type of definition, such as "Function".
+
+\def\defname #1#2{%
+% Get the values of \leftskip and \rightskip as they were
+% outside the @def...
+\dimen2=\leftskip
+\advance\dimen2 by -\defbodyindent
+\dimen3=\rightskip
+\advance\dimen3 by -\defbodyindent
+\noindent %
+\setbox0=\hbox{\hskip \deflastargmargin{\rm #2}\hskip \deftypemargin}%
+\dimen0=\hsize \advance \dimen0 by -\wd0 % compute size for first line
+\dimen1=\hsize \advance \dimen1 by -\defargsindent %size for continuations
+\parshape 2 0in \dimen0 \defargsindent \dimen1 %
+% Now output arg 2 ("Function" or some such)
+% ending at \deftypemargin from the right margin,
+% but stuck inside a box of width 0 so it does not interfere with linebreaking
+{% Adjust \hsize to exclude the ambient margins,
+% so that \rightline will obey them.
+\advance \hsize by -\dimen2 \advance \hsize by -\dimen3
+\rlap{\rightline{{\rm #2}\hskip \deftypemargin}}}%
+% Make all lines underfull and no complaints:
+\tolerance=10000 \hbadness=10000
+\advance\leftskip by -\defbodyindent
+\exdentamount=\defbodyindent
+{\df #1}\enskip % Generate function name
+}
+
+% Actually process the body of a definition
+% #1 should be the terminating control sequence, such as \Edefun.
+% #2 should be the "another name" control sequence, such as \defunx.
+% #3 should be the control sequence that actually processes the header,
+% such as \defunheader.
+
+\def\defparsebody #1#2#3{\begingroup\inENV% Environment for definitionbody
+\medbreak %
+% Define the end token that this defining construct specifies
+% so that it will exit this group.
+\def#1{\endgraf\endgroup\medbreak}%
+\def#2{\begingroup\obeylines\activeparens\spacesplit#3}%
+\parindent=0in
+\advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+\exdentamount=\defbodyindent
+\begingroup %
+\catcode 61=\active % 61 is `='
+\obeylines\activeparens\spacesplit#3}
+
+\def\defmethparsebody #1#2#3#4 {\begingroup\inENV %
+\medbreak %
+% Define the end token that this defining construct specifies
+% so that it will exit this group.
+\def#1{\endgraf\endgroup\medbreak}%
+\def#2##1 {\begingroup\obeylines\activeparens\spacesplit{#3{##1}}}%
+\parindent=0in
+\advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+\exdentamount=\defbodyindent
+\begingroup\obeylines\activeparens\spacesplit{#3{#4}}}
+
+\def\defopparsebody #1#2#3#4#5 {\begingroup\inENV %
+\medbreak %
+% Define the end token that this defining construct specifies
+% so that it will exit this group.
+\def#1{\endgraf\endgroup\medbreak}%
+\def#2##1 ##2 {\def#4{##1}%
+\begingroup\obeylines\activeparens\spacesplit{#3{##2}}}%
+\parindent=0in
+\advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+\exdentamount=\defbodyindent
+\begingroup\obeylines\activeparens\spacesplit{#3{#5}}}
+
+% These parsing functions are similar to the preceding ones
+% except that they do not make parens into active characters.
+% These are used for "variables" since they have no arguments.
+
+\def\defvarparsebody #1#2#3{\begingroup\inENV% Environment for definitionbody
+\medbreak %
+% Define the end token that this defining construct specifies
+% so that it will exit this group.
+\def#1{\endgraf\endgroup\medbreak}%
+\def#2{\begingroup\obeylines\spacesplit#3}%
+\parindent=0in
+\advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+\exdentamount=\defbodyindent
+\begingroup %
+\catcode 61=\active %
+\obeylines\spacesplit#3}
+
+% This is used for \def{tp,vr}parsebody. It could probably be used for
+% some of the others, too, with some judicious conditionals.
+%
+\def\parsebodycommon#1#2#3{%
+ \begingroup\inENV %
+ \medbreak %
+ % Define the end token that this defining construct specifies
+ % so that it will exit this group.
+ \def#1{\endgraf\endgroup\medbreak}%
+ \def#2##1 {\begingroup\obeylines\spacesplit{#3{##1}}}%
+ \parindent=0in
+ \advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+ \exdentamount=\defbodyindent
+ \begingroup\obeylines
+}
+
+\def\defvrparsebody#1#2#3#4 {%
+ \parsebodycommon{#1}{#2}{#3}%
+ \spacesplit{#3{#4}}%
+}
+
+% This loses on `@deftp {Data Type} {struct termios}' -- it thinks the
+% type is just `struct', because we lose the braces in `{struct
+% termios}' when \spacesplit reads its undelimited argument. Sigh.
+% \let\deftpparsebody=\defvrparsebody
+%
+% So, to get around this, we put \empty in with the type name. That
+% way, TeX won't find exactly `{...}' as an undelimited argument, and
+% won't strip off the braces.
+%
+\def\deftpparsebody #1#2#3#4 {%
+ \parsebodycommon{#1}{#2}{#3}%
+ \spacesplit{\parsetpheaderline{#3{#4}}}\empty
+}
+
+% Fine, but then we have to eventually remove the \empty *and* the
+% braces (if any). That's what this does, putting the result in \tptemp.
+%
+\def\removeemptybraces\empty#1\relax{\def\tptemp{#1}}%
+
+% After \spacesplit has done its work, this is called -- #1 is the final
+% thing to call, #2 the type name (which starts with \empty), and #3
+% (which might be empty) the arguments.
+%
+\def\parsetpheaderline#1#2#3{%
+ \removeemptybraces#2\relax
+ #1{\tptemp}{#3}%
+}%
+
+\def\defopvarparsebody #1#2#3#4#5 {\begingroup\inENV %
+\medbreak %
+% Define the end token that this defining construct specifies
+% so that it will exit this group.
+\def#1{\endgraf\endgroup\medbreak}%
+\def#2##1 ##2 {\def#4{##1}%
+\begingroup\obeylines\spacesplit{#3{##2}}}%
+\parindent=0in
+\advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+\exdentamount=\defbodyindent
+\begingroup\obeylines\spacesplit{#3{#5}}}
+
+% Split up #2 at the first space token.
+% call #1 with two arguments:
+% the first is all of #2 before the space token,
+% the second is all of #2 after that space token.
+% If #2 contains no space token, all of it is passed as the first arg
+% and the second is passed as empty.
+
+{\obeylines
+\gdef\spacesplit#1#2^^M{\endgroup\spacesplitfoo{#1}#2 \relax\spacesplitfoo}%
+\long\gdef\spacesplitfoo#1#2 #3#4\spacesplitfoo{%
+\ifx\relax #3%
+#1{#2}{}\else #1{#2}{#3#4}\fi}}
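+% For instance (hypothetical callback \foo), inside the group
+%   \spacesplit\foo bar baz qux
+% ends up calling \foo{bar}{baz qux \relax} -- everything before the
+% first space becomes the first argument, the rest (plus a harmless
+% trailing \relax) the second -- while
+%   \spacesplit\foo bar
+% calls \foo{bar}{}.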
+
+% So much for the things common to all kinds of definitions.
+
+% Define @defun.
+
+% First, define the processing that is wanted for arguments of \defun
+% Use this to expand the args and terminate the paragraph they make up
+
+\def\defunargs #1{\functionparens \sl
+% Expand, preventing hyphenation at `-' chars.
+% Note that groups don't affect changes in \hyphenchar.
+\hyphenchar\tensl=0
+#1%
+\hyphenchar\tensl=45
+\ifnum\parencount=0 \else \errmessage{unbalanced parens in @def arguments}\fi%
+\interlinepenalty=10000
+\advance\rightskip by 0pt plus 1fil
+\endgraf\penalty 10000\vskip -\parskip\penalty 10000%
+}
+
+\def\deftypefunargs #1{%
+% Expand, preventing hyphenation at `-' chars.
+% Note that groups don't affect changes in \hyphenchar.
+\functionparens
+\code{#1}%
+\interlinepenalty=10000
+\advance\rightskip by 0pt plus 1fil
+\endgraf\penalty 10000\vskip -\parskip\penalty 10000%
+}
+
+% Do complete processing of one @defun or @defunx line already parsed.
+
+% @deffn Command forward-char nchars
+
+\def\deffn{\defmethparsebody\Edeffn\deffnx\deffnheader}
+
+\def\deffnheader #1#2#3{\doind {fn}{\code{#2}}%
+\begingroup\defname {#2}{#1}\defunargs{#3}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
+
+% @defun == @deffn Function
+
+\def\defun{\defparsebody\Edefun\defunx\defunheader}
+
+\def\defunheader #1#2{\doind {fn}{\code{#1}}% Make entry in function index
+\begingroup\defname {#1}{Function}%
+\defunargs {#2}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
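+% Illustrative @defun usage (the function shown is hypothetical):
+%   @defun fopen (name, mode)
+%   Open the file @var{name} ...
+%   @end defun
+% This indexes `fopen' in the fn index and prints a header line with
+% `Function' flushed right.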
+
+% @deftypefun int foobar (int @var{foo}, float @var{bar})
+
+\def\deftypefun{\defparsebody\Edeftypefun\deftypefunx\deftypefunheader}
+
+% #1 is the data type. #2 is the name and args.
+\def\deftypefunheader #1#2{\deftypefunheaderx{#1}#2 \relax}
+% #1 is the data type, #2 the name, #3 the args.
+\def\deftypefunheaderx #1#2 #3\relax{%
+\doind {fn}{\code{#2}}% Make entry in function index
+\begingroup\defname {\code{#1} #2}{Function}%
+\deftypefunargs {#3}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
+
+% @deftypefn {Library Function} int foobar (int @var{foo}, float @var{bar})
+
+\def\deftypefn{\defmethparsebody\Edeftypefn\deftypefnx\deftypefnheader}
+
+% #1 is the classification. #2 is the data type. #3 is the name and args.
+\def\deftypefnheader #1#2#3{\deftypefnheaderx{#1}{#2}#3 \relax}
+% #1 is the classification, #2 the data type, #3 the name, #4 the args.
+\def\deftypefnheaderx #1#2#3 #4\relax{%
+\doind {fn}{\code{#3}}% Make entry in function index
+\begingroup
+\normalparens % notably, turn off `&' magic, which prevents
+% at least some C++ text from working
+\defname {\code{#2} #3}{#1}%
+\deftypefunargs {#4}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
+
+% @defmac == @deffn Macro
+
+\def\defmac{\defparsebody\Edefmac\defmacx\defmacheader}
+
+\def\defmacheader #1#2{\doind {fn}{\code{#1}}% Make entry in function index
+\begingroup\defname {#1}{Macro}%
+\defunargs {#2}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
+
+% @defspec == @deffn Special Form
+
+\def\defspec{\defparsebody\Edefspec\defspecx\defspecheader}
+
+\def\defspecheader #1#2{\doind {fn}{\code{#1}}% Make entry in function index
+\begingroup\defname {#1}{Special Form}%
+\defunargs {#2}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
+
+% These definitions are run if you use @defunx, @deffnx, etc.
+% anywhere other than immediately after the corresponding
+% @defun/@defunx, @deffn/@deffnx, etc.
+
+\def\deffnx #1 {\errmessage{@deffnx in invalid context}}
+\def\defunx #1 {\errmessage{@defunx in invalid context}}
+\def\defmacx #1 {\errmessage{@defmacx in invalid context}}
+\def\defspecx #1 {\errmessage{@defspecx in invalid context}}
+\def\deftypefnx #1 {\errmessage{@deftypefnx in invalid context}}
+\def\deftypefunx #1 {\errmessage{@deftypefunx in invalid context}}
+
+% @defmethod, and so on
+
+% @defop {Funny Method} foo-class frobnicate argument
+
+\def\defop #1 {\def\defoptype{#1}%
+\defopparsebody\Edefop\defopx\defopheader\defoptype}
+
+\def\defopheader #1#2#3{%
+\dosubind {fn}{\code{#2}}{on #1}% Make entry in function index
+\begingroup\defname {#2}{\defoptype{} on #1}%
+\defunargs {#3}\endgroup %
+}
+
+% @defmethod == @defop Method
+
+\def\defmethod{\defmethparsebody\Edefmethod\defmethodx\defmethodheader}
+
+\def\defmethodheader #1#2#3{%
+\dosubind {fn}{\code{#2}}{on #1}% entry in function index
+\begingroup\defname {#2}{Method on #1}%
+\defunargs {#3}\endgroup %
+}
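+% Illustrative @defmethod usage (class and method are hypothetical):
+%   @defmethod window-class expose (x, y, width, height)
+%   ...
+%   @end defmethod
+% The right-flushed category reads `Method on window-class', and the
+% fn index gets a two-level entry via \dosubind.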
+
+% @defcv {Class Option} foo-class foo-flag
+
+\def\defcv #1 {\def\defcvtype{#1}%
+\defopvarparsebody\Edefcv\defcvx\defcvarheader\defcvtype}
+
+\def\defcvarheader #1#2#3{%
+\dosubind {vr}{\code{#2}}{of #1}% Make entry in var index
+\begingroup\defname {#2}{\defcvtype{} of #1}%
+\defvarargs {#3}\endgroup %
+}
+
+% @defivar == @defcv {Instance Variable}
+
+\def\defivar{\defvrparsebody\Edefivar\defivarx\defivarheader}
+
+\def\defivarheader #1#2#3{%
+\dosubind {vr}{\code{#2}}{of #1}% Make entry in var index
+\begingroup\defname {#2}{Instance Variable of #1}%
+\defvarargs {#3}\endgroup %
+}
+
+% These definitions are run if you use @defmethodx, etc.,
+% anywhere other than immediately after a @defmethod, etc.
+
+\def\defopx #1 {\errmessage{@defopx in invalid context}}
+\def\defmethodx #1 {\errmessage{@defmethodx in invalid context}}
+\def\defcvx #1 {\errmessage{@defcvx in invalid context}}
+\def\defivarx #1 {\errmessage{@defivarx in invalid context}}
+
+% Now @defvar
+
+% First, define the processing that is wanted for arguments of @defvar.
+% This is actually simple: just print them in roman.
+% This must expand the args and terminate the paragraph they make up
+\def\defvarargs #1{\normalparens #1%
+\interlinepenalty=10000
+\endgraf\penalty 10000\vskip -\parskip\penalty 10000}
+
+% @defvr Counter foo-count
+
+\def\defvr{\defvrparsebody\Edefvr\defvrx\defvrheader}
+
+\def\defvrheader #1#2#3{\doind {vr}{\code{#2}}%
+\begingroup\defname {#2}{#1}\defvarargs{#3}\endgroup}
+
+% @defvar == @defvr Variable
+
+\def\defvar{\defvarparsebody\Edefvar\defvarx\defvarheader}
+
+\def\defvarheader #1#2{\doind {vr}{\code{#1}}% Make entry in var index
+\begingroup\defname {#1}{Variable}%
+\defvarargs {#2}\endgroup %
+}
+
+% @defopt == @defvr {User Option}
+
+\def\defopt{\defvarparsebody\Edefopt\defoptx\defoptheader}
+
+\def\defoptheader #1#2{\doind {vr}{\code{#1}}% Make entry in var index
+\begingroup\defname {#1}{User Option}%
+\defvarargs {#2}\endgroup %
+}
+
+% @deftypevar int foobar
+
+\def\deftypevar{\defvarparsebody\Edeftypevar\deftypevarx\deftypevarheader}
+
+% #1 is the data type. #2 is the name.
+\def\deftypevarheader #1#2{%
+\doind {vr}{\code{#2}}% Make entry in variables index
+\begingroup\defname {\code{#1} #2}{Variable}%
+\interlinepenalty=10000
+\endgraf\penalty 10000\vskip -\parskip\penalty 10000
+\endgroup}
+
+% @deftypevr {Global Flag} int enable
+
+\def\deftypevr{\defvrparsebody\Edeftypevr\deftypevrx\deftypevrheader}
+
+\def\deftypevrheader #1#2#3{\doind {vr}{\code{#3}}%
+\begingroup\defname {\code{#2} #3}{#1}
+\interlinepenalty=10000
+\endgraf\penalty 10000\vskip -\parskip\penalty 10000
+\endgroup}
+
+% These definitions are run if you use @defvrx, @defvarx, etc.,
+% anywhere other than immediately after the corresponding @defvr, @defvar, etc.
+
+\def\defvrx #1 {\errmessage{@defvrx in invalid context}}
+\def\defvarx #1 {\errmessage{@defvarx in invalid context}}
+\def\defoptx #1 {\errmessage{@defoptx in invalid context}}
+\def\deftypevarx #1 {\errmessage{@deftypevarx in invalid context}}
+\def\deftypevrx #1 {\errmessage{@deftypevrx in invalid context}}
+
+% Now define @deftp
+% Args are printed in bold, a slight difference from @defvar.
+
+\def\deftpargs #1{\bf \defvarargs{#1}}
+
+% @deftp Class window height width ...
+
+\def\deftp{\deftpparsebody\Edeftp\deftpx\deftpheader}
+
+\def\deftpheader #1#2#3{\doind {tp}{\code{#2}}%
+\begingroup\defname {#2}{#1}\deftpargs{#3}\endgroup}
+
+% This definition is run if you use @deftpx, etc.,
+% anywhere other than immediately after a @deftp, etc.
+
+\def\deftpx #1 {\errmessage{@deftpx in invalid context}}
+
+\message{cross reference,}
+% Define cross-reference macros
+\newwrite \auxfile
+
+\newif\ifhavexrefs % True if xref values are known.
+\newif\ifwarnedxrefs % True if we warned once that they aren't known.
+
+% \setref{foo} defines a cross-reference point named foo.
+
+\def\setref#1{%
+\dosetq{#1-title}{Ytitle}%
+\dosetq{#1-pg}{Ypagenumber}%
+\dosetq{#1-snt}{Ysectionnumberandtype}}
+
+\def\unnumbsetref#1{%
+\dosetq{#1-title}{Ytitle}%
+\dosetq{#1-pg}{Ypagenumber}%
+\dosetq{#1-snt}{Ynothing}}
+
+\def\appendixsetref#1{%
+\dosetq{#1-title}{Ytitle}%
+\dosetq{#1-pg}{Ypagenumber}%
+\dosetq{#1-snt}{Yappendixletterandtype}}
+
+% \xref, \pxref, and \ref generate cross-references to specified points.
+% For \xrefX, #1 is the node name, #2 the name of the Info
+% cross-reference, #3 the printed node name, #4 the name of the Info
+% file, #5 the name of the printed manual. All but the node name can be
+% omitted.
+%
+\def\pxref#1{see \xrefX[#1,,,,,,,]}
+\def\xref#1{See \xrefX[#1,,,,,,,]}
+\def\ref#1{\xrefX[#1,,,,,,,]}
+\def\xrefX[#1,#2,#3,#4,#5,#6]{\begingroup%
+\def\printedmanual{\ignorespaces #5}%
+\def\printednodename{\ignorespaces #3}%
+%
+\setbox1=\hbox{\printedmanual}%
+\setbox0=\hbox{\printednodename}%
+\ifdim \wd0=0pt%
+% No printed node name was explicitly given.
+\ifx SETxref-automatic-section-title %
+% This line should make the actual chapter or section title appear inside
+% the square brackets. Use the real section title if we have it.
+\ifdim \wd1>0pt%
+% It is in another manual, so we don't have it.
+\def\printednodename{\ignorespaces #1} \else%
+% We know the real title if we have the xref values.
+\ifhavexrefs \def\printednodename{\refx{#1-title}}%
+% Otherwise just copy the Info node name.
+\else \def\printednodename{\ignorespaces #1} \fi%
+\fi\def\printednodename{#1-title}%
+\else% This line just uses the node name.
+\def\printednodename{\ignorespaces #1}%
+\fi% ends \ifx SETxref-automatic-section-title
+\fi% ends \ifdim \wd0
+%
+%
+% If we use \unhbox0 and \unhbox1 to print the node names, TeX does
+% not insert empty discretionaries after hyphens, which means that it
+% will not find a line break at a hyphen in a node name. Since some
+% manuals are best written with fairly long node names, containing
+% hyphens, this is a loss. Therefore, we simply give the text of
+% the node name again, so it is as if TeX is seeing it for the first
+% time.
+\ifdim \wd1>0pt
+section ``\printednodename'' in \cite{\printedmanual}%
+\else%
+\turnoffactive%
+\refx{#1-snt}{} [\printednodename], page\tie\refx{#1-pg}{}%
+\fi
+\endgroup}
+
+% \dosetq is the interface for calls from other macros
+
+% Use \turnoffactive so that punctuation chars such as underscore
+% work in node names.
+\def\dosetq #1#2{{\let\folio=0 \turnoffactive%
+\edef\next{\write\auxfile{\internalsetq {#1}{#2}}}%
+\next}}
+
+% \internalsetq {foo}{page} expands into
+% CHARACTERS 'xrdef {foo}{...expansion of \Ypage...}
+% When the aux file is read, ' is the escape character
+
+\def\internalsetq #1#2{'xrdef {#1}{\csname #2\endcsname}}
+
+% Things to be expanded by \internalsetq
+
+\def\Ypagenumber{\folio}
+
+\def\Ytitle{\thissection}
+
+\def\Ynothing{}
+
+\def\Ysectionnumberandtype{%
+\ifnum\secno=0 Chapter\xreftie\the\chapno %
+\else \ifnum \subsecno=0 Section\xreftie\the\chapno.\the\secno %
+\else \ifnum \subsubsecno=0 %
+Section\xreftie\the\chapno.\the\secno.\the\subsecno %
+\else %
+Section\xreftie\the\chapno.\the\secno.\the\subsecno.\the\subsubsecno %
+\fi \fi \fi }
+
+\def\Yappendixletterandtype{%
+\ifnum\secno=0 Appendix\xreftie'char\the\appendixno{}%
+\else \ifnum \subsecno=0 Section\xreftie'char\the\appendixno.\the\secno %
+\else \ifnum \subsubsecno=0 %
+Section\xreftie'char\the\appendixno.\the\secno.\the\subsecno %
+\else %
+Section\xreftie'char\the\appendixno.\the\secno.\the\subsecno.\the\subsubsecno %
+\fi \fi \fi }
+
+\gdef\xreftie{'tie}
+
+% Use TeX 3.0's \inputlineno to get the line number, for better error
+% messages, but if we're using an old version of TeX, don't do anything.
+%
+\ifx\inputlineno\thisisundefined
+ \let\linenumber = \empty % Non-3.0.
+\else
+ \def\linenumber{\the\inputlineno:\space}
+\fi
+
+% Define \refx{NAME}{SUFFIX} to reference a cross-reference string named NAME.
+% If its value is nonempty, SUFFIX is output afterward.
+
+\def\refx#1#2{%
+ \expandafter\ifx\csname X#1\endcsname\relax
+ % If not defined, say something at least.
+ $\langle$un\-de\-fined$\rangle$%
+ \ifhavexrefs
+ \message{\linenumber Undefined cross reference `#1'.}%
+ \else
+ \ifwarnedxrefs\else
+ \global\warnedxrefstrue
+ \message{Cross reference values unknown; you must run TeX again.}%
+ \fi
+ \fi
+ \else
+ % It's defined, so just use it.
+ \csname X#1\endcsname
+ \fi
+ #2% Output the suffix in any case.
+}
+
+% Read the last existing aux file, if any. No error if none exists.
+
+% This is the macro invoked by entries in the aux file.
+\def\xrdef #1#2{
+{\catcode`\'=\other\expandafter \gdef \csname X#1\endcsname {#2}}}
+
+\def\readauxfile{%
+\begingroup
+\catcode `\^^@=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode `\^^C=\other
+\catcode `\^^D=\other
+\catcode `\^^E=\other
+\catcode `\^^F=\other
+\catcode `\^^G=\other
+\catcode `\^^H=\other
+\catcode `\ =\other
+\catcode `\^^L=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode `\=\other
+\catcode 26=\other
+\catcode `\^^[=\other
+\catcode `\^^\=\other
+\catcode `\^^]=\other
+\catcode `\^^^=\other
+\catcode `\^^_=\other
+\catcode `\@=\other
+\catcode `\^=\other
+\catcode `\~=\other
+\catcode `\[=\other
+\catcode `\]=\other
+\catcode`\"=\other
+\catcode`\_=\other
+\catcode`\|=\other
+\catcode`\<=\other
+\catcode`\>=\other
+\catcode `\$=\other
+\catcode `\#=\other
+\catcode `\&=\other
+% `\+ does not work, so use 43.
+\catcode 43=\other
+% the aux file uses ' as the escape.
+% Turn off \ as an escape so we do not lose on
+% entries which were dumped with control sequences in their names.
+% For example, 'xrdef {$\leq $-fun}{page ...} made by @defun ^^
+% Reference to such entries still does not work the way one would wish,
+% but at least they do not bomb out when the aux file is read in.
+\catcode `\{=1 \catcode `\}=2
+\catcode `\%=\other
+\catcode `\'=0
+\catcode `\\=\other
+\openin 1 \jobname.aux
+\ifeof 1 \else \closein 1 \input \jobname.aux \global\havexrefstrue
+\global\warnedobstrue
+\fi
+% Open the new aux file. TeX will close it automatically at exit.
+\openout \auxfile=\jobname.aux
+\endgroup}
+
+
+% Footnotes.
+
+\newcount \footnoteno
+
+% The trailing space in the following definition for supereject is
+% vital for proper filling; pages come out unaligned when you do a
+% pagealignmacro call if that space before the closing brace is
+% removed.
+\def\supereject{\par\penalty -20000\footnoteno =0 }
+
+% @footnotestyle is meaningful for Info output only.
+\let\footnotestyle=\comment
+
+\let\ptexfootnote=\footnote
+
+{\catcode `\@=11
+%
+% Auto-number footnotes. Otherwise like plain.
+\gdef\footnote{%
+ \global\advance\footnoteno by \@ne
+ \edef\thisfootno{$^{\the\footnoteno}$}%
+ %
+ % In case the footnote comes at the end of a sentence, preserve the
+ % extra spacing after we do the footnote number.
+ \let\@sf\empty
+ \ifhmode\edef\@sf{\spacefactor\the\spacefactor}\/\fi
+ %
+ % Remove inadvertent blank space before typesetting the footnote number.
+ \unskip
+ \thisfootno\@sf
+ \footnotezzz
+}%
+
+% Don't bother with the trickery in plain.tex to not require the
+% footnote text as a parameter. Our footnotes don't need to be so general.
+%
+\long\gdef\footnotezzz#1{\insert\footins{%
+ % We want to typeset this text as a normal paragraph, even if the
+ % footnote reference occurs in (for example) a display environment.
+ % So reset some parameters.
+ \interlinepenalty\interfootnotelinepenalty
+ \splittopskip\ht\strutbox % top baseline for broken footnotes
+ \splitmaxdepth\dp\strutbox
+ \floatingpenalty\@MM
+ \leftskip\z@skip
+ \rightskip\z@skip
+ \spaceskip\z@skip
+ \xspaceskip\z@skip
+ \parindent\defaultparindent
+ %
+ % Hang the footnote text off the number.
+ \hang
+ \textindent{\thisfootno}%
+ %
+ % Don't crash into the line above the footnote text. Since this
+ % expands into a box, it must come within the paragraph, lest it
+ % provide a place where TeX can split the footnote.
+ \footstrut
+ #1\strut}%
+}
+
+}%end \catcode `\@=11
+
+% Set the baselineskip to #1, and the lineskip and strut size
+% correspondingly. There is no deep meaning behind these magic numbers
+% used as factors; they just match (closely enough) what Knuth defined.
+%
+\def\lineskipfactor{.08333}
+\def\strutheightpercent{.70833}
+\def\strutdepthpercent {.29167}
+%
+\def\setleading#1{%
+ \normalbaselineskip = #1\relax
+ \normallineskip = \lineskipfactor\normalbaselineskip
+ \normalbaselines
+ \setbox\strutbox =\hbox{%
+ \vrule width0pt height\strutheightpercent\baselineskip
+ depth \strutdepthpercent \baselineskip
+ }%
+}
+
+% @| inserts a changebar to the left of the current line. It should
+% surround any changed text. This approach does *not* work if the
+% change spans more than two lines of output. To handle that, we would
+% have adopt a much more difficult approach (putting marks into the main
+% vertical list for the beginning and end of each change).
+%
+\def\|{%
+ % \vadjust can only be used in horizontal mode.
+ \leavevmode
+ %
+ % Append this vertical mode material after the current line in the output.
+ \vadjust{%
+ % We want to insert a rule with the height and depth of the current
+ % leading; that is exactly what \strutbox is supposed to record.
+ \vskip-\baselineskip
+ %
+ % \vadjust-items are inserted at the left edge of the type. So
+ % the \llap here moves out into the left-hand margin.
+ \llap{%
+ %
+ % For a thicker or thinner bar, change the `1pt'.
+ \vrule height\baselineskip width1pt
+ %
+ % This is the space between the bar and the text.
+ \hskip 12pt
+ }%
+ }%
+}
+
+% For a final copy, take out the rectangles
+% that mark overfull boxes (in case you have decided
+% that the text looks ok even though it passes the margin).
+%
+\def\finalout{\overfullrule=0pt}
+
+
+% End of control word definitions.
+
+\message{and turning on texinfo input format.}
+
+\def\openindices{%
+ \newindex{cp}%
+ \newcodeindex{fn}%
+ \newcodeindex{vr}%
+ \newcodeindex{tp}%
+ \newcodeindex{ky}%
+ \newcodeindex{pg}%
+}
+
+% Set some numeric style parameters, for 8.5 x 11 format.
+
+%\hsize = 6.5in
+\newdimen\defaultparindent \defaultparindent = 15pt
+\parindent = \defaultparindent
+\parskip 18pt plus 1pt
+\setleading{15pt}
+\advance\topskip by 1.2cm
+
+% Prevent underfull vbox error messages.
+\vbadness=10000
+
+% Following George Bush, just get rid of widows and orphans.
+\widowpenalty=10000
+\clubpenalty=10000
+
+% Use TeX 3.0's \emergencystretch to help line breaking, but if we're
+% using an old version of TeX, don't do anything. We want the amount of
+% stretch added to depend on the line length, hence the dependence on
+% \hsize. This makes it come to about 9pt for the 8.5x11 format.
+%
+\ifx\emergencystretch\thisisundefined
+ % Allow us to assign to \emergencystretch anyway.
+ \def\emergencystretch{\dimen0}%
+\else
+ \emergencystretch = \hsize
+ \divide\emergencystretch by 45
+\fi
+
+% Use @smallbook to reset parameters for 7x9.5 format (or else 7x9.25)
+\def\smallbook{
+
+% These values for secheadingskip and subsecheadingskip are
+% experiments. RJC 7 Aug 1992
+\global\secheadingskip = 17pt plus 6pt minus 3pt
+\global\subsecheadingskip = 14pt plus 6pt minus 3pt
+
+\global\lispnarrowing = 0.3in
+\setleading{12pt}
+\advance\topskip by -1cm
+\global\parskip 3pt plus 1pt
+\global\hsize = 5in
+\global\vsize=7.5in
+\global\tolerance=700
+\global\hfuzz=1pt
+\global\contentsrightmargin=0pt
+
+\global\pagewidth=\hsize
+\global\pageheight=\vsize
+
+\global\let\smalllisp=\smalllispx
+\global\let\smallexample=\smalllispx
+\global\def\Esmallexample{\Esmalllisp}
+}
+
+% Use @afourpaper to print on European A4 paper.
+\def\afourpaper{
+\global\tolerance=700
+\global\hfuzz=1pt
+\setleading{12pt}
+\global\parskip 15pt plus 1pt
+
+\global\vsize= 53\baselineskip
+\advance\vsize by \topskip
+%\global\hsize= 5.85in % A4 wide 10pt
+\global\hsize= 6.5in
+\global\outerhsize=\hsize
+\global\advance\outerhsize by 0.5in
+\global\outervsize=\vsize
+\global\advance\outervsize by 0.6in
+
+\global\pagewidth=\hsize
+\global\pageheight=\vsize
+}
+
+% Define macros to output various characters with catcode for normal text.
+\catcode`\"=\other
+\catcode`\~=\other
+\catcode`\^=\other
+\catcode`\_=\other
+\catcode`\|=\other
+\catcode`\<=\other
+\catcode`\>=\other
+\catcode`\+=\other
+\def\normaldoublequote{"}
+\def\normaltilde{~}
+\def\normalcaret{^}
+\def\normalunderscore{_}
+\def\normalverticalbar{|}
+\def\normalless{<}
+\def\normalgreater{>}
+\def\normalplus{+}
+
+% This macro is used to make a character print one way in the \tt font
+% where it can probably just be output, and another way in other fonts,
+% where something hairier probably needs to be done.
+%
+% #1 is what to print if we are indeed using \tt; #2 is what to print
+% otherwise. Since all the Computer Modern typewriter fonts have zero
+% interword stretch (and shrink), and it is reasonable to expect all
+% typewriter fonts to have this, we can check that font parameter.
+%
+\def\ifusingtt#1#2{\ifdim \fontdimen3\the\font=0pt #1\else #2\fi}
+
+% Turn off all special characters except @
+% (and those which the user can use as if they were ordinary).
+% Most of these we simply print from the \tt font, but for some, we can
+% use math or other variants that look better in normal text.
+
+\catcode`\"=\active
+\def\activedoublequote{{\tt \char '042}}
+\let"=\activedoublequote
+\catcode`\~=\active
+\def~{{\tt \char '176}}
+\chardef\hat=`\^
+\catcode`\^=\active
+\def^{{\tt \hat}}
+
+\catcode`\_=\active
+\def_{\ifusingtt\normalunderscore\_}
+% Subroutine for the previous macro.
+\def\_{\lvvmode \kern.06em \vbox{\hrule width.3em height.1ex}}
+
+% \lvvmode is equivalent in function to \leavevmode.
+% Using \leavevmode runs into trouble when written out to
+% an index file due to the expansion of \leavevmode into ``\unhbox
+% \voidb@x'' ---which looks to TeX like ``\unhbox \voidb\x'' due to our
+% magic tricks with @.
+\def\lvvmode{\vbox to 0pt{}}
+
+\catcode`\|=\active
+\def|{{\tt \char '174}}
+\chardef \less=`\<
+\catcode`\<=\active
+\def<{{\tt \less}}
+\chardef \gtr=`\>
+\catcode`\>=\active
+\def>{{\tt \gtr}}
+\catcode`\+=\active
+\def+{{\tt \char 43}}
+%\catcode 27=\active
+%\def^^[{$\diamondsuit$}
+
+% Used sometimes to turn off (effectively) the active characters
+% even after parsing them.
+\def\turnoffactive{\let"=\normaldoublequote
+\let~=\normaltilde
+\let^=\normalcaret
+\let_=\normalunderscore
+\let|=\normalverticalbar
+\let<=\normalless
+\let>=\normalgreater
+\let+=\normalplus}
+
+% Set up an active definition for =, but don't enable it most of the time.
+{\catcode`\==\active
+\global\def={{\tt \char 61}}}
+
+\catcode`\@=0
+
+% \rawbackslashxx output one backslash character in current font
+\global\chardef\rawbackslashxx=`\\
+%{\catcode`\\=\other
+%@gdef@rawbackslashxx{\}}
+
+% \rawbackslash redefines \ as input to do \rawbackslashxx.
+{\catcode`\\=\active
+@gdef@rawbackslash{@let\=@rawbackslashxx }}
+
+% \normalbackslash outputs one backslash in fixed width font.
+\def\normalbackslash{{\tt\rawbackslashxx}}
+
+% Say @foo, not \foo, in error messages.
+\escapechar=`\@
+
+% \catcode 17=0 % Define control-q
+\catcode`\\=\active
+
+% If a .fmt file is being used, we don't want the `\input texinfo' to show up.
+% That is what \eatinput is for; after that, the `\' should revert to printing
+% a backslash.
+%
+@gdef@eatinput input texinfo{@fixbackslash}
+@global@let\ = @eatinput
+
+% On the other hand, perhaps the file did not have a `\input texinfo'. Then
+% the first `\{ in the file would cause an error. This macro tries to fix
+% that, assuming it is called before the first `\' could plausibly occur.
+%
+@gdef@fixbackslash{@ifx\@eatinput @let\ = @normalbackslash @fi}
+
+%% These look ok in all fonts, so just make them not special. The @rm below
+%% makes sure that the current font starts out as the newly loaded cmr10
+@catcode`@$=@other @catcode`@%=@other @catcode`@&=@other @catcode`@#=@other
+
+@textfonts
+@rm
+
+@c Local variables:
+@c page-delimiter: "^\\\\message"
+@c End:
diff --git a/tools/bison++/types.h b/tools/bison++/types.h
new file mode 100644
index 000000000..a4aa0a750
--- /dev/null
+++ b/tools/bison++/types.h
@@ -0,0 +1,27 @@
+/* Define data type for representing bison's grammar input as it is parsed,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+typedef
+ struct shorts
+ {
+ struct shorts *next;
+ short value;
+ }
+ shorts;
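
The shorts struct above is the one-field cons cell that, per the file's own header comment, holds lists of short values while bison's grammar input is parsed. Below is a minimal sketch of building and walking such a list, assuming standard malloc; the helper names are invented for illustration and are not part of bison++:

    #include <stdlib.h>
    #include "types.h"

    /* Prepend a value; the new cell becomes the head of the list.
       (No out-of-memory check in this sketch.)  */
    static shorts *
    shorts_prepend (shorts *list, short value)
    {
      shorts *p = (shorts *) malloc (sizeof (shorts));
      p->value = value;
      p->next = list;
      return p;
    }

    /* Walk the chain, counting cells.  */
    static int
    shorts_length (shorts *list)
    {
      int n = 0;
      for (; list != 0; list = list->next)
        n++;
      return n;
    }
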
diff --git a/tools/bison++/version.cc b/tools/bison++/version.cc
new file mode 100644
index 000000000..006c96a83
--- /dev/null
+++ b/tools/bison++/version.cc
@@ -0,0 +1 @@
+char *version_string = "bison++ Version 1.21.9-1, adapted from GNU bison by coetmeur@icdc.fr\nMaintained by Magnus Ekdahl <magnus@debian.org>\n";
diff --git a/tools/bison++/version.texi b/tools/bison++/version.texi
new file mode 100644
index 000000000..879d70c7b
--- /dev/null
+++ b/tools/bison++/version.texi
@@ -0,0 +1,3 @@
+@set UPDATED 17 February 2002
+@set EDITION 2.21.5
+@set VERSION 2.21.5
diff --git a/tools/bison++/vmsgetargs.c b/tools/bison++/vmsgetargs.c
new file mode 100644
index 000000000..c7e738bb3
--- /dev/null
+++ b/tools/bison++/vmsgetargs.c
@@ -0,0 +1,160 @@
+/* VMS version of getargs; Uses DCL command parsing.
+ Copyright (C) 1989 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include <ctype.h>
+#include <stdio.h>
+#include "files.h"
+
+/*
+ * VMS version of getargs(): Uses DCL command parsing
+ * (argc and argv are ignored)
+ */
+int verboseflag;
+int definesflag;
+int debugflag;
+int nolinesflag;
+extern int fixed_outfiles;
+extern char * version_string;
+
+/* Allocate storage and initialize, since bison uses them elsewhere. */
+char *spec_name_prefix;
+char *spec_file_prefix;
+
+getargs(argc,argv)
+ int argc;
+ char *argv[];
+{
+ register char *cp;
+ static char Input_File[256];
+ static char output_spec[256], name_prefix_spec[256], file_prefix_spec[256];
+ extern char *infile;
+
+ verboseflag = 0;
+ definesflag = 0;
+ debugflag = 0;
+ fixed_outfiles = 0;
+ nolinesflag = 0;
+ /*
+ * Check for /VERBOSE qualifier
+ */
+ if (cli_present("BISON$VERBOSE")) verboseflag = 1;
+ /*
+ * Check for /DEFINES qualifier
+ */
+ if (cli_present("BISON$DEFINES")) definesflag = 1;
+ /*
+ * Check for /FIXED_OUTFILES qualifier
+ */
+ if (cli_present("BISON$FIXED_OUTFILES")) fixed_outfiles = 1;
+ if (cli_present("BISON$YACC")) fixed_outfiles = 1;
+ /*
+ * Check for /VERSION qualifier
+ */
+ if (cli_present("BISON$VERSION")) printf("%s",version_string);
+ /*
+ * Check for /NOLINES qualifier
+ */
+ if (cli_present("BISON$NOLINES")) nolinesflag = 1;
+ /*
+ * Check for /DEBUG qualifier
+ */
+ if (cli_present("BISON$DEBUG")) debugflag = 1;
+ /*
+ * Get the filename
+ */
+ cli_get_value("BISON$INFILE", Input_File, sizeof(Input_File));
+ /*
+ * Lowercaseify the input filename
+ */
+ cp = Input_File;
+ while(*cp)
+ {
+ if (isupper(*cp)) *cp = tolower(*cp);
+ cp++;
+ }
+ infile = Input_File;
+ /*
+ * Get the output file
+ */
+ if (cli_present("BISON$OUTPUT"))
+ {
+ cli_get_value("BISON$OUTPUT", output_spec, sizeof(output_spec));
+ for (cp = spec_outfile = output_spec; *cp; cp++)
+ if (isupper(*cp))
+ *cp = tolower(*cp);
+ }
+ /*
+   * Get the file prefix
+ */
+ if (cli_present("BISON$FILE_PREFIX"))
+ {
+ cli_get_value("BISON$FILE_PREFIX", file_prefix_spec,
+ sizeof(file_prefix_spec));
+ for (cp = spec_file_prefix = file_prefix_spec; *cp; cp++)
+ if (isupper(*cp))
+ *cp = tolower(*cp);
+ }
+ /*
+   * Get the name prefix
+ */
+ if (cli_present("BISON$NAME_PREFIX"))
+ {
+ cli_get_value("BISON$NAME_PREFIX", name_prefix_spec,
+ sizeof(name_prefix_spec));
+ for (cp = spec_name_prefix = name_prefix_spec; *cp; cp++)
+ if (isupper(*cp))
+ *cp = tolower(*cp);
+ }
+}
+
+/************ DCL PARSING ROUTINES **********/
+
+/*
+ * See if "NAME" is present
+ */
+int
+cli_present(Name)
+ char *Name;
+{
+ struct {int Size; char *Ptr;} Descr;
+
+ Descr.Ptr = Name;
+ Descr.Size = strlen(Name);
+ return((cli$present(&Descr) & 1) ? 1 : 0);
+}
+
+/*
+ * Get value of "NAME"
+ */
+int
+cli_get_value(Name,Buffer,Size)
+ char *Name;
+ char *Buffer;
+{
+ struct {int Size; char *Ptr;} Descr1,Descr2;
+
+ Descr1.Ptr = Name;
+ Descr1.Size = strlen(Name);
+ Descr2.Ptr = Buffer;
+ Descr2.Size = Size-1;
+ if (cli$get_value(&Descr1,&Descr2,&Descr2.Size) & 1) {
+ Buffer[Descr2.Size] = 0;
+ return(1);
+ }
+ return(0);
+}
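
getargs() above maps DCL qualifiers onto the same flag variables the rest of bison reads, going through two small wrappers that package a qualifier name as a {length, pointer} descriptor before calling the cli$ services. Below is a hedged sketch of how one more qualifier could be wired through those same wrappers, written as if appended to vmsgetargs.c (so <ctype.h> and the wrappers are already in scope); the BISON$GRAPH qualifier names and the graphflag variable are invented for illustration and are not part of bison++:

    int graphflag;                 /* hypothetical flag, set from a /GRAPH qualifier */
    static char graph_spec[256];   /* hypothetical value buffer */

    static void
    check_graph_qualifier ()
    {
      register char *cp;

      if (cli_present ("BISON$GRAPH"))      /* invented qualifier */
        graphflag = 1;

      if (cli_present ("BISON$GRAPH_FILE")) /* invented qualifier */
        {
          cli_get_value ("BISON$GRAPH_FILE", graph_spec, sizeof (graph_spec));
          /* Lowercase the value, as getargs() does for its file names.  */
          for (cp = graph_spec; *cp; cp++)
            if (isupper (*cp))
              *cp = tolower (*cp);
        }
    }
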
diff --git a/tools/bison++/vmshlp.mar b/tools/bison++/vmshlp.mar
new file mode 100644
index 000000000..637d170d5
--- /dev/null
+++ b/tools/bison++/vmshlp.mar
@@ -0,0 +1,42 @@
+;/* Macro help routines for the BISON/VMS program
+; Gabor Karsai, Vanderbilt University
+;
+;BISON is distributed in the hope that it will be useful, but WITHOUT ANY
+;WARRANTY. No author or distributor accepts responsibility to anyone
+;for the consequences of using it or for whether it serves any
+;particular purpose or works at all, unless he says so in writing.
+;Refer to the BISON General Public License for full details.
+;
+;Everyone is granted permission to copy, modify and redistribute BISON,
+;but only under the conditions described in the BISON General Public
+;License. A copy of this license is supposed to have been given to you
+;along with BISON so you can know your rights and responsibilities. It
+;should be in a file named COPYING. Among other things, the copyright
+;notice and this notice must be preserved on all copies.
+;
+; In other words, you are welcome to use, share and improve this program.
+; You are forbidden to forbid anyone else to use, share and improve
+; what you give them. Help stamp out software-hoarding! */
+;
+ .psect vmshlp pic,usr,rel,ovr,shr,long,exe,nowrt
+
+alloca::
+ .word 0
+ subl2 ^X4(ap),sp
+ movl ^X10(fp),r1
+ movq ^X8(fp),ap
+ bicl2 #03,sp
+ addl2 #^X1c,sp
+ movl sp,r0
+ jmp (r1)
+
+bcopy::
+ .word ^X0e00
+ movl ^X04(ap),r11
+ movl ^X08(ap),r10
+ movl ^X0c(ap),r9
+ brb 1$
+2$: movb (r10)+,(r11)+
+1$: sobgeq r9,2$
+ ret
+ .end
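
For readers who do not speak VAX MACRO: the bcopy entry above is a plain byte-copy loop, and the alloca entry carves space out of the caller's stack frame, which has no portable C equivalent. Going by the register usage as written (the first argument slot is stored through via r11, the second is read via r10, the third is the byte count in r9), the copy loop behaves like the following C sketch, offered as a reading aid rather than a replacement:

    /* 2$: movb (r10)+,(r11)+   copy one byte and advance both pointers
       1$: sobgeq r9,2$         decrement the count, loop while it is still >= 0  */
    void
    bcopy_sketch (char *to, char *from, int count)
    {
      while (count-- > 0)
        *to++ = *from++;
    }
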
diff --git a/tools/bison++/warshall.cc b/tools/bison++/warshall.cc
new file mode 100644
index 000000000..576a3a4e9
--- /dev/null
+++ b/tools/bison++/warshall.cc
@@ -0,0 +1,115 @@
+/* Generate transitive closure of a matrix,
+ Copyright (C) 1984, 1989 Free Software Foundation, Inc.
+
+This file is part of Bison, the GNU Compiler Compiler.
+
+Bison is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+Bison is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Bison; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include <stdio.h>
+#include "system.h"
+#include "machine.h"
+
+
+/* Given an n by n matrix of bits R, modify its contents
+   to be the transitive closure of what was given. */
+
+void
+TC(unsigned* R, int n)
+{
+ register int rowsize;
+ register unsigned mask;
+ register unsigned *rowj;
+ register unsigned *rp;
+ register unsigned *rend;
+ register unsigned *ccol;
+
+ unsigned *relend;
+ unsigned *cword;
+ unsigned *rowi;
+
+ rowsize = WORDSIZE(n) * sizeof(unsigned);
+ relend = (unsigned *) ((char *) R + (n * rowsize));
+
+ cword = R;
+ mask = 1;
+ rowi = R;
+ while (rowi < relend)
+ {
+ ccol = cword;
+ rowj = R;
+
+ while (rowj < relend)
+ {
+ if (*ccol & mask)
+ {
+ rp = rowi;
+ rend = (unsigned *) ((char *) rowj + rowsize);
+
+ while (rowj < rend)
+ *rowj++ |= *rp++;
+ }
+ else
+ {
+ rowj = (unsigned *) ((char *) rowj + rowsize);
+ }
+
+ ccol = (unsigned *) ((char *) ccol + rowsize);
+ }
+
+ mask <<= 1;
+ if (mask == 0)
+ {
+ mask = 1;
+ cword++;
+ }
+
+ rowi = (unsigned *) ((char *) rowi + rowsize);
+ }
+}
+
+
+/* Reflexive Transitive Closure. Same as TC
+ and then set all the bits on the diagonal of R. */
+
+void
+RTC(unsigned* R, int n)
+{
+ register int rowsize;
+ register unsigned mask;
+ register unsigned *rp;
+ register unsigned *relend;
+
+ TC(R, n);
+
+ rowsize = WORDSIZE(n) * sizeof(unsigned);
+ relend = (unsigned *) ((char *) R + n*rowsize);
+
+ mask = 1;
+ rp = R;
+ while (rp < relend)
+ {
+ *rp |= mask;
+
+ mask <<= 1;
+ if (mask == 0)
+ {
+ mask = 1;
+ rp++;
+ }
+
+ rp = (unsigned *) ((char *) rp + rowsize);
+ }
+}
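
TC() above is Warshall's algorithm on a relation stored as n rows of packed bits: for each row i, the (cword, mask) pair selects bit i, and every row j whose bit i is set absorbs row i by OR-ing row i's words into row j; RTC() then adds the diagonal. Below is a minimal driver sketch, assuming machine.h's WORDSIZE rounds n bits up to whole unsigned words; machine.h is not part of this hunk, so the packing macros here are local stand-ins and may differ from the real header:

    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_UNSIGNED (sizeof (unsigned) * 8)
    #define ROWWORDS(n)    (((n) + BITS_PER_UNSIGNED - 1) / BITS_PER_UNSIGNED)
    #define SETBIT(row, j) ((row)[(j) / BITS_PER_UNSIGNED] |= 1u << ((j) % BITS_PER_UNSIGNED))
    #define GETBIT(row, j) (((row)[(j) / BITS_PER_UNSIGNED] >> ((j) % BITS_PER_UNSIGNED)) & 1u)

    void TC (unsigned *R, int n);    /* from warshall.cc */
    void RTC (unsigned *R, int n);

    int
    main ()
    {
      int n = 4;
      int words = ROWWORDS (n);      /* unsigned words per row */
      unsigned *R = (unsigned *) calloc (n * words, sizeof (unsigned));
      int i, j;

      /* Edges 0->1, 1->2, 2->3.  */
      SETBIT (R + 0 * words, 1);
      SETBIT (R + 1 * words, 2);
      SETBIT (R + 2 * words, 3);

      TC (R, n);                     /* row 0 now also reaches 2 and 3, row 1 reaches 3 */

      for (i = 0; i < n; i++)
        {
          for (j = 0; j < n; j++)
            putchar (GETBIT (R + i * words, j) ? '1' : '0');
          putchar ('\n');
        }

      free (R);
      return 0;
    }
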