Fix errors in the lexer module
@@ -239,8 +239,10 @@ namespace elna::gcc
    {
        visit(static_cast<boot::unit *>(program));

        tree declaration_type = build_function_type_list(integer_type_node, elna_int_type_node,
                build_global_pointer_type(build_global_pointer_type(elna_char_type_node)), NULL_TREE);
        tree declaration_type = build_function_type_list(elna_int_type_node,
                elna_int_type_node,
                build_global_pointer_type(build_global_pointer_type(elna_char_type_node)),
                NULL_TREE);
        tree fndecl = build_fn_decl("main", declaration_type);

        tree resdecl = build_decl(UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, integer_type_node);

@@ -8,6 +8,8 @@ proc free(ptr: Pointer); extern;
proc calloc(nmemb: Word, size: Word) -> Pointer; extern;
proc realloc(ptr: Pointer, size: Word) -> Pointer; extern;

proc atoi(str: ^Char) -> Int; extern;

proc exit(code: Int) -> !; extern;

end.

@@ -3,7 +3,7 @@
  obtain one at https://mozilla.org/MPL/2.0/. *)
module;

proc memset(ptr: Pointer, c: Int, n: Int) -> ^Char; extern;
proc memset(ptr: Pointer, c: Int, n: Word) -> ^Char; extern;
proc memcpy(dst: Pointer, src: Pointer, n: Word); extern;

proc strcmp(s1: ^Char, s2: ^Char) -> Int; extern;

@@ -6,12 +6,12 @@ module;
import cstdio, cstring, cctype, cstdlib, common;

const
  CHUNK_SIZE := 85536;
  CHUNK_SIZE := 85536u;

type
  (*
   * Classification table assigns each possible character to a group (class). All
   * characters of the same group a handled equivalently.
   * characters of the same group are handled equivalently.
   *
   * Classification:
   *)
@@ -88,7 +88,7 @@ type
    current: BufferPosition
  end;
  LexerKind* = (
    eof,
    unknown,
    identifier,
    _if,
    _then,
@@ -99,7 +99,7 @@ type
    _proc,
    _begin,
    _end,
    _xor,
    _extern,
    _const,
    _var,
    _case,
@@ -113,13 +113,15 @@ type
    null,
    and,
    _or,
    tilde,
    _xor,
    not,
    _return,
    _defer,
    range,
    _cast,
    shift_left,
    shift_right,
    left_paren,
    right_paren,
    lefts_quare,
    left_square,
    right_square,
    greater_equal,
    less_equal,
@@ -132,7 +134,7 @@ type
    comma,
    plus,
    minus,
    asterisk,
    multiplication,
    division,
    remainder,
    assignment,
@@ -144,10 +146,10 @@ type
    word,
    character,
    string,
    from,
    pointer,
    array,
    _defer,
    exclamation,
    arrow,
    trait,
    _program,
    _module,
    _import
@@ -322,7 +324,7 @@ end;
(* Reached the end of file. *)
proc transition_action_eof(lexer: ^Lexer, token: ^LexerToken);
begin
  token^.kind := LexerKind.eof
  token^.kind := LexerKind.unknown
end;

proc increment(position: ^BufferPosition);
@@ -374,9 +376,6 @@ begin
  if (lexer^.start.iterator^ = '>') & (lexer^.current.iterator^ = '=') then
    token^.kind := LexerKind.greater_equal
  end;
  if (lexer^.start.iterator^ = '.') & (lexer^.current.iterator^ = '.') then
    token^.kind := LexerKind.range
  end;
  if (lexer^.start.iterator^ = ':') & (lexer^.current.iterator^ = '=') then
    token^.kind := LexerKind.assignment
  end;
@@ -409,16 +408,16 @@ begin
  if lexer^.start.iterator^ = '"' then
    text_length := cast(lexer^.current.iterator - lexer^.start.iterator + 1: Word);

    token^.stringKind := String(malloc(text_length), text_length);
    memcpy(@token^.stringKind.ptr, lexer^.start.iterator, text_length);
    token^.value.stringKind := String(cast(malloc(text_length): ^Char), text_length);
    memcpy(cast(token^.value.stringKind.ptr: Pointer), cast(lexer^.start.iterator: Pointer), text_length);

    token^.kind := LexerKind.character
  end;
  if lexer^.start.iterator^ = "'" then
  if lexer^.start.iterator^ = '\'' then
    text_length := cast(lexer^.current.iterator - lexer^.start.iterator + 1: Word);

    token^.stringKind := String(malloc(text_length), text_length);
    memcpy(@token^.stringKind.ptr, lexer^.start.iterator, text_length);
    token^.value.stringKind := String(cast(malloc(text_length): ^Char), text_length);
    memcpy(cast(token^.value.stringKind.ptr: Pointer), cast(lexer^.start.iterator: Pointer), text_length);

    token^.kind := LexerKind.string
  end;
@@ -430,8 +429,8 @@ proc transition_action_key_id(lexer: ^Lexer, token: ^LexerToken);
begin
  token^.kind := LexerKind.identifier;

  token^.identifierKind[1] := cast(lexer^.current.iterator - lexer^.start.iterator: Char);
  memcpy(@token^.identifierKind[2], lexer^.start.iterator, cast(token^.identifierKind[1]: Word));
  token^.value.identifierKind[1] := cast(lexer^.current.iterator - lexer^.start.iterator: Char);
  memcpy(cast(@token^.value.identifierKind[2]: Pointer), cast(lexer^.start.iterator: Pointer), cast(token^.value.identifierKind[1]: Word));

  if compare_keyword("program", lexer^.start, lexer^.current.iterator) then
    token^.kind := LexerKind._program
@@ -502,28 +501,19 @@ begin
  if compare_keyword("OF", lexer^.start, lexer^.current.iterator) then
    token^.kind := LexerKind._of
  end;
  if compare_keyword("FROM", lexer^.start, lexer^.current.iterator) then
    token^.kind := LexerKind.from
  end;
  if compare_keyword("module", lexer^.start, lexer^.current.iterator) then
    token^.kind := LexerKind._module
  end;
  if compare_keyword("xor", lexer^.start, lexer^.current.iterator) then
    token^.kind := LexerKind._xor
  end;
  if compare_keyword("POINTER", lexer^.start, lexer^.current.iterator) then
    token^.kind := LexerKind.pointer
  end;
  if compare_keyword("ARRAY", lexer^.start, lexer^.current.iterator) then
    token^.kind := LexerKind.array
  end;
  if compare_keyword("TRUE", lexer^.start, lexer^.current.iterator) then
    token^.kind := LexerKind.boolean;
    token^.booleanKind := true
    token^.value.booleanKind := true
  end;
  if compare_keyword("FALSE", lexer^.start, lexer^.current.iterator) then
    token^.kind := LexerKind.boolean;
    token^.booleanKind := false
    token^.value.booleanKind := false
  end
end;

@@ -541,7 +531,7 @@ begin
    token^.kind := LexerKind.comma
  end;
  if lexer^.current.iterator^ = '~' then
    token^.kind := LexerKind.tilde
    token^.kind := LexerKind.not
  end;
  if lexer^.current.iterator^ = ')' then
    token^.kind := LexerKind.right_paren
@@ -562,7 +552,7 @@ begin
    token^.kind := LexerKind.plus
  end;
  if lexer^.current.iterator^ = '*' then
    token^.kind := LexerKind.asterisk
    token^.kind := LexerKind.multiplication
  end;
  if lexer^.current.iterator^ = '/' then
    token^.kind := LexerKind.division
@@ -576,25 +566,24 @@ begin
  if lexer^.current.iterator^ = '|' then
    token^.kind := LexerKind.pipe
  end;
  increment(@lexer^.current.iterator)
  increment(@lexer^.current)
end;

(* Handle an integer literal. *)
proc transition_action_integer(lexer: ^Lexer, token: ^LexerToken);
var
  buffer: String;
  integer_length: Int;
  integer_length: Word;
  found: Bool;
begin
  token^.kind := LexerKind.integer;

  integer_length := lexer^.current.iterator - lexer^.start.iterator;
  memset(@token^.identifierKind, 0, #size(Identifier));
  memcpy(@token^.identifierKind[1], lexer^.start.iterator, integer_length);
  integer_length := cast(lexer^.current.iterator - lexer^.start.iterator: Word);
  memset(cast(token^.value.identifierKind.ptr: Pointer), 0, #size(Identifier));
  memcpy(cast(@token^.value.identifierKind[1]: Pointer), cast(lexer^.start.iterator: Pointer), integer_length);

  buffer := InitStringCharStar(@token^.identifierKind[1]);
  token^.integerKind := StringToInteger(buffer, 10, found);
  buffer := KillString(buffer)
  token^.value.identifierKind[cast(token^.value.identifierKind[1]: Int) + 2] := '\0';
  token^.value.integerKind := atoi(@token^.value.identifierKind[2])
end;

proc set_default_transition(current_state: TransitionState, default_action: TransitionAction, next_state: TransitionState) -> Int;
@@ -893,10 +882,10 @@ end;
proc lexer_make*(lexer: ^Lexer, input: ^FILE);
begin
  lexer^.input := input;
  lexer^.length := 0;
  lexer^.length := 0u;

  lexer^.buffer := malloc(CHUNK_SIZE);
  memset(lexer^.buffer, 0, CHUNK_SIZE);
  lexer^.buffer := cast(malloc(CHUNK_SIZE): ^Char);
  memset(cast(lexer^.buffer: Pointer), 0, CHUNK_SIZE);
  lexer^.size := CHUNK_SIZE
end;

@@ -937,10 +926,10 @@ proc lexer_lex*(lexer: ^Lexer) -> LexerToken;
var
  result: LexerToken;
begin
  if lexer^.length = 0 then
    lexer^.length := ReadNBytes(lexer^.input, CHUNK_SIZE, lexer^.buffer);
    lexer^.current.location.column := 1;
    lexer^.current.location.line := 1;
  if lexer^.length = 0u then
    lexer^.length := fread(cast(lexer^.buffer: Pointer), CHUNK_SIZE, 1u, lexer^.input);
    lexer^.current.location.column := 1u;
    lexer^.current.location.line := 1u;
    lexer^.current.iterator := lexer^.buffer
  end;
  lexer^.start := lexer^.current;
@@ -951,7 +940,7 @@ end;

proc lexer_destroy*(lexer: ^Lexer);
begin
  free(lexer^.buffer)
  free(cast(lexer^.buffer: Pointer))
end;

proc lexer_initialize();

source/main.elna
@@ -3,87 +3,9 @@
  obtain one at https://mozilla.org/MPL/2.0/. *)
program;

import cstdio, cctype, common, command_line_interface, Lexer;

const
  SEEK_SET* := 0;
  SEEK_CUR* := 1;
  SEEK_END* := 2;
  STDIN := 0;
  STDOUT := 1;
  STDERR := 2;
import cstdio, cctype, common, command_line_interface, lexer;

type
  TokenKind* = (
    unknown,
    identifier,
    _if,
    _then,
    _else,
    _elsif,
    _while,
    _do,
    _proc,
    _begin,
    _end,
    _extern,
    _const,
    _var,
    _case,
    _of,
    _type,
    _record,
    _union,
    pipe,
    to,
    boolean,
    null,
    and,
    _or,
    not,
    _return,
    _cast,
    shift_left,
    shift_right,
    left_paren,
    right_paren,
    left_square,
    right_square,
    greater_equal,
    less_equal,
    greater_than,
    less_than,
    not_equal,
    equal,
    semicolon,
    dot,
    comma,
    plus,
    minus,
    multiplication,
    division,
    remainder,
    assignment,
    colon,
    hat,
    at,
    comment,
    integer,
    word,
    character,
    string,
    _defer,
    exclamation,
    arrow,
    trait,
    _program,
    _module,
    _import
  );
  Location* = record
    first: TextLocation;
    last: TextLocation
  end;
  SourceFile* = record
    buffer: [1024]Char;
    handle: ^FILE;
@@ -104,14 +26,13 @@ type
    head: proc(Pointer) -> Char
  end;
  Token* = record
    kind: TokenKind;
    kind: LexerKind;
    value: union
      int_value: Int;
      string: String;
      boolean_value: Bool;
      char_value: Char
    end;
    location: Location
    end
  end;
  Tokenizer* = record
    length: Word;
@@ -421,63 +342,63 @@ var
  current_token: Token;
begin
  if token_content = "if" then
    current_token.kind := TokenKind._if
    current_token.kind := LexerKind._if
  elsif token_content = "then" then
    current_token.kind := TokenKind._then
    current_token.kind := LexerKind._then
  elsif token_content = "else" then
    current_token.kind := TokenKind._else
    current_token.kind := LexerKind._else
  elsif token_content = "elsif" then
    current_token.kind := TokenKind._elsif
    current_token.kind := LexerKind._elsif
  elsif token_content = "while" then
    current_token.kind := TokenKind._while
    current_token.kind := LexerKind._while
  elsif token_content = "do" then
    current_token.kind := TokenKind._do
    current_token.kind := LexerKind._do
  elsif token_content = "proc" then
    current_token.kind := TokenKind._proc
    current_token.kind := LexerKind._proc
  elsif token_content = "begin" then
    current_token.kind := TokenKind._begin
    current_token.kind := LexerKind._begin
  elsif token_content = "end" then
    current_token.kind := TokenKind._end
    current_token.kind := LexerKind._end
  elsif token_content = "extern" then
    current_token.kind := TokenKind._extern
    current_token.kind := LexerKind._extern
  elsif token_content = "const" then
    current_token.kind := TokenKind._const
    current_token.kind := LexerKind._const
  elsif token_content = "var" then
    current_token.kind := TokenKind._var
    current_token.kind := LexerKind._var
  elsif token_content = "case" then
    current_token.kind := TokenKind._case
    current_token.kind := LexerKind._case
  elsif token_content = "of" then
    current_token.kind := TokenKind._of
    current_token.kind := LexerKind._of
  elsif token_content = "type" then
    current_token.kind := TokenKind._type
    current_token.kind := LexerKind._type
  elsif token_content = "record" then
    current_token.kind := TokenKind._record
    current_token.kind := LexerKind._record
  elsif token_content = "union" then
    current_token.kind := TokenKind._union
    current_token.kind := LexerKind._union
  elsif token_content = "true" then
    current_token.kind := TokenKind.boolean;
    current_token.kind := LexerKind.boolean;
    current_token.value.boolean_value := true
  elsif token_content = "false" then
    current_token.kind := TokenKind.boolean;
    current_token.kind := LexerKind.boolean;
    current_token.value.boolean_value := false
  elsif token_content = "nil" then
    current_token.kind := TokenKind.null
    current_token.kind := LexerKind.null
  elsif token_content = "or" then
    current_token.kind := TokenKind._or
    current_token.kind := LexerKind._or
  elsif token_content = "return" then
    current_token.kind := TokenKind._return
    current_token.kind := LexerKind._return
  elsif token_content = "cast" then
    current_token.kind := TokenKind._cast
    current_token.kind := LexerKind._cast
  elsif token_content = "defer" then
    current_token.kind := TokenKind._defer
    current_token.kind := LexerKind._defer
  elsif token_content = "program" then
    current_token.kind := TokenKind._program
    current_token.kind := LexerKind._program
  elsif token_content = "module" then
    current_token.kind := TokenKind._module
    current_token.kind := LexerKind._module
  elsif token_content = "import" then
    current_token.kind := TokenKind._import
    current_token.kind := LexerKind._import
  else
    current_token.kind := TokenKind.identifier;
    current_token.kind := LexerKind.identifier;
    current_token.value.string := string_dup(token_content)
  end;

@@ -500,7 +421,7 @@ var
  current_token: Token;
  first_char: Char;
begin
  current_token.kind := TokenKind.unknown;
  current_token.kind := LexerKind.unknown;

  first_char := source_code_head(source_code);

@@ -511,158 +432,158 @@ begin
    source_code_advance(@source_code);
    lexer_identifier(@source_code, token_buffer);

    current_token.kind := TokenKind.trait;
    current_token.kind := LexerKind.trait;
    current_token.value.string := string_dup(string_buffer_clear(token_buffer))
  elsif isdigit(cast(first_char: Int)) <> 0 then
    lexer_number(@source_code, @current_token.value.int_value);

    if source_code_expect(@source_code, 'u') then
      current_token.kind := TokenKind.word;
      current_token.kind := LexerKind.word;
        source_code_advance(@source_code)
    else
      current_token.kind := TokenKind.integer
      current_token.kind := LexerKind.integer
    end
  elsif first_char = '(' then
    source_code_advance(@source_code);

    if source_code_empty(@source_code) then
      current_token.kind := TokenKind.left_paren
      current_token.kind := LexerKind.left_paren
    elsif source_code_head(source_code) = '*' then
      source_code_advance(@source_code);

      if lexer_comment(@source_code, token_buffer) then
        current_token.value.string := string_dup(string_buffer_clear(token_buffer));
        current_token.kind := TokenKind.comment
        current_token.kind := LexerKind.comment
      else
        current_token.kind := TokenKind.unknown
        current_token.kind := LexerKind.unknown
      end
    else
      current_token.kind := TokenKind.left_paren
      current_token.kind := LexerKind.left_paren
    end
  elsif first_char = ')' then
    current_token.kind := TokenKind.right_paren;
    current_token.kind := LexerKind.right_paren;
    source_code_advance(@source_code)
  elsif first_char = '\'' then
    source_code_advance(@source_code);

    if lexer_character(@source_code, @current_token.value.char_value) & source_code_expect(@source_code, '\'') then
      current_token.kind := TokenKind.character;
      current_token.kind := LexerKind.character;
      source_code_advance(@source_code)
    else
      current_token.kind := TokenKind.unknown
      current_token.kind := LexerKind.unknown
    end
  elsif first_char = '"' then
    source_code_advance(@source_code);

    if lexer_string(@source_code, token_buffer) then
      current_token.kind := TokenKind.string;
      current_token.kind := LexerKind.string;
      current_token.value.string := string_dup(string_buffer_clear(token_buffer))
    else
      current_token.kind := TokenKind.unknown
      current_token.kind := LexerKind.unknown
    end
  elsif first_char = '[' then
    current_token.kind := TokenKind.left_square;
    current_token.kind := LexerKind.left_square;
    source_code_advance(@source_code)
  elsif first_char = ']' then
    current_token.kind := TokenKind.right_square;
    current_token.kind := LexerKind.right_square;
    source_code_advance(@source_code)
  elsif first_char = '>' then
    source_code_advance(@source_code);

    if source_code_empty(@source_code) then
      current_token.kind := TokenKind.greater_than
      current_token.kind := LexerKind.greater_than
    elsif source_code_head(source_code) = '=' then
      current_token.kind := TokenKind.greater_equal;
      current_token.kind := LexerKind.greater_equal;
      source_code_advance(@source_code)
    elsif source_code_head(source_code) = '>' then
      current_token.kind := TokenKind.shift_right;
      current_token.kind := LexerKind.shift_right;
      source_code_advance(@source_code)
    else
      current_token.kind := TokenKind.greater_than
      current_token.kind := LexerKind.greater_than
    end
  elsif first_char = '<' then
    source_code_advance(@source_code);

    if source_code_empty(@source_code) then
      current_token.kind := TokenKind.less_than
      current_token.kind := LexerKind.less_than
    elsif source_code_head(source_code) = '=' then
      current_token.kind := TokenKind.less_equal;
      current_token.kind := LexerKind.less_equal;
      source_code_advance(@source_code)
    elsif source_code_head(source_code) = '<' then
      current_token.kind := TokenKind.shift_left;
      current_token.kind := LexerKind.shift_left;
      source_code_advance(@source_code)
    elsif source_code_head(source_code) = '>' then
      current_token.kind := TokenKind.not_equal;
      current_token.kind := LexerKind.not_equal;
      source_code_advance(@source_code)
    else
      current_token.kind := TokenKind.less_than
      current_token.kind := LexerKind.less_than
    end
  elsif first_char = '=' then
    current_token.kind := TokenKind.equal;
    current_token.kind := LexerKind.equal;
    source_code_advance(@source_code)
  elsif first_char = ';' then
    current_token.kind := TokenKind.semicolon;
    current_token.kind := LexerKind.semicolon;
    source_code_advance(@source_code)
  elsif first_char = '.' then
    current_token.kind := TokenKind.dot;
    current_token.kind := LexerKind.dot;
    source_code_advance(@source_code)
  elsif first_char = ',' then
    current_token.kind := TokenKind.comma;
    current_token.kind := LexerKind.comma;
    source_code_advance(@source_code)
  elsif first_char = '+' then
    current_token.kind := TokenKind.plus;
    current_token.kind := LexerKind.plus;
    source_code_advance(@source_code)
  elsif first_char = '-' then
    source_code_advance(@source_code);

    if source_code_empty(@source_code) then
      current_token.kind := TokenKind.minus
      current_token.kind := LexerKind.minus
    elsif source_code_head(source_code) = '>' then
      current_token.kind := TokenKind.arrow;
      current_token.kind := LexerKind.arrow;
      source_code_advance(@source_code)
    else
      current_token.kind := TokenKind.minus
      current_token.kind := LexerKind.minus
    end
  elsif first_char = '*' then
    current_token.kind := TokenKind.multiplication;
    current_token.kind := LexerKind.multiplication;
    source_code_advance(@source_code)
  elsif first_char = '/' then
    current_token.kind := TokenKind.division;
    current_token.kind := LexerKind.division;
    source_code_advance(@source_code)
  elsif first_char = '%' then
    current_token.kind := TokenKind.remainder;
    current_token.kind := LexerKind.remainder;
    source_code_advance(@source_code)
  elsif first_char = ':' then
    source_code_advance(@source_code);

    if source_code_empty(@source_code) then
      current_token.kind := TokenKind.colon
      current_token.kind := LexerKind.colon
    elsif source_code_head(source_code) = '=' then
      current_token.kind := TokenKind.assignment;
      current_token.kind := LexerKind.assignment;
      source_code_advance(@source_code)
    else
      current_token.kind := TokenKind.colon
      current_token.kind := LexerKind.colon
    end
  elsif first_char = '^' then
    current_token.kind := TokenKind.hat;
    current_token.kind := LexerKind.hat;
    source_code_advance(@source_code)
  elsif first_char = '@' then
    current_token.kind := TokenKind.at;
    current_token.kind := LexerKind.at;
    source_code_advance(@source_code)
  elsif first_char = '!' then
    current_token.kind := TokenKind.exclamation;
    current_token.kind := LexerKind.exclamation;
    source_code_advance(@source_code)
  elsif first_char = '&' then
    current_token.kind := TokenKind.and;
    current_token.kind := LexerKind.and;
    source_code_advance(@source_code)
  elsif first_char = '~' then
    current_token.kind := TokenKind.not;
    current_token.kind := LexerKind.not;
    source_code_advance(@source_code)
  elsif first_char = '|' then
    current_token.kind := TokenKind.pipe;
    current_token.kind := LexerKind.pipe;
    source_code_advance(@source_code)
  else
    current_token.kind := TokenKind.unknown;
    current_token.kind := LexerKind.unknown;
    source_code_advance(@source_code)
  end;

@@ -684,7 +605,7 @@ begin
  while ~source_code_empty(@source_code) do
    current_token := lexer_next(source_code, @token_buffer);

    if current_token.kind <> TokenKind.unknown then
    if current_token.kind <> LexerKind.unknown then
      lexer_add_token(@lexer, current_token);
        lexer_spaces(@source_code)
    else
@@ -711,142 +632,142 @@ begin
    current_token := tokens + i;

    case current_token^.kind of
      TokenKind._if:
      LexerKind._if:
        write_s("IF")
      | TokenKind._then:
      | LexerKind._then:
        write_s("THEN")
      | TokenKind._else:
      | LexerKind._else:
        write_s("ELSE")
      | TokenKind._elsif:
      | LexerKind._elsif:
        write_s("ELSIF")
      | TokenKind._while:
      | LexerKind._while:
        write_s("WHILE")
      | TokenKind._do:
      | LexerKind._do:
        write_s("DO")
      | TokenKind._proc:
      | LexerKind._proc:
        write_s("PROC")
      | TokenKind._begin:
      | LexerKind._begin:
        write_s("BEGIN")
      | TokenKind._end:
      | LexerKind._end:
        write_s("END")
      | TokenKind._extern:
      | LexerKind._extern:
        write_s("EXTERN")
      | TokenKind._const:
      | LexerKind._const:
        write_s("CONST")
      | TokenKind._var:
      | LexerKind._var:
        write_s("VAR")
     | TokenKind._case:
     | LexerKind._case:
        write_s("CASE")
      | TokenKind._of:
      | LexerKind._of:
        write_s("OF")
      | TokenKind._type:
      | LexerKind._type:
        write_s("TYPE")
      | TokenKind._record:
      | LexerKind._record:
        write_s("RECORD")
      | TokenKind._union:
      | LexerKind._union:
        write_s("UNION")
      | TokenKind.pipe:
      | LexerKind.pipe:
        write_s("|")
      | TokenKind.to:
      | LexerKind.to:
        write_s("TO")
      | TokenKind.boolean:
      | LexerKind.boolean:
        write_s("BOOLEAN<");
        write_b(current_token^.value.boolean_value);
        write_c('>')
      | TokenKind.null:
      | LexerKind.null:
        write_s("NIL")
      | TokenKind.and:
      | LexerKind.and:
        write_s("&")
      | TokenKind._or:
      | LexerKind._or:
        write_s("OR")
      | TokenKind.not:
      | LexerKind.not:
        write_s("~")
      | TokenKind._return:
      | LexerKind._return:
        write_s("RETURN")
      | TokenKind._cast:
      | LexerKind._cast:
        write_s("CAST")
      | TokenKind.shift_left:
      | LexerKind.shift_left:
        write_s("<<")
      | TokenKind.shift_right:
      | LexerKind.shift_right:
        write_s(">>")
      | TokenKind.identifier:
      | LexerKind.identifier:
        write_c('<');
        write_s(current_token^.value.string);
        write_c('>')
      | TokenKind.trait:
      | LexerKind.trait:
        write_c('#');
        write_s(current_token^.value.string)
      | TokenKind.left_paren:
      | LexerKind.left_paren:
        write_s("(")
      | TokenKind.right_paren:
      | LexerKind.right_paren:
        write_s(")")
      | TokenKind.left_square:
      | LexerKind.left_square:
        write_s("[")
      | TokenKind.right_square:
      | LexerKind.right_square:
        write_s("]")
      | TokenKind.greater_equal:
      | LexerKind.greater_equal:
        write_s(">=")
      | TokenKind.less_equal:
      | LexerKind.less_equal:
        write_s("<=")
      | TokenKind.greater_than:
      | LexerKind.greater_than:
        write_s(">")
      | TokenKind.less_than:
      | LexerKind.less_than:
        write_s("<")
      | TokenKind.equal:
      | LexerKind.equal:
        write_s("=")
      | TokenKind.not_equal:
      | LexerKind.not_equal:
        write_s("<>")
      | TokenKind.semicolon:
      | LexerKind.semicolon:
        write_c(';')
      | TokenKind.dot:
      | LexerKind.dot:
        write_c('.')
      | TokenKind.comma:
      | LexerKind.comma:
        write_c(',')
      | TokenKind.plus:
      | LexerKind.plus:
        write_c('+')
      | TokenKind.minus:
      | LexerKind.minus:
        write_c('-')
      | TokenKind.multiplication:
      | LexerKind.multiplication:
        write_c('*')
      | TokenKind.division:
      | LexerKind.division:
        write_c('/')
      | TokenKind.remainder:
      | LexerKind.remainder:
        write_c('%')
      | TokenKind.assignment:
      | LexerKind.assignment:
        write_s(":=")
      | TokenKind.colon:
      | LexerKind.colon:
        write_c(':')
      | TokenKind.hat:
      | LexerKind.hat:
        write_c('^')
      | TokenKind.at:
      | LexerKind.at:
        write_c('@')
      | TokenKind.comment:
      | LexerKind.comment:
        write_s("(* COMMENT *)")
      | TokenKind.integer:
      | LexerKind.integer:
        write_c('<');
        write_i(current_token^.value.int_value);
        write_c('>')
      | TokenKind.word:
      | LexerKind.word:
        write_c('<');
        write_i(current_token^.value.int_value);
        write_s("u>")
      | TokenKind.character:
      | LexerKind.character:
        write_c('<');
        write_i(cast(current_token^.value.char_value: Int));
        write_s("c>")
      | TokenKind.string:
      | LexerKind.string:
        write_s("\"...\"")
      | TokenKind._defer:
      | LexerKind._defer:
        write_s("DEFER")
      | TokenKind.exclamation:
      | LexerKind.exclamation:
        write_c('!')
      | TokenKind.arrow:
      | LexerKind.arrow:
        write_s("->")
      | TokenKind._program:
      | LexerKind._program:
        write_s("PROGRAM")
      | TokenKind._module:
      | LexerKind._module:
      write_s("MODULE")
      | TokenKind._import:
      | LexerKind._import:
      write_s("IMPORT")
      else
        write_s("UNKNOWN<");
@@ -922,6 +843,5 @@ begin
  return return_code
end;

begin
  exit(process(count, parameters))
  return process(count, parameters)
end.