Implement .max and .min type properties
parent b358f8ba27
commit 994b91e0e5
@@ -89,12 +89,6 @@ const {
 var {
     return yy::parser::make_VAR(this->location);
 }
-array {
-    return yy::parser::make_ARRAY(this->location);
-}
-of {
-    return yy::parser::make_OF(this->location);
-}
 type {
     return yy::parser::make_TYPE(this->location);
 }
@@ -104,12 +98,6 @@ record {
 union {
     return yy::parser::make_UNION(this->location);
 }
-pointer {
-    return yy::parser::make_POINTER(this->location);
-}
-to {
-    return yy::parser::make_TO(this->location);
-}
 true {
     return yy::parser::make_BOOLEAN(true, this->location);
 }
@@ -82,7 +82,7 @@ along with GCC; see the file COPYING3. If not see
 %token <std::string> STRING "string"
 %token <bool> BOOLEAN
 %token IF WHILE DO THEN ELSE ELSIF RETURN
-%token CONST VAR PROCEDURE ARRAY OF TYPE RECORD POINTER TO UNION
+%token CONST VAR PROCEDURE TYPE RECORD UNION
 %token BEGIN_BLOCK END_BLOCK EXTERN DEFER
 %token LEFT_PAREN RIGHT_PAREN LEFT_SQUARE RIGHT_SQUARE SEMICOLON DOT COMMA
 %token AND OR NOT CAST SHIFT_LEFT SHIFT_RIGHT
@@ -118,7 +118,7 @@ along with GCC; see the file COPYING3. If not see
 %type <std::vector<elna::boot::type_definition *>> type_definitions type_part;
 %type <elna::boot::block *> block;
 %type <elna::boot::field_t> field_declaration;
-%type <std::vector<std::pair<std::string, std::shared_ptr<elna::boot::top_type>>>> field_list;
+%type <std::vector<std::pair<std::string, std::shared_ptr<elna::boot::top_type>>>> optional_fields fields;
 %type <std::vector<elna::boot::conditional_statements *>> elsif_statement_list;
 %type <elna::boot::cast_expression *> cast_expression;
 %type <elna::boot::defer_statement *> defer_statement;
@@ -414,9 +414,9 @@ statement:
     }
     | defer_statement { $$ = $1; }
 statements:
-    statement SEMICOLON statements
+    statement statements
     {
-        std::swap($$, $3);
+        std::swap($$, $2);
         $$.emplace($$.cbegin(), $1);
     }
     | statement { $$.push_back($1); }
@@ -425,27 +425,30 @@ optional_statements:
     | /* no statements */ {}
 field_declaration:
     IDENTIFIER COLON type_expression { $$ = std::make_pair($1, $3); }
-field_list:
-    field_declaration field_list
+fields:
+    field_declaration fields
     {
         std::swap($$, $2);
         $$.emplace($$.cbegin(), $1);
     }
     | field_declaration { $$.emplace_back($1); }
+optional_fields:
+    fields { std::swap($$, $1); }
+    | /* no fields */ {}
 type_expression:
-    ARRAY INTEGER OF type_expression
+    LEFT_SQUARE INTEGER RIGHT_SQUARE type_expression
     {
         $$ = std::make_shared<elna::boot::array_type>(elna::boot::make_position(@1), $4, $2);
     }
-    | POINTER TO type_expression
+    | HAT type_expression
     {
-        $$ = std::make_shared<elna::boot::pointer_type>(elna::boot::make_position(@1), $3);
+        $$ = std::make_shared<elna::boot::pointer_type>(elna::boot::make_position(@1), $2);
     }
-    | RECORD field_list END_BLOCK
+    | RECORD optional_fields END_BLOCK
     {
         $$ = std::make_shared<elna::boot::record_type>(elna::boot::make_position(@1), std::move($2));
     }
-    | UNION field_list END_BLOCK
+    | UNION fields END_BLOCK
     {
         $$ = std::make_shared<elna::boot::union_type>(elna::boot::make_position(@1), std::move($2));
     }
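For orientation, here is a minimal Elna sketch of the surface syntax the revised grammar accepts. The example is not part of the commit and the names Buffer, Handle and reset are invented; it only restates the rules above: ^T replaces "pointer to T", [N]T replaces "array N of T", record bodies may be empty, and consecutive statements no longer need a separating semicolon.

type
    Buffer* = record
        data: ^Char
        length: Word
    end
    Handle* = record end

proc exit(code: Int); extern

proc reset(buffer: ^Buffer);
var
    scratch: [10]Char
begin
    buffer^.data := nil
    buffer^.length := 0u
end

begin
    exit(0)
end.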
@@ -30,6 +30,18 @@ namespace gcc
         return linemap_position_for_column(line_table, position->column);
     }
 
+    std::string print_aggregate_name(tree type, const std::string& kind_name)
+    {
+        if (TYPE_IDENTIFIER(type) == NULL_TREE)
+        {
+            return kind_name;
+        }
+        else
+        {
+            return std::string(IDENTIFIER_POINTER(TYPE_IDENTIFIER(type)));
+        }
+    }
+
     std::string print_type(tree type)
     {
         gcc_assert(TYPE_P(type));
@@ -68,7 +80,7 @@ namespace gcc
         }
         else if (is_pointer_type(type))
         {
-            return std::string("pointer to " + print_type(TREE_TYPE(type)));
+            return std::string("^" + print_type(TREE_TYPE(type)));
         }
         else if (is_procedure_type(type))
         {
@@ -100,11 +112,11 @@ namespace gcc
         }
         else if (TREE_CODE(type) == RECORD_TYPE)
         {
-            return "record";
+            return print_aggregate_name(type, "record");
         }
         else if (TREE_CODE(type) == UNION_TYPE)
         {
-            return "union";
+            return print_aggregate_name(type, "union");
         }
         else
         {
@@ -733,6 +733,7 @@ namespace gcc
         if (result)
         {
             TREE_PUBLIC(definition_tree) = definition->exported;
+            TYPE_NAME(tree_type) = get_identifier(definition->identifier.c_str());
         }
         else
         {
@@ -952,6 +953,14 @@ namespace gcc
             this->current_expression = build_int_cstu(elna_word_type_node,
                     TYPE_ALIGN_UNIT(this->current_expression));
         }
+        else if (expression->field() == "min" && is_integral_type(this->current_expression))
+        {
+            this->current_expression = TYPE_MIN_VALUE(this->current_expression);
+        }
+        else if (expression->field() == "max" && is_integral_type(this->current_expression))
+        {
+            this->current_expression = TYPE_MAX_VALUE(this->current_expression);
+        }
         else
         {
             error_at(expression_location, "type '%s' does not have property '%s'",
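As a usage note, not taken from the commit itself: with the branch above, .min and .max on an integral type evaluate to its smallest and largest value, following the same type-property syntax that .size already uses in source.elna below. The sketch assumes that property accesses are valid in ordinary expressions; is_extreme is an invented procedure name.

proc exit(code: Int); extern

proc is_extreme(value: Int) -> Bool;
begin
    return value = Int.min or value = Int.max
end

begin
    if is_extreme(0) then
        exit(1)
    end
    exit(0)
end.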
source.elna
@@ -76,7 +76,7 @@ type
     end
     TokenValue* = union
         int_value: Int
-        string_value: pointer to Char
+        string_value: ^Char
         string: String
         boolean_value: Bool
         char_value: Char
@@ -86,11 +86,9 @@ type
         value: TokenValue
         location: Location
     end
-    FILE* = record
-        dummy: Int
-    end
+    FILE* = record end
     CommandLine* = record
-        input: pointer to Char
+        input: ^Char
         tokenize: Bool
         syntax_tree: Bool
     end
@@ -98,11 +96,11 @@ type
         value: Int
     end
     ConstantDefinition* = record
-        name: pointer to Char
-        body: pointer to Literal
+        name: ^Char
+        body: ^Literal
     end
     ConstantPart* = record
-        elements: pointer to pointer to ConstantDefinition
+        elements: ^^ConstantDefinition
         count: Word
     end
     Program* = record
@@ -112,48 +110,48 @@ type
 (*
     External procedures.
 *)
-proc fopen(pathname: pointer to Char, mode: pointer to Char) -> pointer to FILE; extern
-proc fclose(stream: pointer to FILE) -> Int; extern
-proc fseek(stream: pointer to FILE, off: Int, whence: Int) -> Int; extern
-proc rewind(stream: pointer to FILE); extern
-proc ftell(stream: pointer to FILE) -> Int; extern
-proc fread(ptr: pointer to Byte, size: Word, nmemb: Word, stream: pointer to FILE) -> Word; extern
-proc write(fd: Int, buf: pointer to Byte, Word: Int) -> Int; extern
+proc fopen(pathname: ^Char, mode: ^Char) -> ^FILE; extern
+proc fclose(stream: ^FILE) -> Int; extern
+proc fseek(stream: ^FILE, off: Int, whence: Int) -> Int; extern
+proc rewind(stream: ^FILE); extern
+proc ftell(stream: ^FILE) -> Int; extern
+proc fread(ptr: ^Byte, size: Word, nmemb: Word, stream: ^FILE) -> Word; extern
+proc write(fd: Int, buf: ^Byte, Word: Int) -> Int; extern
 
-proc malloc(size: Word) -> pointer to Byte; extern
-proc free(ptr: pointer to Byte); extern
-proc calloc(nmemb: Word, size: Word) -> pointer to Byte; extern
-proc realloc(ptr: pointer to Byte, size: Word) -> pointer to Byte; extern
+proc malloc(size: Word) -> ^Byte; extern
+proc free(ptr: ^Byte); extern
+proc calloc(nmemb: Word, size: Word) -> ^Byte; extern
+proc realloc(ptr: ^Byte, size: Word) -> ^Byte; extern
 
-proc memset(ptr: pointer to Char, c: Int, n: Int) -> pointer to Char; extern
+proc memset(ptr: ^Char, c: Int, n: Int) -> ^Char; extern
 
-proc strcmp(s1: pointer to Char, s2: pointer to Char) -> Int; extern
-proc strncmp(s1: pointer to Char, s2: pointer to Char, n: Word) -> Int; extern
-proc strncpy(dst: pointer to Char, src: pointer to Char, dsize: Word) -> pointer to Char; extern
-proc strcpy(dst: pointer to Char, src: pointer to Char) -> pointer to Char; extern
-proc strlen(ptr: pointer to Char) -> Word; extern
+proc strcmp(s1: ^Char, s2: ^Char) -> Int; extern
+proc strncmp(s1: ^Char, s2: ^Char, n: Word) -> Int; extern
+proc strncpy(dst: ^Char, src: ^Char, dsize: Word) -> ^Char; extern
+proc strcpy(dst: ^Char, src: ^Char) -> ^Char; extern
+proc strlen(ptr: ^Char) -> Word; extern
 
-proc strtol(nptr: pointer to Char, endptr: pointer to pointer to Char, base: Int) -> Int; extern
+proc strtol(nptr: ^Char, endptr: ^^Char, base: Int) -> Int; extern
 
-proc perror(s: pointer to Char); extern
+proc perror(s: ^Char); extern
 proc exit(code: Int); extern
 
 (*
     Standard procedures.
 *)
-proc reallocarray(ptr: pointer to Byte, n: Word, size: Word) -> pointer to Byte;
+proc reallocarray(ptr: ^Byte, n: Word, size: Word) -> ^Byte;
 begin
     return realloc(ptr, n * size)
 end
 
 proc write_s(value: String);
 begin
-    write(0, cast(value.ptr: pointer to Byte), cast(value.length: Int))
+    write(0, cast(value.ptr: ^Byte), cast(value.length: Int))
 end
 
-proc write_z(value: pointer to Char);
+proc write_z(value: ^Char);
 begin
-    write(0, cast(value: pointer to Byte), cast(strlen(value): Int))
+    write(0, cast(value: ^Byte), cast(strlen(value): Int))
 end
 
 proc write_b(value: Bool);
@@ -167,29 +165,29 @@ end
 
 proc write_c(value: Char);
 begin
-    write(0, cast(@value: pointer to Byte), 1)
+    write(0, cast(@value: ^Byte), 1)
 end
 
 proc write_i(value: Int);
 var
     digit: Int
     n: Word
-    buffer: array 10 of Char
+    buffer: [10]Char
 begin
-    n := 10u;
+    n := 10u
 
     if value = 0 then
         write_c('0')
-    end;
+    end
     while value <> 0 do
-        digit := value % 10;
-        value := value / 10;
+        digit := value % 10
+        value := value / 10
 
-        buffer[n] := cast(cast('0': Int) + digit: Char);
+        buffer[n] := cast(cast('0': Int) + digit: Char)
         n := n - 1u
-    end;
+    end
     while n < 10u do
-        n := n + 1u;
+        n := n + 1u
         write_c(buffer[n])
     end
 end
@@ -231,10 +229,10 @@ end
 
 proc string_dup(origin: String) -> String;
 var
-    copy: pointer to Char
+    copy: ^Char
 begin
-    copy := cast(malloc(origin.length): pointer to Char);
-    strncpy(copy, origin.ptr, origin.length);
+    copy := cast(malloc(origin.length): ^Char)
+    strncpy(copy, origin.ptr, origin.length)
 
     return String(copy, origin.length)
 end
@@ -248,85 +246,88 @@ begin
     return Position(1u, 1u)
 end
 
-proc read_source(filename: pointer to Char, result: pointer to String) -> Bool;
+proc read_source(filename: ^Char, result: ^String) -> Bool;
 var
-    input_file: pointer to FILE
+    input_file: ^FILE
     source_size: Int
-    input: pointer to Byte
+    input: ^Byte
 begin
-    input_file := fopen(filename, "rb\0".ptr);
+    input_file := fopen(filename, "rb\0".ptr)
 
     if input_file = nil then
         return false
-    end;
+    end
     defer
         fclose(input_file)
-    end;
+    end
     if fseek(input_file, 0, SEEK_END) <> 0 then
         return false
-    end;
-    source_size := ftell(input_file);
+    end
+    source_size := ftell(input_file)
     if source_size < 0 then
         return false
-    end;
-    rewind(input_file);
+    end
+    rewind(input_file)
 
-    input := malloc(cast(source_size: Word));
+    input := malloc(cast(source_size: Word))
     if fread(input, cast(source_size: Word), 1u, input_file) <> 1u then
         return false
-    end;
-    result^ := String(cast(input: pointer to Char), cast(source_size: Word));
+    end
+    result^ := String(cast(input: ^Char), cast(source_size: Word))
 
     return true
 end
 
-proc escape_char(escape: Char, result: pointer to Char) -> Bool;
+proc escape_char(escape: Char, result: ^Char) -> Bool;
+var
+    successful: Bool
 begin
     if escape = 'n' then
-        result^ := '\n';
-        return true
+        result^ := '\n'
+        successful := true
     elsif escape = 'a' then
-        result^ := '\a';
-        return true
+        result^ := '\a'
+        successful := true
     elsif escape = 'b' then
-        result^ := '\b';
-        return true
+        result^ := '\b'
+        successful := true
     elsif escape = 't' then
-        result^ := '\t';
-        return true
+        result^ := '\t'
+        successful := true
     elsif escape = 'f' then
-        result^ := '\f';
-        return true
+        result^ := '\f'
+        successful := true
     elsif escape = 'r' then
-        result^ := '\r';
-        return true
+        result^ := '\r'
+        successful := true
    elsif escape = 'v' then
-        result^ := '\v';
-        return true
+        result^ := '\v'
+        successful := true
     elsif escape = '\\' then
-        result^ := '\\';
-        return true
+        result^ := '\\'
+        successful := true
     elsif escape = '\'' then
-        result^ := '\'';
-        return true
+        result^ := '\''
+        successful := true
     elsif escape = '"' then
-        result^ := '"';
-        return true
+        result^ := '"'
+        successful := true
     elsif escape = '?' then
-        result^ := '\?';
-        return true
+        result^ := '\?'
+        successful := true
     elsif escape = '0' then
-        result^ := '\0';
-        return true
+        result^ := '\0'
+        successful := true
     else
-        return false
+        successful := false
     end
+    return successful
 end
 
 proc advance_source(source_code: SourceCode, length: Word) -> SourceCode;
 begin
-    source_code.text := open_substring(source_code.text, length);
-    source_code.position.column := source_code.position.column + length;
+    source_code.text := open_substring(source_code.text, length)
+    source_code.position.column := source_code.position.column + length
 
     return source_code
 end
@@ -335,112 +336,112 @@ proc skip_spaces(source_code: SourceCode) -> SourceCode;
 begin
     while source_code.text.length > 0u and is_space(source_code.text[1u]) do
         if source_code.text[1u] = '\n' then
-            source_code.position.line := source_code.position.line + 1u;
+            source_code.position.line := source_code.position.line + 1u
             source_code.position.column := 1u
         else
             source_code.position.column := source_code.position.column + 1u
-        end;
+        end
         source_code.text := open_substring(source_code.text, 1u)
-    end;
+    end
     return source_code
 end
 
-proc lex_identifier(source_code: pointer to SourceCode, token_content: pointer to String);
+proc lex_identifier(source_code: ^SourceCode, token_content: ^String);
 var
     content_length: Word
 begin
-    content_length := 0u;
-    token_content^ := source_code^.text;
+    content_length := 0u
+    token_content^ := source_code^.text
 
     while is_alnum(source_code^.text[1u]) or source_code^.text[1u] = '_' do
-        content_length := content_length + 1u;
+        content_length := content_length + 1u
         source_code^ := advance_source(source_code^, 1u)
-    end;
+    end
     token_content^ := substring(token_content^, 0u, content_length)
 end
 
-proc lex_comment(source_code: pointer to SourceCode, token_content: pointer to String) -> Bool;
+proc lex_comment(source_code: ^SourceCode, token_content: ^String) -> Bool;
 var
     content_length: Word
 begin
-    content_length := 0u;
-    token_content^ := source_code^.text;
+    content_length := 0u
+    token_content^ := source_code^.text
 
     while source_code^.text.length > 1u do
         if source_code^.text[1u] = '*' and source_code^.text[2u] = ')' then
-            source_code^ := advance_source(source_code^, 2u);
-            token_content^ := substring(token_content^, 0u, content_length);
+            source_code^ := advance_source(source_code^, 2u)
+            token_content^ := substring(token_content^, 0u, content_length)
 
             return true
-        end;
-        content_length := content_length + 1u;
+        end
+        content_length := content_length + 1u
         source_code^ := advance_source(source_code^, 1u)
-    end;
+    end
 
     return false
 end
 
-proc lex_character(input: pointer to Char, current_token: pointer to Token) -> pointer to Char;
+proc lex_character(input: ^Char, current_token: ^Token) -> ^Char;
 begin
     if input^ = '\\' then
-        input := input + 1;
+        input := input + 1
         if escape_char(input^, @current_token^.value.char_value) then
             input := input + 1
         end
     elsif input^ <> '\0' then
-        current_token^.value.char_value := input^;
+        current_token^.value.char_value := input^
         input := input + 1
-    end;
+    end
     return input
 end
 
-proc lex_string(input: pointer to Char, current_token: pointer to Token) -> pointer to Char;
+proc lex_string(input: ^Char, current_token: ^Token) -> ^Char;
 var
-    token_end, constructed_string: pointer to Char
+    token_end, constructed_string: ^Char
     token_length: Word
     is_valid: Bool
 begin
-    token_end := input;
+    token_end := input
 
     while token_end^ <> '\0' and not ((token_end - 1)^ <> '\\' and token_end^ = '"') do
         token_end := token_end + 1
-    end;
+    end
     if token_end^ <> '\"' then
         return input
-    end;
-    token_length := cast(token_end - input: Word);
-    current_token^.value.string_value := cast(calloc(token_length, 1u): pointer to Char);
+    end
+    token_length := cast(token_end - input: Word)
+    current_token^.value.string_value := cast(calloc(token_length, 1u): ^Char)
 
-    is_valid := true;
-    constructed_string := current_token^.value.string_value;
+    is_valid := true
+    constructed_string := current_token^.value.string_value
     while input < token_end and is_valid do
 
         if input^ = '\\' then
-            input := input + 1;
+            input := input + 1
             if escape_char(input^, constructed_string) then
                 input := input + 1
             else
                 is_valid := false
             end
         elsif input^ <> '\0' then
-            constructed_string^ := input^;
+            constructed_string^ := input^
             input := input + 1
-        end;
+        end
 
         constructed_string := constructed_string + 1
-    end;
+    end
 
     return token_end
 end
 
-proc print_tokens(tokens: pointer to Token, tokens_size: Word);
+proc print_tokens(tokens: ^Token, tokens_size: Word);
 var
-    current_token: pointer to Token
+    current_token: ^Token
     i: Word
 begin
-    i := 0u;
+    i := 0u
     while i < tokens_size do
-        current_token := tokens + i;
+        current_token := tokens + i
 
         if current_token^.kind = TOKEN_IF then
             write_s("IF")
@@ -481,8 +482,8 @@ begin
         elsif current_token^.kind = TOKEN_TO then
             write_s("TO")
         elsif current_token^.kind = TOKEN_BOOLEAN then
-            write_s("BOOLEAN<");
-            write_b(current_token^.value.boolean_value);
+            write_s("BOOLEAN<")
+            write_b(current_token^.value.boolean_value)
             write_c('>')
         elsif current_token^.kind = TOKEN_NIL then
             write_s("NIL")
@@ -501,8 +502,8 @@ begin
         elsif current_token^.kind = TOKEN_SHIFT_RIGHT then
             write_s(">>")
         elsif current_token^.kind = TOKEN_IDENTIFIER then
-            write_c('<');
-            write_s(current_token^.value.string);
+            write_c('<')
+            write_s(current_token^.value.string)
             write_c('>')
         elsif current_token^.kind = TOKEN_LEFT_PAREN then
             write_s("(")
@@ -551,30 +552,30 @@ begin
         elsif current_token^.kind = TOKEN_COMMENT then
             write_s("(* COMMENT *)")
         elsif current_token^.kind = TOKEN_INTEGER then
-            write_c('<');
-            write_i(current_token^.value.int_value);
+            write_c('<')
+            write_i(current_token^.value.int_value)
             write_c('>')
         elsif current_token^.kind = TOKEN_WORD then
-            write_c('<');
-            write_i(current_token^.value.int_value);
+            write_c('<')
+            write_i(current_token^.value.int_value)
             write_s("u>")
         elsif current_token^.kind = TOKEN_CHARACTER then
-            write_c('<');
-            write_i(cast(current_token^.value.char_value: Int));
+            write_c('<')
+            write_i(cast(current_token^.value.char_value: Int))
             write_s("c>")
         elsif current_token^.kind = TOKEN_STRING then
             write_s("\"...\"")
         elsif current_token^.kind = TOKEN_DEFER then
             write_s("DEFER")
         else
-            write_s("UNKNOWN<");
-            write_i(current_token^.kind);
+            write_s("UNKNOWN<")
+            write_i(current_token^.kind)
             write_c('>')
-        end;
-        write_c(' ');
+        end
+        write_c(' ')
 
         i := i + 1u
-    end;
+    end
     write_c('\n')
 end
 
@@ -621,10 +622,10 @@ begin
     elsif "to" = token_content then
         current_token.kind := TOKEN_TO
     elsif "true" = token_content then
-        current_token.kind := TOKEN_BOOLEAN;
+        current_token.kind := TOKEN_BOOLEAN
         current_token.value.boolean_value := true
     elsif "false" = token_content then
-        current_token.kind := TOKEN_BOOLEAN;
+        current_token.kind := TOKEN_BOOLEAN
         current_token.value.boolean_value := false
     elsif "nil" = token_content then
         current_token.kind := TOKEN_NIL
@@ -641,55 +642,55 @@ begin
     elsif "defer" = token_content then
         current_token.kind := TOKEN_DEFER
     else
-        current_token.kind := TOKEN_IDENTIFIER;
+        current_token.kind := TOKEN_IDENTIFIER
        current_token.value.string := string_dup(token_content)
-    end;
+    end
 
     return current_token
 end
 
-proc tokenize(source_code: SourceCode, tokens_size: pointer to Word) -> pointer to Token;
+proc tokenize(source_code: SourceCode, tokens_size: ^Word) -> ^Token;
 var
-    token_end: pointer to Char
-    tokens, current_token: pointer to Token
+    token_end: ^Char
+    tokens, current_token: ^Token
     token_length: Word
     first_char: Char
     token_content: String
 begin
-    tokens_size^ := 0u;
-    tokens := nil;
-    source_code := skip_spaces(source_code);
+    tokens_size^ := 0u
+    tokens := nil
+    source_code := skip_spaces(source_code)
 
     while source_code.text.length <> 0u do
-        tokens := cast(reallocarray(cast(tokens: pointer to Byte), tokens_size^ + 1u, Token.size): pointer to Token);
-        current_token := tokens + tokens_size^;
-        first_char := source_code.text[1u];
+        tokens := cast(reallocarray(cast(tokens: ^Byte), tokens_size^ + 1u, Token.size): ^Token)
+        current_token := tokens + tokens_size^
+        first_char := source_code.text[1u]
 
         if is_alpha(first_char) or first_char = '_' then
-            lex_identifier(@source_code, @token_content);
+            lex_identifier(@source_code, @token_content)
             current_token^ := categorize_identifier(token_content)
         elsif is_digit(first_char) then
-            token_end := nil;
-            current_token^.value.int_value := strtol(source_code.text.ptr, @token_end, 10);
-            token_length := cast(token_end - source_code.text.ptr: Word);
+            token_end := nil
+            current_token^.value.int_value := strtol(source_code.text.ptr, @token_end, 10)
+            token_length := cast(token_end - source_code.text.ptr: Word)
 
             if token_end^ = 'u' then
-                current_token^.kind := TOKEN_WORD;
+                current_token^.kind := TOKEN_WORD
                 source_code := advance_source(source_code, token_length + 1u)
             else
-                current_token^.kind := TOKEN_INTEGER;
+                current_token^.kind := TOKEN_INTEGER
                 source_code := advance_source(source_code, token_length)
             end
         elsif first_char = '(' then
-            source_code := advance_source(source_code, 1u);
+            source_code := advance_source(source_code, 1u)
 
             if source_code.text.length = 0u then
                 current_token^.kind := TOKEN_LEFT_PAREN
             elsif source_code.text[1u] = '*' then
-                source_code := advance_source(source_code, 1u);
+                source_code := advance_source(source_code, 1u)
 
                 if lex_comment(@source_code, @token_content) then
-                    current_token^.value.string := string_dup(token_content);
+                    current_token^.value.string := string_dup(token_content)
                     current_token^.kind := TOKEN_COMMENT
                 else
                     current_token^.kind := 0
@@ -698,180 +699,180 @@ begin
                 current_token^.kind := TOKEN_LEFT_PAREN
             end
         elsif first_char = ')' then
-            current_token^.kind := TOKEN_RIGHT_PAREN;
+            current_token^.kind := TOKEN_RIGHT_PAREN
             source_code := advance_source(source_code, 1u)
         elsif first_char = '\'' then
-            token_end := lex_character(source_code.text.ptr + 1, current_token);
-            token_length := cast(token_end - source_code.text.ptr: Word);
+            token_end := lex_character(source_code.text.ptr + 1, current_token)
+            token_length := cast(token_end - source_code.text.ptr: Word)
 
             if token_end^ = '\'' then
-                current_token^.kind := TOKEN_CHARACTER;
+                current_token^.kind := TOKEN_CHARACTER
                 source_code := advance_source(source_code, token_length + 1u)
             else
                 source_code := advance_source(source_code, 1u)
             end
         elsif first_char = '"' then
-            token_end := lex_string(source_code.text.ptr + 1, current_token);
+            token_end := lex_string(source_code.text.ptr + 1, current_token)
 
             if token_end^ = '"' then
-                current_token^.kind := TOKEN_STRING;
-                token_length := cast(token_end - source_code.text.ptr: Word);
+                current_token^.kind := TOKEN_STRING
+                token_length := cast(token_end - source_code.text.ptr: Word)
                 source_code := advance_source(source_code, token_length + 1u)
             end
         elsif first_char = '[' then
-            current_token^.kind := TOKEN_LEFT_SQUARE;
+            current_token^.kind := TOKEN_LEFT_SQUARE
             source_code := advance_source(source_code, 1u)
         elsif first_char = ']' then
-            current_token^.kind := TOKEN_RIGHT_SQUARE;
+            current_token^.kind := TOKEN_RIGHT_SQUARE
             source_code := advance_source(source_code, 1u)
         elsif first_char = '>' then
-            source_code := advance_source(source_code, 1u);
+            source_code := advance_source(source_code, 1u)
 
             if source_code.text.length = 0u then
                 current_token^.kind := TOKEN_GREATER_THAN
             elsif source_code.text[1u] = '=' then
-                current_token^.kind := TOKEN_GREATER_EQUAL;
+                current_token^.kind := TOKEN_GREATER_EQUAL
                 source_code := advance_source(source_code, 1u)
             elsif source_code.text[1u] = '>' then
-                current_token^.kind := TOKEN_SHIFT_RIGHT;
+                current_token^.kind := TOKEN_SHIFT_RIGHT
                 source_code := advance_source(source_code, 1u)
             else
                 current_token^.kind := TOKEN_GREATER_THAN
             end
         elsif first_char = '<' then
-            source_code := advance_source(source_code, 1u);
+            source_code := advance_source(source_code, 1u)
 
             if source_code.text.length = 0u then
                 current_token^.kind := TOKEN_LESS_THAN
             elsif source_code.text[1u] = '=' then
-                current_token^.kind := TOKEN_LESS_EQUAL;
+                current_token^.kind := TOKEN_LESS_EQUAL
                 source_code := advance_source(source_code, 1u)
             elsif source_code.text[1u] = '<' then
-                current_token^.kind := TOKEN_SHIFT_LEFT;
+                current_token^.kind := TOKEN_SHIFT_LEFT
                 source_code := advance_source(source_code, 1u)
             elsif source_code.text[1u] = '>' then
-                current_token^.kind := TOKEN_NOT_EQUAL;
+                current_token^.kind := TOKEN_NOT_EQUAL
                 source_code := advance_source(source_code, 1u)
             else
                 current_token^.kind := TOKEN_LESS_THAN
             end
         elsif first_char = '=' then
-            current_token^.kind := TOKEN_EQUAL;
+            current_token^.kind := TOKEN_EQUAL
             source_code := advance_source(source_code, 1u)
         elsif first_char = ';' then
-            current_token^.kind := TOKEN_SEMICOLON;
+            current_token^.kind := TOKEN_SEMICOLON
             source_code := advance_source(source_code, 1u)
         elsif first_char = '.' then
-            current_token^.kind := TOKEN_DOT;
+            current_token^.kind := TOKEN_DOT
             source_code := advance_source(source_code, 1u)
         elsif first_char = ',' then
-            current_token^.kind := TOKEN_COMMA;
+            current_token^.kind := TOKEN_COMMA
             source_code := advance_source(source_code, 1u)
         elsif first_char = '+' then
-            current_token^.kind := TOKEN_PLUS;
+            current_token^.kind := TOKEN_PLUS
             source_code := advance_source(source_code, 1u)
         elsif first_char = '-' then
-            current_token^.kind := TOKEN_MINUS;
+            current_token^.kind := TOKEN_MINUS
             source_code := advance_source(source_code, 1u)
         elsif first_char = '*' then
-            current_token^.kind := TOKEN_MULTIPLICATION;
+            current_token^.kind := TOKEN_MULTIPLICATION
             source_code := advance_source(source_code, 1u)
         elsif first_char = '/' then
-            current_token^.kind := TOKEN_DIVISION;
+            current_token^.kind := TOKEN_DIVISION
             source_code := advance_source(source_code, 1u)
         elsif first_char = '%' then
-            current_token^.kind := TOKEN_REMAINDER;
+            current_token^.kind := TOKEN_REMAINDER
             source_code := advance_source(source_code, 1u)
         elsif first_char = ':' then
-            source_code := advance_source(source_code, 1u);
+            source_code := advance_source(source_code, 1u)
 
             if source_code.text.length = 0u then
                 current_token^.kind := TOKEN_COLON
             elsif source_code.text[1u] = '=' then
-                current_token^.kind := TOKEN_ASSIGNMENT;
+                current_token^.kind := TOKEN_ASSIGNMENT
                 source_code := advance_source(source_code, 1u)
             else
                 current_token^.kind := TOKEN_COLON
             end
         elsif first_char = '^' then
-            current_token^.kind := TOKEN_HAT;
+            current_token^.kind := TOKEN_HAT
             source_code := advance_source(source_code, 1u)
         elsif first_char = '@' then
-            current_token^.kind := TOKEN_AT;
+            current_token^.kind := TOKEN_AT
             source_code := advance_source(source_code, 1u)
         else
-            current_token^.kind := 0;
+            current_token^.kind := 0
             source_code := advance_source(source_code, 1u)
-        end;
+        end
 
         if current_token^.kind <> 0 then
-            tokens_size^ := tokens_size^ + 1u;
+            tokens_size^ := tokens_size^ + 1u
             source_code := skip_spaces(source_code)
         else
-            write_s("Lexical analysis error on \"");
-            write_c(first_char);
+            write_s("Lexical analysis error on \"")
+            write_c(first_char)
             write_s("\".\n")
         end
-    end;
+    end
 
     return tokens
 end
 
-proc parse_literal(tokens: pointer to pointer to Token, tokens_size: pointer to Word) -> pointer to Literal;
+proc parse_literal(tokens: ^^Token, tokens_size: ^Word) -> ^Literal;
 begin
-    return cast(calloc(1u, Literal.size): pointer to Literal)
+    return cast(calloc(1u, Literal.size): ^Literal)
 end
 
-proc parse_constant_definition(tokens: pointer to pointer to Token,
-    tokens_size: pointer to Word) -> pointer to ConstantDefinition;
+proc parse_constant_definition(tokens: ^^Token,
+    tokens_size: ^Word) -> ^ConstantDefinition;
 var
-    result: pointer to ConstantDefinition
+    result: ^ConstantDefinition
 begin
-    result := cast(calloc(1u, ConstantDefinition.size): pointer to ConstantDefinition);
+    result := cast(calloc(1u, ConstantDefinition.size): ^ConstantDefinition)
 
-    result^.name := cast(malloc(strlen(tokens^^.value.string_value)): pointer to Char);
-    strcpy(result^.name, tokens^^.value.string_value);
+    result^.name := cast(malloc(strlen(tokens^^.value.string_value)): ^Char)
+    strcpy(result^.name, tokens^^.value.string_value)
 
-    tokens^ := tokens^ + 2u;
-    tokens_size := tokens_size - 2u;
+    tokens^ := tokens^ + 2u
+    tokens_size := tokens_size - 2u
 
-    write_z(result^.name);
-    write_c('\n');
+    write_z(result^.name)
+    write_c('\n')
 
-    result^.body := parse_literal(tokens, tokens_size);
+    result^.body := parse_literal(tokens, tokens_size)
 
-    tokens^ := tokens^ + 2u;
-    tokens_size := tokens_size - 2u;
+    tokens^ := tokens^ + 2u
+    tokens_size := tokens_size - 2u
 
     return result
 end
 
-proc parse_program(tokens: pointer to pointer to Token, tokens_size: pointer to Word) -> pointer to Program;
+proc parse_program(tokens: ^^Token, tokens_size: ^Word) -> ^Program;
 var
-    result: pointer to Program
-    current_constant: pointer to pointer to ConstantDefinition
+    result: ^Program
+    current_constant: ^^ConstantDefinition
 begin
-    result := cast(calloc(1u, Program.size): pointer to Program);
+    result := cast(calloc(1u, Program.size): ^Program)
 
-    result^.constants.elements := nil;
-    result^.constants.count := 0u;
+    result^.constants.elements := nil
+    result^.constants.count := 0u
 
     if tokens^^.kind = TOKEN_CONST then
-        tokens^ := tokens^ + 1;
-        tokens_size^ := tokens_size^ - 1u;
+        tokens^ := tokens^ + 1
+        tokens_size^ := tokens_size^ - 1u
 
         while tokens_size^ > 0u and tokens^^.kind = TOKEN_IDENTIFIER do
             result^.constants.elements := cast(
                 reallocarray(
-                    cast(result^.constants.elements: pointer to Byte),
+                    cast(result^.constants.elements: ^Byte),
                     result^.constants.count + 1u,
-                    (pointer to ConstantDefinition).size
-                ) : pointer to pointer to ConstantDefinition);
-            current_constant := result^.constants.elements + result^.constants.count;
+                    (^ConstantDefinition).size
+                ) : ^^ConstantDefinition)
+            current_constant := result^.constants.elements + result^.constants.count
 
-            result^.constants.count := result^.constants.count + 1u;
+            result^.constants.count := result^.constants.count + 1u
 
-            current_constant^ := parse_constant_definition(tokens, tokens_size);
+            current_constant^ := parse_constant_definition(tokens, tokens_size)
             if current_constant^ = nil then
                 return nil
             end
@@ -879,20 +880,20 @@ begin
     end
 end
 
-proc parse_command_line*(argc: Int, argv: pointer to pointer to Char) -> pointer to CommandLine;
+proc parse_command_line*(argc: Int, argv: ^^Char) -> ^CommandLine;
 var
-    parameter: pointer to pointer to Char
+    parameter: ^^Char
     i: Int
-    result: pointer to CommandLine
+    result: ^CommandLine
 begin
-    i := 1;
-    result := cast(malloc(CommandLine.size): pointer to CommandLine);
-    result^.tokenize := false;
-    result^.syntax_tree := false;
-    result^.input := nil;
+    i := 1
+    result := cast(malloc(CommandLine.size): ^CommandLine)
+    result^.tokenize := false
+    result^.syntax_tree := false
+    result^.input := nil
 
     while i < argc do
-        parameter := argv + i;
+        parameter := argv + i
 
         if strcmp(parameter^, "--tokenize\0".ptr) = 0 then
             result^.tokenize := true
@@ -901,53 +902,53 @@ begin
         elsif parameter^^ <> '-' then
             result^.input := parameter^
         else
-            write_s("Fatal error: Unknown command line options:");
+            write_s("Fatal error: Unknown command line options:")
 
-            write_c(' ');
-            write_z(parameter^);
-            write_s(".\n");
+            write_c(' ')
+            write_z(parameter^)
+            write_s(".\n")
 
             return nil
-        end;
+        end
 
         i := i + 1
-    end;
+    end
     if result^.input = nil then
-        write_s("Fatal error: no input files.\n");
+        write_s("Fatal error: no input files.\n")
         return nil
-    end;
+    end
 
     return result
 end
 
-proc process(argc: Int, argv: pointer to pointer to Char) -> Int;
+proc process(argc: Int, argv: ^^Char) -> Int;
 var
-    tokens: pointer to Token
+    tokens: ^Token
     tokens_size: Word
     source_code: SourceCode
-    command_line: pointer to CommandLine
+    command_line: ^CommandLine
 begin
-    command_line := parse_command_line(argc, argv);
+    command_line := parse_command_line(argc, argv)
     if command_line = nil then
         return 2
-    end;
+    end
 
-    source_code.position := make_position();
+    source_code.position := make_position()
     if not read_source(command_line^.input, @source_code.text) then
-        perror(command_line^.input);
+        perror(command_line^.input)
         return 3
-    end;
-    tokens := tokenize(source_code, @tokens_size);
+    end
+    tokens := tokenize(source_code, @tokens_size)
 
     if command_line^.tokenize then
         print_tokens(tokens, tokens_size)
-    end;
+    end
     if command_line^.syntax_tree then
         parse_program(@tokens, @tokens_size)
-    end;
+    end
     return 0
 end
 
 begin
-    exit(process(cast(count: Int), cast(parameters: pointer to pointer to Char)))
+    exit(process(cast(count: Int), cast(parameters: ^^Char)))
 end.