Support utf-8

This commit is contained in:
2026-01-19 21:39:41 +08:00
parent 4247a59146
commit 57def6137b
6 changed files with 129 additions and 32 deletions

View File

@@ -36,6 +36,7 @@ endif()
target_include_directories(libcamellya PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/src)
if(CAMELLYA_BUILD_CLI)
add_executable(camellya
cli/main.cpp
@@ -61,6 +62,7 @@ if(CAMELLYA_BUILD_TESTS)
add_executable(camellya_tests
tests/test_basic.cpp
tests/test_utf8.cpp
)
target_include_directories(camellya_tests

View File

@@ -32,7 +32,7 @@ print("\n=== List Demo ===");
list numbers = [1, 2, 3, 4, 5];
print("List:", numbers);
print("First element (index 0):", numbers[0]);
print("Third element (index 2):", numbers[2]);
print("测试 element (index 2):", numbers[2]);
for(number i = 0; i < len(numbers); i = i + 1) {
print("List element", numbers[i]);

View File

@@ -1,6 +1,7 @@
#include "lexer.h"
#include <cctype>
#include <unordered_map>
#include <cstdint>
namespace camellya {
@@ -17,8 +18,11 @@ std::vector<Token> Lexer::tokenize() {
}
// Consume and return the current byte, advancing the read cursor.
// column_ is incremented only for bytes that start a character: UTF-8
// continuation bytes (pattern 10xxxxxx) extend the previous character and
// must not occupy an extra column. The rendered original contained a stale
// unconditional `column_++; return source_[current_++];` ahead of this
// logic, which made the UTF-8 handling unreachable dead code — removed.
char Lexer::advance() {
    char c = source_[current_++];
    // 0xC0 masks the top two bits; continuation bytes match 0x80 (10xxxxxx).
    if ((c & 0xC0) != 0x80) {
        column_++;
    }
    return c;
}
char Lexer::peek() const {
@@ -75,7 +79,8 @@ void Lexer::scan_token() {
if (is_at_end()) return;
start_ = current_;
int start_column = column_;
start_line_ = line_;
start_column_ = column_;
char c = advance();
switch (c) {
@@ -124,7 +129,7 @@ void Lexer::scan_token() {
default:
if (std::isdigit(c)) {
scan_number();
} else if (std::isalpha(c) || c == '_') {
} else if (std::isalpha(c) || c == '_' || (static_cast<unsigned char>(c) >= 0x80)) {
scan_identifier();
} else {
add_token(TokenType::INVALID);
@@ -135,12 +140,12 @@ void Lexer::scan_token() {
// Append a token covering source_[start_, current_). Positions come from the
// start-of-token snapshot (start_line_/start_column_) taken in scan_token();
// deriving the column from `column_ - text.length()` is wrong for multi-byte
// UTF-8 lexemes (byte length != column width). The rendered original emitted
// BOTH the old and new emplace_back in each overload, pushing every token
// twice — the stale lines are removed here.
void Lexer::add_token(TokenType type) {
    std::string text = source_.substr(start_, current_ - start_);
    tokens_.emplace_back(type, text, start_line_, start_column_);
}

// Overload that also carries the token's literal payload (a parsed number or
// the decoded contents of a string literal).
void Lexer::add_token(TokenType type, std::variant<std::monostate, double, std::string> literal) {
    std::string text = source_.substr(start_, current_ - start_);
    tokens_.emplace_back(type, text, literal, start_line_, start_column_);
}
void Lexer::scan_string() {
@@ -160,6 +165,32 @@ void Lexer::scan_string() {
case 'r': value += '\r'; break;
case '\\': value += '\\'; break;
case '"': value += '"'; break;
case 'u': {
std::string hex;
for (int i = 0; i < 4 && !is_at_end(); ++i) {
if (std::isxdigit(peek())) {
hex += advance();
} else {
break;
}
}
if (hex.length() == 4) {
uint32_t codepoint = std::stoul(hex, nullptr, 16);
if (codepoint <= 0x7F) {
value += static_cast<char>(codepoint);
} else if (codepoint <= 0x7FF) {
value += static_cast<char>(0xC0 | (codepoint >> 6));
value += static_cast<char>(0x80 | (codepoint & 0x3F));
} else {
value += static_cast<char>(0xE0 | (codepoint >> 12));
value += static_cast<char>(0x80 | ((codepoint >> 6) & 0x3F));
value += static_cast<char>(0x80 | (codepoint & 0x3F));
}
} else {
value += "\\u" + hex;
}
break;
}
default: value += escaped; break;
}
} else {
@@ -194,7 +225,7 @@ void Lexer::scan_number() {
}
void Lexer::scan_identifier() {
while (std::isalnum(peek()) || peek() == '_') {
while (std::isalnum(peek()) || peek() == '_' || (static_cast<unsigned char>(peek()) >= 0x80)) {
advance();
}

View File

@@ -61,6 +61,8 @@ private:
size_t current_ = 0;
int line_ = 1;
int column_ = 1;
int start_line_ = 1;
int start_column_ = 1;
std::vector<Token> tokens_;
bool is_at_end() const { return current_ >= source_.length(); }

View File

@@ -123,31 +123,31 @@ TEST_CASE("class init is called on declaration", "[class][init]") {
REQUIRE(a_num->value == 18.0);
}
// NOTE(review): this benchmark is intentionally disabled — it depends on
// Catch2's BENCHMARK macro and slows down the regular test run. The rendered
// original kept BOTH the active TEST_CASE and this commented copy; the live
// duplicate is removed so only the disabled form remains. Re-enable locally
// when profiling the interpreter.
// TEST_CASE("interpreter performance: simple loop", "[perf][script]") {
//     State state;
//     const char* script = R"(
//         func sum_to(number n) -> number {
//             number s = 0;
//             for (number i = 0; i < n; i = i + 1) {
//                 s = s + i;
//             }
//             return s;
//         }
//         number r = sum_to(1000);
//     )";
//     BENCHMARK("sum_to(1000)") {
//         if (!state.do_string(script)) {
//             auto last_error = state.get_error();
//             REQUIRE(last_error.empty());
//         }
//         auto r_val = state.get_global("r");
//         REQUIRE(r_val);
//         REQUIRE(r_val->type() == Type::NUMBER);
//         auto r_num = std::dynamic_pointer_cast<NumberValue>(r_val);
//         // sum of 0..999 == 499500
//         REQUIRE(r_num->value == 499500.0);
//     };
// }
TEST_CASE("loop break", "[script][loop]") {
State state;

62
tests/test_utf8.cpp Normal file
View File

@@ -0,0 +1,62 @@
#include <catch2/catch_test_macros.hpp>
#include "lexer.h"
#include <vector>
#include <string>
using namespace camellya;
TEST_CASE("UTF-8 string support", "[lexer][utf8]") {
    const std::string src = "string s = \"你好, world\";";
    Lexer lexer(src);
    const auto toks = lexer.tokenize();

    // Expected stream: `string` keyword, identifier `s`, `=`,
    // string literal, `;`, EOF — six tokens total.
    REQUIRE(toks.size() == 6);
    REQUIRE(toks[0].type == TokenType::STRING);
    REQUIRE(toks[1].type == TokenType::IDENTIFIER);
    REQUIRE(toks[1].lexeme == "s");
    REQUIRE(toks[3].type == TokenType::STRING_LITERAL);

    // The literal's decoded value keeps the raw UTF-8 bytes intact.
    const auto value = std::get<std::string>(toks[3].literal);
    REQUIRE(value == "你好, world");

    // Columns count characters, not bytes: the opening quote sits at
    // column 12 ("string s = " is 11 characters wide).
    REQUIRE(toks[3].line == 1);
    REQUIRE(toks[3].column == 12);

    // 你好 spans 2 columns (not 6), so the semicolon lands at column 23.
    REQUIRE(toks[4].type == TokenType::SEMICOLON);
    REQUIRE(toks[4].column == 23);
}
TEST_CASE("UTF-8 identifier support", "[lexer][utf8]") {
    const std::string src = "var 变量 = 10;";
    Lexer lexer(src);
    const auto toks = lexer.tokenize();

    REQUIRE(toks.size() == 6);

    // A multi-byte name lexes as one identifier token, bytes preserved.
    REQUIRE(toks[1].type == TokenType::IDENTIFIER);
    REQUIRE(toks[1].lexeme == "变量");
    REQUIRE(toks[1].column == 5);

    // Column math in characters: "var " is 4, "变量" is 2, the space is 1,
    // so `=` begins at column 8.
    REQUIRE(toks[2].type == TokenType::EQUAL);
    REQUIRE(toks[2].column == 8);
}
TEST_CASE("Unicode escape sequence support", "[lexer][utf8]") {
    // U+4E2D U+6587 decode to the UTF-8 bytes of "中文".
    const std::string src = "string s = \"\\u4e2d\\u6587\";"; // "中文"
    Lexer lexer(src);
    const auto toks = lexer.tokenize();

    REQUIRE(toks.size() == 6);
    REQUIRE(toks[3].type == TokenType::STRING_LITERAL);

    const auto value = std::get<std::string>(toks[3].literal);
    REQUIRE(value == "中文");
}