main
Brett 2023-11-17 02:06:09 -05:00
parent 40858868d2
commit e3cdca3218
10 changed files with 236 additions and 30 deletions

View File

@ -5,7 +5,7 @@ option(ENABLE_ADDRSAN "Enable the address sanitizer" OFF)
option(ENABLE_UBSAN "Enable the ub sanitizer" OFF)
option(ENABLE_TSAN "Enable the thread data race sanitizer" OFF)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD 20)
option(BUILD_PROFILING OFF)
option(BUILD_NBT OFF)

View File

@ -69,20 +69,6 @@ class brainfuck_interpreter
{ delete[] _data; }
};
// Generic bracket-matching helper: repeatedly advances `index` with the
// stepping functor `f` (which mutates the index and returns its new value)
// until the running bracket counter `sp` reaches zero — i.e. the bracket
// balancing the starting position has been found — or the cursor leaves the
// program. NOTE(review): `f` may step forward or backward; when stepping
// backward past 0, unsigned wrap-around terminates the loop through the
// size comparison — confirm callers rely on that.
template<typename functor>
static inline void match(functor f, int sp, size_t& index, const std::string& program)
{
while (f(index) < program.size())
{
if (program[index] == '[')
sp++;
if (program[index] == ']')
sp--;
if (sp == 0)
break;
}
}
void interpret_bf(const std::string& program);
#endif //BRAINFUCK_MISC_BF_INTERPRETER_H

16
include/bf_mips_codegen.h Normal file
View File

@ -0,0 +1,16 @@
#pragma once
/*
 * Created by Brett on 17/11/23.
 * Licensed under GNU General Public License V3.0
 * See LICENSE file for license detail
 */
// NOTE(review): `#pragma once` and the include guard below are redundant;
// either alone suffices.
#ifndef BRAINFUCK_MISC_BF_MIPS_CODEGEN_H
#define BRAINFUCK_MISC_BF_MIPS_CODEGEN_H
#include <bf_tokenizer.h>
#include <fstream>
// Emits MIPS assembly implementing the brainfuck token stream to `out`.
// Tokens are expected to have been tagged by bf_name() first so that each
// [ ] pair carries a shared label for the generated branch targets.
void codegen(const std::vector<token_t>& tokens, std::ostream& out);
#endif //BRAINFUCK_MISC_BF_MIPS_CODEGEN_H

View File

@ -10,6 +10,7 @@
#include <vector>
#include <string>
#include <optional>
enum class bf_token
{
@ -23,7 +24,16 @@ enum class bf_token
CLOSE
};
std::vector<bf_token> tokenize(const std::string& program);
// One brainfuck instruction token. `name`, when present, is the label that
// bf_name() assigns to both halves of a matched [ ] pair so the code
// generator can emit paired branch labels.
struct token_t
{
bf_token token;
// Loop label; empty until bf_name() tags OPEN/CLOSE pairs.
std::optional<std::string> name = {};
explicit token_t(bf_token token): token(token) {}
};
std::vector<token_t> tokenize(const std::string& program);
std::vector<token_t>& bf_name(std::vector<token_t>& tokens);
#endif //BRAINFUCK_MISC_BF_TOKENIZER_H

@ -1 +1 @@
Subproject commit fd9fa5454d47c60e41792c65beccf3988f3f2225
Subproject commit 06f87c973415f8344fcc56b5f13bc12e46984aa9

View File

@ -6,6 +6,20 @@
#include <bf_interpreter.h>
#include <iostream>
// Scans `program` for the bracket that balances the starting position.
// The stepping functor `f` both moves `index` (forward or backward) and
// returns its new value; scanning stops when the nesting counter `sp`
// reaches zero or the cursor steps outside the program.
template<typename functor>
static inline void match(functor f, int sp, size_t& index, const std::string& program)
{
    for (;;)
    {
        if (f(index) >= program.size())
            return; // ran off the program without balancing
        const char c = program[index];
        if (c == '[')
            ++sp;
        else if (c == ']')
            --sp;
        if (sp == 0)
            return; // found the balancing bracket; index points at it
    }
}
void interpret_bf(const std::string& program)
{
brainfuck_interpreter fuck;

130
src/bf_mips_codegen.cpp Normal file
View File

@ -0,0 +1,130 @@
/*
* Created by Brett on 17/11/23.
* Licensed under GNU General Public License V3.0
* See LICENSE file for license detail
*/
#include <bf_mips_codegen.h>
#include <iostream>
void process_print(const std::vector<token_t>& tokens, size_t index);
void codegen(const std::vector<token_t>& tokens, std::ostream& out)
{
size_t index = 0;
// skip past comments
if (tokens[index].token == bf_token::OPEN)
while (tokens[index].token != bf_token::CLOSE)
index++;
process_print(tokens, index);
size_t sp = 0;
while (index < tokens.size())
{
auto& token = tokens[index++];
std::string name{"UNNAMED"};
if (token.name.has_value())
name = token.name.value();
switch (token.token)
{
case bf_token::INC_DP:
std::cout << "\taddi $t0, $t0, 1\n";
break;
case bf_token::DEC_DP:
std::cout << "\tsubi $t0, $t0, 1\n";
break;
case bf_token::INC_DV:
std::cout << "\tlb $t1, ($t0)\n"
<< "\taddi $t1, $t1, 1\n"
<< "\tsb $t1, ($t0)\n";
break;
case bf_token::DEC_DV:
std::cout << "\tlb $t1, ($t0)\n"
<< "\tsubi $t1, $t1, 1\n"
<< "\tsb $t1, ($t0)\n";
break;
case bf_token::PRINT:
std::cout << "\tli $v0, 11\n"
<< "\tlb $a0, ($t0)\n"
<< "\tsyscall\n";
break;
case bf_token::READ:
std::cout << "\tli $v0, 12\n"
<< "\tsyscall\n"
<< "\tsb $v0, ($t0)\n";
break;
case bf_token::OPEN:
std::cout << "\tlb $t1, ($t0)\n"
<< "\tbeqz $t1, BF_CLOSE_" << name << "_" << std::to_string(sp) << '\n'
<< "BF_OPEN_" << name << "_" << std::to_string(sp) << ":\n";
sp++;
break;
case bf_token::CLOSE:
sp--;
std::cout << "\tlb $t1, ($t0)\n"
<< "\tbnez $t1, BF_OPEN_" << name << "_" << std::to_string(sp) << '\n'
<< "BF_CLOSE_" << name << "_" << std::to_string(sp) << ":\n";
break;
}
}
}
inline void tabulate(size_t v)
{
for (size_t i = 0; i < v; i++)
std::cout << '\t';
}
/*
 * Debug aid: prints the token stream to stdout as an indented pseudo-code
 * listing (one line per token, indented by the current loop depth).
 *
 * Fix vs. original: an unmatched ']' (e.g. when the caller hands us an
 * index still pointing at a comment block's close) decremented the
 * unsigned depth counter past zero, asking tabulate() for ~SIZE_MAX tabs.
 */
void process_print(const std::vector<token_t>& tokens, size_t index)
{
    size_t sp = 0; // current nesting depth (number of open loops)
    while (index < tokens.size())
    {
        const auto& token = tokens[index++];
        // Label attached by bf_name(), or a placeholder when absent.
        const std::string label = token.name.has_value() ? token.name.value() : "UNNAMED";
        switch (token.token)
        {
            case bf_token::INC_DP:
                tabulate(sp);
                std::cout << "Increase DP\n";
                break;
            case bf_token::DEC_DP:
                tabulate(sp);
                std::cout << "Decrease DP\n";
                break;
            case bf_token::INC_DV:
                tabulate(sp);
                std::cout << "Increase DV\n";
                break;
            case bf_token::DEC_DV:
                tabulate(sp);
                std::cout << "Decrease DV\n";
                break;
            case bf_token::PRINT:
                tabulate(sp);
                std::cout << "Print\n";
                break;
            case bf_token::READ:
                tabulate(sp);
                std::cout << "Read\n";
                break;
            case bf_token::OPEN:
                tabulate(sp);
                std::cout << "If(" << label << "\n";
                sp++;
                break;
            case bf_token::CLOSE:
                if (sp > 0) // guard: unmatched ']' must not wrap the depth
                    sp--;
                tabulate(sp);
                std::cout << label << ")\n";
                break;
        }
    }
}

View File

@ -4,10 +4,12 @@
* See LICENSE file for license detail
*/
#include <bf_tokenizer.h>
#include <sstream>
#include <random>
std::vector<bf_token> tokenize(const std::string& program)
std::vector<token_t> tokenize(const std::string& program)
{
std::vector<bf_token> tokens;
std::vector<token_t> tokens;
size_t index = 0;
while (index < program.size())
@ -16,28 +18,28 @@ std::vector<bf_token> tokenize(const std::string& program)
switch (c)
{
case '>':
tokens.push_back(bf_token::INC_DP);
tokens.emplace_back(bf_token::INC_DP);
break;
case '<':
tokens.push_back(bf_token::DEC_DP);
tokens.emplace_back(bf_token::DEC_DP);
break;
case '+':
tokens.push_back(bf_token::INC_DV);
tokens.emplace_back(bf_token::INC_DV);
break;
case '-':
tokens.push_back(bf_token::DEC_DV);
tokens.emplace_back(bf_token::DEC_DV);
break;
case '.':
tokens.push_back(bf_token::PRINT);
tokens.emplace_back(bf_token::PRINT);
break;
case ',':
tokens.push_back(bf_token::READ);
tokens.emplace_back(bf_token::READ);
break;
case '[':
tokens.push_back(bf_token::OPEN);
tokens.emplace_back(bf_token::OPEN);
break;
case ']':
tokens.push_back(bf_token::CLOSE);
tokens.emplace_back(bf_token::CLOSE);
break;
default:
break;
@ -47,3 +49,45 @@ std::vector<bf_token> tokenize(const std::string& program)
return tokens;
}
// Returns a pseudo-random 8-character lowercase-hex string used to label a
// matched [ ] pair in the generated assembly.
std::string generateName()
{
    // Seed one engine per thread instead of constructing and re-seeding a
    // fresh random_device/mt19937_64 on every call, which the original did —
    // that is both slow and wasteful of system entropy.
    static thread_local std::mt19937_64 gen{[] {
        std::random_device rd;
        std::seed_seq seed{rd(), rd(), rd(), rd()};
        return std::mt19937_64{seed};
    }()};
    std::uniform_int_distribution<int> dis(0, 15);
    std::stringstream ss;
    ss << std::hex; // each draw in [0,15] renders as a single hex digit
    for (int i = 0; i < 8; i++)
        ss << dis(gen);
    return ss.str();
}
/*
 * Assigns a shared random name to every matched [ ] pair so codegen can
 * emit unique, paired branch labels. Returns its argument for chaining.
 *
 * Fix vs. original: when a '[' had no matching ']', the inner scan ran to
 * tokens.size() and then wrote tokens[tokens.size()].name — out of bounds.
 * Such an unbalanced OPEN is now left untagged instead.
 */
std::vector<token_t>& bf_name(std::vector<token_t>& tokens)
{
    for (size_t open_index = 0; open_index < tokens.size(); open_index++)
    {
        if (tokens[open_index].token != bf_token::OPEN)
            continue;
        auto name = generateName();
        tokens[open_index].name = name;
        // Scan forward for the ']' that balances this '['.
        size_t depth = 1;
        size_t close_index = open_index;
        while (++close_index < tokens.size())
        {
            if (tokens[close_index].token == bf_token::OPEN)
                depth++;
            if (tokens[close_index].token == bf_token::CLOSE)
                depth--;
            if (depth == 0)
                break;
        }
        // Only tag the close when one was actually found (bounds guard).
        if (close_index < tokens.size())
            tokens[close_index].name = name;
    }
    return tokens;
}

View File

@ -4,16 +4,18 @@
#include <sstream>
#include <blt/std/loader.h>
#include <bf_interpreter.h>
#include <bf_mips_codegen.h>
// Entry point: loads a brainfuck source file (default "../helloworld.bf",
// overridable via argv[1]), interprets it, then names the loops and emits
// MIPS assembly for the token stream to stdout.
// Fix: the span contained two conflicting declarations of `file`
// ("../life.bf" and "../helloworld.bf"); only the newer default is kept.
int main(int argc, const char** argv)
{
    std::string file{"../helloworld.bf"};
    if (argc > 1)
        file = argv[1];
    auto program = blt::fs::loadBrainFuckFile(file);
    interpret_bf(program);
    auto tokens = tokenize(program);
    bf_name(tokens);
    codegen(tokens, std::cout);
    return 0;
}

4
test.bf Normal file
View File

@ -0,0 +1,4 @@
++++++++++++++++++++
++++++++++++++++++++
++++++++++++++++++++
+++++.