
Adding Statement Blocks to the Calculator


Previous: Implementing Calc with Recursive Descent

In this post we add statement-block support to the calculator, so that it can evaluate a batch of expressions in one run, much like a code block in a programming language.

The code here builds on the previous post, "Implementing Calc with Recursive Descent"; if anything below feels unfamiliar, it is worth reviewing that article first.

Full code listing (in Go)

package main

import (
    "fmt"
    "strconv"
    "io/ioutil"
    "./bklexer"
)

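// Node is the interface shared by every AST node: anything that can produce a value.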
type Node interface {
    GetValue() float64
}

type Block struct {
    statements []Node
}

func NewBlock() *Block {
    return &Block{}
}

func (block *Block) AddStatement(statement Node) {
    block.statements = append(block.statements, statement)
}

func (block *Block) Eval() {
    for i, statement := range block.statements {
        fmt.Printf("out[%d] = %f\n", i, statement.GetValue())
    }
}

type Number struct {
    value float64
}

func NewNumber(token *BKLexer.Token) *Number {
    value, _ := strconv.ParseFloat(token.Source, 64)
    return &Number{value: value}
}

func (number *Number) GetValue() float64 {
    return number.value
}

type BinaryOpt struct {
    opt string
    lhs Node
    rhs Node
}

func NewBinaryOpt(token *BKLexer.Token, lhs Node, rhs Node) *BinaryOpt {
    return &BinaryOpt{opt: token.Source, lhs: lhs, rhs: rhs}
}

func (binaryOpt *BinaryOpt) GetValue() float64 {
    lhs, rhs := binaryOpt.lhs, binaryOpt.rhs
    switch binaryOpt.opt {
        case "+": return lhs.GetValue() + rhs.GetValue()
        case "-": return lhs.GetValue() - rhs.GetValue()
        case "*": return lhs.GetValue() * rhs.GetValue()
        case "/": return lhs.GetValue() / rhs.GetValue()
    }
    return 0
}

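// parse is the parser entry point: it reads one expression per line until EOF
// and collects the resulting statements into a Block.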
func parse(lexer *BKLexer.Lexer) *Block {
    block := NewBlock()
    token := lexer.NextToken()
    for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
        token = lexer.NextToken()
    }
    for token.TType != BKLexer.TOKEN_TYPE_EOF {
        statement := parse_binary_add(lexer)
        if statement == nil {
            return nil
        }
        token = lexer.GetToken()
        if token.TType != BKLexer.TOKEN_TYPE_NEWLINE &&
           token.TType != BKLexer.TOKEN_TYPE_EOF {
            return nil
        }
        block.AddStatement(statement)
        for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
            token = lexer.NextToken()
        }
    }
    return block
}

func parse_binary_add(lexer *BKLexer.Lexer) Node {
    lhs := parse_binary_mul(lexer)
    if lhs == nil {
        return nil
    }
    token := lexer.GetToken()
    for token.Source == "+" || token.Source == "-" {
        lexer.NextToken()
        rhs := parse_binary_mul(lexer)
        if rhs == nil {
            return nil
        }
        lhs = NewBinaryOpt(token, lhs, rhs)
        token = lexer.GetToken()
    }
    return lhs
}

func parse_binary_mul(lexer *BKLexer.Lexer) Node {
    lhs := parse_number(lexer)
    if lhs == nil {
        return nil
    }
    token := lexer.GetToken()
    for token.Source == "*" || token.Source == "/" {
        lexer.NextToken()
        rhs := parse_number(lexer)
        if rhs == nil {
            return nil
        }
        lhs = NewBinaryOpt(token, lhs, rhs)
        token = lexer.GetToken()
    }
    return lhs
}

func parse_number(lexer *BKLexer.Lexer) Node {
    token := lexer.GetToken()
    if token.Name == "LPAR" {
        lexer.NextToken()
        expr := parse_binary_add(lexer)
        if expr == nil {
            return nil
        }
        token := lexer.GetToken()
        if token.Name != "RPAR" {
            return nil
        }
        lexer.NextToken()
        return expr
    }
    if token.Name == "NUMBER" {
        number := NewNumber(token)
        lexer.NextToken()
        return number
    }
    return nil
}

func main() {
    fmt.Println("Hello My Calc.")

    lexer := BKLexer.NewLexer()
    lexer.AddRule("\\d+\\.?\\d*", "NUMBER")
    lexer.AddRule("\\+", "PLUS")
    lexer.AddRule("-", "MINUS")
    lexer.AddRule("\\*", "MUL")
    lexer.AddRule("/", "DIV")
    lexer.AddRule("\\(", "LPAR")
    lexer.AddRule("\\)", "RPAR")
    lexer.AddIgnores("[ \\f\\t]+")
    lexer.AddIgnores("#[^\\r\\n]*")

    bytes, err := ioutil.ReadFile("../test.txt")
    if err != nil {
        fmt.Println("read faild")
        return
    }
    code := string(bytes)
    fmt.Println(code)
    lexer.Build(code)
    result := parse(lexer)
    if result == nil {
        fmt.Println("null result")
        return
    }
    result.Eval()
}

Importing the required packages

import (
    "fmt"
    "strconv"
    "io/ioutil"
    "./bklexer"
)
  • fmt — printing output
  • strconv — string-to-number conversion
  • io/ioutil — reading the input file
  • ./bklexer — the lexer package used for tokenizing (note that a relative import path like this only builds in the old GOPATH layout, not in Go module mode)

Defining the statement block struct

type Block struct {
    statements []Node
}

func NewBlock() *Block {
    return &Block{}
}

The statements field of Block stores each parsed statement, and NewBlock creates an instance of the struct. Any type that implements the Node interface (here, Number and BinaryOpt) can be stored in statements.

Adding methods to the block struct

func (block *Block) AddStatement(statement Node) {
    block.statements = append(block.statements, statement)
}

func (block *Block) Eval() {
    for i, statement := range block.statements {
        fmt.Printf("out[%d] = %f\n", i, statement.GetValue())
    }
}

AddStatement appends a new statement, and Eval evaluates every stored statement in turn and prints the results.
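As a quick illustration of how these two methods behave, here is a minimal sketch. It reuses the Node and Block definitions above; constNode is a hypothetical node type introduced only for this example and is not part of the calculator.

type constNode struct{ value float64 }

func (c constNode) GetValue() float64 { return c.value }

func demoBlock() {
    block := NewBlock()
    block.AddStatement(constNode{value: 3})  // stands in for a parsed "1 + 2"
    block.AddStatement(constNode{value: -1}) // stands in for a parsed "3 - 4"
    block.Eval()
    // prints:
    // out[0] = 3.000000
    // out[1] = -1.000000
}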

Defining the parser entry function

func parse(lexer *BKLexer.Lexer) *Block {
    block := NewBlock()
    token := lexer.NextToken()
    for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
        token = lexer.NextToken()
    }
    for token.TType != BKLexer.TOKEN_TYPE_EOF {
        statement := parse_binary_add(lexer)
        if statement == nil {
            return nil
        }
        token = lexer.GetToken()
        if token.TType != BKLexer.TOKEN_TYPE_NEWLINE &&
           token.TType != BKLexer.TOKEN_TYPE_EOF {
            return nil
        }
        block.AddStatement(statement)
        for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
            token = lexer.NextToken()
        }
    }
    return block
}

parse first creates a block instance and then starts consuming tokens: a loop parses one statement at a time, appends each statement to the block, and finally the block is returned.

Note that every statement must end with either a newline or the end of input; anything else is treated as a parse error and nil is returned. For example, a line such as 1 + 2 3 fails here, because after 1 + 2 has been parsed the current token is a NUMBER rather than a newline or EOF:

        token = lexer.GetToken()
        if token.TType != BKLexer.TOKEN_TYPE_NEWLINE &&
           token.TType != BKLexer.TOKEN_TYPE_EOF {
            return nil
        }

Once a statement has been parsed, we immediately skip over any following newline tokens. This lets the batch evaluation ignore blank lines (and, since the lexer discards # comments, lines that contain only a comment):

        for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
            token = lexer.NextToken()
        }
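Putting the pieces together, a small end-to-end run of parse could look like the sketch below; it assumes a lexer configured with the same rules as in main further down.

lexer := BKLexer.NewLexer()
// ... AddRule / AddIgnores calls exactly as in main ...
lexer.Build("1 + 2\n3 - 4\n")
if block := parse(lexer); block != nil {
    block.Eval()
    // out[0] = 3.000000
    // out[1] = -1.000000
}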

A few changes to note

func parse_number(lexer *BKLexer.Lexer) Node {
    token := lexer.GetToken()
    
    ......

In parse_number, token := lexer.NextToken() has been changed to token := lexer.GetToken(). Because of this, whenever one parse function calls another, the caller must first advance the lexer itself with NextToken(), so that each function can assume the current token (the one GetToken returns) is the first token it needs to handle.

For example:

......
func parse(lexer *BKLexer.Lexer) *Block {
    ......
    token := lexer.NextToken()
    for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
        token = lexer.NextToken()
    }
    for token.TType != BKLexer.TOKEN_TYPE_EOF {
        statement := parse_binary_add(lexer)
......
func parse_binary_add(lexer *BKLexer.Lexer) Node {
    ......
    for token.Source == "+" || token.Source == "-" {
        lexer.NextToken()
        rhs := parse_binary_mul(lexer)
    ......
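To make the peek/advance convention concrete, here is a toy model of the cursor behaviour assumed in this post; toyLexer is purely illustrative and is not how BKLexer is actually implemented.

// GetToken peeks at the current token without moving the cursor;
// NextToken advances the cursor first and then returns the token it lands on.
type toyLexer struct {
    tokens []string
    pos    int
}

func (l *toyLexer) GetToken() string { return l.tokens[l.pos] }

func (l *toyLexer) NextToken() string {
    if l.pos+1 < len(l.tokens) {
        l.pos++
    }
    return l.tokens[l.pos]
}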

Defining the lexer rules

lexer := BKLexer.NewLexer()
lexer.AddRule("\\d+\\.?\\d*", "NUMBER")
lexer.AddRule("\\+", "PLUS")
lexer.AddRule("-", "MINUS")
lexer.AddRule("\\*", "MUL")
lexer.AddRule("/", "DIV")
lexer.AddRule("\\(", "LPAR")
lexer.AddRule("\\)", "RPAR")
lexer.AddIgnores("[ \\f\\t]+")
lexer.AddIgnores("#[^\\r\\n]*")

Note that the ignore rules gain a new entry, lexer.AddIgnores("#[^\\r\\n]*"), which gives the program support for Python-style # comments.
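A quick way to see what the ignore rules do is to tokenize a single line and print the tokens; this sketch assumes the same lexer API used elsewhere in this post (Build, NextToken, and the Name/Source fields).

lexer.Build("1 + 2 # plus")
for tok := lexer.NextToken(); tok.TType != BKLexer.TOKEN_TYPE_EOF; tok = lexer.NextToken() {
    fmt.Println(tok.Name, tok.Source)
}
// Only the NUMBER, PLUS and NUMBER tokens are printed --
// the spaces and the trailing comment never show up as tokens.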

Reading the file, then parsing and evaluating it

bytes, err := ioutil.ReadFile("../test.txt")
if err != nil {
    fmt.Println("read faild")
    return
}
code := string(bytes)
fmt.Println(code)
lexer.Build(code)
result := parse(lexer)
if result == nil {
    fmt.Println("null result")
    return
}
result.Eval()

Testing with a sample script

Test input (../test.txt):

1 + 2 # plus
3 - 4

# here is a comment

5 * 6 # mul
7 / 8

1 + (2 - 3) * 4 / 5 # composite

Output:

➜ go run calc.go
Hello My Calc.
1 + 2 # plus
3 - 4

# here is a comment

5 * 6 # mul
7 / 8

1 + (2 - 3) * 4 / 5 # composite

out[0] = 3.000000
out[1] = -1.000000
out[2] = 30.000000
out[3] = 0.875000
out[4] = 0.200000
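
As a sanity check, the last line works out to 1 + (2 - 3) * 4 / 5 = 1 + (-4 / 5) = 1 - 0.8 = 0.2, which matches out[4].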

Next: Adding Variable Support to the Language
