// efp.go
  1. // Package efp (Excel Formula Parser) tokenise an Excel formula using an
  2. // implementation of E. W. Bachtal's algorithm, found here:
  3. // https://ewbi.blogs.com/develops/2004/12/excel_formula_p.html
  4. //
  5. // Go language version by Ri Xu: https://xuri.me
  6. package efp
  7. import (
  8. "regexp"
  9. "strconv"
  10. "strings"
  11. )
// QuoteDouble, QuoteSingle and other's constants are token definitions.
const (
	// Character constants
	QuoteDouble      = "\""
	QuoteSingle      = "'"
	BracketClose     = "]"
	BracketOpen      = "["
	BraceOpen        = "{"
	BraceClose       = "}"
	ParenOpen        = "("
	ParenClose       = ")"
	Semicolon        = ";"
	Whitespace       = " "
	Comma            = ","
	ErrorStart       = "#"
	OperatorsSN      = "+-"        // sign characters allowed in scientific notation
	OperatorsInfix   = "+-*/^&=><" // all single-character infix operators
	OperatorsPostfix = "%"
	// Token type
	TokenTypeNoop            = "Noop"
	TokenTypeOperand         = "Operand"
	TokenTypeFunction        = "Function"
	TokenTypeSubexpression   = "Subexpression"
	TokenTypeArgument        = "Argument"
	TokenTypeOperatorPrefix  = "OperatorPrefix"
	TokenTypeOperatorInfix   = "OperatorInfix"
	TokenTypeOperatorPostfix = "OperatorPostfix"
	TokenTypeWhitespace      = "Whitespace"
	TokenTypeUnknown         = "Unknown"
	// Token subtypes
	TokenSubTypeNothing       = "Nothing"
	TokenSubTypeStart         = "Start"
	TokenSubTypeStop          = "Stop"
	TokenSubTypeText          = "Text"
	TokenSubTypeNumber        = "Number"
	TokenSubTypeLogical       = "Logical"
	TokenSubTypeError         = "Error"
	TokenSubTypeRange         = "Range"
	TokenSubTypeMath          = "Math"
	TokenSubTypeConcatenation = "Concatenation"
	TokenSubTypeIntersection  = "Intersection"
	TokenSubTypeUnion         = "Union"
)
// Token encapsulate a formula token.
type Token struct {
	TValue   string // literal text of the token
	TType    string // one of the TokenType* constants
	TSubType string // one of the TokenSubType* constants
}
// Tokens directly maps the ordered list of tokens.
//
// Attributes:
//
//	Items - Ordered list
//	Index - Current position in the list
type Tokens struct {
	Index int // cursor position in Items; -1 means "before the first item"
	Items []Token
}
// Parser inheritable container. TokenStack directly maps a LIFO stack of
// tokens.
type Parser struct {
	Formula    string // formula text being scanned (normalized to start with "=")
	Tokens     Tokens // resulting token stream
	TokenStack Tokens // LIFO stack of open function/subexpression/array tokens
	Offset     int    // current rune offset into Formula
	Token      string // token text accumulated so far
	InString   bool   // scanner state: inside a double-quoted string literal
	InPath     bool   // scanner state: inside a single-quoted link/path
	InRange    bool   // scanner state: inside a bracketed range section
	InError    bool   // scanner state: accumulating an error literal such as #REF!
}
  84. // fToken provides function to encapsulate a formula token.
  85. func fToken(value, tokenType, subType string) Token {
  86. return Token{
  87. TValue: value,
  88. TType: tokenType,
  89. TSubType: subType,
  90. }
  91. }
  92. // fTokens provides function to handle an ordered list of tokens.
  93. func fTokens() Tokens {
  94. return Tokens{
  95. Index: -1,
  96. }
  97. }
  98. // add provides function to add a token to the end of the list.
  99. func (tk *Tokens) add(value, tokenType, subType string) Token {
  100. token := fToken(value, tokenType, subType)
  101. tk.addRef(token)
  102. return token
  103. }
  104. // addRef provides function to add a token to the end of the list.
  105. func (tk *Tokens) addRef(token Token) {
  106. tk.Items = append(tk.Items, token)
  107. }
  108. // reset provides function to reset the index to -1.
  109. func (tk *Tokens) reset() {
  110. tk.Index = -1
  111. }
  112. // BOF provides function to check whether or not beginning of list.
  113. func (tk *Tokens) BOF() bool {
  114. return tk.Index <= 0
  115. }
  116. // EOF provides function to check whether or not end of list.
  117. func (tk *Tokens) EOF() bool {
  118. return tk.Index >= (len(tk.Items) - 1)
  119. }
  120. // moveNext provides function to move the index along one.
  121. func (tk *Tokens) moveNext() bool {
  122. if tk.EOF() {
  123. return false
  124. }
  125. tk.Index++
  126. return true
  127. }
  128. // current return the current token.
  129. func (tk *Tokens) current() *Token {
  130. if tk.Index == -1 {
  131. return nil
  132. }
  133. return &tk.Items[tk.Index]
  134. }
  135. // next return the next token (leave the index unchanged).
  136. func (tk *Tokens) next() *Token {
  137. if tk.EOF() {
  138. return nil
  139. }
  140. return &tk.Items[tk.Index+1]
  141. }
  142. // previous return the previous token (leave the index unchanged).
  143. func (tk *Tokens) previous() *Token {
  144. if tk.Index < 1 {
  145. return nil
  146. }
  147. return &tk.Items[tk.Index-1]
  148. }
  149. // push provides function to push a token onto the stack.
  150. func (tk *Tokens) push(token Token) {
  151. tk.Items = append(tk.Items, token)
  152. }
  153. // pop provides function to pop a token off the stack.
  154. func (tk *Tokens) pop() Token {
  155. if len(tk.Items) == 0 {
  156. return Token{
  157. TType: TokenTypeFunction,
  158. TSubType: TokenSubTypeStop,
  159. }
  160. }
  161. t := tk.Items[len(tk.Items)-1]
  162. tk.Items = tk.Items[:len(tk.Items)-1]
  163. return fToken("", t.TType, TokenSubTypeStop)
  164. }
  165. // token provides function to non-destructively return the top item on the
  166. // stack.
  167. func (tk *Tokens) token() *Token {
  168. if len(tk.Items) > 0 {
  169. return &tk.Items[len(tk.Items)-1]
  170. }
  171. return nil
  172. }
  173. // value return the top token's value.
  174. func (tk *Tokens) value() string {
  175. if tk.token() == nil {
  176. return ""
  177. }
  178. return tk.token().TValue
  179. }
  180. // tp return the top token's type.
  181. func (tk *Tokens) tp() string {
  182. if tk.token() == nil {
  183. return ""
  184. }
  185. return tk.token().TType
  186. }
  187. // subtype return the top token's subtype.
  188. func (tk *Tokens) subtype() string {
  189. if tk.token() == nil {
  190. return ""
  191. }
  192. return tk.token().TSubType
  193. }
  194. // ExcelParser provides function to parse an Excel formula into a stream of
  195. // tokens.
  196. func ExcelParser() Parser {
  197. return Parser{}
  198. }
  199. // getTokens return a token stream (list).
  200. func (ps *Parser) getTokens(formula string) Tokens {
  201. ps.Formula = strings.TrimSpace(ps.Formula)
  202. f := []rune(ps.Formula)
  203. if len(f) > 0 {
  204. if string(f[0]) != "=" {
  205. ps.Formula = "=" + ps.Formula
  206. }
  207. }
  208. // state-dependent character evaluation (order is important)
  209. for !ps.EOF() {
  210. // double-quoted strings
  211. // embeds are doubled
  212. // end marks token
  213. if ps.InString {
  214. if ps.currentChar() == "\"" {
  215. if ps.nextChar() == "\"" {
  216. ps.Token += "\""
  217. ps.Offset++
  218. } else {
  219. ps.InString = false
  220. ps.Tokens.add(ps.Token, TokenTypeOperand, TokenSubTypeText)
  221. ps.Token = ""
  222. }
  223. } else {
  224. ps.Token += ps.currentChar()
  225. }
  226. ps.Offset++
  227. continue
  228. }
  229. // single-quoted strings (links)
  230. // embeds are double
  231. // end does not mark a token
  232. if ps.InPath {
  233. if ps.currentChar() == "'" {
  234. if ps.nextChar() == "'" {
  235. ps.Token += "'"
  236. ps.Offset++
  237. } else {
  238. ps.InPath = false
  239. }
  240. } else {
  241. ps.Token += ps.currentChar()
  242. }
  243. ps.Offset++
  244. continue
  245. }
  246. // bracketed strings (range offset or linked workbook name)
  247. // no embeds (changed to "()" by Excel)
  248. // end does not mark a token
  249. if ps.InRange {
  250. if ps.currentChar() == "]" {
  251. ps.InRange = false
  252. }
  253. ps.Token += ps.currentChar()
  254. ps.Offset++
  255. continue
  256. }
  257. // error values
  258. // end marks a token, determined from absolute list of values
  259. if ps.InError {
  260. ps.Token += ps.currentChar()
  261. ps.Offset++
  262. errors := map[string]string{",#NULL!,": "", ",#DIV/0!,": "", ",#VALUE!,": "", ",#REF!,": "", ",#NAME?,": "", ",#NUM!,": "", ",#N/A,": ""}
  263. _, ok := errors[","+ps.Token+","]
  264. if ok {
  265. ps.InError = false
  266. ps.Tokens.add(ps.Token, TokenTypeOperand, TokenSubTypeError)
  267. ps.Token = ""
  268. }
  269. continue
  270. }
  271. // scientific notation check
  272. if strings.ContainsAny(ps.currentChar(), "+-") && len(ps.Token) > 1 {
  273. match, _ := regexp.MatchString(`^[1-9]{1}(\.[0-9]+)?E{1}$`, ps.Token)
  274. if match {
  275. ps.Token += ps.currentChar()
  276. ps.Offset++
  277. continue
  278. }
  279. }
  280. // independent character evaluation (order not important)
  281. // establish state-dependent character evaluations
  282. if ps.currentChar() == "\"" {
  283. if len(ps.Token) > 0 {
  284. // not expected
  285. ps.Tokens.add(ps.Token, TokenTypeUnknown, "")
  286. ps.Token = ""
  287. }
  288. ps.InString = true
  289. ps.Offset++
  290. continue
  291. }
  292. if ps.currentChar() == "'" {
  293. if len(ps.Token) > 0 {
  294. // not expected
  295. ps.Tokens.add(ps.Token, TokenTypeUnknown, "")
  296. ps.Token = ""
  297. }
  298. ps.InPath = true
  299. ps.Offset++
  300. continue
  301. }
  302. if ps.currentChar() == "[" {
  303. ps.InRange = true
  304. ps.Token += ps.currentChar()
  305. ps.Offset++
  306. continue
  307. }
  308. if ps.currentChar() == "#" {
  309. if len(ps.Token) > 0 {
  310. // not expected
  311. ps.Tokens.add(ps.Token, TokenTypeUnknown, "")
  312. ps.Token = ""
  313. }
  314. ps.InError = true
  315. ps.Token += ps.currentChar()
  316. ps.Offset++
  317. continue
  318. }
  319. // mark start and end of arrays and array rows
  320. if ps.currentChar() == "{" {
  321. if len(ps.Token) > 0 {
  322. // not expected
  323. ps.Tokens.add(ps.Token, TokenTypeUnknown, "")
  324. ps.Token = ""
  325. }
  326. ps.TokenStack.push(ps.Tokens.add("ARRAY", TokenTypeFunction, TokenSubTypeStart))
  327. ps.TokenStack.push(ps.Tokens.add("ARRAYROW", TokenTypeFunction, TokenSubTypeStart))
  328. ps.Offset++
  329. continue
  330. }
  331. if ps.currentChar() == ";" {
  332. if len(ps.Token) > 0 {
  333. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  334. ps.Token = ""
  335. }
  336. ps.Tokens.addRef(ps.TokenStack.pop())
  337. ps.Tokens.add(",", TokenTypeArgument, "")
  338. ps.TokenStack.push(ps.Tokens.add("ARRAYROW", TokenTypeFunction, TokenSubTypeStart))
  339. ps.Offset++
  340. continue
  341. }
  342. if ps.currentChar() == "}" {
  343. if len(ps.Token) > 0 {
  344. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  345. ps.Token = ""
  346. }
  347. ps.Tokens.addRef(ps.TokenStack.pop())
  348. ps.Tokens.addRef(ps.TokenStack.pop())
  349. ps.Offset++
  350. continue
  351. }
  352. // trim white-space
  353. if ps.currentChar() == " " {
  354. if len(ps.Token) > 0 {
  355. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  356. ps.Token = ""
  357. }
  358. ps.Tokens.add("", TokenTypeWhitespace, "")
  359. ps.Offset++
  360. for (ps.currentChar() == " ") && (!ps.EOF()) {
  361. ps.Offset++
  362. }
  363. continue
  364. }
  365. // multi-character comparators
  366. comparators := map[string]string{",>=,": "", ",<=,": "", ",<>,": ""}
  367. _, ok := comparators[","+ps.doubleChar()+","]
  368. if ok {
  369. if len(ps.Token) > 0 {
  370. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  371. ps.Token = ""
  372. }
  373. ps.Tokens.add(ps.doubleChar(), TokenTypeOperatorInfix, TokenSubTypeLogical)
  374. ps.Offset += 2
  375. continue
  376. }
  377. // standard infix operators
  378. operators := map[string]string{"+": "", "-": "", "*": "", "/": "", "^": "", "&": "", "=": "", ">": "", "<": ""}
  379. _, ok = operators[ps.currentChar()]
  380. if ok {
  381. if len(ps.Token) > 0 {
  382. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  383. ps.Token = ""
  384. }
  385. ps.Tokens.add(ps.currentChar(), TokenTypeOperatorInfix, "")
  386. ps.Offset++
  387. continue
  388. }
  389. // standard postfix operators
  390. if ps.currentChar() == "%" {
  391. if len(ps.Token) > 0 {
  392. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  393. ps.Token = ""
  394. }
  395. ps.Tokens.add(ps.currentChar(), TokenTypeOperatorPostfix, "")
  396. ps.Offset++
  397. continue
  398. }
  399. // start subexpression or function
  400. if ps.currentChar() == "(" {
  401. if len(ps.Token) > 0 {
  402. ps.TokenStack.push(ps.Tokens.add(ps.Token, TokenTypeFunction, TokenSubTypeStart))
  403. ps.Token = ""
  404. } else {
  405. ps.TokenStack.push(ps.Tokens.add("", TokenTypeSubexpression, TokenSubTypeStart))
  406. }
  407. ps.Offset++
  408. continue
  409. }
  410. // function, subexpression, array parameters
  411. if ps.currentChar() == "," {
  412. if len(ps.Token) > 0 {
  413. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  414. ps.Token = ""
  415. }
  416. if ps.TokenStack.tp() != TokenTypeFunction {
  417. ps.Tokens.add(ps.currentChar(), TokenTypeOperatorInfix, TokenSubTypeUnion)
  418. } else {
  419. ps.Tokens.add(ps.currentChar(), TokenTypeArgument, "")
  420. }
  421. ps.Offset++
  422. continue
  423. }
  424. // stop subexpression
  425. if ps.currentChar() == ")" {
  426. if len(ps.Token) > 0 {
  427. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  428. ps.Token = ""
  429. }
  430. ps.Tokens.addRef(ps.TokenStack.pop())
  431. ps.Offset++
  432. continue
  433. }
  434. // token accumulation
  435. ps.Token += ps.currentChar()
  436. ps.Offset++
  437. }
  438. // dump remaining accumulation
  439. if len(ps.Token) > 0 {
  440. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  441. }
  442. // move all tokens to a new collection, excluding all unnecessary white-space tokens
  443. tokens2 := fTokens()
  444. for ps.Tokens.moveNext() {
  445. token := ps.Tokens.current()
  446. if token.TType == TokenTypeWhitespace {
  447. if ps.Tokens.BOF() || ps.Tokens.EOF() {
  448. } else if !(((ps.Tokens.previous().TType == TokenTypeFunction) && (ps.Tokens.previous().TSubType == TokenSubTypeStop)) || ((ps.Tokens.previous().TType == TokenTypeSubexpression) && (ps.Tokens.previous().TSubType == TokenSubTypeStop)) || (ps.Tokens.previous().TType == TokenTypeOperand)) {
  449. } else if !(((ps.Tokens.next().TType == TokenTypeFunction) && (ps.Tokens.next().TSubType == TokenSubTypeStart)) || ((ps.Tokens.next().TType == TokenTypeSubexpression) && (ps.Tokens.next().TSubType == TokenSubTypeStart)) || (ps.Tokens.next().TType == TokenTypeOperand)) {
  450. } else {
  451. tokens2.add(token.TValue, TokenTypeOperatorInfix, TokenSubTypeIntersection)
  452. }
  453. continue
  454. }
  455. tokens2.addRef(Token{
  456. TValue: token.TValue,
  457. TType: token.TType,
  458. TSubType: token.TSubType,
  459. })
  460. }
  461. // switch infix "-" operator to prefix when appropriate, switch infix "+"
  462. // operator to noop when appropriate, identify operand and infix-operator
  463. // subtypes, pull "@" from in front of function names
  464. for tokens2.moveNext() {
  465. token := tokens2.current()
  466. if (token.TType == TokenTypeOperatorInfix) && (token.TValue == "-") {
  467. if tokens2.BOF() {
  468. token.TType = TokenTypeOperatorPrefix
  469. } else if ((tokens2.previous().TType == TokenTypeFunction) && (tokens2.previous().TSubType == TokenSubTypeStop)) || ((tokens2.previous().TType == TokenTypeSubexpression) && (tokens2.previous().TSubType == TokenSubTypeStop)) || (tokens2.previous().TType == TokenTypeOperatorPostfix) || (tokens2.previous().TType == TokenTypeOperand) {
  470. token.TSubType = TokenSubTypeMath
  471. } else {
  472. token.TType = TokenTypeOperatorPrefix
  473. }
  474. continue
  475. }
  476. if (token.TType == TokenTypeOperatorInfix) && (token.TValue == "+") {
  477. if tokens2.BOF() {
  478. token.TType = TokenTypeNoop
  479. } else if (tokens2.previous().TType == TokenTypeFunction) && (tokens2.previous().TSubType == TokenSubTypeStop) || ((tokens2.previous().TType == TokenTypeSubexpression) && (tokens2.previous().TSubType == TokenSubTypeStop) || (tokens2.previous().TType == TokenTypeOperatorPostfix) || (tokens2.previous().TType == TokenTypeOperand)) {
  480. token.TSubType = TokenSubTypeMath
  481. } else {
  482. token.TType = TokenTypeNoop
  483. }
  484. continue
  485. }
  486. if (token.TType == TokenTypeOperatorInfix) && (len(token.TSubType) == 0) {
  487. op := map[string]string{"<": "", ">": "", "=": ""}
  488. _, ok := op[token.TValue[0:1]]
  489. if ok {
  490. token.TSubType = TokenSubTypeLogical
  491. } else if token.TValue == "&" {
  492. token.TSubType = TokenSubTypeConcatenation
  493. } else {
  494. token.TSubType = TokenSubTypeMath
  495. }
  496. continue
  497. }
  498. if (token.TType == TokenTypeOperand) && (len(token.TSubType) == 0) {
  499. if _, err := strconv.ParseFloat(token.TValue, 64); err != nil {
  500. if (token.TValue == "TRUE") || (token.TValue == "FALSE") {
  501. token.TSubType = TokenSubTypeLogical
  502. } else {
  503. token.TSubType = TokenSubTypeRange
  504. }
  505. } else {
  506. token.TSubType = TokenSubTypeNumber
  507. }
  508. continue
  509. }
  510. if token.TType == TokenTypeFunction {
  511. if (len(token.TValue) > 0) && token.TValue[0:1] == "@" {
  512. token.TValue = token.TValue[1:]
  513. }
  514. continue
  515. }
  516. }
  517. tokens2.reset()
  518. // move all tokens to a new collection, excluding all noops
  519. tokens := fTokens()
  520. for tokens2.moveNext() {
  521. if tokens2.current().TType != TokenTypeNoop {
  522. tokens.addRef(Token{
  523. TValue: tokens2.current().TValue,
  524. TType: tokens2.current().TType,
  525. TSubType: tokens2.current().TSubType,
  526. })
  527. }
  528. }
  529. tokens.reset()
  530. return tokens
  531. }
  532. // doubleChar provides function to get two characters after the current
  533. // position.
  534. func (ps *Parser) doubleChar() string {
  535. if len([]rune(ps.Formula)) >= ps.Offset+2 {
  536. return string([]rune(ps.Formula)[ps.Offset : ps.Offset+2])
  537. }
  538. return ""
  539. }
  540. // currentChar provides function to get the character of the current position.
  541. func (ps *Parser) currentChar() string {
  542. return string([]rune(ps.Formula)[ps.Offset])
  543. }
  544. // nextChar provides function to get the next character of the current position.
  545. func (ps *Parser) nextChar() string {
  546. if len([]rune(ps.Formula)) >= ps.Offset+2 {
  547. return string([]rune(ps.Formula)[ps.Offset+1 : ps.Offset+2])
  548. }
  549. return ""
  550. }
  551. // EOF provides function to check whether or not end of tokens stack.
  552. func (ps *Parser) EOF() bool {
  553. return ps.Offset >= len([]rune(ps.Formula))
  554. }
  555. // Parse provides function to parse formula as a token stream (list).
  556. func (ps *Parser) Parse(formula string) []Token {
  557. ps.Formula = formula
  558. ps.Tokens = ps.getTokens(formula)
  559. return ps.Tokens.Items
  560. }
  561. // PrettyPrint provides function to pretty the parsed result with the indented
  562. // format.
  563. func (ps *Parser) PrettyPrint() string {
  564. indent := 0
  565. output := ""
  566. for _, t := range ps.Tokens.Items {
  567. if t.TSubType == TokenSubTypeStop {
  568. indent--
  569. }
  570. for i := 0; i < indent; i++ {
  571. output += "\t"
  572. }
  573. output += t.TValue + " <" + t.TType + "> <" + t.TSubType + ">" + "\n"
  574. if t.TSubType == TokenSubTypeStart {
  575. indent++
  576. }
  577. }
  578. return output
  579. }
  580. // Render provides function to get formatted formula after parsed.
  581. func (ps *Parser) Render() string {
  582. output := ""
  583. for _, t := range ps.Tokens.Items {
  584. if t.TType == TokenTypeFunction && t.TSubType == TokenSubTypeStart {
  585. output += t.TValue + "("
  586. } else if t.TType == TokenTypeFunction && t.TSubType == TokenSubTypeStop {
  587. output += ")"
  588. } else if t.TType == TokenTypeSubexpression && t.TSubType == TokenSubTypeStart {
  589. output += "("
  590. } else if t.TType == TokenTypeSubexpression && t.TSubType == TokenSubTypeStop {
  591. output += ")"
  592. } else if t.TType == TokenTypeOperand && t.TSubType == TokenSubTypeText {
  593. output += "\"" + t.TValue + "\""
  594. } else if t.TType == TokenTypeOperatorInfix && t.TSubType == TokenSubTypeIntersection {
  595. output += " "
  596. } else {
  597. output += t.TValue
  598. }
  599. }
  600. return output
  601. }