encode_arm64.s

// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"

// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".

// ----------------------------------------------------------------------------

// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
//  - R3    len(lit)
//  - R4    n
//  - R6    return value
//  - R8    &dst[i]
//  - R10   &lit[0]
//
// The 32 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
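//
// For reference, the tag-byte selection this implements is roughly that of
// encode_other.go (n is len(lit)-1, tagLiteral is 0x00):
//
//  switch {
//  case n < 60:
//      dst[0] = uint8(n)<<2 | tagLiteral // 1-byte header
//  case n < 1<<8:
//      dst[0] = 60<<2 | tagLiteral // 0xf0, 2-byte header
//      dst[1] = uint8(n)
//  default:
//      dst[0] = 61<<2 | tagLiteral // 0xf4, 3-byte header
//      dst[1] = uint8(n)
//      dst[2] = uint8(n >> 8)
//  }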
TEXT ·emitLiteral(SB), NOSPLIT, $32-56
    MOVD dst_base+0(FP), R8
    MOVD lit_base+24(FP), R10
    MOVD lit_len+32(FP), R3
    MOVD R3, R6
    MOVW R3, R4
    SUBW $1, R4, R4

    MOVW $60, R2
    CMPW R2, R4
    BLT  oneByte
    MOVW $256, R2
    CMPW R2, R4
    BLT  twoBytes

threeBytes:
    MOVD $0xf4, R2
    MOVB R2, 0(R8)
    MOVW R4, 1(R8)
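    // !!! The MOVW above is a 4-byte store, although only the low 2 bytes of
    // R4 are meaningful. The 2 extra bytes land where the literal bytes will
    // go, and are overwritten by the memmove below.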
    ADD  $3, R8, R8
    ADD  $3, R6, R6
    B    memmove

twoBytes:
    MOVD $0xf0, R2
    MOVB R2, 0(R8)
    MOVB R4, 1(R8)
    ADD  $2, R8, R8
    ADD  $2, R6, R6
    B    memmove

oneByte:
    LSLW $2, R4, R4
    MOVB R4, 0(R8)
    ADD  $1, R8, R8
    ADD  $1, R6, R6

memmove:
    MOVD R6, ret+48(FP)
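    // The return value is stored before the CALL below because R6, like
    // every general-purpose register other than RSP and g, is not preserved
    // across a call in Go asm.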

    // copy(dst[i:], lit)
    //
    // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
    // R8, R10 and R3 as arguments.
    MOVD R8, 8(RSP)
    MOVD R10, 16(RSP)
    MOVD R3, 24(RSP)
    CALL runtime·memmove(SB)
    RET

// ----------------------------------------------------------------------------

// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
//  - R3    length
//  - R7    &dst[0]
//  - R8    &dst[i]
//  - R11   offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
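//
// For reference, the two copy element encodings emitted below are, per the
// Snappy format description:
//
//  tagCopy1 (2 bytes): the tag byte packs offset>>8 (3 bits), length-4
//      (3 bits) and the tag 0b01; the second byte is the low 8 bits of
//      offset. Used when length is in [4, 11] and offset < 2048.
//  tagCopy2 (3 bytes): the tag byte packs length-1 (6 bits) and the tag
//      0b10; the next two bytes are offset as a little-endian uint16.
//
// So 0xfe is 63<<2|tagCopy2, a length 64 copy, and 0xee is 59<<2|tagCopy2, a
// length 60 copy.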
TEXT ·emitCopy(SB), NOSPLIT, $0-48
    MOVD dst_base+0(FP), R8
    MOVD R8, R7
    MOVD offset+24(FP), R11
    MOVD length+32(FP), R3

loop0:
    // for length >= 68 { etc }
    MOVW $68, R2
    CMPW R2, R3
    BLT  step1

    // Emit a length 64 copy, encoded as 3 bytes.
    MOVD $0xfe, R2
    MOVB R2, 0(R8)
    MOVW R11, 1(R8)
    ADD  $3, R8, R8
    SUB  $64, R3, R3
    B    loop0

step1:
    // if length > 64 { etc }
    MOVD $64, R2
    CMP  R2, R3
    BLE  step2

    // Emit a length 60 copy, encoded as 3 bytes.
    MOVD $0xee, R2
    MOVB R2, 0(R8)
    MOVW R11, 1(R8)
    ADD  $3, R8, R8
    SUB  $60, R3, R3

step2:
    // if length >= 12 || offset >= 2048 { goto step3 }
    MOVD $12, R2
    CMP  R2, R3
    BGE  step3
    MOVW $2048, R2
    CMPW R2, R11
    BGE  step3

    // Emit the remaining copy, encoded as 2 bytes.
    MOVB R11, 1(R8)
    LSRW $3, R11, R11
    AND  $0xe0, R11, R11
    SUB  $4, R3, R3
    AND  $0xff, R3, R3
    LSLW $2, R3, R3
    ORRW R3, R11, R11
    ORRW $1, R11, R11
    MOVB R11, 0(R8)
    ADD  $2, R8, R8
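    // For example, offset=10 and length=5 encode as the two bytes 0x05 0x0a:
    // the tag byte is (10>>8)<<5 | (5-4)<<2 | tagCopy1 = 0b00000101, and the
    // second byte holds the low 8 bits of the offset.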

    // Return the number of bytes written.
    SUB  R7, R8, R8
    MOVD R8, ret+40(FP)
    RET

step3:
    // Emit the remaining copy, encoded as 3 bytes.
    SUB  $1, R3, R3
    AND  $0xff, R3, R3
    LSLW $2, R3, R3
    ORRW $2, R3, R3
    MOVB R3, 0(R8)
    MOVW R11, 1(R8)
    ADD  $3, R8, R8

    // Return the number of bytes written.
    SUB  R7, R8, R8
    MOVD R8, ret+40(FP)
    RET

// ----------------------------------------------------------------------------

// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
//  - R6    &src[0]
//  - R7    &src[j]
//  - R13   &src[len(src) - 8]
//  - R14   &src[len(src)]
//  - R15   &src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
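//
// For reference, the pure Go code this implements, as in encode_other.go:
//
//  func extendMatch(src []byte, i, j int) int {
//      for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
//      }
//      return j
//  }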
TEXT ·extendMatch(SB), NOSPLIT, $0-48
    MOVD src_base+0(FP), R6
    MOVD src_len+8(FP), R14
    MOVD i+24(FP), R15
    MOVD j+32(FP), R7
    ADD  R6, R14, R14
    ADD  R6, R15, R15
    ADD  R6, R7, R7
    MOVD R14, R13
    SUB  $8, R13, R13

cmp8:
    // As long as we are 8 or more bytes before the end of src, we can load and
    // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
    CMP  R13, R7
    BHI  cmp1
    MOVD (R15), R3
    MOVD (R7), R4
    CMP  R4, R3
    BNE  bsf
    ADD  $8, R15, R15
    ADD  $8, R7, R7
    B    cmp8

bsf:
    // If those 8 bytes were not equal, XOR the two 8 byte values, and return
    // the index of the first byte that differs.
    // RBIT reverses the bit order, then CLZ counts the leading zeros, the
    // combination of which finds the least significant bit which is set.
    // The arm64 architecture is little-endian, and the shift by 3 converts
    // a bit index to a byte index.
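    // For example, if the two loads first differ at byte 3, the low 24 bits
    // of the XOR are zero, RBIT+CLZ yields a bit index in [24, 31], and the
    // shift right by 3 gives the byte index 3.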
    EOR  R3, R4, R4
    RBIT R4, R4
    CLZ  R4, R4
    ADD  R4>>3, R7, R7

    // Convert from &src[ret] to ret.
    SUB  R6, R7, R7
    MOVD R7, ret+40(FP)
    RET

cmp1:
    // In src's tail, compare 1 byte at a time.
    CMP  R7, R14
    BLS  extendMatchEnd
    MOVB (R15), R3
    MOVB (R7), R4
    CMP  R4, R3
    BNE  extendMatchEnd
    ADD  $1, R15, R15
    ADD  $1, R7, R7
    B    cmp1

extendMatchEnd:
    // Convert from &src[ret] to ret.
    SUB  R6, R7, R7
    MOVD R7, ret+40(FP)
    RET

// ----------------------------------------------------------------------------

// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
//  - R3    .       .
//  - R4    .       .
//  - R5    64      shift
//  - R6    72      &src[0], tableSize
//  - R7    80      &src[s]
//  - R8    88      &dst[d]
//  - R9    96      sLimit
//  - R10   .       &src[nextEmit]
//  - R11   104     prevHash, currHash, nextHash, offset
//  - R12   112     &src[base], skip
//  - R13   .       &src[nextS], &src[len(src) - 8]
//  - R14   .       len(src), bytesBetweenHashLookups, &src[len(src)], x
//  - R15   120     candidate
//  - R16   .       hash constant, 0x1e35a7bd
//  - R17   .       &table
//  - .     128     table
//
// The second column (64, 72, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 64 + 64 = 32896.
TEXT ·encodeBlock(SB), 0, $32896-56
    MOVD dst_base+0(FP), R8
    MOVD src_base+24(FP), R7
    MOVD src_len+32(FP), R14

    // shift, tableSize := uint32(32-8), 1<<8
    MOVD  $24, R5
    MOVD  $256, R6
    MOVW  $0xa7bd, R16
    MOVKW $(0x1e35<<16), R16
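    // R16 now holds the hash constant 0x1e35a7bd, built in two 16-bit halves.
    // The hash function, as in encode_other.go, is
    // hash(u, shift) = (u * 0x1e35a7bd) >> shift.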

calcShift:
    // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
    //     shift--
    // }
    MOVD $16384, R2
    CMP  R2, R6
    BGE  varTable
    CMP  R14, R6
    BGE  varTable
    SUB  $1, R5, R5
    LSL  $1, R6, R6
    B    calcShift

varTable:
    // var table [maxTableSize]uint16
    //
    // In the asm code, unlike the Go code, we can zero-initialize only the
    // first tableSize elements. Each uint16 element is 2 bytes and each VST1
    // writes 64 bytes, so we can do only tableSize/32 writes instead of the
    // 512 writes that would zero-initialize all of table's 32768 bytes.
    // This clear could overrun the first tableSize elements, but it won't
    // overrun the allocated stack size.
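    // For example, the minimum tableSize of 256 entries (512 bytes) needs
    // only 256/32 = 8 of these 64-byte stores, versus 512 for the full table.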
    ADD  $128, RSP, R17
    MOVD R17, R4

    // !!! R6 = &src[tableSize]
    ADD R6<<1, R17, R6

    // Zero the SIMD registers used to clear the table.
    VEOR V0.B16, V0.B16, V0.B16
    VEOR V1.B16, V1.B16, V1.B16
    VEOR V2.B16, V2.B16, V2.B16
    VEOR V3.B16, V3.B16, V3.B16

memclr:
    VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R4)
    CMP    R4, R6
    BHI    memclr

    // !!! R6 = &src[0]
    MOVD R7, R6

    // sLimit := len(src) - inputMargin
    MOVD R14, R9
    SUB  $15, R9, R9
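    // (inputMargin is the constant 16 - 1 defined in encode.go, hence the
    // SUB $15 above.)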

    // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't
    // change for the rest of the function.
    MOVD R5, 64(RSP)
    MOVD R6, 72(RSP)
    MOVD R9, 96(RSP)

    // nextEmit := 0
    MOVD R6, R10

    // s := 1
    ADD $1, R7, R7

    // nextHash := hash(load32(src, s), shift)
    MOVW 0(R7), R11
    MULW R16, R11, R11
    LSRW R5, R11, R11

outer:
    // for { etc }

    // skip := 32
    MOVD $32, R12

    // nextS := s
    MOVD R7, R13

    // candidate := 0
    MOVD $0, R15

inner0:
    // for { etc }

    // s := nextS
    MOVD R13, R7

    // bytesBetweenHashLookups := skip >> 5
    MOVD R12, R14
    LSR  $5, R14, R14

    // nextS = s + bytesBetweenHashLookups
    ADD R14, R13, R13

    // skip += bytesBetweenHashLookups
    ADD R14, R12, R12
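    // Since skip starts at 32, bytesBetweenHashLookups starts at 1 and grows
    // as misses accumulate, so the scan accelerates through incompressible
    // input rather than hashing every byte.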

    // if nextS > sLimit { goto emitRemainder }
    MOVD R13, R3
    SUB  R6, R3, R3
    CMP  R9, R3
    BHI  emitRemainder

    // candidate = int(table[nextHash])
    MOVHU 0(R17)(R11<<1), R15

    // table[nextHash] = uint16(s)
    MOVD R7, R3
    SUB  R6, R3, R3
    MOVH R3, 0(R17)(R11<<1)

    // nextHash = hash(load32(src, nextS), shift)
    MOVW 0(R13), R11
    MULW R16, R11, R11
    LSRW R5, R11, R11

    // if load32(src, s) != load32(src, candidate) { continue } break
    MOVW 0(R7), R3
    MOVW (R6)(R15), R4
    CMPW R4, R3
    BNE  inner0

fourByteMatch:
    // As per the encode_other.go code:
    //
    // A 4-byte match has been found. We'll later see etc.

    // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
    // on inputMargin in encode.go.
    MOVD R7, R3
    SUB  R10, R3, R3
    MOVD $16, R2
    CMP  R2, R3
    BLE  emitLiteralFastPath

    // ----------------------------------------
    // Begin inline of the emitLiteral call.
    //
    // d += emitLiteral(dst[d:], src[nextEmit:s])
    MOVW R3, R4
    SUBW $1, R4, R4

    MOVW $60, R2
    CMPW R2, R4
    BLT  inlineEmitLiteralOneByte
    MOVW $256, R2
    CMPW R2, R4
    BLT  inlineEmitLiteralTwoBytes

inlineEmitLiteralThreeBytes:
    MOVD $0xf4, R1
    MOVB R1, 0(R8)
    MOVW R4, 1(R8)
    ADD  $3, R8, R8
    B    inlineEmitLiteralMemmove

inlineEmitLiteralTwoBytes:
    MOVD $0xf0, R1
    MOVB R1, 0(R8)
    MOVB R4, 1(R8)
    ADD  $2, R8, R8
    B    inlineEmitLiteralMemmove

inlineEmitLiteralOneByte:
    LSLW $2, R4, R4
    MOVB R4, 0(R8)
    ADD  $1, R8, R8

inlineEmitLiteralMemmove:
    // Spill local variables (registers) onto the stack; call; unspill.
    //
    // copy(dst[i:], lit)
    //
    // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
    // R8, R10 and R3 as arguments.
    MOVD R8, 8(RSP)
    MOVD R10, 16(RSP)
    MOVD R3, 24(RSP)

    // Finish the "d +=" part of "d += emitLiteral(etc)".
    ADD R3, R8, R8

    MOVD R7, 80(RSP)
    MOVD R8, 88(RSP)
    MOVD R15, 120(RSP)
    CALL runtime·memmove(SB)
    MOVD 64(RSP), R5
    MOVD 72(RSP), R6
    MOVD 80(RSP), R7
    MOVD 88(RSP), R8
    MOVD 96(RSP), R9
    MOVD 120(RSP), R15

    // !!! Re-materialize R16 (the hash constant) and R17 (&table): as the
    // linker-reserved temporaries IP0 and IP1, they are not preserved across
    // the CALL.
    ADD   $128, RSP, R17
    MOVW  $0xa7bd, R16
    MOVKW $(0x1e35<<16), R16
    B     inner1

inlineEmitLiteralEnd:
    // End inline of the emitLiteral call.
    // ----------------------------------------

emitLiteralFastPath:
    // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
    MOVB R3, R4
    SUBW $1, R4, R4
    AND  $0xff, R4, R4
    LSLW $2, R4, R4
    MOVB R4, (R8)
    ADD  $1, R8, R8

    // !!! Implement the copy from lit to dst as a 16-byte load and store.
    // (Encode's documentation says that dst and src must not overlap.)
    //
    // This always copies 16 bytes, instead of only len(lit) bytes, but that's
    // OK. Subsequent iterations will fix up the overrun.
    //
    // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
    // 16-byte loads and stores. This technique probably wouldn't be as
    // effective on architectures that are fussier about alignment.
    VLD1 0(R10), [V0.B16]
    VST1 [V0.B16], 0(R8)
    ADD  R3, R8, R8

inner1:
    // for { etc }

    // base := s
    MOVD R7, R12

    // !!! offset := base - candidate
    MOVD R12, R11
    SUB  R15, R11, R11
    SUB  R6, R11, R11

    // ----------------------------------------
    // Begin inline of the extendMatch call.
    //
    // s = extendMatch(src, candidate+4, s+4)

    // !!! R14 = &src[len(src)]
    MOVD src_len+32(FP), R14
    ADD  R6, R14, R14

    // !!! R13 = &src[len(src) - 8]
    MOVD R14, R13
    SUB  $8, R13, R13

    // !!! R15 = &src[candidate + 4]
    ADD $4, R15, R15
    ADD R6, R15, R15

    // !!! s += 4
    ADD $4, R7, R7

inlineExtendMatchCmp8:
    // As long as we are 8 or more bytes before the end of src, we can load and
    // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
    CMP  R13, R7
    BHI  inlineExtendMatchCmp1
    MOVD (R15), R3
    MOVD (R7), R4
    CMP  R4, R3
    BNE  inlineExtendMatchBSF
    ADD  $8, R15, R15
    ADD  $8, R7, R7
    B    inlineExtendMatchCmp8

inlineExtendMatchBSF:
    // If those 8 bytes were not equal, XOR the two 8 byte values, and return
    // the index of the first byte that differs.
    // RBIT reverses the bit order, then CLZ counts the leading zeros, the
    // combination of which finds the least significant bit which is set.
    // The arm64 architecture is little-endian, and the shift by 3 converts
    // a bit index to a byte index.
    EOR  R3, R4, R4
    RBIT R4, R4
    CLZ  R4, R4
    ADD  R4>>3, R7, R7
    B    inlineExtendMatchEnd

inlineExtendMatchCmp1:
    // In src's tail, compare 1 byte at a time.
    CMP  R7, R14
    BLS  inlineExtendMatchEnd
    MOVB (R15), R3
    MOVB (R7), R4
    CMP  R4, R3
    BNE  inlineExtendMatchEnd
    ADD  $1, R15, R15
    ADD  $1, R7, R7
    B    inlineExtendMatchCmp1

inlineExtendMatchEnd:
    // End inline of the extendMatch call.
    // ----------------------------------------

    // ----------------------------------------
    // Begin inline of the emitCopy call.
    //
    // d += emitCopy(dst[d:], base-candidate, s-base)

    // !!! length := s - base
    MOVD R7, R3
    SUB  R12, R3, R3

inlineEmitCopyLoop0:
    // for length >= 68 { etc }
    MOVW $68, R2
    CMPW R2, R3
    BLT  inlineEmitCopyStep1

    // Emit a length 64 copy, encoded as 3 bytes.
    MOVD $0xfe, R1
    MOVB R1, 0(R8)
    MOVW R11, 1(R8)
    ADD  $3, R8, R8
    SUBW $64, R3, R3
    B    inlineEmitCopyLoop0

inlineEmitCopyStep1:
    // if length > 64 { etc }
    MOVW $64, R2
    CMPW R2, R3
    BLE  inlineEmitCopyStep2

    // Emit a length 60 copy, encoded as 3 bytes.
    MOVD $0xee, R1
    MOVB R1, 0(R8)
    MOVW R11, 1(R8)
    ADD  $3, R8, R8
    SUBW $60, R3, R3

inlineEmitCopyStep2:
    // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
    MOVW $12, R2
    CMPW R2, R3
    BGE  inlineEmitCopyStep3
    MOVW $2048, R2
    CMPW R2, R11
    BGE  inlineEmitCopyStep3

    // Emit the remaining copy, encoded as 2 bytes.
    MOVB R11, 1(R8)
    LSRW $8, R11, R11
    LSLW $5, R11, R11
    SUBW $4, R3, R3
    AND  $0xff, R3, R3
    LSLW $2, R3, R3
    ORRW R3, R11, R11
    ORRW $1, R11, R11
    MOVB R11, 0(R8)
    ADD  $2, R8, R8
    B    inlineEmitCopyEnd

inlineEmitCopyStep3:
    // Emit the remaining copy, encoded as 3 bytes.
    SUBW $1, R3, R3
    LSLW $2, R3, R3
    ORRW $2, R3, R3
    MOVB R3, 0(R8)
    MOVW R11, 1(R8)
    ADD  $3, R8, R8

inlineEmitCopyEnd:
    // End inline of the emitCopy call.
    // ----------------------------------------

    // nextEmit = s
    MOVD R7, R10

    // if s >= sLimit { goto emitRemainder }
    MOVD R7, R3
    SUB  R6, R3, R3
    CMP  R3, R9
    BLS  emitRemainder

    // As per the encode_other.go code:
    //
    // We could immediately etc.

    // x := load64(src, s-1)
    MOVD -1(R7), R14
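    // The single 8-byte load above supplies all three hash inputs below:
    // uint32(x), uint32(x>>8) and uint32(x>>16) are the 4-byte values at
    // s-1, s and s+1, so no further loads from src are needed.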

    // prevHash := hash(uint32(x>>0), shift)
    MOVW R14, R11
    MULW R16, R11, R11
    LSRW R5, R11, R11

    // table[prevHash] = uint16(s-1)
    MOVD R7, R3
    SUB  R6, R3, R3
    SUB  $1, R3, R3
    MOVHU R3, 0(R17)(R11<<1)

    // currHash := hash(uint32(x>>8), shift)
    LSR  $8, R14, R14
    MOVW R14, R11
    MULW R16, R11, R11
    LSRW R5, R11, R11

    // candidate = int(table[currHash])
    MOVHU 0(R17)(R11<<1), R15

    // table[currHash] = uint16(s)
    ADD   $1, R3, R3
    MOVHU R3, 0(R17)(R11<<1)

    // if uint32(x>>8) == load32(src, candidate) { continue }
    MOVW (R6)(R15), R4
    CMPW R4, R14
    BEQ  inner1

    // nextHash = hash(uint32(x>>16), shift)
    LSR  $8, R14, R14
    MOVW R14, R11
    MULW R16, R11, R11
    LSRW R5, R11, R11

    // s++
    ADD $1, R7, R7

    // break out of the inner1 for loop, i.e. continue the outer loop.
    B outer

emitRemainder:
    // if nextEmit < len(src) { etc }
    MOVD src_len+32(FP), R3
    ADD  R6, R3, R3
    CMP  R3, R10
    BEQ  encodeBlockEnd

    // d += emitLiteral(dst[d:], src[nextEmit:])
    //
    // Push args.
    MOVD R8, 8(RSP)
    MOVD $0, 16(RSP)   // Unnecessary, as the callee ignores it, but conservative.
    MOVD $0, 24(RSP)   // Unnecessary, as the callee ignores it, but conservative.
    MOVD R10, 32(RSP)
    SUB  R10, R3, R3
    MOVD R3, 40(RSP)
    MOVD R3, 48(RSP)   // Unnecessary, as the callee ignores it, but conservative.

    // Spill local variables (registers) onto the stack; call; unspill.
    MOVD R8, 88(RSP)
    CALL ·emitLiteral(SB)
    MOVD 88(RSP), R8

    // Finish the "d +=" part of "d += emitLiteral(etc)".
    MOVD 56(RSP), R1
    ADD  R1, R8, R8

encodeBlockEnd:
    MOVD dst_base+0(FP), R3
    SUB  R3, R8, R8
    MOVD R8, d+48(FP)
    RET