// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

package codec

// Contains code shared by both encode and decode.

// Some shared ideas around encoding/decoding
// ------------------------------------------
//
// If an interface{} is passed, we first do a type assertion to see if it is
// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
//
// If we start with a reflect.Value, we are already in reflect.Value land and
// will try to grab the function for the underlying Type and directly call that function.
// This is more performant than calling reflect.Value.Interface().
//
// This still helps us bypass many layers of reflection, and gives the best performance.
//
// Containers
// ------------
// Containers in the stream are either associative arrays (key-value pairs) or
// regular arrays (indexed by incrementing integers).
//
// Some streams support indefinite-length containers, and use a breaking
// byte-sequence to denote that the container has come to an end.
//
// Some streams are also text-based, and use explicit separators to denote the
// end/beginning of different values.
//
// Philosophy
// ------------
// On decode, this codec will update containers appropriately:
// - If struct, update fields from stream into fields of struct.
//   If a field in the stream is not found in the struct, handle it appropriately (based on option).
//   If a struct field has no corresponding value in the stream, leave it AS IS.
//   If nil in stream, set value to nil/zero value.
// - If map, update map from stream.
//   If the stream value is nil, set the map to nil.
// - If slice, try to update up to the length of the array in the stream.
//   If the container length is less than the stream array length,
//   and the container cannot be expanded, it is handled (based on option).
//   This means you can decode a 4-element stream array into a 1-element array.
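//
// For example (a hedged sketch; streamBytes and handle are assumed to exist):
//
//	var a [1]int
//	// suppose the stream contains the array [10, 20, 30, 40]
//	dec := NewDecoderBytes(streamBytes, handle)
//	dec.MustDecode(&a)
//	// a[0] == 10; the excess stream elements are handled based on option,
//	// since the fixed-size [1]int cannot be expanded.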
//
// ------------------------------------
// On encode, the user can specify omitEmpty. This means that the value will be omitted
// if it is the zero value. A problem may then occur during decode, as omitted values do not
// affect the value being decoded into. This means that if decoding into a struct with an
// int field whose current value is 5, and the field is omitted in the stream, then after
// decoding, the value will still be 5 (not 0).
// omitEmpty only works if you guarantee that you always decode into zero-values.
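//
// A hedged sketch of this pitfall (T and its values are hypothetical):
//
//	type T struct {
//		N int `codec:"n,omitempty"`
//	}
//	// encoding T{N: 0} omits the "n" field from the stream entirely.
//	dst := T{N: 5}
//	// decoding that stream into &dst leaves dst.N == 5, not 0,
//	// because nothing in the stream references the field.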
//
// ------------------------------------
// We could have truncated a map to remove keys not available in the stream,
// or set values in the struct which are not in the stream to their zero values.
// We decided against it because there is no efficient way to do it.
// We may introduce it as an option later.
// However, that will require enabling it for both runtime and code generation modes.
//
// To support truncate, we need to do 2 passes over the container:
//
// map:
// - first collect all keys (e.g. in k1)
// - for each key in the stream, mark in k1 that the key should not be removed
// - after updating the map, do a second pass and call delete for all keys in k1 which are not marked
//
// struct:
// - for each field, track the *typeInfo s1
// - iterate through all s1, and for each one not marked, set its value to zero
// - this involves checking the possible anonymous fields which are nil ptrs.
//   too much work.
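//
// For illustration only, the (unimplemented) map truncation pass might look like:
//
//	seen := make(map[interface{}]bool, len(m)) // k1: keys confirmed by the stream
//	for streamHasNext() {                      // streamHasNext/decodeKey/decodeValue are hypothetical
//		k := decodeKey()
//		m[k] = decodeValue()
//		seen[k] = true
//	}
//	for k := range m { // second pass: delete keys the stream did not mention
//		if !seen[k] {
//			delete(m, k)
//		}
//	}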
//
// ------------------------------------------
// Error handling is done within the library using panic.
//
// This way, the code doesn't have to keep checking if an error has happened,
// and we don't have to keep sending the error value along with each call
// or storing it in the En|Decoder and checking it constantly along the way.
//
// We considered storing the error in the En|Decoder:
// - once it has its err field set, it cannot be used again.
// - panicking will be optional, controlled by a const flag.
// - code should always check the error first and return early.
//
// We eventually decided against it, as it makes the code clumsier to always
// check for these error conditions.
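//
// A hedged sketch of the resulting pattern at the library boundary
// (the conversion helper shown here is illustrative):
//
//	func (e *Encoder) Encode(v interface{}) (err error) {
//		defer func() {
//			if r := recover(); r != nil {
//				err = toError(r) // hypothetical: convert the panic value to an error
//			}
//		}()
//		e.encode(v) // internal code panics on error instead of returning it
//		return
//	}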
//
// ------------------------------------------
// We use sync.Pool only to aid long-lived objects shared across multiple goroutines.
// Encoder, Decoder, enc|decDriver, reader|writer, etc do not fall into this bucket.
//
// Also, GC is much better now, eliminating some of the reasons to use a shared pool structure.
// Instead, the short-lived objects use free-lists that live as long as the object exists.
//
// ------------------------------------------
// Performance is affected by the following:
// - Bounds checking
// - Inlining
// - Pointer chasing
// This package tries hard to manage the performance impact of these.
//
// ------------------------------------------
// To alleviate the performance cost of pointer-chasing:
// - Prefer non-pointer values in a struct field
// - Refer to these directly within helper classes
//   e.g. json.go refers directly to d.d.decRd
//
// We made the changes to embed En/Decoder in en/decDriver,
// but we had to explicitly reference the fields as opposed to using a function
// to get the better performance that we were looking for.
// For example, we explicitly call d.d.decRd.fn() instead of d.d.r().fn().
//
// ------------------------------------------
// Bounds Checking
// - Allow bytesDecReader to incur a "bounds check error", and
//   recover that as an io.EOF.
//   This allows the bounds check branch to always be taken by the branch predictor,
//   giving better performance (in theory), while ensuring that the code is shorter.
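//
// A minimal sketch of that pattern, assuming a reader over a []byte with fields b and c:
//
//	func (z *bytesDecReader) readn1() (v uint8) {
//		v = z.b[z.c] // may panic with index-out-of-range at end of stream;
//		z.c++        // a recover() higher up translates that panic into io.EOF
//		return
//	}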
//
// ------------------------------------------
// Escape Analysis
// - Prefer to return non-pointers if the value is used right away.
//   Newly allocated values returned as pointers will be heap-allocated as they escape.
//
// Prefer functions and methods that
// - take no parameters and
// - return no results and
// - do not allocate.
// These are optimized by the runtime.
// For example, in json, we have dedicated functions for ReadMapElemKey, etc,
// which do not delegate to readDelim, as readDelim takes a parameter.
// The difference in runtime was as much as 5%.
//
// ------------------------------------------
// Handling Nil
// - In dynamic (reflection) mode, decodeValue and encodeValue handle nil at the top.
// - Consequently, methods used with them as a parent in the chain e.g. kXXX
//   methods do not handle nil.
// - Fastpath methods also do not handle nil.
//   The switch called in (en|de)code(...) handles it, so the dependent calls don't have to.
// - codecgen will also handle nil before calling into the library for further work.
//
// ------------------------------------------
// Passing reflect.Kind to functions that take a reflect.Value
// - Note that reflect.Value.Kind() is very cheap, as it's fundamentally a binary AND of 2 numbers.
//
// ------------------------------------------
// Transient values during decoding
//
// With reflection, the stack is not used. Consequently, values which may be stack-allocated in
// normal use will cause a heap allocation when using reflection.
//
// There are cases where we know that a value is transient, and we just need to decode into it
// temporarily so we can right away use its value for something else.
//
// In these situations, we can elide the heap allocation by being deliberate with the use of
// pre-cached scratch memory or a scratch value.
//
// We use this in the following situations:
// - decode into a temp value x, and then set x into an interface
// - decode into a temp value, for use as a map key, to look up a map value
// - decode into a temp value, for use as a map value, to set into a map
// - decode into a temp value, for sending into a channel
//
// By definition, transient values are NEVER pointer-shaped values,
// like pointer, func, map, chan. Using transient for pointer-shaped values
// can lead to data corruption when the GC tries to follow what it saw as a pointer at one point.
//
// In general, transient values are values which can be decoded as an atomic value
// using a single call to the decDriver. This naturally includes bool and the numeric types.
//
// Note that some values which "contain" pointers, specifically string and slice,
// can also be transient. In the case of string, it is decoded as an atomic value.
// In the case of a slice, decoding into its elements always uses an addressable
// value in memory i.e. we grow the slice, and then decode directly into the memory
// address corresponding to that index in the slice.
//
// To handle these string and slice values, we have to use a scratch value
// which has the same shape as a string or slice.
//
// Consequently, the full range of types which can be transient is:
// - numbers
// - bool
// - string
// - slice
//
// But for string and slice, we MUST use a scratch space with the data element
// defined as an unsafe.Pointer to start with.
//
// We have to be careful with maps. Because we iterate map keys and values during a range,
// we must have 2 variants of the scratch space/value, for map keys and map values separately.
//
// These are the TransientAddrK and TransientAddr2K methods of decPerType.
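//
// A hedged sketch of the underlying idea (the layout below is illustrative;
// the actual scratch lives in decPerType, reached via TransientAddrK/TransientAddr2K):
//
//	type transientScratch struct {
//		p unsafe.Pointer // data word of a string/slice header; GC sees a real pointer slot
//		n int            // length
//		c int            // capacity (unused for string)
//	}
//
// Because the first word is declared as an unsafe.Pointer, the GC always treats it
// as a pointer while a transient string or slice is staged there.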

import (
	"encoding"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math"
	"reflect"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
	"unicode/utf8"
)

// if debugging is true, then
//   - within Encode/Decode, do not recover from panics
//   - etc
//
// Note: Negative tests that check for errors will fail, so only use this
// when debugging, and preferably run only one test at a time.
//
// Note: RPC tests depend on getting the error from an Encode/Decode call.
// Consequently, they will always fail if debugging = true.
const debugging = false

const (
	// containerLenUnknown is the length returned from Read(Map|Array)Len
	// when a format doesn't know it a priori.
	// For example, json doesn't pre-determine the length of a container (sequence/map).
	containerLenUnknown = -1

	// containerLenNil is the length returned from Read(Map|Array)Len
	// when a 'nil' was encountered in the stream.
	containerLenNil = math.MinInt32

	// [N]byte is handled by converting to []byte first,
	// and sending to the dedicated fast-path function for []byte.
	//
	// Code exists in case our understanding is wrong.
	// Keep the defensive code behind this flag, so we can remove/hide it if needed.
	// For now, we enable the defensive code (i.e. set it to true).
	handleBytesWithinKArray = true

	// Support encoding.(Binary|Text)(Unm|M)arshaler.
	// This constant flag will enable or disable it.
	supportMarshalInterfaces = true

	// bytesFreeListNoCache is used for debugging, when we want to skip using a cache of []byte.
	bytesFreeListNoCache = false

	// size of the cacheline: defaulting to value for archs: amd64, arm64, 386
	// should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
	cacheLineSize = 64

	wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize: 64 on 64-bit archs, 32 on 32-bit archs
	wordSize     = wordSizeBits / 8

	// MARKER: determines whether to skip calling fastpath(En|De)codeTypeSwitch.
	// Calling the fastpath switch in encode() or decode() could be redundant,
	// as we still have to introspect it again within fnLoad
	// to determine the function to use for values of that type.
	skipFastpathTypeSwitchInDirectCall = false
)

const cpu32Bit = ^uint(0)>>32 == 0

type rkind byte

const (
	rkindPtr    = rkind(reflect.Ptr)
	rkindString = rkind(reflect.String)
	rkindChan   = rkind(reflect.Chan)
)

type mapKeyFastKind uint8

const (
	mapKeyFastKind32 = iota + 1
	mapKeyFastKind32ptr
	mapKeyFastKind64
	mapKeyFastKind64ptr
	mapKeyFastKindStr
)

var (
	// use a global mutex to ensure each Handle is initialized.
	// We do this, so we don't have to store the basicHandle mutex
	// directly in BasicHandle, so it can be shallow-copied.
	handleInitMu sync.Mutex

	must mustHdl
	halt panicHdl

	digitCharBitset      bitset256
	numCharBitset        bitset256
	whitespaceCharBitset bitset256
	asciiAlphaNumBitset  bitset256

	// numCharWithExpBitset64 bitset64
	// numCharNoExpBitset64   bitset64
	// whitespaceCharBitset64 bitset64
	//
	// // hasptrBitset sets bit for all kinds which always have internal pointers
	// hasptrBitset bitset32

	// refBitset sets bit for all kinds which are direct internal references
	refBitset bitset32

	// isnilBitset sets bit for all kinds which can be compared to nil
	isnilBitset bitset32

	// numBoolBitset sets bit for all number and bool kinds
	numBoolBitset bitset32

	// numBoolStrSliceBitset sets bits for all kinds which are numbers, bool, strings and slices
	numBoolStrSliceBitset bitset32

	// scalarBitset sets bit for all kinds which are scalars/primitives and thus immutable
	scalarBitset bitset32

	mapKeyFastKindVals [32]mapKeyFastKind

	// codecgen is set to true by codecgen, so that tests, etc can use this information as needed.
	codecgen bool

	oneByteArr    [1]byte
	zeroByteSlice = oneByteArr[:0:0]

	eofReader devNullReader
)

var (
	errMapTypeNotMapKind     = errors.New("MapType MUST be of Map Kind")
	errSliceTypeNotSliceKind = errors.New("SliceType MUST be of Slice Kind")

	errExtFnWriteExtUnsupported   = errors.New("BytesExt.WriteExt is not supported")
	errExtFnReadExtUnsupported    = errors.New("BytesExt.ReadExt is not supported")
	errExtFnConvertExtUnsupported = errors.New("InterfaceExt.ConvertExt is not supported")
	errExtFnUpdateExtUnsupported  = errors.New("InterfaceExt.UpdateExt is not supported")

	errPanicUndefined = errors.New("panic: undefined error")

	errHandleInited = errors.New("cannot modify initialized Handle")

	errNoFormatHandle = errors.New("no handle (cannot identify format)")
)

var pool4tiload = sync.Pool{
	New: func() interface{} {
		return &typeInfoLoad{
			etypes:   make([]uintptr, 0, 4),
			sfis:     make([]structFieldInfo, 0, 4),
			sfiNames: make(map[string]uint16, 4),
		}
	},
}

func init() {
	xx := func(f mapKeyFastKind, k ...reflect.Kind) {
		for _, v := range k {
			mapKeyFastKindVals[byte(v)&31] = f // 'v % 32' is equal to 'v & 31'
		}
	}

	var f mapKeyFastKind

	f = mapKeyFastKind64
	if wordSizeBits == 32 {
		f = mapKeyFastKind32
	}
	xx(f, reflect.Int, reflect.Uint, reflect.Uintptr)

	f = mapKeyFastKind64ptr
	if wordSizeBits == 32 {
		f = mapKeyFastKind32ptr
	}
	xx(f, reflect.Ptr)

	xx(mapKeyFastKindStr, reflect.String)
	xx(mapKeyFastKind32, reflect.Uint32, reflect.Int32, reflect.Float32)
	xx(mapKeyFastKind64, reflect.Uint64, reflect.Int64, reflect.Float64)

	numBoolBitset.
		set(byte(reflect.Bool)).
		set(byte(reflect.Int)).
		set(byte(reflect.Int8)).
		set(byte(reflect.Int16)).
		set(byte(reflect.Int32)).
		set(byte(reflect.Int64)).
		set(byte(reflect.Uint)).
		set(byte(reflect.Uint8)).
		set(byte(reflect.Uint16)).
		set(byte(reflect.Uint32)).
		set(byte(reflect.Uint64)).
		set(byte(reflect.Uintptr)).
		set(byte(reflect.Float32)).
		set(byte(reflect.Float64)).
		set(byte(reflect.Complex64)).
		set(byte(reflect.Complex128))

	numBoolStrSliceBitset = numBoolBitset

	numBoolStrSliceBitset.
		set(byte(reflect.String)).
		set(byte(reflect.Slice))

	scalarBitset = numBoolBitset

	scalarBitset.
		set(byte(reflect.String))

	// MARKER: reflect.Array is not a scalar, as its contents can be modified.

	refBitset.
		set(byte(reflect.Map)).
		set(byte(reflect.Ptr)).
		set(byte(reflect.Func)).
		set(byte(reflect.Chan)).
		set(byte(reflect.UnsafePointer))

	isnilBitset = refBitset

	isnilBitset.
		set(byte(reflect.Interface)).
		set(byte(reflect.Slice))

	// hasptrBitset = isnilBitset
	//
	// hasptrBitset.
	// 	set(byte(reflect.String))

	for i := byte(0); i <= utf8.RuneSelf; i++ {
		if (i >= '0' && i <= '9') || (i >= 'a' && i <= 'z') || (i >= 'A' && i <= 'Z') {
			asciiAlphaNumBitset.set(i)
		}
		switch i {
		case ' ', '\t', '\r', '\n':
			whitespaceCharBitset.set(i)
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			digitCharBitset.set(i)
			numCharBitset.set(i)
		case '.', '+', '-':
			numCharBitset.set(i)
		case 'e', 'E':
			numCharBitset.set(i)
		}
	}
}

// driverStateManager supports the runtime state of an (enc|dec)Driver.
//
// During a side(En|De)code call, we can capture the state, reset it,
// and then restore it later to continue the primary encoding/decoding.
type driverStateManager interface {
	resetState()
	captureState() interface{}
	restoreState(state interface{})
}

type bdAndBdread struct {
	bdRead bool
	bd     byte
}

func (x bdAndBdread) captureState() interface{}   { return x }
func (x *bdAndBdread) resetState()                { x.bd, x.bdRead = 0, false }
func (x *bdAndBdread) reset()                     { x.resetState() }
func (x *bdAndBdread) restoreState(v interface{}) { *x = v.(bdAndBdread) }
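
// A hedged usage sketch of the capture/reset/restore cycle around a side decode
// (drv and sideDecode are illustrative names, not actual library identifiers):
//
//	st := drv.captureState() // save the in-flight state, e.g. a bdAndBdread value
//	drv.resetState()         // give the side decode a clean slate
//	sideDecode(drv)          // run the nested decode with the same driver
//	drv.restoreState(st)     // resume the primary decode where it left off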

type clsErr struct {
	err    error // error on closing
	closed bool  // is it closed?
}

type charEncoding uint8

const (
	_ charEncoding = iota // make 0 unset
	cUTF8
	cUTF16LE
	cUTF16BE
	cUTF32LE
	cUTF32BE
	// Deprecated: not a true char encoding value
	cRAW charEncoding = 255
)

// valueType is the stream type
type valueType uint8

const (
	valueTypeUnset valueType = iota
	valueTypeNil
	valueTypeInt
	valueTypeUint
	valueTypeFloat
	valueTypeBool
	valueTypeString
	valueTypeSymbol
	valueTypeBytes
	valueTypeMap
	valueTypeArray
	valueTypeTime
	valueTypeExt
	// valueTypeInvalid = 0xff
)

var valueTypeStrings = [...]string{
	"Unset",
	"Nil",
	"Int",
	"Uint",
	"Float",
	"Bool",
	"String",
	"Symbol",
	"Bytes",
	"Map",
	"Array",
	"Timestamp",
	"Ext",
}

func (x valueType) String() string {
	if int(x) < len(valueTypeStrings) {
		return valueTypeStrings[x]
	}
	return strconv.FormatInt(int64(x), 10)
}

// note that containerMapStart and containerArrayStart are not sent.
// This is because the ReadXXXStart and EncodeXXXStart calls already do these.
type containerState uint8

const (
	_ containerState = iota

	containerMapStart
	containerMapKey
	containerMapValue
	containerMapEnd
	containerArrayStart
	containerArrayElem
	containerArrayEnd
)

// do not recurse if a containing type refers to an embedded type
// which refers back to its containing type (via a pointer).
// The second time this back-reference happens, break out,
// so as not to cause an infinite loop.
const rgetMaxRecursion = 2

// fauxUnion is used to keep track of the primitives decoded.
//
// Without it, we would have to decode each primitive and wrap it
// in an interface{}, causing an allocation.
// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
// so we can rest assured that no other decoding happens while these
// primitives are being decoded.
//
// maps and arrays are not handled by this mechanism.
type fauxUnion struct {
	// r RawExt // used for RawExt, uint, []byte.

	// primitives below
	u uint64
	i int64
	f float64
	l []byte
	s string

	// ---- cpu cache line boundary?
	t time.Time
	b bool

	// state
	v valueType
}

// typeInfoLoad is a transient object used while loading up a typeInfo.
type typeInfoLoad struct {
	etypes   []uintptr
	sfis     []structFieldInfo
	sfiNames map[string]uint16
}

func (x *typeInfoLoad) reset() {
	x.etypes = x.etypes[:0]
	x.sfis = x.sfis[:0]
	for k := range x.sfiNames { // optimized to zero the map
		delete(x.sfiNames, k)
	}
}

// mirror json.Marshaler and json.Unmarshaler here,
// so we don't import the encoding/json package
type jsonMarshaler interface {
	MarshalJSON() ([]byte, error)
}

type jsonUnmarshaler interface {
	UnmarshalJSON([]byte) error
}

type isZeroer interface {
	IsZero() bool
}

type isCodecEmptyer interface {
	IsCodecEmpty() bool
}

type codecError struct {
	err    error
	name   string
	pos    int
	encode bool
}

func (e *codecError) Cause() error {
	return e.err
}

func (e *codecError) Unwrap() error {
	return e.err
}

func (e *codecError) Error() string {
	if e.encode {
		return fmt.Sprintf("%s encode error: %v", e.name, e.err)
	}
	return fmt.Sprintf("%s decode error [pos %d]: %v", e.name, e.pos, e.err)
}

func wrapCodecErr(in error, name string, numbytesread int, encode bool) (out error) {
	x, ok := in.(*codecError)
	if ok && x.pos == numbytesread && x.name == name && x.encode == encode {
		return in
	}
	return &codecError{in, name, numbytesread, encode}
}
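
// Since codecError implements Unwrap (and Cause), callers can inspect the
// underlying error with the standard errors package. A brief sketch:
//
//	var ce *codecError
//	if errors.As(err, &ce) {
//		// ce.err is also reachable via errors.Unwrap(err)
//	}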

var (
	bigen bigenHelper

	bigenstd = binary.BigEndian

	structInfoFieldName = "_struct"

	mapStrIntfTyp  = reflect.TypeOf(map[string]interface{}(nil))
	mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
	intfSliceTyp   = reflect.TypeOf([]interface{}(nil))
	intfTyp        = intfSliceTyp.Elem()

	reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem()

	stringTyp     = reflect.TypeOf("")
	timeTyp       = reflect.TypeOf(time.Time{})
	rawExtTyp     = reflect.TypeOf(RawExt{})
	rawTyp        = reflect.TypeOf(Raw{})
	uintptrTyp    = reflect.TypeOf(uintptr(0))
	uint8Typ      = reflect.TypeOf(uint8(0))
	uint8SliceTyp = reflect.TypeOf([]uint8(nil))
	uintTyp       = reflect.TypeOf(uint(0))
	intTyp        = reflect.TypeOf(int(0))

	mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()

	binaryMarshalerTyp   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
	binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()

	textMarshalerTyp   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
	textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()

	jsonMarshalerTyp   = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
	jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()

	selferTyp         = reflect.TypeOf((*Selfer)(nil)).Elem()
	missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem()
	iszeroTyp         = reflect.TypeOf((*isZeroer)(nil)).Elem()

	isCodecEmptyerTyp = reflect.TypeOf((*isCodecEmptyer)(nil)).Elem()

	isSelferViaCodecgenerTyp = reflect.TypeOf((*isSelferViaCodecgener)(nil)).Elem()

	uint8TypId      = rt2id(uint8Typ)
	uint8SliceTypId = rt2id(uint8SliceTyp)
	rawExtTypId     = rt2id(rawExtTyp)
	rawTypId        = rt2id(rawTyp)
	intfTypId       = rt2id(intfTyp)
	timeTypId       = rt2id(timeTyp)
	stringTypId     = rt2id(stringTyp)

	mapStrIntfTypId  = rt2id(mapStrIntfTyp)
	mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
	intfSliceTypId   = rt2id(intfSliceTyp)
	// mapBySliceTypId = rt2id(mapBySliceTyp)

	intBitsize  = uint8(intTyp.Bits())
	uintBitsize = uint8(uintTyp.Bits())

	// bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
	bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}

	chkOvf checkOverflow
)

var defTypeInfos = NewTypeInfos([]string{"codec", "json"})

// SelfExt is a sentinel extension signifying that types
// registered with it SHOULD be encoded and decoded
// based on the native mode of the format.
//
// This allows users to define a tag for an extension,
// but signify that the types should be encoded/decoded as the native encoding.
// This way, users need not also define how to encode or decode the extension.
var SelfExt = &extFailWrapper{}

// Selfer defines methods by which a value can encode or decode itself.
//
// Any type which implements Selfer will be able to encode or decode itself.
// Consequently, during (en|de)code, this takes precedence over
// (text|binary)(M|Unm)arshal or extension support.
//
// By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself.
// If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error.
// For example, the snippet below will cause such an error.
//
//	type testSelferRecur struct{}
//	func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) }
//	func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) }
//
// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
// This is because, during each decode, we first check whether the next set of bytes
// represents nil, and if so, we just set the value to nil.
type Selfer interface {
	CodecEncodeSelf(*Encoder)
	CodecDecodeSelf(*Decoder)
}

type isSelferViaCodecgener interface {
	codecSelferViaCodecgen()
}

// MissingFielder defines the interface allowing structs to internally decode or encode
// values which do not map to struct fields.
//
// We expect that this interface is bound to a pointer type (so the mutation function works).
//
// A use-case is if a version of a type unexports a field, but you want compatibility between
// both versions during encoding and decoding.
//
// Note that the interface is completely ignored during codecgen.
type MissingFielder interface {
	// CodecMissingField is called to set a missing field and value pair.
	//
	// It returns true if the missing field was set on the struct.
	CodecMissingField(field []byte, value interface{}) bool

	// CodecMissingFields returns the set of fields which are not struct fields.
	//
	// Note that the returned map may be mutated by the caller.
	CodecMissingFields() map[string]interface{}
}
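
// A hedged sketch of a MissingFielder implementation (Record is a hypothetical type):
//
//	type Record struct {
//		Name  string
//		extra map[string]interface{} // holds fields with no struct counterpart
//	}
//
//	func (r *Record) CodecMissingField(field []byte, value interface{}) bool {
//		if r.extra == nil {
//			r.extra = make(map[string]interface{})
//		}
//		r.extra[string(field)] = value
//		return true
//	}
//
//	func (r *Record) CodecMissingFields() map[string]interface{} { return r.extra }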

// MapBySlice is a tag interface that denotes that a slice or array value should encode as a map
// in the stream, and can be decoded from a map in the stream.
//
// The slice or array must contain a sequence of key-value pairs.
// The length of the slice or array must be even (fully divisible by 2).
//
// This affords storing a map in a specific sequence in the stream.
//
// Example usage:
//
//	type T1 []string // or []int or []Point or any other "slice" type
//	func (_ T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
//	type T2 struct{ KeyValues T1 }
//
//	var kvs = []string{"one", "1", "two", "2", "three", "3"}
//	var v2 = T2{KeyValues: T1(kvs)}
//	// v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"}}
//
// The support of MapBySlice affords the following:
//   - A slice or array type which implements MapBySlice will be encoded as a map
//   - A slice can be decoded from a map in the stream
type MapBySlice interface {
	MapBySlice()
}

// basicHandleRuntimeState holds onto all BasicHandle runtime and cached config information.
//
// Storing this outside BasicHandle allows us to create shallow copies of a Handle,
// which can be used e.g. when we need to modify config fields temporarily.
// Shallow copies are used within tests, so we can modify some config fields for a test
// temporarily when running tests in parallel, without running the risk that a test executing
// in parallel with other tests sees transient modified values not meant for it.
type basicHandleRuntimeState struct {
	// these are used during runtime.
	// At init time, they should have nothing in them.
	rtidFns      atomicRtidFnSlice
	rtidFnsNoExt atomicRtidFnSlice

	// Note: basicHandleRuntimeState is not comparable, due to these slices here (extHandle, intf2impls).
	// If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
	// These slices are used all the time, so keep as slices (not pointers).

	extHandle

	intf2impls

	mu sync.Mutex

	jsonHandle   bool
	binaryHandle bool

	// timeBuiltin is initialized from TimeNotBuiltin, and used internally.
	// once initialized, it cannot be changed, as the function for encoding/decoding time.Time
	// will have been cached and the TimeNotBuiltin value will not be consulted thereafter.
	timeBuiltin bool

	_ bool // padding
}

// BasicHandle encapsulates the common options and extension functions.
//
// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
type BasicHandle struct {
	// BasicHandle is always a part of a different type.
	// It doesn't have to fit into its own cache lines.

	// TypeInfos is used to get the type info for any type.
	//
	// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
	TypeInfos *TypeInfos

	*basicHandleRuntimeState

	// ---- cache line

	DecodeOptions

	// ---- cache line

	EncodeOptions

	RPCOptions

	// TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
	//
	// All Handlers should know how to encode/decode time.Time as part of the core
	// format specification, or as a standard extension defined by the format.
	//
	// However, users can elect to handle time.Time as a custom extension, or via the
	// standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface.
	// To elect this behavior, users can set TimeNotBuiltin=true.
	//
	// Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior
	// (for Cbor and Msgpack), where time.Time was not a builtin supported type.
	//
	// Note: DO NOT CHANGE AFTER FIRST USE.
	//
	// Once a Handle has been initialized (used), do not modify this option. It will be ignored.
	TimeNotBuiltin bool

	// ExplicitRelease configures whether Release() is implicitly called after an encode or
	// decode call.
	//
	// If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...)
	// on it or calling (Must)Encode repeatedly into a given []byte or io.Writer,
	// then you do not want it to be implicitly closed after each Encode/Decode call.
	// Doing so will unnecessarily return resources to the shared pool, only for you to
	// grab them right after again to do another Encode/Decode call.
	//
	// Instead, you configure ExplicitRelease=true, and you explicitly call Release() when
	// you are truly done.
	//
	// As an alternative, you can explicitly set a finalizer - so that its resources
	// are returned to the shared pool before it is garbage-collected. Do it as below:
	//
	//	runtime.SetFinalizer(e, (*Encoder).Release)
	//	runtime.SetFinalizer(d, (*Decoder).Release)
	//
	// Deprecated: This is no longer used, as pools are only used for long-lived objects
	// which are shared across goroutines.
	// Setting this value has no effect. It is maintained for backward compatibility.
	ExplicitRelease bool

	// ---- cache line
	inited uint32 // holds whether inited, and also handle flags (binary encoding, json handler, etc)
}

// initHandle does a one-time initialization of the handle.
// After this is run, do not modify the Handle, as some modifications are ignored
// e.g. extensions, registered interfaces, TimeNotBuiltin, etc
func initHandle(hh Handle) {
	x := hh.getBasicHandle()

	// MARKER: We need to simulate once.Do, to ensure no data race within the block.
	// Consequently, below would not work.
	//
	//	if atomic.CompareAndSwapUint32(&x.inited, 0, 1) {
	//		x.be = hh.isBinary()
	//		x.js = hh.isJson
	//		x.n = hh.Name()[0]
	//	}

	// simulate once.Do using our own stored flag and mutex, as a CompareAndSwap
	// is not sufficient, since a race condition can occur within the init(Handle) function.
	// init is made noinline, so that this function can be inlined by its caller.
	if atomic.LoadUint32(&x.inited) == 0 {
		x.initHandle(hh)
	}
}

func (x *BasicHandle) basicInit() {
	x.rtidFns.store(nil)
	x.rtidFnsNoExt.store(nil)
	x.timeBuiltin = !x.TimeNotBuiltin
}

func (x *BasicHandle) init() {}

func (x *BasicHandle) isInited() bool {
	return atomic.LoadUint32(&x.inited) != 0
}

// clearInited: DANGEROUS - only use in testing, etc
func (x *BasicHandle) clearInited() {
	atomic.StoreUint32(&x.inited, 0)
}

// TimeBuiltin returns whether time.Time OOTB support is used,
// based on the initial configuration of TimeNotBuiltin
func (x *basicHandleRuntimeState) TimeBuiltin() bool {
	return x.timeBuiltin
}

func (x *basicHandleRuntimeState) isJs() bool {
	return x.jsonHandle
}

func (x *basicHandleRuntimeState) isBe() bool {
	return x.binaryHandle
}

func (x *basicHandleRuntimeState) setExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
	rk := rt.Kind()
	for rk == reflect.Ptr {
		rt = rt.Elem()
		rk = rt.Kind()
	}

	if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
		return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
	}

	rtid := rt2id(rt)

	// handle all natively supported types appropriately, so they cannot have an extension.
	// However, we do not return an error for these, as we do not document that.
	// Instead, we silently treat it as a no-op, and return.
	switch rtid {
	case rawTypId, rawExtTypId:
		return
	case timeTypId:
		if x.timeBuiltin {
			return
		}
	}

	for i := range x.extHandle {
		v := &x.extHandle[i]
		if v.rtid == rtid {
			v.tag, v.ext = tag, ext
			return
		}
	}
	rtidptr := rt2id(reflect.PtrTo(rt))
	x.extHandle = append(x.extHandle, extTypeTagFn{rtid, rtidptr, rt, tag, ext})
	return
}

// initHandle should be called only from the codec.initHandle global function.
// make it uninlineable, as it is called at most once for each handle.
//
//go:noinline
func (x *BasicHandle) initHandle(hh Handle) {
	handleInitMu.Lock()
	defer handleInitMu.Unlock() // use defer, as halt may panic below
	if x.inited == 0 {
		if x.basicHandleRuntimeState == nil {
			x.basicHandleRuntimeState = new(basicHandleRuntimeState)
		}
		x.jsonHandle = hh.isJson()
		x.binaryHandle = hh.isBinary()
		// ensure MapType and SliceType are of correct type
		if x.MapType != nil && x.MapType.Kind() != reflect.Map {
			halt.onerror(errMapTypeNotMapKind)
		}
		if x.SliceType != nil && x.SliceType.Kind() != reflect.Slice {
			halt.onerror(errSliceTypeNotSliceKind)
		}
		x.basicInit()
		hh.init()
		atomic.StoreUint32(&x.inited, 1)
	}
}

func (x *BasicHandle) getBasicHandle() *BasicHandle {
	return x
}

func (x *BasicHandle) typeInfos() *TypeInfos {
	if x.TypeInfos != nil {
		return x.TypeInfos
	}
	return defTypeInfos
}

func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
	return x.typeInfos().get(rtid, rt)
}

func findRtidFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) {
	// binary search. adapted from sort/search.go.
	// Note: we use goto (instead of a for loop) so this can be inlined.

	// h, i, j := 0, 0, len(s)
	var h uint // var h, i uint
	var j = uint(len(s))
LOOP:
	if i < j {
		h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
		if s[h].rtid < rtid {
			i = h + 1
		} else {
			j = h
		}
		goto LOOP
	}
	if i < uint(len(s)) && s[i].rtid == rtid {
		fn = s[i].fn
	}
	return
}
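
// For reference, the goto loop above computes the same index as (a sketch):
//
//	i = uint(sort.Search(len(s), func(h int) bool { return s[h].rtid >= rtid }))
//
// sort.Search is not called directly so that findRtidFn stays small enough to inline
// and avoids the closure.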

func (x *BasicHandle) fn(rt reflect.Type) (fn *codecFn) {
	return x.fnVia(rt, x.typeInfos(), &x.rtidFns, x.CheckCircularRef, true)
}

func (x *BasicHandle) fnNoExt(rt reflect.Type) (fn *codecFn) {
	return x.fnVia(rt, x.typeInfos(), &x.rtidFnsNoExt, x.CheckCircularRef, false)
}

func (x *basicHandleRuntimeState) fnVia(rt reflect.Type, tinfos *TypeInfos, fs *atomicRtidFnSlice, checkCircularRef, checkExt bool) (fn *codecFn) {
	rtid := rt2id(rt)
	sp := fs.load()
	if sp != nil {
		if _, fn = findRtidFn(sp, rtid); fn != nil {
			return
		}
	}

	fn = x.fnLoad(rt, rtid, tinfos, checkCircularRef, checkExt)
	x.mu.Lock()
	sp = fs.load()
	// since this is an atomic load/store, we MUST use a different array each time,
	// else we have a data race when a store is happening simultaneously with a findRtidFn call.
	if sp == nil {
		sp = []codecRtidFn{{rtid, fn}}
		fs.store(sp)
	} else {
		idx, fn2 := findRtidFn(sp, rtid)
		if fn2 == nil {
			sp2 := make([]codecRtidFn, len(sp)+1)
			copy(sp2[idx+1:], sp[idx:])
			copy(sp2, sp[:idx])
			sp2[idx] = codecRtidFn{rtid, fn}
			fs.store(sp2)
		}
	}
	x.mu.Unlock()
	return
}

func fnloadFastpathUnderlying(ti *typeInfo) (f *fastpathE, u reflect.Type) {
	var rtid uintptr
	var idx int
	rtid = rt2id(ti.fastpathUnderlying)
	idx = fastpathAvIndex(rtid)
	if idx == -1 {
		return
	}
	f = &fastpathAv[idx]
	if uint8(reflect.Array) == ti.kind {
		u = reflectArrayOf(ti.rt.Len(), ti.elem)
	} else {
		u = f.rt
	}
	return
}

func (x *basicHandleRuntimeState) fnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, checkCircularRef, checkExt bool) (fn *codecFn) {
	fn = new(codecFn)
	fi := &(fn.i)
	ti := tinfos.get(rtid, rt)
	fi.ti = ti
	rk := reflect.Kind(ti.kind)

	// anything can be an extension except the built-in ones: time, raw and rawext.
	// ensure we check for these types first, then for an extension, before checking if
	// it implements one of the pre-declared interfaces.

	fi.addrDf = true
	// fi.addrEf = true

	if rtid == timeTypId && x.timeBuiltin {
		fn.fe = (*Encoder).kTime
		fn.fd = (*Decoder).kTime
	} else if rtid == rawTypId {
		fn.fe = (*Encoder).raw
		fn.fd = (*Decoder).raw
	} else if rtid == rawExtTypId {
		fn.fe = (*Encoder).rawExt
		fn.fd = (*Decoder).rawExt
		fi.addrD = true
		fi.addrE = true
	} else if xfFn := x.getExt(rtid, checkExt); xfFn != nil {
		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
		fn.fe = (*Encoder).ext
		fn.fd = (*Decoder).ext
		fi.addrD = true
		if rk == reflect.Struct || rk == reflect.Array {
			fi.addrE = true
		}
	} else if (ti.flagSelfer || ti.flagSelferPtr) &&
		!(checkCircularRef && ti.flagSelferViaCodecgen && ti.kind == byte(reflect.Struct)) {
		// do not use Selfer generated by codecgen if it is a struct and CheckCircularRef=true
		fn.fe = (*Encoder).selferMarshal
		fn.fd = (*Decoder).selferUnmarshal
		fi.addrD = ti.flagSelferPtr
		fi.addrE = ti.flagSelferPtr
	} else if supportMarshalInterfaces && x.isBe() &&
		(ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) &&
		(ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) {
		fn.fe = (*Encoder).binaryMarshal
		fn.fd = (*Decoder).binaryUnmarshal
		fi.addrD = ti.flagBinaryUnmarshalerPtr
		fi.addrE = ti.flagBinaryMarshalerPtr
	} else if supportMarshalInterfaces && !x.isBe() && x.isJs() &&
		(ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) &&
		(ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) {
		// If JSON, we should check jsonMarshal before textMarshal
		fn.fe = (*Encoder).jsonMarshal
		fn.fd = (*Decoder).jsonUnmarshal
		fi.addrD = ti.flagJsonUnmarshalerPtr
		fi.addrE = ti.flagJsonMarshalerPtr
	} else if supportMarshalInterfaces && !x.isBe() &&
		(ti.flagTextMarshaler || ti.flagTextMarshalerPtr) &&
		(ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) {
		fn.fe = (*Encoder).textMarshal
		fn.fd = (*Decoder).textUnmarshal
		fi.addrD = ti.flagTextUnmarshalerPtr
		fi.addrE = ti.flagTextMarshalerPtr
	} else {
		if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) {
			// by default (without using unsafe),
			// if an array is not addressable, converting from an array to a slice
			// requires an allocation (see helper_not_unsafe.go: func rvGetSlice4Array).
			//
			// (Non-addressable arrays mostly occur as keys/values from a map.)
			//
			// However, fastpath functions are mostly for slices of numbers or strings,
			// which are small by definition and thus allocation should be fast/cheap in time.
			//
			// Consequently, the value of doing this quick allocation to elide the overhead cost of
			// non-optimized (not-unsafe) reflection is a fair price.
			var rtid2 uintptr
			if !ti.flagHasPkgPath { // un-named type (slice or map or array)
				rtid2 = rtid
				if rk == reflect.Array {
					rtid2 = rt2id(ti.key) // ti.key for arrays = reflect.SliceOf(ti.elem)
				}
				if idx := fastpathAvIndex(rtid2); idx != -1 {
					fn.fe = fastpathAv[idx].encfn
					fn.fd = fastpathAv[idx].decfn
					fi.addrD = true
					fi.addrDf = false
					if rk == reflect.Array {
						fi.addrD = false // decode directly into array value (slice made from it)
					}
				}
			} else { // named type (with underlying type of map or slice or array)
				// try to use mapping for underlying type
				xfe, xrt := fnloadFastpathUnderlying(ti)
				if xfe != nil {
					xfnf := xfe.encfn
					xfnf2 := xfe.decfn
					if rk == reflect.Array {
						fi.addrD = false // decode directly into array value (slice made from it)
						fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
							xfnf2(d, xf, rvConvert(xrv, xrt))
						}
					} else {
						fi.addrD = true
						fi.addrDf = false // meaning it can be an address(ptr) or a value
						xptr2rt := reflect.PtrTo(xrt)
						fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
							if xrv.Kind() == reflect.Ptr {
								xfnf2(d, xf, rvConvert(xrv, xptr2rt))
							} else {
								xfnf2(d, xf, rvConvert(xrv, xrt))
							}
						}
					}
					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
						xfnf(e, xf, rvConvert(xrv, xrt))
					}
				}
			}
		}
		if fn.fe == nil && fn.fd == nil {
			switch rk {
			case reflect.Bool:
				fn.fe = (*Encoder).kBool
				fn.fd = (*Decoder).kBool
			case reflect.String:
				// Do not use different functions based on the StringToRaw option, as that will statically
				// set the function for a string type, and if the Handle is modified thereafter,
				// behaviour is non-deterministic;
				// i.e. DO NOT DO:
				//	if x.StringToRaw {
				//		fn.fe = (*Encoder).kStringToRaw
				//	} else {
				//		fn.fe = (*Encoder).kStringEnc
				//	}
				fn.fe = (*Encoder).kString
				fn.fd = (*Decoder).kString
			case reflect.Int:
				fn.fd = (*Decoder).kInt
				fn.fe = (*Encoder).kInt
			case reflect.Int8:
				fn.fe = (*Encoder).kInt8
				fn.fd = (*Decoder).kInt8
			case reflect.Int16:
				fn.fe = (*Encoder).kInt16
				fn.fd = (*Decoder).kInt16
			case reflect.Int32:
				fn.fe = (*Encoder).kInt32
				fn.fd = (*Decoder).kInt32
			case reflect.Int64:
				fn.fe = (*Encoder).kInt64
				fn.fd = (*Decoder).kInt64
			case reflect.Uint:
				fn.fd = (*Decoder).kUint
				fn.fe = (*Encoder).kUint
			case reflect.Uint8:
				fn.fe = (*Encoder).kUint8
				fn.fd = (*Decoder).kUint8
			case reflect.Uint16:
				fn.fe = (*Encoder).kUint16
				fn.fd = (*Decoder).kUint16
			case reflect.Uint32:
				fn.fe = (*Encoder).kUint32
				fn.fd = (*Decoder).kUint32
			case reflect.Uint64:
				fn.fe = (*Encoder).kUint64
				fn.fd = (*Decoder).kUint64
			case reflect.Uintptr:
				fn.fe = (*Encoder).kUintptr
				fn.fd = (*Decoder).kUintptr
			case reflect.Float32:
				fn.fe = (*Encoder).kFloat32
				fn.fd = (*Decoder).kFloat32
			case reflect.Float64:
				fn.fe = (*Encoder).kFloat64
				fn.fd = (*Decoder).kFloat64
			case reflect.Complex64:
				fn.fe = (*Encoder).kComplex64
				fn.fd = (*Decoder).kComplex64
			case reflect.Complex128:
				fn.fe = (*Encoder).kComplex128
				fn.fd = (*Decoder).kComplex128
			case reflect.Chan:
				fn.fe = (*Encoder).kChan
				fn.fd = (*Decoder).kChan
			case reflect.Slice:
				fn.fe = (*Encoder).kSlice
				fn.fd = (*Decoder).kSlice
			case reflect.Array:
				fi.addrD = false // decode directly into array value (slice made from it)
				fn.fe = (*Encoder).kArray
				fn.fd = (*Decoder).kArray
			case reflect.Struct:
				if ti.anyOmitEmpty ||
					ti.flagMissingFielder ||
					ti.flagMissingFielderPtr {
					fn.fe = (*Encoder).kStruct
				} else {
					fn.fe = (*Encoder).kStructNoOmitempty
				}
				fn.fd = (*Decoder).kStruct
			case reflect.Map:
				fn.fe = (*Encoder).kMap
				fn.fd = (*Decoder).kMap
			case reflect.Interface:
				// encode: reflect.Interface values are handled already by preEncodeValue
				fn.fd = (*Decoder).kInterface
				fn.fe = (*Encoder).kErr
			default:
				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
				fn.fe = (*Encoder).kErr
				fn.fd = (*Decoder).kErr
			}
		}
	}
	return
}
// Handle defines a specific encoding format. It also stores any runtime state
// used during an Encoding or Decoding session e.g. stored state about Types, etc.
//
// Once a handle is configured, it can be shared across multiple Encoders and Decoders.
//
// Note that a Handle is NOT safe for concurrent modification.
//
// A Handle should also not be modified after it is configured and has
// been used at least once. This is because stored state may be out of sync with the
// new configuration, and a data race can occur when multiple goroutines access it,
// i.e. multiple Encoders or Decoders in different goroutines.
//
// Consequently, the typical usage model is that a Handle is pre-configured
// before first use, and not modified while in use.
// Such a pre-configured Handle is safe for concurrent access.
type Handle interface {
	Name() string
	getBasicHandle() *BasicHandle
	newEncDriver() encDriver
	newDecDriver() decDriver
	isBinary() bool
	isJson() bool // json is special for now, so track it
	// desc describes the current byte descriptor, or returns "unknown[XXX]" if not understood.
	desc(bd byte) string
	// init initializes the handle based on handle-specific info (beyond what is in BasicHandle)
	init()
}
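
// An illustrative sketch of the usage model above, as seen from a client
// package (assuming this package's JsonHandle, NewEncoderBytes and MustEncode;
// out1/out2/v1/v2 are hypothetical):
//
//	var jh codec.JsonHandle
//	jh.Canonical = true // configure fully before first use ...
//	// ... then share freely across goroutines, without further modification:
//	go func() { codec.NewEncoderBytes(&out1, &jh).MustEncode(v1) }()
//	go func() { codec.NewEncoderBytes(&out2, &jh).MustEncode(v2) }()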
// Raw represents raw formatted bytes.
// We "blindly" store it during encode and retrieve the raw bytes during decode.
// Note: it is dangerous during encode, so we may gate the behaviour
// behind an Encode flag which must be explicitly set.
type Raw []byte

// RawExt represents raw unprocessed extension data.
// Some codecs will decode extension data as a *RawExt
// if there is no registered extension for the tag.
//
// Only one of Data or Value is nil.
// If Data is nil, then the content of the RawExt is in the Value.
type RawExt struct {
	Tag uint64
	// Data is the []byte which represents the raw ext. If nil, ext is exposed in Value.
	// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types.
	Data []byte
	// Value represents the extension, if Data is nil.
	// Value is used by codecs (e.g. cbor, json) which leverage the format to do
	// custom serialization of the types.
	Value interface{}
}

func (re *RawExt) setData(xbs []byte, zerocopy bool) {
	if zerocopy {
		re.Data = xbs
	} else {
		re.Data = append(re.Data[:0], xbs...)
	}
}

// BytesExt handles custom (de)serialization of types to/from []byte.
// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
type BytesExt interface {
	// WriteExt converts a value to a []byte.
	//
	// Note: v is a pointer iff the registered extension type is a struct or array kind.
	WriteExt(v interface{}) []byte

	// ReadExt updates a value from a []byte.
	//
	// Note: dst is always a pointer kind to the registered extension type.
	ReadExt(dst interface{}, src []byte)
}
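
// An illustrative sketch (hypothetical, not part of this file): a BytesExt that
// stores a time.Time as 8 big-endian bytes of its unix-seconds value:
//
//	type unixTimeExt struct{}
//
//	func (unixTimeExt) WriteExt(v interface{}) []byte {
//		b := make([]byte, 8)
//		binary.BigEndian.PutUint64(b, uint64(v.(*time.Time).Unix())) // struct kind, so v is a pointer
//		return b
//	}
//
//	func (unixTimeExt) ReadExt(dst interface{}, src []byte) {
//		*(dst.(*time.Time)) = time.Unix(int64(binary.BigEndian.Uint64(src)), 0)
//	}
//
// Such an extension would be registered via e.g. SetBytesExt on a handle for a
// binary format (msgpack, binc, simple).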
// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
// The Encoder or Decoder will then handle the further (de)serialization of that known type.
//
// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types.
type InterfaceExt interface {
	// ConvertExt converts a value into a simpler interface for easy encoding
	// e.g. convert time.Time to int64.
	//
	// Note: v is a pointer iff the registered extension type is a struct or array kind.
	ConvertExt(v interface{}) interface{}

	// UpdateExt updates a value from a simpler interface for easy decoding
	// e.g. convert int64 to time.Time.
	//
	// Note: dst is always a pointer kind to the registered extension type.
	UpdateExt(dst interface{}, src interface{})
}
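
// An illustrative sketch (hypothetical), mirroring the time.Time example in
// the doc comment above. Note that the concrete type of src depends on the
// format's decoder, so a real implementation should type-switch on it:
//
//	type timeExt struct{}
//
//	func (timeExt) ConvertExt(v interface{}) interface{} {
//		return v.(*time.Time).Unix() // struct kind, so v is a pointer
//	}
//
//	func (timeExt) UpdateExt(dst interface{}, src interface{}) {
//		*(dst.(*time.Time)) = time.Unix(src.(int64), 0) // assumes src decoded as int64
//	}
//
// Such an extension would be registered via e.g. SetInterfaceExt on a handle
// for a format like cbor or json.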
// Ext handles custom (de)serialization of custom types / extensions.
type Ext interface {
	BytesExt
	InterfaceExt
}

// addExtWrapper is a wrapper implementation to support the former AddExt exported method.
type addExtWrapper struct {
	encFn func(reflect.Value) ([]byte, error)
	decFn func(reflect.Value, []byte) error
}

func (x addExtWrapper) WriteExt(v interface{}) []byte {
	bs, err := x.encFn(reflect.ValueOf(v))
	halt.onerror(err)
	return bs
}

func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
	halt.onerror(x.decFn(reflect.ValueOf(v), bs))
}

func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
	return x.WriteExt(v)
}

func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
	x.ReadExt(dest, v.([]byte))
}

type bytesExtFailer struct{}

func (bytesExtFailer) WriteExt(v interface{}) []byte {
	halt.onerror(errExtFnWriteExtUnsupported)
	return nil
}

func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
	halt.onerror(errExtFnReadExtUnsupported)
}

type interfaceExtFailer struct{}

func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
	halt.onerror(errExtFnConvertExtUnsupported)
	return nil
}

func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
	halt.onerror(errExtFnUpdateExtUnsupported)
}

type bytesExtWrapper struct {
	interfaceExtFailer
	BytesExt
}

type interfaceExtWrapper struct {
	bytesExtFailer
	InterfaceExt
}

type extFailWrapper struct {
	bytesExtFailer
	interfaceExtFailer
}

type binaryEncodingType struct{}

func (binaryEncodingType) isBinary() bool { return true }
func (binaryEncodingType) isJson() bool   { return false }

type textEncodingType struct{}

func (textEncodingType) isBinary() bool { return false }
func (textEncodingType) isJson() bool   { return false }

type notJsonType struct{}

func (notJsonType) isJson() bool { return false }

// noBuiltInTypes is embedded into many types which do not support builtins
// e.g. msgpack, simple, cbor.
type noBuiltInTypes struct{}

func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
// bigenHelper handles ByteOrder operations directly using
// arrays of bytes (not slices of bytes).
//
// Since byteorder operations are very common for encoding and decoding
// numbers, lengths, etc - it is imperative that this operation is as
// fast as possible. Removing indirection (pointer chasing) to look
// at up to 8 bytes helps a lot here.
//
// For cases where it is expedient to use a slice, delegate to
// bigenstd (equal to the binary.BigEndian value).
//
// retrofitted from stdlib: encoding/binary/BigEndian (ByteOrder)
type bigenHelper struct{}

func (z bigenHelper) PutUint16(v uint16) (b [2]byte) {
	return [...]byte{
		byte(v >> 8),
		byte(v),
	}
}

func (z bigenHelper) PutUint32(v uint32) (b [4]byte) {
	return [...]byte{
		byte(v >> 24),
		byte(v >> 16),
		byte(v >> 8),
		byte(v),
	}
}

func (z bigenHelper) PutUint64(v uint64) (b [8]byte) {
	return [...]byte{
		byte(v >> 56),
		byte(v >> 48),
		byte(v >> 40),
		byte(v >> 32),
		byte(v >> 24),
		byte(v >> 16),
		byte(v >> 8),
		byte(v),
	}
}

func (z bigenHelper) Uint16(b [2]byte) (v uint16) {
	return uint16(b[1]) |
		uint16(b[0])<<8
}

func (z bigenHelper) Uint32(b [4]byte) (v uint32) {
	return uint32(b[3]) |
		uint32(b[2])<<8 |
		uint32(b[1])<<16 |
		uint32(b[0])<<24
}

func (z bigenHelper) Uint64(b [8]byte) (v uint64) {
	return uint64(b[7]) |
		uint64(b[6])<<8 |
		uint64(b[5])<<16 |
		uint64(b[4])<<24 |
		uint64(b[3])<<32 |
		uint64(b[2])<<40 |
		uint64(b[1])<<48 |
		uint64(b[0])<<56
}

func (z bigenHelper) writeUint16(w *encWr, v uint16) {
	x := z.PutUint16(v)
	w.writen2(x[0], x[1])
}

func (z bigenHelper) writeUint32(w *encWr, v uint32) {
	// w.writeb((z.PutUint32(v))[:])
	// x := z.PutUint32(v)
	// w.writeb(x[:])
	// w.writen4(x[0], x[1], x[2], x[3])
	w.writen4(z.PutUint32(v))
}

func (z bigenHelper) writeUint64(w *encWr, v uint64) {
	w.writen8(z.PutUint64(v))
}
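
// An illustrative round-trip sketch: PutUint32 and Uint32 are inverses over
// the big-endian byte layout above:
//
//	var z bigenHelper
//	b := z.PutUint32(0xCAFEBABE) // b == [4]byte{0xca, 0xfe, 0xba, 0xbe}
//	v := z.Uint32(b)             // v == 0xCAFEBABE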
type extTypeTagFn struct {
	rtid    uintptr
	rtidptr uintptr
	rt      reflect.Type
	tag     uint64
	ext     Ext
}

type extHandle []extTypeTagFn

// AddExt registers an encode and decode function for a reflect.Type.
// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
//
// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
func (x *BasicHandle) AddExt(rt reflect.Type, tag byte,
	encfn func(reflect.Value) ([]byte, error),
	decfn func(reflect.Value, []byte) error) (err error) {
	if encfn == nil || decfn == nil {
		return x.SetExt(rt, uint64(tag), nil)
	}
	return x.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
}

// SetExt will set the extension for a tag and reflect.Type.
// Note that the type must be a named type, and specifically not a pointer or interface.
// An error is returned if that is not honored.
// To deregister an ext, call SetExt with a nil Ext.
//
// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
func (x *BasicHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
	if x.isInited() {
		return errHandleInited
	}
	if x.basicHandleRuntimeState == nil {
		x.basicHandleRuntimeState = new(basicHandleRuntimeState)
	}
	return x.basicHandleRuntimeState.setExt(rt, tag, ext)
}
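
// An illustrative usage sketch (MyWrapper and myExt are hypothetical user
// types; makeExt is defined later in this file):
//
//	var h BincHandle
//	err := h.SetExt(reflect.TypeOf(MyWrapper{}), 42, makeExt(myExt{}))
//	// ... and later, to deregister the mapping:
//	err = h.SetExt(reflect.TypeOf(MyWrapper{}), 42, nil)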
func (o extHandle) getExtForI(x interface{}) (v *extTypeTagFn) {
	if len(o) > 0 {
		v = o.getExt(i2rtid(x), true)
	}
	return
}

func (o extHandle) getExt(rtid uintptr, check bool) (v *extTypeTagFn) {
	if !check {
		return
	}
	for i := range o {
		v = &o[i]
		if v.rtid == rtid || v.rtidptr == rtid {
			return
		}
	}
	return nil
}

func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
	for i := range o {
		v = &o[i]
		if v.tag == tag {
			return
		}
	}
	return nil
}
type intf2impl struct {
	rtid uintptr // for intf
	impl reflect.Type
}

type intf2impls []intf2impl

// Intf2Impl maps an interface to an implementing type.
// This allows us to support inferring the concrete type
// and populating it when passed an interface.
// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
//
// Passing a nil impl will clear the mapping.
func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) {
	if impl != nil && !impl.Implements(intf) {
		return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf)
	}
	rtid := rt2id(intf)
	o2 := *o
	for i := range o2 {
		v := &o2[i]
		if v.rtid == rtid {
			v.impl = impl
			return
		}
	}
	*o = append(o2, intf2impl{rtid, impl})
	return
}

func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) {
	for i := range o {
		v := &o[i]
		if v.rtid == rtid {
			if v.impl == nil {
				return
			}
			vkind := v.impl.Kind()
			if vkind == reflect.Ptr {
				return reflect.New(v.impl.Elem())
			}
			return rvZeroAddrK(v.impl, vkind)
		}
	}
	return
}
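
// An illustrative usage sketch, mirroring the io.Reader example in the doc
// comment above:
//
//	var m intf2impls
//	err := m.Intf2Impl(reflect.TypeOf((*io.Reader)(nil)).Elem(), reflect.TypeOf(bytes.Buffer{}))
//	rv := m.intf2impl(rt2id(reflect.TypeOf((*io.Reader)(nil)).Elem()))
//	// rv is now an addressable zero bytes.Buffer value, ready to be decoded into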
// structFieldInfoPathNode is a node in a tree, which allows us to easily
// walk the anonymous path.
//
// In the typical case, the node is not embedded/anonymous, and thus the parent
// will be nil and this information becomes a value (not needing any indirection).
type structFieldInfoPathNode struct {
	parent *structFieldInfoPathNode

	offset   uint16
	index    uint16
	kind     uint8
	numderef uint8

	// encNameAsciiAlphaNum and omitEmpty should be in structFieldInfo,
	// but are kept here for tighter packaging.
	encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers
	omitEmpty            bool

	typ reflect.Type
}

// depth returns the number of valid nodes in the hierarchy
func (path *structFieldInfoPathNode) depth() (d int) {
TOP:
	if path != nil {
		d++
		path = path.parent
		goto TOP
	}
	return
}

// field returns the field of the struct.
func (path *structFieldInfoPathNode) field(v reflect.Value) (rv2 reflect.Value) {
	if parent := path.parent; parent != nil {
		v = parent.field(v)
		for j, k := uint8(0), parent.numderef; j < k; j++ {
			if rvIsNil(v) {
				return
			}
			v = v.Elem()
		}
	}
	return path.rvField(v)
}

// fieldAlloc returns the field of the struct.
// It allocates if a nil value was seen while searching.
func (path *structFieldInfoPathNode) fieldAlloc(v reflect.Value) (rv2 reflect.Value) {
	if parent := path.parent; parent != nil {
		v = parent.fieldAlloc(v)
		for j, k := uint8(0), parent.numderef; j < k; j++ {
			if rvIsNil(v) {
				rvSetDirect(v, reflect.New(v.Type().Elem()))
			}
			v = v.Elem()
		}
	}
	return path.rvField(v)
}

type structFieldInfo struct {
	encName string // encode name

	// encNameHash uintptr
	// fieldName string // currently unused

	// encNameAsciiAlphaNum and omitEmpty should be here,
	// but are stored in structFieldInfoPathNode for tighter packaging.

	path structFieldInfoPathNode
}
func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
	keytype = valueTypeString // default
	if stag == "" {
		return
	}
	ss := strings.Split(stag, ",")
	if len(ss) < 2 {
		return
	}
	for _, s := range ss[1:] {
		switch s {
		case "omitempty":
			omitEmpty = true
		case "toarray":
			toArray = true
		case "int":
			keytype = valueTypeInt
		case "uint":
			keytype = valueTypeUint
		case "float":
			keytype = valueTypeFloat
		// case "bool":
		// 	keytype = valueTypeBool
		case "string":
			keytype = valueTypeString
		}
	}
	return
}
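
// An illustrative sketch (assuming the `codec` tag key): the _struct field tag
// below is handled by parseStructInfo above, and the Name field tag by
// (*structFieldInfo).parseTag below:
//
//	type T struct {
//		_struct struct{} `codec:",omitempty,toarray,int"` // omitEmpty=true, toArray=true, keytype=valueTypeInt
//		Name    string   `codec:"name,omitempty"`         // encName="name", omitEmpty=true
//	}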
func (si *structFieldInfo) parseTag(stag string) {
	if stag == "" {
		return
	}
	for i, s := range strings.Split(stag, ",") {
		if i == 0 {
			if s != "" {
				si.encName = s
			}
		} else {
			switch s {
			case "omitempty":
				si.path.omitEmpty = true
			}
		}
	}
}
type sfiSortedByEncName []*structFieldInfo

func (p sfiSortedByEncName) Len() int           { return len(p) }
func (p sfiSortedByEncName) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName }

// typeInfo4Container holds information that is only available for
// containers like map, array, chan, slice.
type typeInfo4Container struct {
	elem reflect.Type
	// key is:
	//   - if map kind: the map key
	//   - if array kind: sliceOf(elem)
	//   - if chan kind: sliceOf(elem)
	key reflect.Type

	// fastpathUnderlying is the underlying type of a named slice/map/array, as defined by the go spec,
	// used by fastpath where we have defined fastpath functions for the underlying type.
	//
	// for a map, it's a map; for a slice or array, it's a slice; else it's nil.
	fastpathUnderlying reflect.Type

	tikey  *typeInfo
	tielem *typeInfo
}
// typeInfo keeps static (non-changing, read-only) information
// about each (non-ptr) type referenced in the encode/decode sequence.
//
// During an encode/decode sequence, we work as below:
//   - If base is a built-in type, en/decode base value
//   - If base is registered as an extension, en/decode base value
//   - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
//   - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
//   - Else decode appropriately based on the reflect.Kind
type typeInfo struct {
	rt  reflect.Type
	ptr reflect.Type

	// pkgpath string

	rtid uintptr

	numMeth uint16 // number of methods
	kind    uint8
	chandir uint8

	anyOmitEmpty bool      // true if a struct, and any of the fields are tagged "omitempty"
	toArray      bool      // whether this (struct) type should be encoded as an array
	keyType      valueType // if struct, how is the field name stored in a stream? default is string
	mbs          bool      // base type (T or *T) is a MapBySlice

	sfi4Name map[string]*structFieldInfo // map, used for finding sfi given a name

	*typeInfo4Container

	// ---- cpu cache line boundary?

	size, keysize, elemsize uint32

	keykind, elemkind uint8

	flagHasPkgPath   bool // Type.PackagePath != ""
	flagComparable   bool
	flagCanTransient bool

	flagMarshalInterface  bool // does this have a custom (un)marshal implementation?
	flagSelferViaCodecgen bool

	// custom implementation flags
	flagIsZeroer    bool
	flagIsZeroerPtr bool

	flagIsCodecEmptyer    bool
	flagIsCodecEmptyerPtr bool

	flagBinaryMarshaler    bool
	flagBinaryMarshalerPtr bool

	flagBinaryUnmarshaler    bool
	flagBinaryUnmarshalerPtr bool

	flagTextMarshaler    bool
	flagTextMarshalerPtr bool

	flagTextUnmarshaler    bool
	flagTextUnmarshalerPtr bool

	flagJsonMarshaler    bool
	flagJsonMarshalerPtr bool

	flagJsonUnmarshaler    bool
	flagJsonUnmarshalerPtr bool

	flagSelfer    bool
	flagSelferPtr bool

	flagMissingFielder    bool
	flagMissingFielderPtr bool

	infoFieldOmitempty bool

	sfi structFieldInfos
}

func (ti *typeInfo) siForEncName(name []byte) (si *structFieldInfo) {
	return ti.sfi4Name[string(name)]
}
func (ti *typeInfo) resolve(x []structFieldInfo, ss map[string]uint16) (n int) {
	n = len(x)

	for i := range x {
		ui := uint16(i)
		xn := x[i].encName
		j, ok := ss[xn]
		if ok {
			i2clear := ui                              // index to be cleared
			if x[i].path.depth() < x[j].path.depth() { // this one is shallower
				ss[xn] = ui
				i2clear = j
			}
			if x[i2clear].encName != "" {
				x[i2clear].encName = ""
				n--
			}
		} else {
			ss[xn] = ui
		}
	}

	return
}

func (ti *typeInfo) init(x []structFieldInfo, n int) {
	var anyOmitEmpty bool

	// remove all the nils (non-ready)
	m := make(map[string]*structFieldInfo, n)
	w := make([]structFieldInfo, n)
	y := make([]*structFieldInfo, n+n)
	z := y[n:]
	y = y[:n]
	n = 0
	for i := range x {
		if x[i].encName == "" {
			continue
		}
		if !anyOmitEmpty && x[i].path.omitEmpty {
			anyOmitEmpty = true
		}
		w[n] = x[i]
		y[n] = &w[n]
		m[x[i].encName] = &w[n]
		n++
	}
	if n != len(y) {
		halt.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", ti.rt, len(y), len(x), n)
	}

	copy(z, y)
	sort.Sort(sfiSortedByEncName(z))

	ti.anyOmitEmpty = anyOmitEmpty
	ti.sfi.load(y, z)
	ti.sfi4Name = m
}
// Handling flagCanTransient
//
// We support the transient optimization if the kind of the type is
// a number, bool, string, or slice (of number/bool).
// In addition, we also support it if the kind is struct or array,
// and the type does not contain any pointers (recursively).
//
// Note that all reference types (string, slice, func, map, ptr, interface, etc) contain pointers.
//
// If using transient for a type with a pointer, there is the potential for data corruption
// when the GC tries to follow a "transient" pointer which may become a non-pointer soon after.

func transientBitsetFlags() *bitset32 {
	if transientValueHasStringSlice {
		return &numBoolStrSliceBitset
	}
	return &numBoolBitset
}

func isCanTransient(t reflect.Type, k reflect.Kind) (v bool) {
	var bs = transientBitsetFlags()
	if bs.isset(byte(k)) {
		v = true
	} else if k == reflect.Slice {
		elem := t.Elem()
		v = numBoolBitset.isset(byte(elem.Kind()))
	} else if k == reflect.Array {
		elem := t.Elem()
		v = isCanTransient(elem, elem.Kind())
	} else if k == reflect.Struct {
		v = true
		for j, jlen := 0, t.NumField(); j < jlen; j++ {
			f := t.Field(j)
			if !isCanTransient(f.Type, f.Type.Kind()) {
				v = false
				return
			}
		}
	} else {
		v = false
	}
	return
}
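
// An illustrative sketch of the rules above (assuming number kinds are in the
// transient bitset, as configured elsewhere in this package):
//
//	isCanTransient(reflect.TypeOf(int(0)), reflect.Int)                // true: a number
//	isCanTransient(reflect.TypeOf([4]int{}), reflect.Array)            // true: an array of numbers
//	isCanTransient(reflect.TypeOf(struct{ P *int }{}), reflect.Struct) // false: contains a pointer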
func (ti *typeInfo) doSetFlagCanTransient() {
	if transientSizeMax > 0 {
		ti.flagCanTransient = ti.size <= transientSizeMax
	} else {
		ti.flagCanTransient = true
	}
	if ti.flagCanTransient {
		if !transientBitsetFlags().isset(ti.kind) {
			ti.flagCanTransient = isCanTransient(ti.rt, reflect.Kind(ti.kind))
		}
	}
}
type rtid2ti struct {
	rtid uintptr
	ti   *typeInfo
}

// TypeInfos caches typeInfo for each type on first inspection.
//
// It is configured with a set of tag keys, which are used to get
// configuration for the type.
type TypeInfos struct {
	infos atomicTypeInfoSlice
	mu    sync.Mutex
	_     uint64 // padding (cache-aligned)
	tags  []string
	_     uint64 // padding (cache-aligned)
}

// NewTypeInfos creates a TypeInfos given a set of struct tag keys.
//
// This allows users to customize the struct tag keys which contain configuration
// of their types.
func NewTypeInfos(tags []string) *TypeInfos {
	return &TypeInfos{tags: tags}
}
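
// An illustrative sketch: a TypeInfos that consults the `codec` tag first and
// falls back to the `json` tag, matching the lookup order described in
// structTag below:
//
//	tis := NewTypeInfos([]string{"codec", "json"})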
func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
	// check for tags: codec, json, in that order.
	// this allows seamless support for many configured structs.
	for _, x := range x.tags {
		s = t.Get(x)
		if s != "" {
			return s
		}
	}
	return
}

func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) {
	// binary search. adapted from sort/search.go.
	// Note: we use goto (instead of a for loop) so this can be inlined.

	var h uint
	var j = uint(len(s))
LOOP:
	if i < j {
		h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
		if s[h].rtid < rtid {
			i = h + 1
		} else {
			j = h
		}
		goto LOOP
	}
	if i < uint(len(s)) && s[i].rtid == rtid {
		ti = s[i].ti
	}
	return
}

func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
	if pti = x.find(rtid); pti == nil {
		pti = x.load(rt)
	}
	return
}

func (x *TypeInfos) find(rtid uintptr) (pti *typeInfo) {
	sp := x.infos.load()
	if sp != nil {
		_, pti = findTypeInfo(sp, rtid)
	}
	return
}
func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) {
	rk := rt.Kind()

	if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
		halt.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
	}

	rtid := rt2id(rt)

	// do not hold lock while computing this.
	// it may lead to duplication, but that's ok.
	ti := typeInfo{
		rt:      rt,
		ptr:     reflect.PtrTo(rt),
		rtid:    rtid,
		kind:    uint8(rk),
		size:    uint32(rt.Size()),
		numMeth: uint16(rt.NumMethod()),
		keyType: valueTypeString, // default it - so it's never 0
		// pkgpath: rt.PkgPath(),
		flagHasPkgPath: rt.PkgPath() != "",
	}

	// bset sets custom implementation flags
	bset := func(when bool, b *bool) {
		if when {
			*b = true
		}
	}

	var b1, b2 bool

	b1, b2 = implIntf(rt, binaryMarshalerTyp)
	bset(b1, &ti.flagBinaryMarshaler)
	bset(b2, &ti.flagBinaryMarshalerPtr)
	b1, b2 = implIntf(rt, binaryUnmarshalerTyp)
	bset(b1, &ti.flagBinaryUnmarshaler)
	bset(b2, &ti.flagBinaryUnmarshalerPtr)
	b1, b2 = implIntf(rt, textMarshalerTyp)
	bset(b1, &ti.flagTextMarshaler)
	bset(b2, &ti.flagTextMarshalerPtr)
	b1, b2 = implIntf(rt, textUnmarshalerTyp)
	bset(b1, &ti.flagTextUnmarshaler)
	bset(b2, &ti.flagTextUnmarshalerPtr)
	b1, b2 = implIntf(rt, jsonMarshalerTyp)
	bset(b1, &ti.flagJsonMarshaler)
	bset(b2, &ti.flagJsonMarshalerPtr)
	b1, b2 = implIntf(rt, jsonUnmarshalerTyp)
	bset(b1, &ti.flagJsonUnmarshaler)
	bset(b2, &ti.flagJsonUnmarshalerPtr)
	b1, b2 = implIntf(rt, selferTyp)
	bset(b1, &ti.flagSelfer)
	bset(b2, &ti.flagSelferPtr)
	b1, b2 = implIntf(rt, missingFielderTyp)
	bset(b1, &ti.flagMissingFielder)
	bset(b2, &ti.flagMissingFielderPtr)
	b1, b2 = implIntf(rt, iszeroTyp)
	bset(b1, &ti.flagIsZeroer)
	bset(b2, &ti.flagIsZeroerPtr)
	b1, b2 = implIntf(rt, isCodecEmptyerTyp)
	bset(b1, &ti.flagIsCodecEmptyer)
	bset(b2, &ti.flagIsCodecEmptyerPtr)

	b1, b2 = implIntf(rt, isSelferViaCodecgenerTyp)
	ti.flagSelferViaCodecgen = b1 || b2

	ti.flagMarshalInterface = ti.flagSelfer || ti.flagSelferPtr ||
		ti.flagSelferViaCodecgen ||
		ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr ||
		ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr ||
		ti.flagTextMarshaler || ti.flagTextMarshalerPtr ||
		ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr ||
		ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr ||
		ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr

	b1 = rt.Comparable()
	// bset(b1, &ti.flagComparable)
	ti.flagComparable = b1

	ti.doSetFlagCanTransient()

	var tt reflect.Type
	switch rk {
	case reflect.Struct:
		var omitEmpty bool
		if f, ok := rt.FieldByName(structInfoFieldName); ok {
			ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
			ti.infoFieldOmitempty = omitEmpty
		} else {
			ti.keyType = valueTypeString
		}
		pp, pi := &pool4tiload, pool4tiload.Get()
		pv := pi.(*typeInfoLoad)
		pv.reset()
		pv.etypes = append(pv.etypes, ti.rtid)
		x.rget(rt, rtid, nil, pv, omitEmpty)
		n := ti.resolve(pv.sfis, pv.sfiNames)
		ti.init(pv.sfis, n)
		pp.Put(pi)
	case reflect.Map:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.elem = rt.Elem()
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		ti.key = rt.Key()
		for tt = ti.key; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tikey = x.get(rt2id(tt), tt)
		ti.keykind = uint8(ti.key.Kind())
		ti.keysize = uint32(ti.key.Size())
		if ti.flagHasPkgPath {
			ti.fastpathUnderlying = reflect.MapOf(ti.key, ti.elem)
		}
	case reflect.Slice:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.mbs, b2 = implIntf(rt, mapBySliceTyp)
		if !ti.mbs && b2 {
			ti.mbs = b2
		}
		ti.elem = rt.Elem()
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		if ti.flagHasPkgPath {
			ti.fastpathUnderlying = reflect.SliceOf(ti.elem)
		}
	case reflect.Chan:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.elem = rt.Elem()
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		ti.chandir = uint8(rt.ChanDir())
		ti.key = reflect.SliceOf(ti.elem)
		ti.keykind = uint8(reflect.Slice)
	case reflect.Array:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.mbs, b2 = implIntf(rt, mapBySliceTyp)
		if !ti.mbs && b2 {
			ti.mbs = b2
		}
		ti.elem = rt.Elem()
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.key = reflect.SliceOf(ti.elem)
		ti.keykind = uint8(reflect.Slice)
		ti.keysize = uint32(ti.key.Size())
		if ti.flagHasPkgPath {
			ti.fastpathUnderlying = ti.key
		}

		// MARKER: reflect.Ptr cannot happen here, as we halt early if reflect.Ptr is passed in
		// case reflect.Ptr:
		// 	ti.elem = rt.Elem()
		// 	ti.elemkind = uint8(ti.elem.Kind())
		// 	ti.elemsize = uint32(ti.elem.Size())
	}

	x.mu.Lock()
	sp := x.infos.load()
	// since this is an atomic load/store, we MUST use a different array each time,
	// else we have a data race when a store is happening simultaneously with a findRtidFn call.
	if sp == nil {
		pti = &ti
		sp = []rtid2ti{{rtid, pti}}
		x.infos.store(sp)
	} else {
		var idx uint
		idx, pti = findTypeInfo(sp, rtid)
		if pti == nil {
			pti = &ti
			sp2 := make([]rtid2ti, len(sp)+1)
			copy(sp2[idx+1:], sp[idx:])
			copy(sp2, sp[:idx])
			sp2[idx] = rtid2ti{rtid, pti}
			x.infos.store(sp2)
		}
	}
	x.mu.Unlock()
	return
}
func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr,
	path *structFieldInfoPathNode, pv *typeInfoLoad, omitEmpty bool) {
	// Read the fields and store how to access each value.
	//
	// It uses Go's rules for field selectors,
	// which say that the field with the shallowest depth is selected.
	//
	// Note: we consciously use slices, not a map, to simulate a set.
	// Typically, types have < 16 fields,
	// and iteration using equality checks is faster than a map lookup there.
	flen := rt.NumField()
LOOP:
	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
		f := rt.Field(int(j))
		fkind := f.Type.Kind()

		// skip if a func type, or is unexported, or structTag value == "-"
		switch fkind {
		case reflect.Func, reflect.UnsafePointer:
			continue LOOP
		}

		isUnexported := f.PkgPath != ""
		if isUnexported && !f.Anonymous {
			continue
		}
		stag := x.structTag(f.Tag)
		if stag == "-" {
			continue
		}
		var si structFieldInfo

		var numderef uint8 = 0
		for xft := f.Type; xft.Kind() == reflect.Ptr; xft = xft.Elem() {
			numderef++
		}

		var parsed bool
		// if anonymous and no struct tag (or it's blank),
		// and a struct (or pointer to struct), inline it.
		if f.Anonymous && fkind != reflect.Interface {
			// ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface
			ft := f.Type
			isPtr := ft.Kind() == reflect.Ptr
			for ft.Kind() == reflect.Ptr {
				ft = ft.Elem()
			}
			isStruct := ft.Kind() == reflect.Struct

			// Ignore embedded fields of unexported non-struct types.
			// Also, from go1.10, ignore pointers to unexported struct types
			// because unmarshal cannot assign a new struct to an unexported field.
			// See https://golang.org/issue/21357
			if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) {
				continue
			}
			doInline := stag == ""
			if !doInline {
				si.parseTag(stag)
				parsed = true
				doInline = si.encName == "" // si.isZero()
			}
			if doInline && isStruct {
				// if etypes contains this, don't call rget again (as the fields are already seen here)
				ftid := rt2id(ft)
				// We cannot recurse forever, but we need to track other field depths.
				// So - we break if we see a type twice (not the first time).
				// This should be sufficient to handle an embedded type that refers to its
				// owning type, which then refers to its embedded type.
				processIt := true
				numk := 0
				for _, k := range pv.etypes {
					if k == ftid {
						numk++
						if numk == rgetMaxRecursion {
							processIt = false
							break
						}
					}
				}
				if processIt {
					pv.etypes = append(pv.etypes, ftid)
					path2 := &structFieldInfoPathNode{
						parent:   path,
						typ:      f.Type,
						offset:   uint16(f.Offset),
						index:    j,
						kind:     uint8(fkind),
						numderef: numderef,
					}
					x.rget(ft, ftid, path2, pv, omitEmpty)
				}
				continue
			}
		}

		// after the anonymous dance: if an unexported field, skip
		if isUnexported || f.Name == "" { // f.Name cannot be "", but defensively handle it
			continue
		}

		si.path = structFieldInfoPathNode{
			parent:   path,
			typ:      f.Type,
			offset:   uint16(f.Offset),
			index:    j,
			kind:     uint8(fkind),
			numderef: numderef,
			// set asciiAlphaNum to true (default); checked and may be set to false below
			encNameAsciiAlphaNum: true,
			// note: omitEmpty might have been set in an earlier parseTag call, etc - so carry it forward
			omitEmpty: si.path.omitEmpty,
		}

		if !parsed {
			si.encName = f.Name
			si.parseTag(stag)
			parsed = true
		} else if si.encName == "" {
			si.encName = f.Name
		}

		// si.encNameHash = maxUintptr() // hashShortString(bytesView(si.encName))

		if omitEmpty {
			si.path.omitEmpty = true
		}

		for i := len(si.encName) - 1; i >= 0; i-- { // bounds-check elimination
			if !asciiAlphaNumBitset.isset(si.encName[i]) {
				si.path.encNameAsciiAlphaNum = false
				break
			}
		}

		pv.sfis = append(pv.sfis, si)
	}
}
func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) {
	// return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp)

	// if I's method is defined on T (i.e. T implements I), then *T also implements I.
	// The converse is not true.

	// Type.Implements can be expensive, as it does a simultaneous linear search across 2 lists
	// with alphanumeric string comparisons.
	// If we can avoid running one of these 2 calls, we should.

	base = rt.Implements(iTyp)
	if base {
		indir = true
	} else {
		indir = reflect.PtrTo(rt).Implements(iTyp)
	}
	return
}
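
// An illustrative sketch of the shortcut above: time.Time implements
// fmt.Stringer directly (value receiver), so the *T check is skipped entirely:
//
//	base, indir := implIntf(reflect.TypeOf(time.Time{}), reflect.TypeOf((*fmt.Stringer)(nil)).Elem())
//	// base == true, and indir == true without a second Implements call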
func bool2int(b bool) (v uint8) {
	// MARKER: optimized to be a single instruction
	if b {
		v = 1
	}
	return
}

func isSliceBoundsError(s string) bool {
	return strings.Contains(s, "index out of range") ||
		strings.Contains(s, "slice bounds out of range")
}

func sprintf(format string, v ...interface{}) string {
	return fmt.Sprintf(format, v...)
}

func panicValToErr(h errDecorator, v interface{}, err *error) {
	if v == *err {
		return
	}
	switch xerr := v.(type) {
	case nil:
	case runtime.Error:
		d, dok := h.(*Decoder)
		if dok && d.bytes && isSliceBoundsError(xerr.Error()) {
			*err = io.EOF
		} else {
			h.wrapErr(xerr, err)
		}
	case error:
		switch xerr {
		case nil:
		case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
			// treat as special (bubble up)
			*err = xerr
		default:
			h.wrapErr(xerr, err)
		}
	default:
		// we don't expect this to happen (as this library always panics with an error)
		h.wrapErr(fmt.Errorf("%v", v), err)
	}
}

func usableByteSlice(bs []byte, slen int) (out []byte, changed bool) {
	const maxCap = 1024 * 1024 * 64 // 64MB
	const skipMaxCap = false        // set to true to bypass the maxCap limit (for testing)
	if slen <= 0 {
		return []byte{}, true
	}
	if slen <= cap(bs) {
		return bs[:slen], false
	}
	// slen > cap(bs) ... handle memory overload appropriately
	if skipMaxCap || slen <= maxCap {
		return make([]byte, slen), true
	}
	return make([]byte, maxCap), true
}

func mapKeyFastKindFor(k reflect.Kind) mapKeyFastKind {
	return mapKeyFastKindVals[k&31]
}
// ----

type codecFnInfo struct {
	ti     *typeInfo
	xfFn   Ext
	xfTag  uint64
	addrD  bool
	addrDf bool // force: if addrD, then the decode function MUST take a ptr
	addrE  bool
	// addrEf bool // force: if addrE, then the encode function MUST take a ptr
}

// codecFn encapsulates the captured variables and the encode function.
// This way, we only do some calculations once, and pass control to the
// code block that should be called (encapsulated in a function)
// instead of executing the checks every time.
type codecFn struct {
	i  codecFnInfo
	fe func(*Encoder, *codecFnInfo, reflect.Value)
	fd func(*Decoder, *codecFnInfo, reflect.Value)
	// _ [1]uint64 // padding (cache-aligned)
}

type codecRtidFn struct {
	rtid uintptr
	fn   *codecFn
}

func makeExt(ext interface{}) Ext {
	switch t := ext.(type) {
	case Ext:
		return t
	case BytesExt:
		return &bytesExtWrapper{BytesExt: t}
	case InterfaceExt:
		return &interfaceExtWrapper{InterfaceExt: t}
	}
	return &extFailWrapper{}
}
func baseRV(v interface{}) (rv reflect.Value) {
	// use reflect.ValueOf, not rv4i: as of go 1.16beta, rv4i was not inlineable
	for rv = reflect.ValueOf(v); rv.Kind() == reflect.Ptr; rv = rv.Elem() {
	}
	return
}

// ----

// These "checkOverflow" functions must be inlinable, and must not make any function calls.
//
// Overflow means that the value cannot be represented without wrapping/overflow.
// Overflow=false does not mean that the value can be represented without losing precision
// (especially for floating point).

type checkOverflow struct{}

func (checkOverflow) Float32(v float64) (overflow bool) {
	if v < 0 {
		v = -v
	}
	return math.MaxFloat32 < v && v <= math.MaxFloat64
}

func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
	if v != 0 && v != (v<<(64-bitsize))>>(64-bitsize) {
		overflow = true
	}
	return
}

func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
	if v != 0 && v != (v<<(64-bitsize))>>(64-bitsize) {
		overflow = true
	}
	return
}

func (checkOverflow) Uint2Int(v uint64, neg bool) (overflow bool) {
	return (neg && v > 1<<63) || (!neg && v >= 1<<63)
}

func (checkOverflow) SignedInt(v uint64) (overflow bool) {
	// e.g. -127 to 128 for int8
	// pos := (v >> 63) == 0
	// ui2 := v & 0x7fffffffffffffff
	// if pos {
	// 	if ui2 > math.MaxInt64 {
	// 		overflow = true
	// 	}
	// } else {
	// 	if ui2 > math.MaxInt64-1 {
	// 		overflow = true
	// 	}
	// }

	// a signed integer overflows if the sign (top) bit is 1 (i.e. negative)
	// and the number formed by the remaining bits is > math.MaxInt64-1
	overflow = (v>>63) != 0 && v&0x7fffffffffffffff > math.MaxInt64-1
	return
}

func (x checkOverflow) Float32V(v float64) float64 {
	if x.Float32(v) {
		halt.errorf("float32 overflow: %v", v)
	}
	return v
}

func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
	if x.Uint(v, bitsize) {
		halt.errorf("uint64 overflow: %v", v)
	}
	return v
}

func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
	if x.Int(v, bitsize) {
		halt.errorf("int64 overflow: %v", v)
	}
	return v
}

func (x checkOverflow) SignedIntV(v uint64) int64 {
	if x.SignedInt(v) {
		halt.errorf("uint64 to int64 overflow: %v", v)
	}
	return int64(v)
}
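
// An illustrative sketch of the overflow checks above (values chosen purely
// for demonstration):
//
//	var chk checkOverflow
//	_ = chk.Int(300, 8)   // true: 300 cannot be represented in an int8
//	_ = chk.Int(-128, 8)  // false: -128 fits in an int8
//	_ = chk.Uint(256, 8)  // true: 256 cannot be represented in a uint8
//	_ = chk.Float32(1e39) // true: 1e39 exceeds math.MaxFloat32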
// ------------------ FLOATING POINT -----------------

func isNaN64(f float64) bool { return f != f }

func isWhitespaceChar(v byte) bool {
	// these are in order of speed below ...
	return v < 33
	// return v < 33 && whitespaceCharBitset64.isset(v)
	// return v < 33 && (v == ' ' || v == '\n' || v == '\t' || v == '\r')
	// return v == ' ' || v == '\n' || v == '\t' || v == '\r'
	// return whitespaceCharBitset.isset(v)
}

func isNumberChar(v byte) bool {
	// these are in order of speed below ...
	return numCharBitset.isset(v)
	// return v < 64 && numCharNoExpBitset64.isset(v) || v == 'e' || v == 'E'
	// return v > 42 && v < 102 && numCharWithExpBitset64.isset(v-42)
}
// -----------------------

type ioFlusher interface {
	Flush() error
}

type ioBuffered interface {
	Buffered() int
}

// -----------------------

type sfiRv struct {
	v *structFieldInfo
	r reflect.Value
}

// ------

// bitset types are better than [256]bool, because they permit the whole
// bitset array to fit on a single cache line and use less memory.
//
// Also, since pos is a byte (0-255), there are no bounds checks on indexing (cheap).
//
// We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduced
// bounds checking, so we discarded them, and everyone uses bitset256.
//
// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7
//
// Note that using >> or & is faster than using / or %, as division is quite expensive if not optimized.

// MARKER:
// We noticed a little performance degradation when representing bitset256 as [32]byte
// (or bitset32 as uint32): for example, json encoding improved from 188K ns/op to
// 168K ns/op (~10% reduction) once we moved away from that representation.
// Consequently, we are using a [NNN]bool for bitsetNNN.
// To eliminate bounds-checking, we index with pos % NNN, which is guaranteed to be within bounds.

// ----
type bitset32 [32]bool

func (x *bitset32) set(pos byte) *bitset32 {
	x[pos&31] = true // x[pos%32] = true
	return x
}

func (x *bitset32) isset(pos byte) bool {
	return x[pos&31] // x[pos%32]
}

type bitset256 [256]bool

func (x *bitset256) set(pos byte) *bitset256 {
	x[pos] = true
	return x
}

func (x *bitset256) isset(pos byte) bool {
	return x[pos]
}
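
// An illustrative sketch of how these bitsets are typically built and queried
// (the digit set here is hypothetical, for demonstration only):
//
//	var digits bitset256
//	for c := byte('0'); c <= '9'; c++ {
//		digits.set(c)
//	}
//	_ = digits.isset('7') // true
//	_ = digits.isset('x') // false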
// ------------

type panicHdl struct{}

// onerror will panic if err is defined (not nil)
func (panicHdl) onerror(err error) {
	if err != nil {
		panic(err)
	}
}

// errorf will always panic, using the parameters passed.
//
// Note: it is ok to pass in a stringView, as it will just pass it directly
// to a fmt.Sprintf call and not hold onto it.
//
//go:noinline
func (panicHdl) errorf(format string, params ...interface{}) {
	if format == "" {
		panic(errPanicUndefined)
	}
	if len(params) == 0 {
		panic(errors.New(format))
	}
	panic(fmt.Errorf(format, params...))
}

// ----------------------------------------------------

type errDecorator interface {
	wrapErr(in error, out *error)
}

type errDecoratorDef struct{}

func (errDecoratorDef) wrapErr(v error, e *error) { *e = v }

// ----------------------------------------------------

type mustHdl struct{}

func (mustHdl) String(s string, err error) string {
	halt.onerror(err)
	return s
}

func (mustHdl) Int(s int64, err error) int64 {
	halt.onerror(err)
	return s
}

func (mustHdl) Uint(s uint64, err error) uint64 {
	halt.onerror(err)
	return s
}

func (mustHdl) Float(s float64, err error) float64 {
	halt.onerror(err)
	return s
}
// -------------------

func freelistCapacity(length int) (capacity int) {
	for capacity = 8; capacity <= length; capacity *= 2 {
	}
	return
}
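
// An illustrative sketch: freelistCapacity returns the smallest power of 2
// (starting at 8) that is strictly greater than length:
//
//	_ = freelistCapacity(0)   // 8
//	_ = freelistCapacity(8)   // 16
//	_ = freelistCapacity(100) // 128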
// bytesFreelist is a list of byte buffers, sorted by cap.
//
// In anecdotal testing (running go test -tsd 1..6), we couldn't get
// the length of the list > 4 at any time. So we believe a linear search
// without bounds checking is sufficient.
//
// Typical usage model:
//
//   - peek may go together with put, iff pop=true. peek gets the largest byte slice temporarily.
//   - check is used to switch a []byte if necessary.
//   - get/put go together.
//
// Given that folks may get a []byte, and then append to it a lot which may re-allocate
// a new []byte, we should try to return both (the one received from blist and the new one allocated).
//
// Typical usage model for get/put, when we don't know whether we may need more than requested:
//
//	v0 := blist.get()
//	v1 := v0
//	... use v1 ...
//	blist.put(v1)
//	if byteSliceAddr(v0) != byteSliceAddr(v1) {
//		blist.put(v0)
//	}
type bytesFreelist [][]byte

// peek returns a slice of possibly non-zero'ed bytes, with len=0,
// and with the largest capacity from the list.
func (x *bytesFreelist) peek(length int, pop bool) (out []byte) {
	if bytesFreeListNoCache {
		return make([]byte, 0, freelistCapacity(length))
	}
	y := *x
	if len(y) > 0 {
		out = y[len(y)-1]
	}
	// start buf with a minimum of 64 bytes
	const minLenBytes = 64
	if length < minLenBytes {
		length = minLenBytes
	}
	if cap(out) < length {
		out = make([]byte, 0, freelistCapacity(length))
		y = append(y, out)
		*x = y
	}
	if pop && len(y) > 0 {
		y = y[:len(y)-1]
		*x = y
	}
	return
}
// get returns a slice of possibly non-zero'ed bytes, with len=0,
// and with cap >= the length requested.
func (x *bytesFreelist) get(length int) (out []byte) {
	if bytesFreeListNoCache {
		return make([]byte, 0, freelistCapacity(length))
	}
	y := *x
	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, v := range y {
	for i := 0; i < len(y); i++ {
		v := y[i]
		if cap(v) >= length {
			// *x = append(y[:i], y[i+1:]...)
			copy(y[i:], y[i+1:])
			*x = y[:len(y)-1]
			return v
		}
	}
	return make([]byte, 0, freelistCapacity(length))
}

func (x *bytesFreelist) put(v []byte) {
	if bytesFreeListNoCache || cap(v) == 0 {
		return
	}
	if len(v) != 0 {
		v = v[:0]
	}
	// append the new value, then try to put it in a better position
	y := append(*x, v)
	*x = y
	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, z := range y[:len(y)-1] {
	for i := 0; i < len(y)-1; i++ {
		z := y[i]
		if cap(z) > cap(v) {
			copy(y[i+1:], y[i:])
			y[i] = v
			return
		}
	}
}

func (x *bytesFreelist) check(v []byte, length int) (out []byte) {
	// ensure inlineable, by moving the slow path out to its own function
	if cap(v) >= length {
		return v[:0]
	}
	return x.checkPutGet(v, length)
}
func (x *bytesFreelist) checkPutGet(v []byte, length int) []byte {
	// checkPutGet is broken out into its own function, so check is inlineable in the general case
	const useSeparateCalls = false

	if useSeparateCalls {
		x.put(v)
		return x.get(length)
	}

	if bytesFreeListNoCache {
		return make([]byte, 0, freelistCapacity(length))
	}

	// assume cap(v) < length, so the put must happen before the get
	y := *x
	var put = cap(v) == 0 // if empty, consider it already put
	if !put {
		y = append(y, v)
		*x = y
	}
	for i := 0; i < len(y); i++ {
		z := y[i]
		if put {
			if cap(z) >= length {
				copy(y[i:], y[i+1:])
				y = y[:len(y)-1]
				*x = y
				return z
			}
		} else {
			if cap(z) > cap(v) {
				copy(y[i+1:], y[i:])
				y[i] = v
				put = true
			}
		}
	}
	return make([]byte, 0, freelistCapacity(length))
}
// -------------------------

// sfiRvFreelist is used by Encoder for encoding structs,
// where we have to gather the fields first and then
// analyze them for omitEmpty, before knowing the length of the array/map to encode.
//
// Typically, the length here will depend on the number of cycles e.g.
// if type T1 has a reference to T1, or T1 has a reference to type T2 which has a reference to T1.
//
// In the general case, the length of this list is 1 most of the time,
// so linear search is fine.
type sfiRvFreelist [][]sfiRv

func (x *sfiRvFreelist) get(length int) (out []sfiRv) {
	y := *x
	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, v := range y {
	for i := 0; i < len(y); i++ {
		v := y[i]
		if cap(v) >= length {
			// *x = append(y[:i], y[i+1:]...)
			copy(y[i:], y[i+1:])
			*x = y[:len(y)-1]
			return v
		}
	}
	return make([]sfiRv, 0, freelistCapacity(length))
}

func (x *sfiRvFreelist) put(v []sfiRv) {
	if len(v) != 0 {
		v = v[:0]
	}
	// append the new value, then try to put it in a better position
	y := append(*x, v)
	*x = y
	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, z := range y[:len(y)-1] {
	for i := 0; i < len(y)-1; i++ {
		z := y[i]
		if cap(z) > cap(v) {
			copy(y[i+1:], y[i:])
			y[i] = v
			return
		}
	}
}
// ---- multiple interner implementations ----

// It is hard to tell which is most performant:
//   - use a map[string]string: worst perf, no collisions, and unlimited entries
//   - use a linear search with move-to-front heuristics: no collisions, and maxed at 64 entries
//   - use a computationally-intensive hash: best performance, some collisions, maxed at 64 entries
const (
	internMaxStrLen = 16     // if more than 16 bytes, faster to copy than compare bytes
	internCap       = 64 * 2 // 64 uses 1K bytes RAM, so 128 (anecdotal sweet spot) uses 2K bytes
)

type internerMap map[string]string

func (x *internerMap) init() {
	*x = make(map[string]string, internCap)
}

func (x internerMap) string(v []byte) (s string) {
	s, ok := x[string(v)] // no allocation here, per go implementation
	if !ok {
		s = string(v) // new allocation here
		x[s] = s
	}
	return
}
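
// An illustrative sketch of the interner in use (e.g. for repeated map keys
// seen while decoding):
//
//	var in internerMap
//	in.init()
//	k1 := in.string([]byte("name")) // first sighting: allocates and caches
//	k2 := in.string([]byte("name")) // subsequent sightings: return the cached string
//	// k1 == k2, with a single backing allocation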