@@ -716,26 +716,18 @@ func (d *ReflectionDecoder) decodeStruct(
 			continue
 		}
 
-		// Use FieldByIndex to access fields through their index path
-		// This handles embedded structs correctly, but we need to initialize
-		// any nil embedded pointers along the path
-		fieldValue := result
-		for i, idx := range fieldInfo.index {
-			fieldValue = fieldValue.Field(idx)
-			// If this is an embedded pointer field and it's nil, initialize it
-			if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() {
-				// Only initialize if this isn't the final field in the path
-				if i < len(fieldInfo.index)-1 {
-					fieldValue.Set(reflect.New(fieldValue.Type().Elem()))
-				}
-			}
-			// If it's a pointer, dereference it to continue traversal
-			if fieldValue.Kind() == reflect.Ptr && !fieldValue.IsNil() &&
-				i < len(fieldInfo.index)-1 {
-				fieldValue = fieldValue.Elem()
+		// Use optimized field access with addressable value wrapper
+		av := newAddressableValue(result)
+		fieldValue := av.fieldByIndex(fieldInfo.index0, fieldInfo.index, true)
+		if !fieldValue.IsValid() {
+			// Field access failed, skip this field
+			offset, err = d.nextValueOffset(offset, 1)
+			if err != nil {
+				return 0, err
 			}
+			continue
 		}
-		offset, err = d.decode(offset, fieldValue, depth)
+		offset, err = d.decode(offset, fieldValue.Value, depth)
 		if err != nil {
 			return 0, d.wrapErrorWithMapKey(err, string(key))
 		}
@@ -745,7 +737,8 @@ func (d *ReflectionDecoder) decodeStruct(
 
 type fieldInfo struct {
 	name   string
-	index  []int
+	index  []int // Remaining indices (nil if single field)
+	index0 int   // First field index (avoids bounds check)
 	depth  int
 	hasTag bool
 }
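
The index/index0 split above mirrors the reindex step added at the end of this diff: the first step of each field's index path moves into its own int, and the slice keeps only the remainder (nil for fields declared directly on the root struct), the stated goal being to avoid a slice bounds check on the hot path in decodeStruct. A minimal standalone sketch of that split, using a hypothetical splitIndex helper that is not part of the PR:

package main

import "fmt"

// splitIndex is a hypothetical helper illustrating the reindex transformation:
// the first step of the path goes into index0, and the remainder stays in a
// slice that is nil for fields declared directly on the root struct.
func splitIndex(path []int) (index0 int, rest []int) {
	index0 = path[0]
	if len(path) > 1 {
		rest = path[1:]
	}
	return index0, rest
}

func main() {
	i0, rest := splitIndex([]int{3}) // top-level field
	fmt.Println(i0, rest)            // 3 []

	i0, rest = splitIndex([]int{1, 0, 2}) // field promoted through embedded structs
	fmt.Println(i0, rest)                 // 1 [0 2]
}
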
@@ -880,7 +873,7 @@ func makeStructFields(rootType reflect.Type) *fieldsType {
 
 		// Add field to collection
 		allFields = append(allFields, fieldInfo{
-			index:  fieldIndex,
+			index:  fieldIndex, // Will be reindexed later for optimization
 			name:   fieldName,
 			hasTag: hasTag,
 			depth:  entry.depth,
@@ -889,19 +882,24 @@ func makeStructFields(rootType reflect.Type) *fieldsType {
 	}
 
 	// Apply precedence rules to resolve field conflicts
-	namedFields := make(map[string]*fieldInfo)
-	fieldsByName := make(map[string][]fieldInfo)
+	// Pre-size the map based on field count for better memory efficiency
+	namedFields := make(map[string]*fieldInfo, len(allFields))
+	fieldsByName := make(map[string][]fieldInfo, len(allFields))
 
 	// Group fields by name
 	for _, field := range allFields {
 		fieldsByName[field.name] = append(fieldsByName[field.name], field)
 	}
 
 	// Apply precedence rules for each field name
+	// Store results in a flattened slice to allow pointer references
+	flatFields := make([]fieldInfo, 0, len(fieldsByName))
+
 	for name, fields := range fieldsByName {
 		if len(fields) == 1 {
 			// No conflict, use the field
-			namedFields[name] = &fields[0]
+			flatFields = append(flatFields, fields[0])
+			namedFields[name] = &flatFields[len(flatFields)-1]
 			continue
 		}
 
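
The flatFields slice introduced above lets namedFields hold pointers that remain valid: it is created with capacity for every distinct field name, so later appends never move the backing array out from under &flatFields[len(flatFields)-1]. A small self-contained sketch (illustrative only, with hypothetical types) of why reserving that capacity matters:

package main

import "fmt"

type info struct{ name string }

func main() {
	// Without enough capacity, append may reallocate the backing array and a
	// previously taken element pointer keeps referring to the old array.
	grow := make([]info, 0, 1)
	grow = append(grow, info{"first"})
	p := &grow[0]
	grow = append(grow, info{"second"}) // exceeds cap 1, so it reallocates
	grow[0].name = "changed"
	fmt.Println(p.name) // "first": p still points into the old array

	// With capacity reserved up front (the flatFields pattern), pointers
	// taken right after each append stay valid.
	flat := make([]info, 0, 2)
	flat = append(flat, info{"first"})
	q := &flat[0]
	flat = append(flat, info{"second"}) // no reallocation
	flat[0].name = "changed"
	fmt.Println(q.name) // "changed": q still points into flat's array
}
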
@@ -935,10 +933,81 @@ func makeStructFields(rootType reflect.Type) *fieldsType {
 		// Same depth and tag status: first declared wins (keep current dominant)
 		}
 
-		namedFields[name] = &dominant
+		flatFields = append(flatFields, dominant)
+		namedFields[name] = &flatFields[len(flatFields)-1]
 	}
 
-	return &fieldsType{
+	fields := &fieldsType{
 		namedFields: namedFields,
 	}
+
+	// Reindex all fields for optimized access
+	fields.reindex()
+
+	return fields
+}
+
+// reindex optimizes field indices to avoid bounds checks during runtime.
+// This follows the json/v2 pattern of splitting the first index from the remainder.
+func (fs *fieldsType) reindex() {
+	for _, field := range fs.namedFields {
+		if len(field.index) > 0 {
+			field.index0 = field.index[0]
+			field.index = field.index[1:]
+			if len(field.index) == 0 {
+				field.index = nil // avoid pinning the backing slice
+			}
+		}
+	}
+}
+
+// addressableValue wraps a reflect.Value to optimize field access and
+// embedded pointer handling. Based on encoding/json/v2 patterns.
+type addressableValue struct {
+	reflect.Value
+
+	forcedAddr bool
+}
+
+// newAddressableValue creates an addressable value wrapper.
+func newAddressableValue(v reflect.Value) addressableValue {
+	return addressableValue{Value: v, forcedAddr: false}
+}
+
+// fieldByIndex efficiently accesses a field by its index path,
+// initializing embedded pointers as needed.
+func (av addressableValue) fieldByIndex(
+	index0 int,
+	remainingIndex []int,
+	mayAlloc bool,
+) addressableValue {
+	// First field access (optimized with no bounds check)
+	av = addressableValue{av.Field(index0), av.forcedAddr}
+
+	// Handle remaining indices if any
+	if len(remainingIndex) > 0 {
+		for _, i := range remainingIndex {
+			av = av.indirect(mayAlloc)
+			if !av.IsValid() {
+				return av
+			}
+			av = addressableValue{av.Field(i), av.forcedAddr}
+		}
+	}
+
+	return av
+}
+
+// indirect handles pointer dereferencing and initialization.
+func (av addressableValue) indirect(mayAlloc bool) addressableValue {
+	if av.Kind() == reflect.Ptr {
+		if av.IsNil() {
+			if !mayAlloc || !av.CanSet() {
+				return addressableValue{} // Return invalid value
+			}
+			av.Set(reflect.New(av.Type().Elem()))
+		}
+		av = addressableValue{av.Elem(), false}
+	}
+	return av
 }
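
To illustrate what the new fieldByIndex/indirect pair accomplishes, here is a standalone sketch (not part of the diff) that reaches a promoted field behind a nil embedded pointer using plain reflect, allocating the pointer along the way as indirect does when mayAlloc is true:

package main

import (
	"fmt"
	"reflect"
)

type Inner struct{ City string }

type Outer struct {
	*Inner // nil until allocated on demand during decoding
}

func main() {
	var out Outer
	v := reflect.ValueOf(&out).Elem() // addressable, like the result value in decodeStruct

	// The promoted field Outer.City has index path [0 0], split into index0 and rest.
	index0, rest := 0, []int{0}

	fv := v.Field(index0) // the embedded *Inner field
	for _, i := range rest {
		if fv.Kind() == reflect.Ptr {
			if fv.IsNil() {
				fv.Set(reflect.New(fv.Type().Elem())) // what indirect does when mayAlloc is true
			}
			fv = fv.Elem()
		}
		fv = fv.Field(i)
	}

	fv.SetString("Oslo")
	fmt.Println(out.Inner.City) // Oslo
}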