encode.go (5201B)
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package impl

import (
	"math"
	"sort"
	"sync/atomic"

	"google.golang.org/protobuf/internal/flags"
	proto "google.golang.org/protobuf/proto"
	piface "google.golang.org/protobuf/runtime/protoiface"
)

type marshalOptions struct {
	flags piface.MarshalInputFlags
}

func (o marshalOptions) Options() proto.MarshalOptions {
	return proto.MarshalOptions{
		AllowPartial:  true,
		Deterministic: o.Deterministic(),
		UseCachedSize: o.UseCachedSize(),
	}
}

func (o marshalOptions) Deterministic() bool { return o.flags&piface.MarshalDeterministic != 0 }
func (o marshalOptions) UseCachedSize() bool { return o.flags&piface.MarshalUseCachedSize != 0 }

// size is protoreflect.Methods.Size.
func (mi *MessageInfo) size(in piface.SizeInput) piface.SizeOutput {
	var p pointer
	if ms, ok := in.Message.(*messageState); ok {
		p = ms.pointer()
	} else {
		p = in.Message.(*messageReflectWrapper).pointer()
	}
	size := mi.sizePointer(p, marshalOptions{
		flags: in.Flags,
	})
	return piface.SizeOutput{Size: size}
}

func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) {
	mi.init()
	if p.IsNil() {
		return 0
	}
	if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() {
		if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 {
			return int(size)
		}
	}
	return mi.sizePointerSlow(p, opts)
}

func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int) {
	if flags.ProtoLegacy && mi.isMessageSet {
		size = sizeMessageSet(mi, p, opts)
		if mi.sizecacheOffset.IsValid() {
			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
		}
		return size
	}
	if mi.extensionOffset.IsValid() {
		e := p.Apply(mi.extensionOffset).Extensions()
		size += mi.sizeExtensions(e, opts)
	}
	for _, f := range mi.orderedCoderFields {
		if f.funcs.size == nil {
			continue
		}
		fptr := p.Apply(f.offset)
		if f.isPointer && fptr.Elem().IsNil() {
			continue
		}
		size += f.funcs.size(fptr, f, opts)
	}
	if mi.unknownOffset.IsValid() {
		if u := mi.getUnknownBytes(p); u != nil {
			size += len(*u)
		}
	}
	if mi.sizecacheOffset.IsValid() {
		if size > math.MaxInt32 {
			// The size is too large for the int32 sizecache field.
			// We will need to recompute the size when encoding;
			// unfortunately expensive, but better than invalid output.
			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1)
		} else {
			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
		}
	}
	return size
}
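
// Illustrative sketch, not part of the original encode.go: how the size cache
// above is typically exercised through the public proto package. The exact
// call sequence is an assumption; only proto.Size, proto.Marshal, and
// durationpb.New are public API (imports assumed: proto, durationpb, time).
//
//	m := durationpb.New(90 * time.Second)
//	n := proto.Size(m)         // reaches MessageInfo.size, which records the result
//	                           // in the generated message's sizeCache field
//	b, err := proto.Marshal(m) // a later marshal may set MarshalUseCachedSize,
//	                           // letting sizePointer return the cached value
//	_, _, _ = n, b, err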

// marshal is protoreflect.Methods.Marshal.
func (mi *MessageInfo) marshal(in piface.MarshalInput) (out piface.MarshalOutput, err error) {
	var p pointer
	if ms, ok := in.Message.(*messageState); ok {
		p = ms.pointer()
	} else {
		p = in.Message.(*messageReflectWrapper).pointer()
	}
	b, err := mi.marshalAppendPointer(in.Buf, p, marshalOptions{
		flags: in.Flags,
	})
	return piface.MarshalOutput{Buf: b}, err
}

func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOptions) ([]byte, error) {
	mi.init()
	if p.IsNil() {
		return b, nil
	}
	if flags.ProtoLegacy && mi.isMessageSet {
		return marshalMessageSet(mi, b, p, opts)
	}
	var err error
	// The old marshaler encodes extensions at the beginning.
	if mi.extensionOffset.IsValid() {
		e := p.Apply(mi.extensionOffset).Extensions()
		// TODO: Special handling for MessageSet?
		b, err = mi.appendExtensions(b, e, opts)
		if err != nil {
			return b, err
		}
	}
	for _, f := range mi.orderedCoderFields {
		if f.funcs.marshal == nil {
			continue
		}
		fptr := p.Apply(f.offset)
		if f.isPointer && fptr.Elem().IsNil() {
			continue
		}
		b, err = f.funcs.marshal(b, fptr, f, opts)
		if err != nil {
			return b, err
		}
	}
	if mi.unknownOffset.IsValid() && !mi.isMessageSet {
		if u := mi.getUnknownBytes(p); u != nil {
			b = append(b, (*u)...)
		}
	}
	return b, nil
}

func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
	if ext == nil {
		return 0
	}
	for _, x := range *ext {
		xi := getExtensionFieldInfo(x.Type())
		if xi.funcs.size == nil {
			continue
		}
		n += xi.funcs.size(x.Value(), xi.tagsize, opts)
	}
	return n
}

func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, opts marshalOptions) ([]byte, error) {
	if ext == nil {
		return b, nil
	}

	switch len(*ext) {
	case 0:
		return b, nil
	case 1:
		// Fast-path for one extension: Don't bother sorting the keys.
		var err error
		for _, x := range *ext {
			xi := getExtensionFieldInfo(x.Type())
			b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
		}
		return b, err
	default:
		// Sort the keys to provide a deterministic encoding.
		// Not sure this is required, but the old code does it.
		keys := make([]int, 0, len(*ext))
		for k := range *ext {
			keys = append(keys, int(k))
		}
		sort.Ints(keys)
		var err error
		for _, k := range keys {
			x := (*ext)[int32(k)]
			xi := getExtensionFieldInfo(x.Type())
			b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
			if err != nil {
				return b, err
			}
		}
		return b, nil
	}
}
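
// Illustrative sketch, not part of the original encode.go; the flag flow is an
// assumption based on the code above. A deterministic marshal from the public
// API sets piface.MarshalDeterministic on MarshalInput.Flags; marshalOptions
// reports it via Deterministic(), forwards it to per-field coder funcs through
// opts, and propagates it to nested marshals through Options(). Assuming
// imports of proto, durationpb, and time:
//
//	opts := proto.MarshalOptions{Deterministic: true}
//	b, err := opts.Marshal(durationpb.New(45 * time.Second))
//	_, _ = b, err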