diff --git a/.gitignore b/.gitignore index 9f11b755..9f1c9271 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,3 @@ .idea/ +.vscode/ +.DS_Store diff --git a/db/db.go b/db/db.go index 0e3527b1..5ac31e33 100644 --- a/db/db.go +++ b/db/db.go @@ -850,8 +850,8 @@ func ReadPrefixN(db *grocksdb.DB, prefix []byte, n int) []*prefixes.PrefixRowKV value := it.Value() res[i] = &prefixes.PrefixRowKV{ - Key: key.Data(), - Value: value.Data(), + RawKey: key.Data(), + RawValue: value.Data(), } key.Free() @@ -908,8 +908,8 @@ func readWriteRawNCF(db *grocksdb.DB, options *IterOptions, out string, n int, f if i >= n { return } - key := kv.Key.([]byte) - value := kv.Value.([]byte) + key := kv.RawKey + value := kv.RawValue keyHex := hex.EncodeToString(key) valueHex := hex.EncodeToString(value) //log.Println(keyHex) @@ -947,8 +947,8 @@ func ReadWriteRawN(db *grocksdb.DB, options *IterOptions, out string, n int) { if i >= n { return } - key := kv.Key.([]byte) - value := kv.Value.([]byte) + key := kv.RawKey + value := kv.RawValue keyHex := hex.EncodeToString(key) valueHex := hex.EncodeToString(value) log.Println(keyHex) diff --git a/db/db_get.go b/db/db_get.go index a6ce6af3..b4761e8e 100644 --- a/db/db_get.go +++ b/db/db_get.go @@ -94,7 +94,7 @@ func (db *ReadOnlyDBColumnFamily) GetStreamsAndChannelRepostedByChannelHashes(re for _, reposterChannelHash := range reposterChannelHashes { key := prefixes.NewChannelToClaimKeyWHash(reposterChannelHash) - rawKeyPrefix := prefixes.ChannelToClaimKeyPackPartial(key, 1) + rawKeyPrefix := key.PartialPack(1) options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix) options = options.WithIncludeKey(false).WithIncludeValue(true) ch := IterCF(db.DB, options) @@ -167,7 +167,7 @@ func (db *ReadOnlyDBColumnFamily) GetShortClaimIdUrl(name string, normalizedName partialClaimId := claimId[:j] partialKey := prefixes.NewClaimShortIDKey(normalizedName, partialClaimId) log.Printf("partialKey: %#v\n", partialKey) - keyPrefix := prefixes.ClaimShortIDKeyPackPartial(partialKey, 2) + keyPrefix := partialKey.PartialPack(2) // Prefix and handle options := NewIterateOptions().WithPrefix(prefix).WithCfHandle(handle) // Start and stop bounds @@ -218,7 +218,7 @@ func (db *ReadOnlyDBColumnFamily) GetRepostedCount(claimHash []byte) (int, error } key := prefixes.NewRepostedKey(claimHash) - keyPrefix := prefixes.RepostedKeyPackPartial(key, 1) + keyPrefix := key.PartialPack(1) // Prefix and handle options := NewIterateOptions().WithPrefix(keyPrefix).WithCfHandle(handle) // Start and stop bounds @@ -267,8 +267,8 @@ func (db *ReadOnlyDBColumnFamily) GetActiveAmount(claimHash []byte, txoType uint startKey := prefixes.NewActiveAmountKey(claimHash, txoType, 0) endKey := prefixes.NewActiveAmountKey(claimHash, txoType, height) - startKeyRaw := prefixes.ActiveAmountKeyPackPartial(startKey, 3) - endKeyRaw := prefixes.ActiveAmountKeyPackPartial(endKey, 3) + startKeyRaw := startKey.PartialPack(3) + endKeyRaw := endKey.PartialPack(3) // Prefix and handle options := NewIterateOptions().WithPrefix([]byte{prefixes.ActiveAmount}).WithCfHandle(handle) // Start and stop bounds @@ -416,7 +416,7 @@ func (db *ReadOnlyDBColumnFamily) ControllingClaimIter() <-chan *prefixes.Prefix key := prefixes.NewClaimTakeoverKey("") var rawKeyPrefix []byte = nil - rawKeyPrefix = prefixes.ClaimTakeoverKeyPackPartial(key, 0) + rawKeyPrefix = key.PartialPack(0) options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix) options = options.WithIncludeValue(true) //.WithIncludeStop(true) ch := IterCF(db.DB, 
options) @@ -527,7 +527,7 @@ func (db *ReadOnlyDBColumnFamily) EffectiveAmountNameIter(normalizedName string) key := prefixes.NewEffectiveAmountKey(normalizedName) var rawKeyPrefix []byte = nil - rawKeyPrefix = prefixes.EffectiveAmountKeyPackPartial(key, 1) + rawKeyPrefix = key.PartialPack(1) options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix) options = options.WithIncludeValue(true) //.WithIncludeStop(true) ch := IterCF(db.DB, options) @@ -542,9 +542,9 @@ func (db *ReadOnlyDBColumnFamily) ClaimShortIdIter(normalizedName string, claimI key := prefixes.NewClaimShortIDKey(normalizedName, claimId) var rawKeyPrefix []byte = nil if claimId != "" { - rawKeyPrefix = prefixes.ClaimShortIDKeyPackPartial(key, 2) + rawKeyPrefix = key.PartialPack(2) } else { - rawKeyPrefix = prefixes.ClaimShortIDKeyPackPartial(key, 1) + rawKeyPrefix = key.PartialPack(1) } options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix) options = options.WithIncludeValue(true) //.WithIncludeStop(true) diff --git a/db/db_resolve.go b/db/db_resolve.go index 05981f4a..07b991a7 100644 --- a/db/db_resolve.go +++ b/db/db_resolve.go @@ -323,7 +323,7 @@ func (db *ReadOnlyDBColumnFamily) ResolveClaimInChannel(channelHash []byte, norm } key := prefixes.NewChannelToClaimKey(channelHash, normalizedName) - rawKeyPrefix := prefixes.ChannelToClaimKeyPackPartial(key, 2) + rawKeyPrefix := key.PartialPack(2) options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix) options = options.WithIncludeValue(true) //.WithIncludeStop(true) ch := IterCF(db.DB, options) diff --git a/db/db_test.go b/db/db_test.go index c123e1b1..e3c4081d 100644 --- a/db/db_test.go +++ b/db/db_test.go @@ -729,9 +729,9 @@ func TestIter(t *testing.T) { // log.Println(kv.Key) gotKey := kv.Key.(*prefixes.RepostedKey).PackKey() - keyPartial3 := prefixes.RepostedKeyPackPartial(kv.Key.(*prefixes.RepostedKey), 3) - keyPartial2 := prefixes.RepostedKeyPackPartial(kv.Key.(*prefixes.RepostedKey), 2) - keyPartial1 := prefixes.RepostedKeyPackPartial(kv.Key.(*prefixes.RepostedKey), 1) + keyPartial3 := kv.Key.(*prefixes.RepostedKey).PartialPack(3) + keyPartial2 := kv.Key.(*prefixes.RepostedKey).PartialPack(2) + keyPartial1 := kv.Key.(*prefixes.RepostedKey).PartialPack(1) // Check pack partial for sanity if !bytes.HasPrefix(gotKey, keyPartial3) { diff --git a/db/iteroptions.go b/db/iteroptions.go index 109d1fd6..4508cec9 100644 --- a/db/iteroptions.go +++ b/db/iteroptions.go @@ -24,6 +24,7 @@ type IterOptions struct { RawValue bool CfHandle *grocksdb.ColumnFamilyHandle It *grocksdb.Iterator + Serializer *prefixes.SerializationAPI } // NewIterateOptions creates a default options structure for a db iterator. @@ -41,6 +42,7 @@ func NewIterateOptions() *IterOptions { RawValue: false, CfHandle: nil, It: nil, + Serializer: prefixes.ProductionAPI, } } @@ -99,6 +101,11 @@ func (o *IterOptions) WithRawValue(rawValue bool) *IterOptions { return o } +func (o *IterOptions) WithSerializer(serializer *prefixes.SerializationAPI) *IterOptions { + o.Serializer = serializer + return o +} + // ReadRow reads a row from the db, returns nil when no more rows are available.
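// Deserialization goes through opts.Serializer, which defaults to // prefixes.ProductionAPI and can be swapped via WithSerializer; when the // RawKey/RawValue options are set, the copied bytes are returned unparsed in // PrefixRowKV.RawKey/RawValue instead of being unpacked.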
func (opts *IterOptions) ReadRow(prevKey *[]byte) *prefixes.PrefixRowKV { it := opts.It @@ -117,8 +124,10 @@ func (opts *IterOptions) ReadRow(prevKey *[]byte) *prefixes.PrefixRowKV { valueData := value.Data() valueLen := len(valueData) - var outKey interface{} = nil - var outValue interface{} = nil + var outKey prefixes.BaseKey = nil + var outValue prefixes.BaseValue = nil + var rawOutKey []byte = nil + var rawOutValue []byte = nil var err error = nil log.Trace("keyData:", keyData) @@ -136,12 +145,12 @@ func (opts *IterOptions) ReadRow(prevKey *[]byte) *prefixes.PrefixRowKV { newKeyData := make([]byte, keyLen) copy(newKeyData, keyData) if opts.IncludeKey && !opts.RawKey { - outKey, err = prefixes.UnpackGenericKey(newKeyData) + outKey, err = opts.Serializer.UnpackKey(newKeyData) if err != nil { log.Error(err) } } else if opts.IncludeKey { - outKey = newKeyData + rawOutKey = newKeyData } // Value could be quite large, so this setting could be important @@ -150,18 +159,20 @@ func (opts *IterOptions) ReadRow(prevKey *[]byte) *prefixes.PrefixRowKV { newValueData := make([]byte, valueLen) copy(newValueData, valueData) if !opts.RawValue { - outValue, err = prefixes.UnpackGenericValue(newKeyData, newValueData) + outValue, err = opts.Serializer.UnpackValue(newKeyData, newValueData) if err != nil { log.Error(err) } } else { - outValue = newValueData + rawOutValue = newValueData } } kv := &prefixes.PrefixRowKV{ - Key: outKey, - Value: outValue, + Key: outKey, + Value: outValue, + RawKey: rawOutKey, + RawValue: rawOutValue, } *prevKey = newKeyData diff --git a/db/prefixes/generic.go b/db/prefixes/generic.go new file mode 100644 index 00000000..2b8d6851 --- /dev/null +++ b/db/prefixes/generic.go @@ -0,0 +1,201 @@ +package prefixes + +import ( + "encoding/binary" + "fmt" + "reflect" + "strings" + + "github.com/go-restruct/restruct" + "github.com/lbryio/lbcd/chaincfg/chainhash" +) + +func init() { + restruct.EnableExprBeta() +} + +// Type OnesComplementEffectiveAmount (uint64) has to be encoded specially +// to get the desired sort ordering. +// Implement the Sizer, Packer, Unpacker interface to handle it manually. + +func (amt *OnesComplementEffectiveAmount) SizeOf() int { + return 8 +} + +func (amt *OnesComplementEffectiveAmount) Pack(buf []byte, order binary.ByteOrder) ([]byte, error) { + binary.BigEndian.PutUint64(buf, OnesCompTwiddle64-uint64(*amt)) + return buf[8:], nil +} + +func (amt *OnesComplementEffectiveAmount) Unpack(buf []byte, order binary.ByteOrder) ([]byte, error) { + *amt = OnesComplementEffectiveAmount(OnesCompTwiddle64 - binary.BigEndian.Uint64(buf)) + return buf[8:], nil +} + +// Struct BlockTxsValue has a field TxHashes of type []*chainhash.Hash. +// I haven't been able to figure out the right annotations to make +// restruct.Pack,Unpack work automagically. +// Implement the Sizer, Packer, Unpacker interface to handle it manually. 
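+// The wire format is simply the 32-byte tx hashes concatenated back to back, +// with no count or length prefix: SizeOf is 32*len(TxHashes), and Pack/Unpack +// below walk the buffer in fixed 32-byte steps.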
+ +func (kv *BlockTxsValue) SizeOf() int { + return 32 * len(kv.TxHashes) +} + +func (kv *BlockTxsValue) Pack(buf []byte, order binary.ByteOrder) ([]byte, error) { + offset := 0 + for _, h := range kv.TxHashes { + offset += copy(buf[offset:], h[:]) + } + return buf[offset:], nil +} + +func (kv *BlockTxsValue) Unpack(buf []byte, order binary.ByteOrder) ([]byte, error) { + offset := 0 + kv.TxHashes = make([]*chainhash.Hash, len(buf)/32) + for i := range kv.TxHashes { + kv.TxHashes[i] = (*chainhash.Hash)(buf[offset : offset+32]) + offset += 32 + } + return buf[offset:], nil +} + +func genericNew(prefix []byte, key bool) (interface{}, error) { + t, ok := prefixRegistry[prefix[0]] + if !ok { + panic(fmt.Sprintf("not handled: prefix=%v", prefix)) + } + if key { + return t.newKey(), nil + } + return t.newValue(), nil +} + +func GenericPack(kv interface{}, fields int) ([]byte, error) { + // Locate the byte offset of the first excluded field. + offset := 0 + if fields > 0 { + v := reflect.ValueOf(kv) + t := v.Type() + // Handle indirection to reach kind=Struct. + switch t.Kind() { + case reflect.Interface, reflect.Pointer: + v = v.Elem() + t = v.Type() + default: + panic(fmt.Sprintf("not handled: %v", t.Kind())) + } + count := 0 + for _, sf := range reflect.VisibleFields(t) { + if !sf.IsExported() { + continue + } + if sf.Anonymous && strings.HasPrefix(sf.Name, "LengthEncoded") { + fields += 1 // Skip it but process NameLen and Name instead. + continue + } + if count > fields { + break + } + sz, err := restruct.SizeOf(v.FieldByIndex(sf.Index).Interface()) + if err != nil { + panic(fmt.Sprintf("not handled: %v: %v", sf.Name, sf.Type.Kind())) + } + offset += sz + count += 1 + } + } + // Pack the struct. No ability to partially pack. + buf, err := restruct.Pack(binary.BigEndian, kv) + if err != nil { + panic(fmt.Sprintf("not handled: %v", err)) + } + // Return a prefix if some fields were excluded.
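+ // For example, a UTXOKey (Prefix [1]byte, HashX [11]byte, TxNum, Nout) + // packed with fields=1 counts Prefix (count 0) and HashX (count 1), so + // offset = 1+11 = 12 and the caller gets back a 12-byte iteration prefix.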
+ if fields > 0 { + return buf[:offset], nil + } + return buf, nil +} + +func GenericUnpack(pfx []byte, key bool, buf []byte) (interface{}, error) { + kv, _ := genericNew(pfx, key) + err := restruct.Unpack(buf, binary.BigEndian, kv) + if err != nil { + panic(fmt.Sprintf("not handled: %v", err)) + } + return kv, nil +} + +func GetSerializationAPI(prefix []byte) *SerializationAPI { + t, ok := prefixRegistry[prefix[0]] + if !ok { + panic(fmt.Sprintf("not handled: prefix=%v", prefix)) + } + if t.API != nil { + return t.API + } + return ProductionAPI +} + +type SerializationAPI struct { + PackKey func(key BaseKey) ([]byte, error) + PackPartialKey func(key BaseKey, fields int) ([]byte, error) + PackValue func(value BaseValue) ([]byte, error) + UnpackKey func(key []byte) (BaseKey, error) + UnpackValue func(prefix []byte, value []byte) (BaseValue, error) +} + +var ProductionAPI = &SerializationAPI{ + PackKey: PackGenericKey, + PackPartialKey: PackPartialGenericKey, + PackValue: PackGenericValue, + UnpackKey: UnpackGenericKey, + UnpackValue: UnpackGenericValue, +} + +var RegressionAPI_1 = &SerializationAPI{ + PackKey: func(key BaseKey) ([]byte, error) { + return GenericPack(key, -1) + }, + PackPartialKey: func(key BaseKey, fields int) ([]byte, error) { + return GenericPack(key, fields) + }, + PackValue: func(value BaseValue) ([]byte, error) { + return GenericPack(value, -1) + }, + UnpackKey: UnpackGenericKey, + UnpackValue: UnpackGenericValue, +} + +var RegressionAPI_2 = &SerializationAPI{ + PackKey: PackGenericKey, + PackPartialKey: PackPartialGenericKey, + PackValue: PackGenericValue, + UnpackKey: func(key []byte) (BaseKey, error) { + k, err := GenericUnpack(key, true, key) + return k.(BaseKey), err + }, + UnpackValue: func(prefix []byte, value []byte) (BaseValue, error) { + k, err := GenericUnpack(prefix, false, value) + return k.(BaseValue), err + }, +} + +var RegressionAPI_3 = &SerializationAPI{ + PackKey: func(key BaseKey) ([]byte, error) { + return GenericPack(key, -1) + }, + PackPartialKey: func(key BaseKey, fields int) ([]byte, error) { + return GenericPack(key, fields) + }, + PackValue: func(value BaseValue) ([]byte, error) { + return GenericPack(value, -1) + }, + UnpackKey: func(key []byte) (BaseKey, error) { + k, err := GenericUnpack(key, true, key) + return k.(BaseKey), err + }, + UnpackValue: func(prefix []byte, value []byte) (BaseValue, error) { + k, err := GenericUnpack(prefix, false, value) + return k.(BaseValue), err + }, +} diff --git a/db/prefixes/prefixes.go b/db/prefixes/prefixes.go index 5fb2c35c..0b224319 100644 --- a/db/prefixes/prefixes.go +++ b/db/prefixes/prefixes.go @@ -60,6 +60,12 @@ const ( SupportAmount = 'a' BlockTXs = 'b' + TrendingNotifications = 'c' + MempoolTx = 'd' + TouchedHashX = 'e' + HashXStatus = 'f' + HashXMempoolStatus = 'g' + ActivateClaimTXOType = 1 ActivatedSupportTXOType = 2 @@ -100,17 +106,78 @@ func GetPrefixes() [][]byte { {ChannelCount}, {SupportAmount}, {BlockTXs}, + {TrendingNotifications}, + {MempoolTx}, + {TouchedHashX}, + {HashXStatus}, + {HashXMempoolStatus}, } } // PrefixRowKV is a generic key/value pair for a prefix. 
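// Key/Value hold the unpacked forms when the iterator is asked to deserialize; // RawKey/RawValue hold the raw copied bytes when the RawKey/RawValue iterator // options are set (see IterOptions.ReadRow).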
type PrefixRowKV struct { - Key interface{} - Value interface{} + Key BaseKey + Value BaseValue + RawKey []byte + RawValue []byte +} + +type BaseKey interface { + NumFields() int + PartialPack(fields int) []byte + PackKey() []byte +} + +type BaseValue interface { + PackValue() []byte +} + +type KeyUnpacker interface { + UnpackKey(buf []byte) +} + +type ValueUnpacker interface { + UnpackValue(buf []byte) +} + +type LengthEncodedName struct { + NameLen uint16 `struct:"sizeof=Name"` + Name string `json:"name"` +} + +func NewLengthEncodedName(s string) LengthEncodedName { + return LengthEncodedName{ + NameLen: uint16(len(s)), + Name: s, + } +} + +type LengthEncodedNormalizedName struct { + NormalizedNameLen uint16 `struct:"sizeof=NormalizedName"` + NormalizedName string `json:"normalized_name"` +} + +func NewLengthEncodedNormalizedName(s string) LengthEncodedNormalizedName { + return LengthEncodedNormalizedName{ + NormalizedNameLen: uint16(len(s)), + NormalizedName: s, + } +} + +type LengthEncodedPartialClaimId struct { + PartialClaimIdLen uint8 `struct:"sizeof=PartialClaimId"` + PartialClaimId string `json:"partial_claim_id"` +} + +func NewLengthEncodedPartialClaimId(s string) LengthEncodedPartialClaimId { + return LengthEncodedPartialClaimId{ + PartialClaimIdLen: uint8(len(s)), + PartialClaimId: s, + } } type DBStateKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` } type DBStateValue struct { @@ -176,34 +243,20 @@ func (v *DBStateValue) PackValue() []byte { } value[32+4+4+32+4+4] = bitSetVar value[32+4+4+32+4+4+1] = v.DDVersion - var histFlushCount uint32 - var compFlushCount uint32 - var compCursor uint32 - histFlushCount = (OnesCompTwiddle32 - uint32(v.HistFlushCount)) - compFlushCount = (OnesCompTwiddle32 - uint32(v.CompFlushCount)) - compCursor = (OnesCompTwiddle32 - uint32(v.CompCursor)) - - binary.BigEndian.PutUint32(value[32+4+4+32+4+4+1+1:], histFlushCount) - binary.BigEndian.PutUint32(value[32+4+4+32+4+4+1+1+4:], compFlushCount) - binary.BigEndian.PutUint32(value[32+4+4+32+4+4+1+1+4+4:], compCursor) + + binary.BigEndian.PutUint32(value[32+4+4+32+4+4+1+1:], uint32(v.HistFlushCount)) + binary.BigEndian.PutUint32(value[32+4+4+32+4+4+1+1+4:], uint32(v.CompFlushCount)) + binary.BigEndian.PutUint32(value[32+4+4+32+4+4+1+1+4+4:], uint32(v.CompCursor)) binary.BigEndian.PutUint32(value[32+4+4+32+4+4+1+1+4+4+4:], v.EsSyncHeight) return value } -func DBStateKeyPackPartialKey(key *DBStateKey) func(int) []byte { - return func(fields int) []byte { - return DBStateKeyPackPartial(key, fields) - } +func (kv *DBStateKey) NumFields() int { + return 0 } -func DBStateKeyPackPartialfields(fields int) func(*DBStateKey) []byte { - return func(u *DBStateKey) []byte { - return DBStateKeyPackPartial(u, fields) - } -} - -func DBStateKeyPackPartial(k *DBStateKey, fields int) []byte { +func (k *DBStateKey) PartialPack(fields int) []byte { prefixLen := 1 var n = prefixLen @@ -232,21 +285,21 @@ func DBStateValueUnpack(value []byte) *DBStateValue { WallTime: binary.BigEndian.Uint32(value[32+4+4+32+4:]), FirstSync: value[32+4+4+32+4+4] == 1, DDVersion: value[32+4+4+32+4+4+1], - HistFlushCount: int32(^binary.BigEndian.Uint32(value[32+4+4+32+4+4+1+1:])), - CompFlushCount: int32(^binary.BigEndian.Uint32(value[32+4+4+32+4+4+1+1+4:])), - CompCursor: int32(^binary.BigEndian.Uint32(value[32+4+4+32+4+4+1+1+4+4:])), + HistFlushCount: int32(binary.BigEndian.Uint32(value[32+4+4+32+4+4+1+1:])), + CompFlushCount: int32(binary.BigEndian.Uint32(value[32+4+4+32+4+4+1+1+4:])), + 
CompCursor: int32(binary.BigEndian.Uint32(value[32+4+4+32+4+4+1+1+4+4:])), EsSyncHeight: binary.BigEndian.Uint32(value[32+4+4+32+4+4+1+1+4+4+4:]), } return x } type UndoKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` Height uint64 `json:"height"` } type UndoValue struct { - Data []byte `json:"data"` + Data []byte `struct-while:"!_eof" json:"data"` } func (k *UndoKey) PackKey() []byte { @@ -268,19 +321,11 @@ func (v *UndoValue) PackValue() []byte { return value } -func UndoKeyPackPartialKey(key *UndoKey) func(int) []byte { - return func(fields int) []byte { - return UndoKeyPackPartial(key, fields) - } +func (kv *UndoKey) NumFields() int { + return 1 } -func UndoKeyPackPartialfields(fields int) func(*UndoKey) []byte { - return func(u *UndoKey) []byte { - return UndoKeyPackPartial(u, fields) - } -} - -func UndoKeyPackPartial(k *UndoKey, fields int) []byte { +func (k *UndoKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 1 { @@ -329,8 +374,8 @@ func UndoValueUnpack(value []byte) *UndoValue { } type UTXOKey struct { - Prefix []byte `json:"prefix"` - HashX []byte `json:"hashx"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + HashX []byte `struct:"[11]byte" json:"hashx"` TxNum uint32 `json:"tx_num"` Nout uint16 `json:"nout"` } @@ -340,14 +385,14 @@ type UTXOValue struct { } type HashXUTXOKey struct { - Prefix []byte `json:"prefix"` - ShortTXHash []byte `json:"short_tx_hash"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + ShortTXHash []byte `struct:"[4]byte" json:"short_tx_hash"` TxNum uint32 `json:"tx_num"` Nout uint16 `json:"nout"` } type HashXUTXOValue struct { - HashX []byte `json:"hashx"` + HashX []byte `struct:"[11]byte" json:"hashx"` } // @@ -392,23 +437,13 @@ func (v *HashXUTXOValue) PackValue() []byte { return value } -// HashXUTXOKeyPackPartialKey creates a pack partial key function for n fields. -func HashXUTXOKeyPackPartialKey(key *HashXUTXOKey) func(int) []byte { - return func(fields int) []byte { - return HashXUTXOKeyPackPartial(key, fields) - } -} - -// HashXUTXOKeyPackPartialfields creates a pack partial key function for n fields. -func HashXUTXOKeyPackPartialfields(fields int) func(*HashXUTXOKey) []byte { - return func(u *HashXUTXOKey) []byte { - return HashXUTXOKeyPackPartial(u, fields) - } +func (kv *HashXUTXOKey) NumFields() int { + return 3 } // HashXUTXOKeyPackPartial packs a variable number of fields into a byte // array -func HashXUTXOKeyPackPartial(k *HashXUTXOKey, fields int) []byte { +func (k *HashXUTXOKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 3 { @@ -467,13 +502,13 @@ func HashXUTXOValueUnpack(value []byte) *HashXUTXOValue { } type HashXHistoryKey struct { - Prefix []byte `json:"prefix"` - HashX []byte `json:"hashx"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + HashX []byte `struct:"[11]byte" json:"hashx"` Height uint32 `json:"height"` } type HashXHistoryValue struct { - HashXes []uint16 `json:"hashxes"` + HashXes []uint16 `struct-while:"!_eof" json:"hashxes"` } func (k *HashXHistoryKey) String() string { @@ -507,23 +542,13 @@ func (v *HashXHistoryValue) PackValue() []byte { return value } -// HashXHistoryKeyPackPartialKey creates a pack partial key function for n fields. 
-func HashXHistoryKeyPackPartialKey(key *HashXHistoryKey) func(int) []byte { - return func(fields int) []byte { - return HashXHistoryKeyPackPartial(key, fields) - } -} - -// HashXHistoryKeyPackPartialfields creates a pack partial key function for n fields. -func HashXHistoryKeyPackPartialfields(fields int) func(*HashXHistoryKey) []byte { - return func(u *HashXHistoryKey) []byte { - return HashXHistoryKeyPackPartial(u, fields) - } +func (kv *HashXHistoryKey) NumFields() int { + return 2 } // HashXHistoryKeyPackPartial packs a variable number of fields into a byte // array -func HashXHistoryKeyPackPartial(k *HashXHistoryKey, fields int) []byte { +func (k *HashXHistoryKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 2 { @@ -581,7 +606,7 @@ func HashXHistoryValueUnpack(value []byte) *HashXHistoryValue { } type BlockHashKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` Height uint32 `json:"height"` } @@ -614,19 +639,11 @@ func (v *BlockHashValue) PackValue() []byte { return value } -func BlockHashKeyPackPartialKey(key *BlockHashKey) func(int) []byte { - return func(fields int) []byte { - return BlockHashKeyPackPartial(key, fields) - } +func (kv *BlockHashKey) NumFields() int { + return 1 } -func BlockHashKeyPackPartialfields(fields int) func(*BlockHashKey) []byte { - return func(u *BlockHashKey) []byte { - return BlockHashKeyPackPartial(u, fields) - } -} - -func BlockHashKeyPackPartial(k *BlockHashKey, fields int) []byte { +func (k *BlockHashKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 1 { @@ -680,12 +697,12 @@ func BlockHashValueUnpack(value []byte) *BlockHashValue { } type BlockTxsKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` Height uint32 `json:"height"` } type BlockTxsValue struct { - TxHashes []*chainhash.Hash `json:"tx_hashes"` + TxHashes []*chainhash.Hash `struct-while:"!_eof" json:"tx_hashes"` } func (k *BlockTxsKey) NewBlockTxsKey(height uint32) *BlockTxsKey { @@ -722,19 +739,11 @@ func (v *BlockTxsValue) PackValue() []byte { return value } -func BlockTxsKeyPackPartialKey(key *BlockTxsKey) func(int) []byte { - return func(fields int) []byte { - return BlockTxsKeyPackPartial(key, fields) - } -} - -func BlockTxsKeyPackPartialfields(fields int) func(*BlockTxsKey) []byte { - return func(u *BlockTxsKey) []byte { - return BlockTxsKeyPackPartial(u, fields) - } +func (kv *BlockTxsKey) NumFields() int { + return 1 } -func BlockTxsKeyPackPartial(k *BlockTxsKey, fields int) []byte { +func (k *BlockTxsKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 1 { @@ -787,7 +796,7 @@ func BlockTxsValueUnpack(value []byte) *BlockTxsValue { } type TxCountKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` Height uint32 `json:"height"` } @@ -820,19 +829,11 @@ func (v *TxCountValue) PackValue() []byte { return value } -func TxCountKeyPackPartialKey(key *TxCountKey) func(int) []byte { - return func(fields int) []byte { - return TxCountKeyPackPartial(key, fields) - } -} - -func TxCountKeyPackPartialfields(fields int) func(*TxCountKey) []byte { - return func(u *TxCountKey) []byte { - return TxCountKeyPackPartial(u, fields) - } +func (kv *TxCountKey) NumFields() int { + return 1 } -func TxCountKeyPackPartial(k *TxCountKey, fields int) []byte { +func (k *TxCountKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 1 { @@ -880,7 +881,7 @@ func TxCountValueUnpack(value []byte) *TxCountValue { } type TxHashKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` TxNum uint32 `json:"tx_num"` } @@ -914,19 +915,11 @@ func (v *TxHashValue) PackValue() []byte { return value } -func TxHashKeyPackPartialKey(key *TxHashKey) func(int) []byte { - return func(fields int) []byte { - return TxHashKeyPackPartial(key, fields) - } +func (kv *TxHashKey) NumFields() int { + return 1 } -func TxHashKeyPackPartialfields(fields int) func(*TxHashKey) []byte { - return func(u *TxHashKey) []byte { - return TxHashKeyPackPartial(u, fields) - } -} - -func TxHashKeyPackPartial(k *TxHashKey, fields int) []byte { +func (k *TxHashKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 1 { @@ -974,7 +967,7 @@ func TxHashValueUnpack(value []byte) *TxHashValue { } type TxNumKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` TxHash *chainhash.Hash `json:"tx_hash"` } @@ -1000,19 +993,11 @@ func (v *TxNumValue) PackValue() []byte { return value } -func TxNumKeyPackPartialKey(key *TxNumKey) func(int) []byte { - return func(fields int) []byte { - return TxNumKeyPackPartial(key, fields) - } -} - -func TxNumKeyPackPartialfields(fields int) func(*TxNumKey) []byte { - return func(u *TxNumKey) []byte { - return TxNumKeyPackPartial(u, fields) - } +func (kv *TxNumKey) NumFields() int { + return 1 } -func TxNumKeyPackPartial(k *TxNumKey, fields int) []byte { +func (k *TxNumKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 1 { @@ -1060,12 +1045,12 @@ func TxNumValueUnpack(value []byte) *TxNumValue { } type TxKey struct { - Prefix []byte `json:"prefix"` - TxHash *chainhash.Hash `json:"tx_hash"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + TxHash *chainhash.Hash `struct:"*[32]byte" json:"tx_hash"` } type TxValue struct { - RawTx []byte `json:"raw_tx"` + RawTx []byte `struct-while:"!_eof" json:"raw_tx"` } func (k *TxKey) PackKey() []byte { @@ -1086,19 +1071,11 @@ func (v *TxValue) PackValue() []byte { return value } -func TxKeyPackPartialKey(key *TxKey) func(int) []byte { - return func(fields int) []byte { - return TxKeyPackPartial(key, fields) - } -} - -func TxKeyPackPartialfields(fields int) func(*TxKey) []byte { - return func(u *TxKey) []byte { - return TxKeyPackPartial(u, fields) - } +func (kv *TxKey) NumFields() int { + return 1 } -func TxKeyPackPartial(k *TxKey, fields int) []byte { +func (k *TxKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 1 { @@ -1146,12 +1123,12 @@ func TxValueUnpack(value []byte) *TxValue { } type BlockHeaderKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` Height uint32 `json:"height"` } type BlockHeaderValue struct { - Header []byte `json:"header"` + Header []byte `struct:"[112]byte" json:"header"` } func (k *BlockHeaderValue) Equals(v *BlockHeaderValue) bool { @@ -1183,19 +1160,11 @@ func (v *BlockHeaderValue) PackValue() []byte { return value } -func BlockHeaderKeyPackPartialKey(key *BlockHeaderKey) func(int) []byte { - return func(fields int) []byte { - return BlockHeaderKeyPackPartial(key, fields) - } +func (kv *BlockHeaderKey) NumFields() int { + return 1 } -func BlockHeaderKeyPackPartialfields(fields int) func(*BlockHeaderKey) []byte { - return func(u *BlockHeaderKey) []byte { - return BlockHeaderKeyPackPartial(u, fields) - } -} - -func BlockHeaderKeyPackPartial(k *BlockHeaderKey, fields int) []byte { +func (k *BlockHeaderKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 1 { @@ -1244,8 +1213,8 @@ func BlockHeaderValueUnpack(value []byte) *BlockHeaderValue { } type ClaimToTXOKey struct { - Prefix []byte `json:"prefix"` - ClaimHash []byte `json:"claim_hash"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` } type ClaimToTXOValue struct { @@ -1255,7 +1224,7 @@ type ClaimToTXOValue struct { RootPosition uint16 `json:"root_position"` Amount uint64 `json:"amount"` ChannelSignatureIsValid bool `json:"channel_signature_is_valid"` - Name string `json:"name"` + LengthEncodedName } func NewClaimToTXOKey(claimHash []byte) *ClaimToTXOKey { @@ -1301,19 +1270,11 @@ func (v *ClaimToTXOValue) PackValue() []byte { return value } -func ClaimToTXOKeyPackPartialKey(key *ClaimToTXOKey) func(int) []byte { - return func(fields int) []byte { - return ClaimToTXOKeyPackPartial(key, fields) - } +func (kv *ClaimToTXOKey) NumFields() int { + return 1 } -func ClaimToTXOKeyPackPartialfields(fields int) func(*ClaimToTXOKey) []byte { - return func(u *ClaimToTXOKey) []byte { - return ClaimToTXOKeyPackPartial(u, fields) - } -} - -func ClaimToTXOKeyPackPartial(k *ClaimToTXOKey, fields int) []byte { +func (k *ClaimToTXOKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 1 { @@ -1364,19 +1325,19 @@ func ClaimToTXOValueUnpack(value []byte) *ClaimToTXOValue { RootPosition: binary.BigEndian.Uint16(value[10:]), Amount: binary.BigEndian.Uint64(value[12:]), ChannelSignatureIsValid: value[20] == 1, - Name: string(value[23 : 23+nameLen]), + LengthEncodedName: NewLengthEncodedName(string(value[23 : 23+nameLen])), } } type TXOToClaimKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` TxNum uint32 `json:"tx_num"` Position uint16 `json:"position"` } type TXOToClaimValue struct { - ClaimHash []byte `json:"claim_hash"` - Name string `json:"name"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` + LengthEncodedName } func NewTXOToClaimKey(txNum uint32, position uint16) *TXOToClaimKey { @@ -1410,19 +1371,11 @@ func (v *TXOToClaimValue) PackValue() []byte { return value } -func TXOToClaimKeyPackPartialKey(key *TXOToClaimKey) func(int) []byte { - return func(fields int) []byte { - return TXOToClaimKeyPackPartial(key, fields) - } -} - -func TXOToClaimKeyPackPartialfields(fields int) func(*TXOToClaimKey) []byte { - return func(u *TXOToClaimKey) []byte { - return TXOToClaimKeyPackPartial(u, fields) - } +func (kv *TXOToClaimKey) NumFields() int { + return 2 } -func TXOToClaimKeyPackPartial(k *TXOToClaimKey, fields int) []byte { +func (k *TXOToClaimKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 2 { @@ -1471,17 +1424,17 @@ func TXOToClaimKeyUnpack(key []byte) *TXOToClaimKey { func TXOToClaimValueUnpack(value []byte) *TXOToClaimValue { nameLen := binary.BigEndian.Uint16(value[20:]) return &TXOToClaimValue{ - ClaimHash: value[:20], - Name: string(value[22 : 22+nameLen]), + ClaimHash: value[:20], + LengthEncodedName: NewLengthEncodedName(string(value[22 : 22+nameLen])), } } type ClaimShortIDKey struct { - Prefix []byte `json:"prefix"` - NormalizedName string `json:"normalized_name"` - PartialClaimId string `json:"partial_claim_id"` - RootTxNum uint32 `json:"root_tx_num"` - RootPosition uint16 `json:"root_position"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + LengthEncodedNormalizedName // fields NormalizedNameLen, NormalizedName + LengthEncodedPartialClaimId // fields PartialClaimIdLen, PartialClaimId + RootTxNum uint32 `json:"root_tx_num"` + RootPosition uint16 `json:"root_position"` } type ClaimShortIDValue struct { @@ -1491,9 +1444,9 @@ type ClaimShortIDValue struct { func NewClaimShortIDKey(normalizedName, partialClaimId string) *ClaimShortIDKey { return &ClaimShortIDKey{ - Prefix: []byte{ClaimShortIdPrefix}, - NormalizedName: normalizedName, - PartialClaimId: partialClaimId, + Prefix: []byte{ClaimShortIdPrefix}, + LengthEncodedNormalizedName: NewLengthEncodedNormalizedName(normalizedName), + LengthEncodedPartialClaimId: NewLengthEncodedPartialClaimId(partialClaimId), } } @@ -1523,19 +1476,11 @@ func (v *ClaimShortIDValue) PackValue() []byte { return value } -func ClaimShortIDKeyPackPartialKey(key *ClaimShortIDKey) func(int) []byte { - return func(fields int) []byte { - return ClaimShortIDKeyPackPartial(key, fields) - } -} - -func ClaimShortIDKeyPackPartialfields(fields int) func(*ClaimShortIDKey) []byte { - return func(u *ClaimShortIDKey) []byte { - return ClaimShortIDKeyPackPartial(u, fields) - } +func (kv *ClaimShortIDKey) NumFields() int { + return 4 } -func ClaimShortIDKeyPackPartial(k *ClaimShortIDKey, fields int) []byte { +func (k *ClaimShortIDKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 4 { @@ -1591,11 +1536,11 @@ func ClaimShortIDKeyUnpack(key []byte) *ClaimShortIDKey { nameLen := int(binary.BigEndian.Uint16(key[prefixLen:])) partialClaimLen := int(uint8(key[prefixLen+2+nameLen])) return &ClaimShortIDKey{ - Prefix: key[:prefixLen], - NormalizedName: string(key[prefixLen+2 : prefixLen+2+nameLen]), - PartialClaimId: string(key[prefixLen+2+nameLen+1 : prefixLen+2+nameLen+1+partialClaimLen]), - RootTxNum: binary.BigEndian.Uint32(key[prefixLen+2+nameLen+1+partialClaimLen:]), - RootPosition: binary.BigEndian.Uint16(key[prefixLen+2+nameLen+1+partialClaimLen+4:]), + Prefix: key[:prefixLen], + LengthEncodedNormalizedName: NewLengthEncodedNormalizedName(string(key[prefixLen+2 : prefixLen+2+nameLen])), + LengthEncodedPartialClaimId: NewLengthEncodedPartialClaimId(string(key[prefixLen+2+nameLen+1 : prefixLen+2+nameLen+1+partialClaimLen])), + RootTxNum: binary.BigEndian.Uint32(key[prefixLen+2+nameLen+1+partialClaimLen:]), + RootPosition: binary.BigEndian.Uint16(key[prefixLen+2+nameLen+1+partialClaimLen+4:]), } } @@ -1607,14 +1552,14 @@ func ClaimShortIDValueUnpack(value []byte) *ClaimShortIDValue { } type ClaimToChannelKey struct { - Prefix []byte `json:"prefix"` - ClaimHash []byte `json:"claim_hash"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` TxNum uint32 `json:"tx_num"` Position uint16 `json:"position"` } type ClaimToChannelValue struct { - SigningHash []byte `json:"signing_hash"` + SigningHash []byte `struct:"[20]byte" json:"signing_hash"` } func NewClaimToChannelKey(claimHash []byte, txNum uint32, position uint16) *ClaimToChannelKey { @@ -1646,19 +1591,11 @@ func (v *ClaimToChannelValue) PackValue() []byte { return value } -func ClaimToChannelKeyPackPartialKey(key *ClaimToChannelKey) func(int) []byte { - return func(fields int) []byte { - return ClaimToChannelKeyPackPartial(key, fields) - } +func (kv *ClaimToChannelKey) NumFields() int { + return 3 } -func ClaimToChannelKeyPackPartialfields(fields int) func(*ClaimToChannelKey) []byte { - return func(u *ClaimToChannelKey) []byte { - return ClaimToChannelKeyPackPartial(u, fields) - } -} - -func ClaimToChannelKeyPackPartial(k *ClaimToChannelKey, fields int) []byte { +func (k *ClaimToChannelKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 3 { @@ -1717,22 +1654,22 @@ func ClaimToChannelValueUnpack(value []byte) *ClaimToChannelValue { } type ChannelToClaimKey struct { - Prefix []byte `json:"prefix"` - SigningHash []byte `json:"signing_hash"` - Name string `json:"name"` - TxNum uint32 `json:"tx_num"` - Position uint16 `json:"position"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + SigningHash []byte `struct:"[20]byte" json:"signing_hash"` + LengthEncodedName // fields NameLen, Name + TxNum uint32 `json:"tx_num"` + Position uint16 `json:"position"` } type ChannelToClaimValue struct { - ClaimHash []byte `json:"claim_hash"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` } func NewChannelToClaimKey(channelHash []byte, normalizedName string) *ChannelToClaimKey { return &ChannelToClaimKey{ - Prefix: []byte{ChannelToClaim}, - SigningHash: channelHash, - Name: normalizedName, + Prefix: []byte{ChannelToClaim}, + SigningHash: channelHash, + LengthEncodedName: NewLengthEncodedName(normalizedName), } } @@ -1765,19 +1702,11 @@ func (v *ChannelToClaimValue) PackValue() []byte { return value } -func ChannelToClaimKeyPackPartialKey(key *ChannelToClaimKey) func(int) []byte { - return func(fields int) []byte { - return ChannelToClaimKeyPackPartial(key, fields) - } -} - -func ChannelToClaimKeyPackPartialfields(fields int) func(*ChannelToClaimKey) []byte { - return func(u *ChannelToClaimKey) []byte { - return ChannelToClaimKeyPackPartial(u, fields) - } +func (kv *ChannelToClaimKey) NumFields() int { + return 4 } -func ChannelToClaimKeyPackPartial(k *ChannelToClaimKey, fields int) []byte { +func (k *ChannelToClaimKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 4 { @@ -1828,11 +1757,11 @@ func ChannelToClaimKeyUnpack(key []byte) *ChannelToClaimKey { prefixLen := 1 nameLen := int(binary.BigEndian.Uint16(key[prefixLen+20:])) return &ChannelToClaimKey{ - Prefix: key[:prefixLen], - SigningHash: key[prefixLen : prefixLen+20], - Name: string(key[prefixLen+22 : prefixLen+22+nameLen]), - TxNum: binary.BigEndian.Uint32(key[prefixLen+22+nameLen:]), - Position: binary.BigEndian.Uint16(key[prefixLen+22+nameLen+4:]), + Prefix: key[:prefixLen], + SigningHash: key[prefixLen : prefixLen+20], + LengthEncodedName: NewLengthEncodedName(string(key[prefixLen+22 : prefixLen+22+nameLen])), + TxNum: binary.BigEndian.Uint32(key[prefixLen+22+nameLen:]), + Position: binary.BigEndian.Uint16(key[prefixLen+22+nameLen+4:]), } } @@ -1843,8 +1772,8 @@ func ChannelToClaimValueUnpack(value []byte) *ChannelToClaimValue { } type ChannelCountKey struct { - Prefix []byte `json:"prefix"` - ChannelHash []byte `json:"channel_hash"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + ChannelHash []byte `struct:"[20]byte" json:"channel_hash"` } type ChannelCountValue struct { @@ -1876,19 +1805,11 @@ func (v *ChannelCountValue) PackValue() []byte { return value } -func ChannelCountKeyPackPartialKey(key *ChannelCountKey) func(int) []byte { - return func(fields int) []byte { - return ChannelCountKeyPackPartial(key, fields) - } +func (kv *ChannelCountKey) NumFields() int { + return 1 } -func ChannelCountKeyPackPartialfields(fields int) func(*ChannelCountKey) []byte { - return func(u *ChannelCountKey) []byte { - return ChannelCountKeyPackPartial(u, fields) - } -} - -func ChannelCountKeyPackPartial(k *ChannelCountKey, fields int) []byte { +func (k *ChannelCountKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 1 { @@ -1936,8 +1857,8 @@ func ChannelCountValueUnpack(value []byte) *ChannelCountValue { } type SupportAmountKey struct { - Prefix []byte `json:"prefix"` - ClaimHash []byte `json:"claim_hash"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` } type SupportAmountValue struct { @@ -1969,19 +1890,11 @@ func (v *SupportAmountValue) PackValue() []byte { return value } -func SupportAmountKeyPackPartialKey(key *SupportAmountKey) func(int) []byte { - return func(fields int) []byte { - return SupportAmountKeyPackPartial(key, fields) - } -} - -func SupportAmountKeyPackPartialfields(fields int) func(*SupportAmountKey) []byte { - return func(u *SupportAmountKey) []byte { - return SupportAmountKeyPackPartial(u, fields) - } +func (kv *SupportAmountKey) NumFields() int { + return 1 } -func SupportAmountKeyPackPartial(k *SupportAmountKey, fields int) []byte { +func (k *SupportAmountKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 1 { @@ -2029,8 +1942,8 @@ func SupportAmountValueUnpack(value []byte) *SupportAmountValue { } type ClaimToSupportKey struct { - Prefix []byte `json:"prefix"` - ClaimHash []byte `json:"claim_hash"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` TxNum uint32 `json:"tx_num"` Position uint16 `json:"position"` } @@ -2059,19 +1972,11 @@ func (v *ClaimToSupportValue) PackValue() []byte { return value } -func ClaimToSupportKeyPackPartialKey(key *ClaimToSupportKey) func(int) []byte { - return func(fields int) []byte { - return ClaimToSupportKeyPackPartial(key, fields) - } -} - -func ClaimToSupportKeyPackPartialfields(fields int) func(*ClaimToSupportKey) []byte { - return func(u *ClaimToSupportKey) []byte { - return ClaimToSupportKeyPackPartial(u, fields) - } +func (kv *ClaimToSupportKey) NumFields() int { + return 3 } -func ClaimToSupportKeyPackPartial(k *ClaimToSupportKey, fields int) []byte { +func (k *ClaimToSupportKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 3 { @@ -2130,13 +2035,13 @@ func ClaimToSupportValueUnpack(value []byte) *ClaimToSupportValue { } type SupportToClaimKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` TxNum uint32 `json:"tx_num"` Position uint16 `json:"position"` } type SupportToClaimValue struct { - ClaimHash []byte `json:"claim_hash"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` } func (k *SupportToClaimKey) PackKey() []byte { @@ -2158,19 +2063,11 @@ func (v *SupportToClaimValue) PackValue() []byte { return value } -func SupportToClaimKeyPackPartialKey(key *SupportToClaimKey) func(int) []byte { - return func(fields int) []byte { - return SupportToClaimKeyPackPartial(key, fields) - } -} - -func SupportToClaimKeyPackPartialfields(fields int) func(*SupportToClaimKey) []byte { - return func(u *SupportToClaimKey) []byte { - return SupportToClaimKeyPackPartial(u, fields) - } +func (kv *SupportToClaimKey) NumFields() int { + return 2 } -func SupportToClaimKeyPackPartial(k *SupportToClaimKey, fields int) []byte { +func (k *SupportToClaimKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 2 { @@ -2223,15 +2120,15 @@ func SupportToClaimValueUnpack(value []byte) *SupportToClaimValue { } type ClaimExpirationKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` Expiration uint32 `json:"expiration"` TxNum uint32 `json:"tx_num"` Position uint16 `json:"position"` } type ClaimExpirationValue struct { - ClaimHash []byte `json:"claim_hash"` - NormalizedName string `json:"normalized_name"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` + LengthEncodedNormalizedName // fields NormalizedNameLen, NormalizedName } func (k *ClaimExpirationKey) PackKey() []byte { @@ -2258,19 +2155,11 @@ func (v *ClaimExpirationValue) PackValue() []byte { return value } -func ClaimExpirationKeyPackPartialKey(key *ClaimExpirationKey) func(int) []byte { - return func(fields int) []byte { - return ClaimExpirationKeyPackPartial(key, fields) - } -} - -func ClaimExpirationKeyPackPartialfields(fields int) func(*ClaimExpirationKey) []byte { - return func(u *ClaimExpirationKey) []byte { - return ClaimExpirationKeyPackPartial(u, fields) - } +func (kv *ClaimExpirationKey) NumFields() int { + return 3 } -func ClaimExpirationKeyPackPartial(k *ClaimExpirationKey, fields int) []byte { +func (k *ClaimExpirationKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 3 { @@ -2325,25 +2214,25 @@ func ClaimExpirationKeyUnpack(key []byte) *ClaimExpirationKey { func ClaimExpirationValueUnpack(value []byte) *ClaimExpirationValue { nameLen := binary.BigEndian.Uint16(value[20:]) return &ClaimExpirationValue{ - ClaimHash: value[:20], - NormalizedName: string(value[22 : 22+nameLen]), + ClaimHash: value[:20], + LengthEncodedNormalizedName: NewLengthEncodedNormalizedName(string(value[22 : 22+nameLen])), } } type ClaimTakeoverKey struct { - Prefix []byte `json:"prefix"` - NormalizedName string `json:"normalized_name"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + LengthEncodedNormalizedName // fields NormalizedNameLen, NormalizedName } type ClaimTakeoverValue struct { - ClaimHash []byte `json:"claim_hash"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` Height uint32 `json:"height"` } func NewClaimTakeoverKey(normalizedName string) *ClaimTakeoverKey { return &ClaimTakeoverKey{ - Prefix: []byte{ClaimTakeover}, - NormalizedName: normalizedName, + Prefix: []byte{ClaimTakeover}, + LengthEncodedNormalizedName: NewLengthEncodedNormalizedName(normalizedName), } } @@ -2377,19 +2266,11 @@ func (v *ClaimTakeoverValue) PackValue() []byte { return value } -func ClaimTakeoverKeyPackPartialKey(key *ClaimTakeoverKey) func(int) []byte { - return func(fields int) []byte { - return ClaimTakeoverKeyPackPartial(key, fields) - } +func (kv *ClaimTakeoverKey) NumFields() int { + return 1 } -func ClaimTakeoverKeyPackPartialfields(fields int) func(*ClaimTakeoverKey) []byte { - return func(u *ClaimTakeoverKey) []byte { - return ClaimTakeoverKeyPackPartial(u, fields) - } -} - -func ClaimTakeoverKeyPackPartial(k *ClaimTakeoverKey, fields int) []byte { +func (k *ClaimTakeoverKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 1 { @@ -2428,8 +2309,8 @@ func ClaimTakeoverKeyUnpack(key []byte) *ClaimTakeoverKey { prefixLen := 1 nameLen := binary.BigEndian.Uint16(key[prefixLen:]) return &ClaimTakeoverKey{ - Prefix: key[:prefixLen], - NormalizedName: string(key[prefixLen+2 : prefixLen+2+int(nameLen)]), + Prefix: key[:prefixLen], + LengthEncodedNormalizedName: NewLengthEncodedNormalizedName(string(key[prefixLen+2 : prefixLen+2+int(nameLen)])), } } @@ -2441,7 +2322,7 @@ func ClaimTakeoverValueUnpack(value []byte) *ClaimTakeoverValue { } type PendingActivationKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` Height uint32 `json:"height"` TxoType uint8 `json:"txo_type"` TxNum uint32 `json:"tx_num"` @@ -2457,8 +2338,8 @@ func (k *PendingActivationKey) IsClaim() bool { } type PendingActivationValue struct { - ClaimHash []byte `json:"claim_hash"` - NormalizedName string `json:"normalized_name"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` + LengthEncodedNormalizedName // fields NormalizedNameLen, NormalizedName } func (k *PendingActivationKey) PackKey() []byte { @@ -2486,19 +2367,11 @@ func (v *PendingActivationValue) PackValue() []byte { return value } -func PendingActivationKeyPackPartialKey(key *PendingActivationKey) func(int) []byte { - return func(fields int) []byte { - return PendingActivationKeyPackPartial(key, fields) - } -} - -func PendingActivationKeyPackPartialfields(fields int) func(*PendingActivationKey) []byte { - return func(u *PendingActivationKey) []byte { - return PendingActivationKeyPackPartial(u, fields) - } +func (kv *PendingActivationKey) NumFields() int { + return 4 } -func PendingActivationKeyPackPartial(k *PendingActivationKey, fields int) []byte { +func (k *PendingActivationKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 4 { @@ -2558,22 +2431,22 @@ func PendingActivationKeyUnpack(key []byte) *PendingActivationKey { func PendingActivationValueUnpack(value []byte) *PendingActivationValue { nameLen := binary.BigEndian.Uint16(value[20:]) return &PendingActivationValue{ - ClaimHash: value[:20], - NormalizedName: string(value[22 : 22+nameLen]), + ClaimHash: value[:20], + LengthEncodedNormalizedName: NewLengthEncodedNormalizedName(string(value[22 : 22+nameLen])), } } type ActivationKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` TxoType uint8 `json:"txo_type"` TxNum uint32 `json:"tx_num"` Position uint16 `json:"position"` } type ActivationValue struct { - Height uint32 `json:"height"` - ClaimHash []byte `json:"claim_hash"` - NormalizedName string `json:"normalized_name"` + Height uint32 `json:"height"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` + LengthEncodedNormalizedName // fields NormalizedNameLen, NormalizedName } func NewActivationKey(txoType uint8, txNum uint32, position uint16) *ActivationKey { @@ -2610,19 +2483,11 @@ func (v *ActivationValue) PackValue() []byte { return value } -func ActivationKeyPackPartialKey(key *ActivationKey) func(int) []byte { - return func(fields int) []byte { - return ActivationKeyPackPartial(key, fields) - } +func (kv *ActivationKey) NumFields() int { + return 3 } -func ActivationKeyPackPartialfields(fields int) func(*ActivationKey) []byte { - return func(u *ActivationKey) []byte { - return ActivationKeyPackPartial(u, fields) - } -} - -func ActivationKeyPackPartial(k *ActivationKey, fields int) []byte { +func (k *ActivationKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 3 { @@ -2676,15 +2541,15 @@ func ActivationKeyUnpack(key []byte) *ActivationKey { func ActivationValueUnpack(value []byte) *ActivationValue { nameLen := binary.BigEndian.Uint16(value[24:]) return &ActivationValue{ - Height: binary.BigEndian.Uint32(value), - ClaimHash: value[4 : 20+4], - NormalizedName: string(value[26 : 26+nameLen]), + Height: binary.BigEndian.Uint32(value), + ClaimHash: value[4 : 20+4], + LengthEncodedNormalizedName: NewLengthEncodedNormalizedName(string(value[26 : 26+nameLen])), } } type ActiveAmountKey struct { - Prefix []byte `json:"prefix"` - ClaimHash []byte `json:"claim_hash"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` TxoType uint8 `json:"txo_type"` ActivationHeight uint32 `json:"activation_height"` TxNum uint32 `json:"tx_num"` @@ -2727,19 +2592,11 @@ func (v *ActiveAmountValue) PackValue() []byte { return value } -func ActiveAmountKeyPackPartialKey(key *ActiveAmountKey) func(int) []byte { - return func(fields int) []byte { - return ActiveAmountKeyPackPartial(key, fields) - } +func (kv *ActiveAmountKey) NumFields() int { + return 5 } -func ActiveAmountKeyPackPartialfields(fields int) func(*ActiveAmountKey) []byte { - return func(u *ActiveAmountKey) []byte { - return ActiveAmountKeyPackPartial(u, fields) - } -} - -func ActiveAmountKeyPackPartial(k *ActiveAmountKey, fields int) []byte { +func (k *ActiveAmountKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
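// e.g. GetActiveAmount packs 3 of the 5 fields (ClaimHash, TxoType, // ActivationHeight) to build the start/stop bounds that span a claim's // activation heights.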
if fields > 5 { @@ -2806,22 +2663,24 @@ func ActiveAmountValueUnpack(value []byte) *ActiveAmountValue { } } +type OnesComplementEffectiveAmount uint64 + type EffectiveAmountKey struct { - Prefix []byte `json:"prefix"` - NormalizedName string `json:"normalized_name"` - EffectiveAmount uint64 `json:"effective_amount"` - TxNum uint32 `json:"tx_num"` - Position uint16 `json:"position"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + LengthEncodedNormalizedName // fields NormalizedNameLen, NormalizedName + EffectiveAmount OnesComplementEffectiveAmount `json:"effective_amount"` + TxNum uint32 `json:"tx_num"` + Position uint16 `json:"position"` } type EffectiveAmountValue struct { - ClaimHash []byte `json:"claim_hash"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` } func NewEffectiveAmountKey(normalizedName string) *EffectiveAmountKey { return &EffectiveAmountKey{ - Prefix: []byte{EffectiveAmount}, - NormalizedName: normalizedName, + Prefix: []byte{EffectiveAmount}, + LengthEncodedNormalizedName: NewLengthEncodedNormalizedName(normalizedName), } } @@ -2837,7 +2696,7 @@ func (k *EffectiveAmountKey) PackKey() []byte { binary.BigEndian.PutUint16(key[prefixLen:], uint16(nameLen)) copy(key[prefixLen+2:], []byte(k.NormalizedName)) - binary.BigEndian.PutUint64(key[prefixLen+nameLenLen:], OnesCompTwiddle64-k.EffectiveAmount) + binary.BigEndian.PutUint64(key[prefixLen+nameLenLen:], OnesCompTwiddle64-uint64(k.EffectiveAmount)) binary.BigEndian.PutUint32(key[prefixLen+nameLenLen+8:], k.TxNum) binary.BigEndian.PutUint16(key[prefixLen+nameLenLen+8+4:], k.Position) @@ -2852,19 +2711,11 @@ func (v *EffectiveAmountValue) PackValue() []byte { return value } -func EffectiveAmountKeyPackPartialKey(key *EffectiveAmountKey) func(int) []byte { - return func(fields int) []byte { - return EffectiveAmountKeyPackPartial(key, fields) - } -} - -func EffectiveAmountKeyPackPartialfields(fields int) func(*EffectiveAmountKey) []byte { - return func(u *EffectiveAmountKey) []byte { - return EffectiveAmountKeyPackPartial(u, fields) - } +func (kv *EffectiveAmountKey) NumFields() int { + return 4 } -func EffectiveAmountKeyPackPartial(k *EffectiveAmountKey, fields int) []byte { +func (k *EffectiveAmountKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
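// EffectiveAmount sorts descending: assuming OnesCompTwiddle64 is the // all-ones 64-bit constant, an amount of 5 packs big-endian as // 0xFFFFFFFFFFFFFFFA, so larger amounts produce smaller keys and iterate first.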
nameLen := len(k.NormalizedName) @@ -2901,7 +2752,7 @@ func EffectiveAmountKeyPackPartial(k *EffectiveAmountKey, fields int) []byte { binary.BigEndian.PutUint16(key[prefixLen:], uint16(nameLen)) copy(key[prefixLen+2:], []byte(k.NormalizedName)) case 2: - binary.BigEndian.PutUint64(key[prefixLen+nameLenLen:], OnesCompTwiddle64-k.EffectiveAmount) + binary.BigEndian.PutUint64(key[prefixLen+nameLenLen:], OnesCompTwiddle64-uint64(k.EffectiveAmount)) case 3: binary.BigEndian.PutUint32(key[prefixLen+nameLenLen+8:], k.TxNum) case 4: @@ -2916,11 +2767,11 @@ func EffectiveAmountKeyUnpack(key []byte) *EffectiveAmountKey { prefixLen := 1 nameLen := binary.BigEndian.Uint16(key[prefixLen:]) return &EffectiveAmountKey{ - Prefix: key[:prefixLen], - NormalizedName: string(key[prefixLen+2 : prefixLen+2+int(nameLen)]), - EffectiveAmount: OnesCompTwiddle64 - binary.BigEndian.Uint64(key[prefixLen+2+int(nameLen):]), - TxNum: binary.BigEndian.Uint32(key[prefixLen+2+int(nameLen)+8:]), - Position: binary.BigEndian.Uint16(key[prefixLen+2+int(nameLen)+8+4:]), + Prefix: key[:prefixLen], + LengthEncodedNormalizedName: NewLengthEncodedNormalizedName(string(key[prefixLen+2 : prefixLen+2+int(nameLen)])), + EffectiveAmount: OnesComplementEffectiveAmount(OnesCompTwiddle64 - binary.BigEndian.Uint64(key[prefixLen+2+int(nameLen):])), + TxNum: binary.BigEndian.Uint32(key[prefixLen+2+int(nameLen)+8:]), + Position: binary.BigEndian.Uint16(key[prefixLen+2+int(nameLen)+8+4:]), } } @@ -2931,12 +2782,12 @@ func EffectiveAmountValueUnpack(value []byte) *EffectiveAmountValue { } type RepostKey struct { - Prefix []byte `json:"prefix"` - ClaimHash []byte `json:"claim_hash"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` } type RepostValue struct { - RepostedClaimHash []byte `json:"reposted_claim_hash"` + RepostedClaimHash []byte `struct:"[20]byte" json:"reposted_claim_hash"` } func NewRepostKey(claimHash []byte) *RepostKey { @@ -2966,19 +2817,11 @@ func (v *RepostValue) PackValue() []byte { return value } -func RepostKeyPackPartialKey(key *RepostKey) func(int) []byte { - return func(fields int) []byte { - return RepostKeyPackPartial(key, fields) - } -} - -func RepostKeyPackPartialfields(fields int) func(*RepostKey) []byte { - return func(u *RepostKey) []byte { - return RepostKeyPackPartial(u, fields) - } +func (kv *RepostKey) NumFields() int { + return 1 } -func RepostKeyPackPartial(k *RepostKey, fields int) []byte { +func (k *RepostKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 1 { @@ -3026,14 +2869,14 @@ func RepostValueUnpack(value []byte) *RepostValue { } type RepostedKey struct { - Prefix []byte `json:"prefix"` - RepostedClaimHash []byte `json:"reposted_claim_hash"` + Prefix []byte `struct:"[1]byte" json:"prefix"` + RepostedClaimHash []byte `struct:"[20]byte" json:"reposted_claim_hash"` TxNum uint32 `json:"tx_num"` Position uint16 `json:"position"` } type RepostedValue struct { - ClaimHash []byte `json:"claim_hash"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` } func NewRepostedKey(claimHash []byte) *RepostedKey { @@ -3064,19 +2907,11 @@ func (v *RepostedValue) PackValue() []byte { return value } -func RepostedKeyPackPartialKey(key *RepostedKey) func(int) []byte { - return func(fields int) []byte { - return RepostedKeyPackPartial(key, fields) - } +func (kv *RepostedKey) NumFields() int { + return 3 } -func RepostedKeyPackPartialfields(fields int) func(*RepostedKey) []byte { - return func(u *RepostedKey) []byte { - return RepostedKeyPackPartial(u, fields) - } -} - -func RepostedKeyPackPartial(k *RepostedKey, fields int) []byte { +func (k *RepostedKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 3 { @@ -3134,13 +2969,15 @@ func RepostedValueUnpack(value []byte) *RepostedValue { } type TouchedOrDeletedClaimKey struct { - Prefix []byte `json:"prefix"` + Prefix []byte `struct:"[1]byte" json:"prefix"` Height int32 `json:"height"` } type TouchedOrDeletedClaimValue struct { - TouchedClaims [][]byte `json:"touched_claims"` - DeletedClaims [][]byte `json:"deleted_claims"` + TouchedClaimsLen uint32 `struct:"sizeof=TouchedClaims"` + DeletedClaimsLen uint32 `struct:"sizeof=DeletedClaims"` + TouchedClaims [][]byte `struct:"[][20]byte" json:"touched_claims"` + DeletedClaims [][]byte `struct:"[][20]byte" json:"deleted_claims"` } func (v *TouchedOrDeletedClaimValue) String() string { @@ -3222,19 +3059,11 @@ func (v *TouchedOrDeletedClaimValue) PackValue() []byte { return value } -func TouchedOrDeletedClaimKeyPackPartialKey(key *TouchedOrDeletedClaimKey) func(int) []byte { - return func(fields int) []byte { - return TouchedOrDeletedClaimKeyPackPartial(key, fields) - } -} - -func TouchedOrDeletedClaimPackPartialfields(fields int) func(*TouchedOrDeletedClaimKey) []byte { - return func(u *TouchedOrDeletedClaimKey) []byte { - return TouchedOrDeletedClaimKeyPackPartial(u, fields) - } +func (kv *TouchedOrDeletedClaimKey) NumFields() int { + return 1 } -func TouchedOrDeletedClaimKeyPackPartial(k *TouchedOrDeletedClaimKey, fields int) []byte { +func (k *TouchedOrDeletedClaimKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. 
if fields > 1 { @@ -3290,8 +3119,10 @@ func TouchedOrDeletedClaimValueUnpack(value []byte) *TouchedOrDeletedClaimValue j += 20 } return &TouchedOrDeletedClaimValue{ - TouchedClaims: touchedClaims, - DeletedClaims: deletedClaims, + TouchedClaimsLen: touchedLen, + DeletedClaimsLen: deletedLen, + TouchedClaims: touchedClaims, + DeletedClaims: deletedClaims, } } @@ -3325,21 +3156,13 @@ func (k *UTXOValue) PackValue() []byte { return value } -func UTXOKeyPackPartialKey(key *UTXOKey) func(int) []byte { - return func(fields int) []byte { - return UTXOKeyPackPartial(key, fields) - } -} - -func UTXOKeyPackPartialfields(fields int) func(*UTXOKey) []byte { - return func(u *UTXOKey) []byte { - return UTXOKeyPackPartial(u, fields) - } +func (kv *UTXOKey) NumFields() int { + return 3 } // UTXOKeyPackPartial packs a variable number of fields for a UTXOKey into // a byte array. -func UTXOKeyPackPartial(k *UTXOKey, fields int) []byte { +func (k *UTXOKey) PartialPack(fields int) []byte { // Limit fields between 0 and number of fields, we always at least need // the prefix, and we never need to iterate past the number of fields. if fields > 3 { @@ -3397,364 +3220,802 @@ func UTXOValueUnpack(value []byte) *UTXOValue { } } -// generic simulates a generic key packing / unpacking function for the prefixes -func generic(voidstar interface{}, firstByte byte, function byte, functionName string) (interface{}, error) { - var data []byte - if function < 2 { - data = voidstar.([]byte) - } - switch uint16(firstByte) | uint16(function)<<8 { - case ClaimToSupport: - return ClaimToSupportKeyUnpack(data), nil - case ClaimToSupport | 1<<8: - return ClaimToSupportValueUnpack(data), nil - case ClaimToSupport | 2<<8: - return voidstar.(*ClaimToSupportKey).PackKey(), nil - case ClaimToSupport | 3<<8: - return voidstar.(*ClaimToSupportValue).PackValue(), nil - case ClaimToSupport | 4<<8: - return ClaimToSupportKeyPackPartialKey(voidstar.(*ClaimToSupportKey)), nil - case SupportToClaim: - return SupportToClaimKeyUnpack(data), nil - case SupportToClaim | 1<<8: - return SupportToClaimValueUnpack(data), nil - case SupportToClaim | 2<<8: - return voidstar.(*SupportToClaimKey).PackKey(), nil - case SupportToClaim | 3<<8: - return voidstar.(*SupportToClaimValue).PackValue(), nil - case SupportToClaim | 4<<8: - return SupportToClaimKeyPackPartialKey(voidstar.(*SupportToClaimKey)), nil - case ClaimToTXO: - return ClaimToTXOKeyUnpack(data), nil - case ClaimToTXO | 1<<8: - return ClaimToTXOValueUnpack(data), nil - case ClaimToTXO | 2<<8: - return voidstar.(*ClaimToTXOKey).PackKey(), nil - case ClaimToTXO | 3<<8: - return voidstar.(*ClaimToTXOValue).PackValue(), nil - case ClaimToTXO | 4<<8: - return ClaimToTXOKeyPackPartialKey(voidstar.(*ClaimToTXOKey)), nil - case TXOToClaim: - return TXOToClaimKeyUnpack(data), nil - case TXOToClaim | 1<<8: - return TXOToClaimValueUnpack(data), nil - case TXOToClaim | 2<<8: - return voidstar.(*TXOToClaimKey).PackKey(), nil - case TXOToClaim | 3<<8: - return voidstar.(*TXOToClaimValue).PackValue(), nil - case TXOToClaim | 4<<8: - return TXOToClaimKeyPackPartialKey(voidstar.(*TXOToClaimKey)), nil - - case ClaimToChannel: - return ClaimToChannelKeyUnpack(data), nil - case ClaimToChannel | 1<<8: - return ClaimToChannelValueUnpack(data), nil - case ClaimToChannel | 2<<8: - return voidstar.(*ClaimToChannelKey).PackKey(), nil - case ClaimToChannel | 3<<8: - return voidstar.(*ClaimToChannelValue).PackValue(), nil - case ClaimToChannel | 4<<8: - return ClaimToChannelKeyPackPartialKey(voidstar.(*ClaimToChannelKey)), 
nil - case ChannelToClaim: - return ChannelToClaimKeyUnpack(data), nil - case ChannelToClaim | 1<<8: - return ChannelToClaimValueUnpack(data), nil - case ChannelToClaim | 2<<8: - return voidstar.(*ChannelToClaimKey).PackKey(), nil - case ChannelToClaim | 3<<8: - return voidstar.(*ChannelToClaimValue).PackValue(), nil - case ChannelToClaim | 4<<8: - return ChannelToClaimKeyPackPartialKey(voidstar.(*ChannelToClaimKey)), nil - - case ClaimShortIdPrefix: - return ClaimShortIDKeyUnpack(data), nil - case ClaimShortIdPrefix | 1<<8: - return ClaimShortIDValueUnpack(data), nil - case ClaimShortIdPrefix | 2<<8: - return voidstar.(*ClaimShortIDKey).PackKey(), nil - case ClaimShortIdPrefix | 3<<8: - return voidstar.(*ClaimShortIDValue).PackValue(), nil - case ClaimShortIdPrefix | 4<<8: - return ClaimShortIDKeyPackPartialKey(voidstar.(*ClaimShortIDKey)), nil - case EffectiveAmount: - return EffectiveAmountKeyUnpack(data), nil - case EffectiveAmount | 1<<8: - return EffectiveAmountValueUnpack(data), nil - case EffectiveAmount | 2<<8: - return voidstar.(*EffectiveAmountKey).PackKey(), nil - case EffectiveAmount | 3<<8: - return voidstar.(*EffectiveAmountValue).PackValue(), nil - case EffectiveAmount | 4<<8: - return EffectiveAmountKeyPackPartialKey(voidstar.(*EffectiveAmountKey)), nil - case ClaimExpiration: - return ClaimExpirationKeyUnpack(data), nil - case ClaimExpiration | 1<<8: - return ClaimExpirationValueUnpack(data), nil - case ClaimExpiration | 2<<8: - return voidstar.(*ClaimExpirationKey).PackKey(), nil - case ClaimExpiration | 3<<8: - return voidstar.(*ClaimExpirationValue).PackValue(), nil - case ClaimExpiration | 4<<8: - return ClaimExpirationKeyPackPartialKey(voidstar.(*ClaimExpirationKey)), nil - - case ClaimTakeover: - return ClaimTakeoverKeyUnpack(data), nil - case ClaimTakeover | 1<<8: - return ClaimTakeoverValueUnpack(data), nil - case ClaimTakeover | 2<<8: - return voidstar.(*ClaimTakeoverKey).PackKey(), nil - case ClaimTakeover | 3<<8: - return voidstar.(*ClaimTakeoverValue).PackValue(), nil - case ClaimTakeover | 4<<8: - return ClaimTakeoverKeyPackPartialKey(voidstar.(*ClaimTakeoverKey)), nil - case PendingActivation: - return PendingActivationKeyUnpack(data), nil - case PendingActivation | 1<<8: - return PendingActivationValueUnpack(data), nil - case PendingActivation | 2<<8: - return voidstar.(*PendingActivationKey).PackKey(), nil - case PendingActivation | 3<<8: - return voidstar.(*PendingActivationValue).PackValue(), nil - case PendingActivation | 4<<8: - return PendingActivationKeyPackPartialKey(voidstar.(*PendingActivationKey)), nil - case ActivatedClaimAndSupport: - return ActivationKeyUnpack(data), nil - case ActivatedClaimAndSupport | 1<<8: - return ActivationValueUnpack(data), nil - case ActivatedClaimAndSupport | 2<<8: - return voidstar.(*ActivationKey).PackKey(), nil - case ActivatedClaimAndSupport | 3<<8: - return voidstar.(*ActivationValue).PackValue(), nil - case ActivatedClaimAndSupport | 4<<8: - return ActivationKeyPackPartialKey(voidstar.(*ActivationKey)), nil - case ActiveAmount: - return ActiveAmountKeyUnpack(data), nil - case ActiveAmount | 1<<8: - return ActiveAmountValueUnpack(data), nil - case ActiveAmount | 2<<8: - return voidstar.(*ActiveAmountKey).PackKey(), nil - case ActiveAmount | 3<<8: - return voidstar.(*ActiveAmountValue).PackValue(), nil - case ActiveAmount | 4<<8: - return ActiveAmountKeyPackPartialKey(voidstar.(*ActiveAmountKey)), nil - - case Repost: - return RepostKeyUnpack(data), nil - case Repost | 1<<8: - return RepostValueUnpack(data), nil - 
case Repost | 2<<8: - return voidstar.(*RepostKey).PackKey(), nil - case Repost | 3<<8: - return voidstar.(*RepostValue).PackValue(), nil - case Repost | 4<<8: - return RepostKeyPackPartialKey(voidstar.(*RepostKey)), nil - case RepostedClaim: - return RepostedKeyUnpack(data), nil - case RepostedClaim | 1<<8: - return RepostedValueUnpack(data), nil - case RepostedClaim | 2<<8: - return voidstar.(*RepostedKey).PackKey(), nil - case RepostedClaim | 3<<8: - return voidstar.(*RepostedValue).PackValue(), nil - case RepostedClaim | 4<<8: - return RepostedKeyPackPartialKey(voidstar.(*RepostedKey)), nil - - case Undo: - return UndoKeyUnpack(data), nil - case Undo | 1<<8: - return UndoValueUnpack(data), nil - case Undo | 2<<8: - return voidstar.(*UndoKey).PackKey(), nil - case Undo | 3<<8: - return voidstar.(*UndoValue).PackValue(), nil - case Undo | 4<<8: - return UndoKeyPackPartialKey(voidstar.(*UndoKey)), nil - case ClaimDiff: - return TouchedOrDeletedClaimKeyUnpack(data), nil - case ClaimDiff | 1<<8: - return TouchedOrDeletedClaimValueUnpack(data), nil - case ClaimDiff | 2<<8: - return voidstar.(*TouchedOrDeletedClaimKey).PackKey(), nil - case ClaimDiff | 3<<8: - return voidstar.(*TouchedOrDeletedClaimValue).PackValue(), nil - case ClaimDiff | 4<<8: - return TouchedOrDeletedClaimKeyPackPartialKey(voidstar.(*TouchedOrDeletedClaimKey)), nil - - case Tx: - return TxKeyUnpack(data), nil - case Tx | 1<<8: - return TxValueUnpack(data), nil - case Tx | 2<<8: - return voidstar.(*TxKey).PackKey(), nil - case Tx | 3<<8: - return voidstar.(*TxValue).PackValue(), nil - case Tx | 4<<8: - return TxKeyPackPartialKey(voidstar.(*TxKey)), nil - case BlockHash: - return BlockHashKeyUnpack(data), nil - case BlockHash | 1<<8: - return BlockHashValueUnpack(data), nil - case BlockHash | 2<<8: - return voidstar.(*BlockHashKey).PackKey(), nil - case BlockHash | 3<<8: - return voidstar.(*BlockHashValue).PackValue(), nil - case BlockHash | 4<<8: - return BlockHashKeyPackPartialKey(voidstar.(*BlockHashKey)), nil - case Header: - return BlockHeaderKeyUnpack(data), nil - case Header | 1<<8: - return BlockHeaderValueUnpack(data), nil - case Header | 2<<8: - return voidstar.(*BlockHeaderKey).PackKey(), nil - case Header | 3<<8: - return voidstar.(*BlockHeaderValue).PackValue(), nil - case Header | 4<<8: - return BlockHeaderKeyPackPartialKey(voidstar.(*BlockHeaderKey)), nil - case TxNum: - return TxNumKeyUnpack(data), nil - case TxNum | 1<<8: - return TxNumValueUnpack(data), nil - case TxNum | 2<<8: - return voidstar.(*TxNumKey).PackKey(), nil - case TxNum | 3<<8: - return voidstar.(*TxNumValue).PackValue(), nil - case TxNum | 4<<8: - return TxNumKeyPackPartialKey(voidstar.(*TxNumKey)), nil - - case TxCount: - return TxCountKeyUnpack(data), nil - case TxCount | 1<<8: - return TxCountValueUnpack(data), nil - case TxCount | 2<<8: - return voidstar.(*TxCountKey).PackKey(), nil - case TxCount | 3<<8: - return voidstar.(*TxCountValue).PackValue(), nil - case TxCount | 4<<8: - return TxCountKeyPackPartialKey(voidstar.(*TxCountKey)), nil - case TxHash: - return TxHashKeyUnpack(data), nil - case TxHash | 1<<8: - return TxHashValueUnpack(data), nil - case TxHash | 2<<8: - return voidstar.(*TxHashKey).PackKey(), nil - case TxHash | 3<<8: - return voidstar.(*TxHashValue).PackValue(), nil - case TxHash | 4<<8: - return TxHashKeyPackPartialKey(voidstar.(*TxHashKey)), nil - case UTXO: - return UTXOKeyUnpack(data), nil - case UTXO | 1<<8: - return UTXOValueUnpack(data), nil - case UTXO | 2<<8: - return voidstar.(*UTXOKey).PackKey(), nil - 
case UTXO | 3<<8: - return voidstar.(*UTXOValue).PackValue(), nil - case UTXO | 4<<8: - return UTXOKeyPackPartialKey(voidstar.(*UTXOKey)), nil - case HashXUTXO: - return HashXUTXOKeyUnpack(data), nil - case HashXUTXO | 1<<8: - return HashXUTXOValueUnpack(data), nil - case HashXUTXO | 2<<8: - return voidstar.(*HashXUTXOKey).PackKey(), nil - case HashXUTXO | 3<<8: - return voidstar.(*HashXUTXOValue).PackValue(), nil - case HashXUTXO | 4<<8: - return HashXUTXOKeyPackPartialKey(voidstar.(*HashXUTXOKey)), nil - case HashXHistory: - return HashXHistoryKeyUnpack(data), nil - case HashXHistory | 1<<8: - return HashXHistoryValueUnpack(data), nil - case HashXHistory | 2<<8: - return voidstar.(*HashXHistoryKey).PackKey(), nil - case HashXHistory | 3<<8: - return voidstar.(*HashXHistoryValue).PackValue(), nil - case HashXHistory | 4<<8: - return HashXHistoryKeyPackPartialKey(voidstar.(*HashXHistoryKey)), nil - case DBState: - return DBStateKeyUnpack(data), nil - case DBState | 1<<8: - return DBStateValueUnpack(data), nil - case DBState | 2<<8: - return voidstar.(*DBStateKey).PackKey(), nil - case DBState | 3<<8: - return voidstar.(*DBStateValue).PackValue(), nil - case DBState | 4<<8: - return DBStateKeyPackPartialKey(voidstar.(*DBStateKey)), nil - - case ChannelCount: - return ChannelCountKeyUnpack(data), nil - case ChannelCount | 1<<8: - return ChannelCountValueUnpack(data), nil - case ChannelCount | 2<<8: - return voidstar.(*ChannelCountKey).PackKey(), nil - case ChannelCount | 3<<8: - return voidstar.(*ChannelCountValue).PackValue(), nil - case ChannelCount | 4<<8: - return ChannelCountKeyPackPartialKey(voidstar.(*ChannelCountKey)), nil - case SupportAmount: - return SupportAmountKeyUnpack(data), nil - case SupportAmount | 1<<8: - return SupportAmountValueUnpack(data), nil - case SupportAmount | 2<<8: - return voidstar.(*SupportAmountKey).PackKey(), nil - case SupportAmount | 3<<8: - return voidstar.(*SupportAmountValue).PackValue(), nil - case SupportAmount | 4<<8: - return SupportAmountKeyPackPartialKey(voidstar.(*SupportAmountKey)), nil - case BlockTXs: - return BlockTxsKeyUnpack(data), nil - case BlockTXs | 1<<8: - return BlockTxsValueUnpack(data), nil - case BlockTXs | 2<<8: - return voidstar.(*BlockTxsKey).PackKey(), nil - case BlockTXs | 3<<8: - return voidstar.(*BlockTxsValue).PackValue(), nil - case BlockTXs | 4<<8: - return BlockTxsKeyPackPartialKey(voidstar.(*BlockTxsKey)), nil - - } - return nil, fmt.Errorf("%s function for %v not implemented", functionName, firstByte) -} - -func UnpackGenericKey(key []byte) (interface{}, error) { +type TrendingNotificationKey struct { + Prefix []byte `struct:"[1]byte" json:"prefix"` + Height uint32 `json:"height"` + ClaimHash []byte `struct:"[20]byte" json:"claim_hash"` +} + +type TrendingNotificationValue struct { + PreviousAmount uint64 `json:"previous_amount"` + NewAmount uint64 `json:"new_amount"` +} + +func (kv *TrendingNotificationKey) NumFields() int { + return 2 +} + +func (kv *TrendingNotificationKey) PartialPack(fields int) []byte { + // b'>L20s' + n := len(kv.Prefix) + 4 + 20 + buf := make([]byte, n) + offset := 0 + offset += copy(buf, kv.Prefix[offset:]) + if fields <= 0 { + return buf[:offset] + } + binary.BigEndian.PutUint32(buf[offset:], kv.Height) + offset += 4 + if fields -= 1; fields <= 0 { + return buf[:offset] + } + offset += copy(buf[offset:], kv.ClaimHash[:20]) + return buf[:offset] +} + +func (kv *TrendingNotificationKey) PackKey() []byte { + return kv.PartialPack(kv.NumFields()) +} + +func (kv *TrendingNotificationKey) 
UnpackKey(buf []byte) { + // b'>L20s' + offset := 0 + kv.Prefix = buf[offset : offset+1] + offset += 1 + kv.Height = binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + kv.ClaimHash = buf[offset : offset+20] + offset += 20 +} + +func (kv *TrendingNotificationValue) PackValue() []byte { + // b'>QQ' + n := 8 + 8 + buf := make([]byte, n) + offset := 0 + binary.BigEndian.PutUint64(buf[offset:], kv.PreviousAmount) + offset += 8 + binary.BigEndian.PutUint64(buf[offset:], kv.NewAmount) + offset += 8 + return buf +} + +func (kv *TrendingNotificationValue) UnpackValue(buf []byte) { + // b'>QQ' + offset := 0 + kv.PreviousAmount = binary.BigEndian.Uint64(buf[offset:]) + offset += 8 + kv.NewAmount = binary.BigEndian.Uint64(buf[offset:]) + offset += 8 +} + +type MempoolTxKey struct { + Prefix []byte `struct:"[1]byte" json:"prefix"` + TxHash []byte `struct:"[32]byte" json:"tx_hash"` +} + +type MempoolTxValue struct { + RawTx []byte `struct-while:"!_eof" json:"raw_tx"` +} + +func (kv *MempoolTxKey) NumFields() int { + return 1 +} + +func (kv *MempoolTxKey) PartialPack(fields int) []byte { + // b'>32s' + n := len(kv.Prefix) + 32 + buf := make([]byte, n) + offset := 0 + offset += copy(buf[offset:], kv.Prefix[:1]) + if fields <= 0 { + return buf[:offset] + } + offset += copy(buf[offset:], kv.TxHash[:32]) + return buf[:offset] +} + +func (kv *MempoolTxKey) PackKey() []byte { + return kv.PartialPack(kv.NumFields()) +} + +func (kv *MempoolTxKey) UnpackKey(buf []byte) { + // b'>32s' + offset := 0 + kv.Prefix = buf[offset : offset+1] + offset += 1 + kv.TxHash = buf[offset : offset+32] + offset += 32 +} + +func (kv *MempoolTxValue) PackValue() []byte { + // variable length bytes + n := len(kv.RawTx) + buf := make([]byte, n) + offset := 0 + offset += copy(buf, kv.RawTx) + return buf +} + +func (kv *MempoolTxValue) UnpackValue(buf []byte) { + // variable length bytes + offset := 0 + kv.RawTx = buf[:] + offset += len(buf) +} + +type TouchedHashXKey struct { + Prefix []byte `struct:"[1]byte" json:"prefix"` + Height uint32 `json:"height"` +} + +type TouchedHashXValue struct { + TouchedHashXs [][]byte `struct:"[][11]byte" struct-while:"!_eof" json:"touched_hashXs"` +} + +func (kv *TouchedHashXKey) NumFields() int { + return 1 +} + +func (kv *TouchedHashXKey) PartialPack(fields int) []byte { + // b'>L' + n := len(kv.Prefix) + 4 + buf := make([]byte, n) + offset := 0 + offset += copy(buf[offset:], kv.Prefix[:1]) + if fields <= 0 { + return buf[:offset] + } + binary.BigEndian.PutUint32(buf[offset:], kv.Height) + offset += 4 + return buf[:offset] +} + +func (kv *TouchedHashXKey) PackKey() []byte { + return kv.PartialPack(kv.NumFields()) +} + +func (kv *TouchedHashXKey) UnpackKey(buf []byte) { + // b'>L' + offset := 0 + kv.Prefix = buf[offset : offset+1] + offset += 1 + kv.Height = binary.BigEndian.Uint32(buf[offset:]) + offset += 4 +} + +func (kv *TouchedHashXValue) PackValue() []byte { + // variable length bytes + n := len(kv.TouchedHashXs) * 11 + buf := make([]byte, n) + offset := 0 + for i := range kv.TouchedHashXs { + offset += copy(buf[offset:], kv.TouchedHashXs[i][:11]) + } + return buf +} + +func (kv *TouchedHashXValue) UnpackValue(buf []byte) { + // variable length bytes + n := len(buf) + kv.TouchedHashXs = make([][]byte, n/11) + for i, offset := 0, 0; offset+11 <= n; i, offset = i+1, offset+11 { + kv.TouchedHashXs[i] = buf[offset : offset+11] + } +} + +type HashXStatusKey struct { + Prefix []byte `struct:"[1]byte" json:"prefix"` + HashX []byte `struct:"[20]byte" json:"hashX"` +} + +type HashXStatusValue struct { 
+ Status []byte `struct:"[32]byte" json:"status"` +} + +func (kv *HashXStatusKey) NumFields() int { + return 1 +} + +func (kv *HashXStatusKey) PartialPack(fields int) []byte { + // b'>20s' + n := len(kv.Prefix) + 20 + buf := make([]byte, n) + offset := 0 + offset += copy(buf[offset:], kv.Prefix[:1]) + if fields <= 0 { + return buf[:offset] + } + offset += copy(buf[offset:], kv.HashX[:20]) + return buf[:offset] +} + +func (kv *HashXStatusKey) PackKey() []byte { + return kv.PartialPack(kv.NumFields()) +} + +func (kv *HashXStatusKey) UnpackKey(buf []byte) { + // b'>20s' + offset := 0 + kv.Prefix = buf[offset : offset+1] + offset += 1 + kv.HashX = buf[offset : offset+20] + offset += 20 +} + +func (kv *HashXStatusValue) PackValue() []byte { + // b'32s' + n := 32 + buf := make([]byte, n) + offset := 0 + offset += copy(buf[offset:], kv.Status[:32]) + return buf +} + +func (kv *HashXStatusValue) UnpackValue(buf []byte) { + // b'32s' + offset := 0 + kv.Status = buf[offset : offset+32] + offset += 32 +} + +type HashXMempoolStatusKey = HashXStatusKey +type HashXMempoolStatusValue = HashXStatusValue + +func UnpackGenericKey(key []byte) (BaseKey, error) { if len(key) == 0 { return nil, fmt.Errorf("key length zero") } - return generic(key, key[0], 0, "unpack key") + // Look up the prefix metadata, and use the registered function(s) + // to create and unpack key of appropriate type. + t, ok := prefixRegistry[key[0]] + if !ok { + return nil, fmt.Errorf("unpack key function for %v not implemented", key[0]) + } + if t.newKeyUnpack != nil { + // Type provides KeyUnpack() function. + return t.newKeyUnpack(key).(BaseKey), nil + } + if t.newKey != nil { + // Type provides a new function. + k := t.newKey() + unpacker, ok := k.(KeyUnpacker) + if ok { + unpacker.UnpackKey(key) + return unpacker.(BaseKey), nil + } + } + return nil, fmt.Errorf("unpack key function for %v not implemented", key[0]) } -func UnpackGenericValue(key, value []byte) (interface{}, error) { +func UnpackGenericValue(key, value []byte) (BaseValue, error) { if len(key) == 0 { return nil, fmt.Errorf("key length zero") } if len(value) == 0 { return nil, fmt.Errorf("value length zero") } - return generic(value, key[0], 1, "unpack value") + // Look up the prefix metadata, and use the registered function(s) + // to create and unpack value of appropriate type. + t, ok := prefixRegistry[key[0]] + if !ok { + return nil, fmt.Errorf("unpack value function for %v not implemented", key[0]) + } + if t.newValueUnpack != nil { + // Type provides ValueUnpack() function. + return t.newValueUnpack(value).(BaseValue), nil + } + if t.newValue != nil { + // Type provides a new function. 
+ k := t.newValue() + unpacker, ok := k.(ValueUnpacker) + if ok { + unpacker.UnpackValue(value) + return unpacker.(BaseValue), nil + } + } + return nil, fmt.Errorf("unpack value function for %v not implemented", key[0]) } -func PackPartialGenericKey(prefix byte, key interface{}, fields int) ([]byte, error) { +func PackPartialGenericKey(key BaseKey, fields int) ([]byte, error) { if key == nil { - return nil, fmt.Errorf("key length zero") + return nil, fmt.Errorf("key is nil") } - genericRes, err := generic(key, prefix, 4, "pack partial key") - res := genericRes.(func(int) []byte)(fields) - return res, err + return key.PartialPack(fields), nil } -func PackGenericKey(prefix byte, key interface{}) ([]byte, error) { +func PackGenericKey(key BaseKey) ([]byte, error) { if key == nil { - return nil, fmt.Errorf("key length zero") + return nil, fmt.Errorf("key is nil") } - genericRes, err := generic(key, prefix, 2, "pack key") - return genericRes.([]byte), err + return key.PackKey(), nil } -func PackGenericValue(prefix byte, value interface{}) ([]byte, error) { +func PackGenericValue(value BaseValue) ([]byte, error) { if value == nil { - return nil, fmt.Errorf("value length zero") - } - genericRes, err := generic(value, prefix, 3, "pack value") - return genericRes.([]byte), err + return nil, fmt.Errorf("value is nil") + } + return value.PackValue(), nil +} + +// Metadata associated with each prefix/table. Currently used to +// implement generic unpacking. + +type prefixMeta struct { + newKey func() interface{} + newValue func() interface{} + newKeyUnpack func([]byte) interface{} + newValueUnpack func([]byte) interface{} + API *SerializationAPI +} + +var prefixRegistry = map[byte]prefixMeta{ + ClaimToSupport: { + newKey: func() interface{} { + return &ClaimToSupportKey{Prefix: []byte{ClaimToSupport}} + }, + newValue: func() interface{} { + return &ClaimToSupportValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return ClaimToSupportKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return ClaimToSupportValueUnpack(buf) + }, + }, + SupportToClaim: { + newKey: func() interface{} { + return &SupportToClaimKey{Prefix: []byte{SupportToClaim}} + }, + newValue: func() interface{} { + return &SupportToClaimValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return SupportToClaimKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return SupportToClaimValueUnpack(buf) + }, + }, + + ClaimToTXO: { + newKey: func() interface{} { + return &ClaimToTXOKey{Prefix: []byte{ClaimToTXO}} + }, + newValue: func() interface{} { + return &ClaimToTXOValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return ClaimToTXOKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return ClaimToTXOValueUnpack(buf) + }, + }, + TXOToClaim: { + newKey: func() interface{} { + return &TXOToClaimKey{Prefix: []byte{TXOToClaim}} + }, + newValue: func() interface{} { + return &TXOToClaimValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return TXOToClaimKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return TXOToClaimValueUnpack(buf) + }, + }, + + ClaimToChannel: { + newKey: func() interface{} { + return &ClaimToChannelKey{Prefix: []byte{ClaimToChannel}} + }, + newValue: func() interface{} { + return &ClaimToChannelValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return ClaimToChannelKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return ClaimToChannelValueUnpack(buf) + }, + }, + 
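+ // The remaining entries follow the same shape: newKeyUnpack/newValueUnpack
+ // wrap the existing hand-written unpack functions, while newKey/newValue
+ // construct empty instances carrying the correct prefix byte. Entries that
+ // omit the unpack wrappers (TrendingNotifications onward) rely on the
+ // KeyUnpacker/ValueUnpacker fallback in UnpackGenericKey/UnpackGenericValue.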
ChannelToClaim: { + newKey: func() interface{} { + return &ChannelToClaimKey{Prefix: []byte{ChannelToClaim}} + }, + newValue: func() interface{} { + return &ChannelToClaimValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return ChannelToClaimKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return ChannelToClaimValueUnpack(buf) + }, + }, + + ClaimShortIdPrefix: { + newKey: func() interface{} { + return &ClaimShortIDKey{Prefix: []byte{ClaimShortIdPrefix}} + }, + newValue: func() interface{} { + return &ClaimShortIDValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return ClaimShortIDKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return ClaimShortIDValueUnpack(buf) + }, + }, + EffectiveAmount: { + newKey: func() interface{} { + return &EffectiveAmountKey{Prefix: []byte{EffectiveAmount}} + }, + newValue: func() interface{} { + return &EffectiveAmountValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return EffectiveAmountKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return EffectiveAmountValueUnpack(buf) + }, + }, + ClaimExpiration: { + newKey: func() interface{} { + return &ClaimExpirationKey{Prefix: []byte{ClaimExpiration}} + }, + newValue: func() interface{} { + return &ClaimExpirationValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return ClaimExpirationKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return ClaimExpirationValueUnpack(buf) + }, + }, + + ClaimTakeover: { + newKey: func() interface{} { + return &ClaimTakeoverKey{Prefix: []byte{ClaimTakeover}} + }, + newValue: func() interface{} { + return &ClaimTakeoverValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return ClaimTakeoverKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return ClaimTakeoverValueUnpack(buf) + }, + }, + PendingActivation: { + newKey: func() interface{} { + return &PendingActivationKey{Prefix: []byte{PendingActivation}} + }, + newValue: func() interface{} { + return &PendingActivationValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return PendingActivationKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return PendingActivationValueUnpack(buf) + }, + }, + ActivatedClaimAndSupport: { + newKey: func() interface{} { + return &ActivationKey{Prefix: []byte{ActivatedClaimAndSupport}} + }, + newValue: func() interface{} { + return &ActivationValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return ActivationKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return ActivationValueUnpack(buf) + }, + }, + ActiveAmount: { + newKey: func() interface{} { + return &ActiveAmountKey{Prefix: []byte{ActiveAmount}} + }, + newValue: func() interface{} { + return &ActiveAmountValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return ActiveAmountKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return ActiveAmountValueUnpack(buf) + }, + }, + + Repost: { + newKey: func() interface{} { + return &RepostKey{Prefix: []byte{Repost}} + }, + newValue: func() interface{} { + return &RepostValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return RepostKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return RepostValueUnpack(buf) + }, + }, + RepostedClaim: { + newKey: func() interface{} { + return &RepostedKey{Prefix: []byte{RepostedClaim}} + }, + newValue: func() interface{} { + return &RepostedValue{} + }, + newKeyUnpack: func(buf []byte) 
interface{} { + return RepostedKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return RepostedValueUnpack(buf) + }, + }, + + Undo: { + newKey: func() interface{} { + return &UndoKey{Prefix: []byte{Undo}} + }, + newValue: func() interface{} { + return &UndoValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return UndoKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return UndoValueUnpack(buf) + }, + }, + ClaimDiff: { + newKey: func() interface{} { + return &TouchedOrDeletedClaimKey{Prefix: []byte{ClaimDiff}} + }, + newValue: func() interface{} { + return &TouchedOrDeletedClaimValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return TouchedOrDeletedClaimKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return TouchedOrDeletedClaimValueUnpack(buf) + }, + }, + + Tx: { + newKey: func() interface{} { + return &TxKey{Prefix: []byte{Tx}} + }, + newValue: func() interface{} { + return &TxValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return TxKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return TxValueUnpack(buf) + }, + }, + BlockHash: { + newKey: func() interface{} { + return &BlockHashKey{Prefix: []byte{BlockHash}} + }, + newValue: func() interface{} { + return &BlockHashValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return BlockHashKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return BlockHashValueUnpack(buf) + }, + }, + Header: { + newKey: func() interface{} { + return &BlockHeaderKey{Prefix: []byte{Header}} + }, + newValue: func() interface{} { + return &BlockHeaderValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return BlockHeaderKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return BlockHeaderValueUnpack(buf) + }, + }, + TxNum: { + newKey: func() interface{} { + return &TxNumKey{Prefix: []byte{TxNum}} + }, + newValue: func() interface{} { + return &TxNumValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return TxNumKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return TxNumValueUnpack(buf) + }, + }, + TxCount: { + newKey: func() interface{} { + return &TxCountKey{Prefix: []byte{TxCount}} + }, + newValue: func() interface{} { + return &TxCountValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return TxCountKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return TxCountValueUnpack(buf) + }, + }, + TxHash: { + newKey: func() interface{} { + return &TxHashKey{Prefix: []byte{TxHash}} + }, + newValue: func() interface{} { + return &TxHashValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return TxHashKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return TxHashValueUnpack(buf) + }, + }, + UTXO: { + newKey: func() interface{} { + return &UTXOKey{Prefix: []byte{UTXO}} + }, + newValue: func() interface{} { + return &UTXOValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return UTXOKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return UTXOValueUnpack(buf) + }, + }, + HashXUTXO: { + newKey: func() interface{} { + return &HashXUTXOKey{Prefix: []byte{HashXUTXO}} + }, + newValue: func() interface{} { + return &HashXUTXOValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return HashXUTXOKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return HashXUTXOValueUnpack(buf) + }, + }, + HashXHistory: { + newKey: func() interface{} { + return 
&HashXHistoryKey{Prefix: []byte{HashXHistory}} + }, + newValue: func() interface{} { + return &HashXHistoryValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return HashXHistoryKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return HashXHistoryValueUnpack(buf) + }, + }, + DBState: { + newKey: func() interface{} { + return &DBStateKey{Prefix: []byte{DBState}} + }, + newValue: func() interface{} { + return &DBStateValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return DBStateKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return DBStateValueUnpack(buf) + }, + }, + ChannelCount: { + newKey: func() interface{} { + return &ChannelCountKey{Prefix: []byte{ChannelCount}} + }, + newValue: func() interface{} { + return &ChannelCountValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return ChannelCountKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return ChannelCountValueUnpack(buf) + }, + }, + SupportAmount: { + newKey: func() interface{} { + return &SupportAmountKey{Prefix: []byte{SupportAmount}} + }, + newValue: func() interface{} { + return &SupportAmountValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return SupportAmountKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return SupportAmountValueUnpack(buf) + }, + }, + BlockTXs: { + newKey: func() interface{} { + return &BlockTxsKey{Prefix: []byte{BlockTXs}} + }, + newValue: func() interface{} { + return &BlockTxsValue{} + }, + newKeyUnpack: func(buf []byte) interface{} { + return BlockTxsKeyUnpack(buf) + }, + newValueUnpack: func(buf []byte) interface{} { + return BlockTxsValueUnpack(buf) + }, + }, + + TrendingNotifications: { + newKey: func() interface{} { + return &TrendingNotificationKey{Prefix: []byte{TrendingNotifications}} + }, + newValue: func() interface{} { + return &TrendingNotificationValue{} + }, + }, + MempoolTx: { + newKey: func() interface{} { + return &MempoolTxKey{Prefix: []byte{MempoolTx}} + }, + newValue: func() interface{} { + return &MempoolTxValue{} + }, + }, + TouchedHashX: { + newKey: func() interface{} { + return &TouchedHashXKey{Prefix: []byte{TouchedHashX}} + }, + newValue: func() interface{} { + return &TouchedHashXValue{} + }, + }, + HashXStatus: { + newKey: func() interface{} { + return &HashXStatusKey{Prefix: []byte{HashXStatus}} + }, + newValue: func() interface{} { + return &HashXStatusValue{} + }, + }, + HashXMempoolStatus: { + newKey: func() interface{} { + return &HashXMempoolStatusKey{Prefix: []byte{HashXMempoolStatus}} + }, + newValue: func() interface{} { + return &HashXMempoolStatusValue{} + }, + }, } diff --git a/db/prefixes/prefixes_test.go b/db/prefixes/prefixes_test.go index 02672e81..08828495 100644 --- a/db/prefixes/prefixes_test.go +++ b/db/prefixes/prefixes_test.go @@ -2,11 +2,15 @@ package prefixes_test import ( "bytes" + "crypto/rand" "encoding/csv" "encoding/hex" "fmt" "log" + "math" + "math/big" "os" + "sort" "testing" dbpkg "github.com/lbryio/herald.go/db" @@ -14,6 +18,14 @@ import ( "github.com/linxGnu/grocksdb" ) +func TestPrefixRegistry(t *testing.T) { + for _, prefix := range prefixes.GetPrefixes() { + if prefixes.GetSerializationAPI(prefix) == nil { + t.Errorf("prefix %v not registered", prefix) + } + } +} + func testInit(filePath string) (*grocksdb.DB, [][]string, func(), *grocksdb.ColumnFamilyHandle) { log.Println(filePath) file, err := os.Open(filePath) @@ -28,12 +40,25 @@ func testInit(filePath string) (*grocksdb.DB, [][]string, func(), 
*grocksdb.Colu columnFamily := records[0][0] records = records[1:] + cleanupFiles := func() { + err = os.RemoveAll("./tmp") + if err != nil { + log.Println(err) + } + } + // wOpts := grocksdb.NewDefaultWriteOptions() opts := grocksdb.NewDefaultOptions() opts.SetCreateIfMissing(true) db, err := grocksdb.OpenDb(opts, "tmp") if err != nil { log.Println(err) + // Garbage might have been left behind by a prior crash. + cleanupFiles() + db, err = grocksdb.OpenDb(opts, "tmp") + if err != nil { + log.Println(err) + } } handle, err := db.CreateColumnFamily(opts, columnFamily) if err != nil { @@ -41,16 +66,30 @@ func testInit(filePath string) (*grocksdb.DB, [][]string, func(), *grocksdb.Colu } toDefer := func() { db.Close() - err = os.RemoveAll("./tmp") - if err != nil { - log.Println(err) - } + cleanupFiles() } return db, records, toDefer, handle } func testGeneric(filePath string, prefix byte, numPartials int) func(*testing.T) { + return func(t *testing.T) { + APIs := []*prefixes.SerializationAPI{ + prefixes.GetSerializationAPI([]byte{prefix}), + // Verify combinations of production vs. "restruct" implementations of + // serialization API (e.g production Pack() with "restruct" Unpack()). + prefixes.RegressionAPI_1, + prefixes.RegressionAPI_2, + prefixes.RegressionAPI_3, + } + for _, api := range APIs { + opts := dbpkg.NewIterateOptions().WithPrefix([]byte{prefix}).WithSerializer(api).WithIncludeValue(true) + testGenericOptions(opts, filePath, prefix, numPartials)(t) + } + } +} + +func testGenericOptions(options *dbpkg.IterOptions, filePath string, prefix byte, numPartials int) func(*testing.T) { return func(t *testing.T) { wOpts := grocksdb.NewDefaultWriteOptions() @@ -69,26 +108,34 @@ func testGeneric(filePath string, prefix byte, numPartials int) func(*testing.T) db.PutCF(wOpts, handle, key, val) } // test prefix - options := dbpkg.NewIterateOptions().WithPrefix([]byte{prefix}).WithIncludeValue(true) options = options.WithCfHandle(handle) ch := dbpkg.IterCF(db, options) var i = 0 for kv := range ch { // log.Println(kv.Key) - gotKey, err := prefixes.PackGenericKey(prefix, kv.Key) + gotKey, err := options.Serializer.PackKey(kv.Key) if err != nil { log.Println(err) } + if numPartials != kv.Key.NumFields() { + t.Errorf("key reports %v fields but %v expected", kv.Key.NumFields(), numPartials) + } for j := 1; j <= numPartials; j++ { - keyPartial, _ := prefixes.PackPartialGenericKey(prefix, kv.Key, j) + keyPartial, _ := options.Serializer.PackPartialKey(kv.Key, j) // Check pack partial for sanity - if !bytes.HasPrefix(gotKey, keyPartial) { - t.Errorf("%+v should be prefix of %+v\n", keyPartial, gotKey) + if j < numPartials { + if !bytes.HasPrefix(gotKey, keyPartial) || (len(keyPartial) >= len(gotKey)) { + t.Errorf("%+v should be prefix of %+v\n", keyPartial, gotKey) + } + } else { + if !bytes.Equal(gotKey, keyPartial) { + t.Errorf("%+v should be equal to %+v\n", keyPartial, gotKey) + } } } - got, err := prefixes.PackGenericValue(prefix, kv.Value) + got, err := options.Serializer.PackValue(kv.Value) if err != nil { log.Println(err) } @@ -101,7 +148,7 @@ func testGeneric(filePath string, prefix byte, numPartials int) func(*testing.T) log.Println(err) } if !bytes.Equal(gotKey, wantKey) { - t.Errorf("gotKey: %+v, wantKey: %+v\n", got, want) + t.Errorf("gotKey: %+v, wantKey: %+v\n", gotKey, wantKey) } if !bytes.Equal(got, want) { t.Errorf("got: %+v, want: %+v\n", got, want) @@ -123,12 +170,12 @@ func testGeneric(filePath string, prefix byte, numPartials int) func(*testing.T) if err != nil { log.Println(err) 
} - options2 := dbpkg.NewIterateOptions().WithStart(start).WithStop(stop).WithIncludeValue(true) + options2 := dbpkg.NewIterateOptions().WithSerializer(options.Serializer).WithStart(start).WithStop(stop).WithIncludeValue(true) options2 = options2.WithCfHandle(handle) ch2 := dbpkg.IterCF(db, options2) i = 0 for kv := range ch2 { - got, err := prefixes.PackGenericValue(prefix, kv.Value) + got, err := options2.Serializer.PackValue(kv.Value) if err != nil { log.Println(err) } @@ -216,7 +263,7 @@ func TestTXOToClaim(t *testing.T) { func TestClaimShortID(t *testing.T) { filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ClaimShortIdPrefix) - testGeneric(filePath, prefixes.ClaimShortIdPrefix, 3)(t) + testGeneric(filePath, prefixes.ClaimShortIdPrefix, 4)(t) } func TestClaimToChannel(t *testing.T) { @@ -286,7 +333,7 @@ func TestClaimDiff(t *testing.T) { func TestUTXO(t *testing.T) { filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.UTXO) - testGeneric(filePath, prefixes.UTXO, 1)(t) + testGeneric(filePath, prefixes.UTXO, 3)(t) } func TestHashXUTXO(t *testing.T) { @@ -330,3 +377,175 @@ func TestUTXOKey_String(t *testing.T) { }) } } + +func TestTrendingNotifications(t *testing.T) { + prefix := byte(prefixes.TrendingNotifications) + filePath := fmt.Sprintf("../../testdata/%c.csv", prefix) + //synthesizeTestData([]byte{prefix}, filePath, []int{4, 20}, []int{8, 8}, [][3]int{}) + key := &prefixes.TrendingNotificationKey{} + testGeneric(filePath, prefix, key.NumFields())(t) +} + +func TestMempoolTx(t *testing.T) { + prefix := byte(prefixes.MempoolTx) + filePath := fmt.Sprintf("../../testdata/%c.csv", prefix) + //synthesizeTestData([]byte{prefix}, filePath, []int{32}, []int{}, [][3]int{{20, 100, 1}}) + key := &prefixes.MempoolTxKey{} + testGeneric(filePath, prefix, key.NumFields())(t) +} + +func TestTouchedHashX(t *testing.T) { + prefix := byte(prefixes.TouchedHashX) + filePath := fmt.Sprintf("../../testdata/%c.csv", prefix) + //synthesizeTestData([]byte{prefix}, filePath, []int{4}, []int{}, [][3]int{{1, 5, 11}}) + key := &prefixes.TouchedHashXKey{} + testGeneric(filePath, prefix, key.NumFields())(t) +} + +func TestHashXStatus(t *testing.T) { + prefix := byte(prefixes.HashXStatus) + filePath := fmt.Sprintf("../../testdata/%c.csv", prefix) + //synthesizeTestData([]byte{prefix}, filePath, []int{20}, []int{32}, [][3]int{}) + key := &prefixes.HashXStatusKey{} + testGeneric(filePath, prefix, key.NumFields())(t) +} + +func TestHashXMempoolStatus(t *testing.T) { + prefix := byte(prefixes.HashXMempoolStatus) + filePath := fmt.Sprintf("../../testdata/%c.csv", prefix) + //synthesizeTestData([]byte{prefix}, filePath, []int{20}, []int{32}, [][3]int{}) + key := &prefixes.HashXMempoolStatusKey{} + testGeneric(filePath, prefix, key.NumFields())(t) +} + +func synthesizeTestData(prefix []byte, filePath string, keyFixed, valFixed []int, valVariable [][3]int) { + file, err := os.OpenFile(filePath, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644) + if err != nil { + panic(err) + } + defer file.Close() + + records := make([][2][]byte, 0, 20) + for r := 0; r < 20; r++ { + key := make([]byte, 0, 1000) + key = append(key, prefix...) + val := make([]byte, 0, 1000) + // Handle fixed columns of key. + for _, width := range keyFixed { + v := make([]byte, width) + rand.Read(v) + key = append(key, v...) + } + // Handle fixed columns of value. + for _, width := range valFixed { + v := make([]byte, width) + rand.Read(v) + val = append(val, v...) + } + // Handle variable length array in value. Each element is "chunk" size. 
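+ // Each triple is (low, high, chunk): the element count is drawn
+ // uniformly from [low, high), and each element contributes chunk
+ // random bytes to the value.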
+ for _, w := range valVariable { + low, high, chunk := w[0], w[1], w[2] + n, _ := rand.Int(rand.Reader, big.NewInt(int64(high-low))) + v := make([]byte, chunk*(low+int(n.Int64()))) + rand.Read(v) + val = append(val, v...) + } + records = append(records, [2][]byte{key, val}) + } + + sort.Slice(records, func(i, j int) bool { return bytes.Compare(records[i][0], records[j][0]) == -1 }) + + wr := csv.NewWriter(file) + wr.Write([]string{string(prefix), ""}) // column headers + for _, rec := range records { + encoded := []string{hex.EncodeToString(rec[0]), hex.EncodeToString(rec[1])} + err := wr.Write(encoded) + if err != nil { + panic(err) + } + } + wr.Flush() +} + +// Fuzz tests for various Key and Value types (EXPERIMENTAL) + +func FuzzTouchedHashXKey(f *testing.F) { + kvs := []prefixes.TouchedHashXKey{ + { + Prefix: []byte{prefixes.TouchedHashX}, + Height: 0, + }, + { + Prefix: []byte{prefixes.TouchedHashX}, + Height: 1, + }, + { + Prefix: []byte{prefixes.TouchedHashX}, + Height: math.MaxUint32, + }, + } + + for _, kv := range kvs { + seed := make([]byte, 0, 200) + seed = append(seed, kv.PackKey()...) + f.Add(seed) + } + + f.Fuzz(func(t *testing.T, in []byte) { + t.Logf("testing: %+v", in) + out := make([]byte, 0, 200) + var kv prefixes.TouchedHashXKey + kv.UnpackKey(in) + out = append(out, kv.PackKey()...) + if len(in) >= 5 { + if !bytes.HasPrefix(in, out) { + t.Fatalf("%v: not equal after round trip: %v", in, out) + } + } + }) +} + +func FuzzTouchedHashXValue(f *testing.F) { + kvs := []prefixes.TouchedHashXValue{ + { + TouchedHashXs: [][]byte{}, + }, + { + TouchedHashXs: [][]byte{ + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + }, + { + TouchedHashXs: [][]byte{ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + }, + { + TouchedHashXs: [][]byte{ + {0xff, 0xff, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + {0, 1, 0xff, 0xff, 4, 5, 6, 7, 8, 9, 10}, + {0, 1, 2, 3, 0xff, 0xff, 6, 7, 8, 9, 10}, + }, + }, + } + + for _, kv := range kvs { + seed := make([]byte, 0, 200) + seed = append(seed, kv.PackValue()...) + f.Add(seed) + } + + f.Fuzz(func(t *testing.T, in []byte) { + t.Logf("testing: %+v", in) + out := make([]byte, 0, 200) + var kv prefixes.TouchedHashXValue + kv.UnpackValue(in) + out = append(out, kv.PackValue()...) 
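+ // UnpackValue keeps only whole 11-byte hashXs and PackValue re-emits
+ // them, so out should be a prefix of in, with any trailing partial
+ // chunk dropped.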
+ if len(in) >= 5 { + if !bytes.HasPrefix(in, out) { + t.Fatalf("%v: not equal after round trip: %v", in, out) + } + } + }) +} diff --git a/go.mod b/go.mod index fa675132..167ace03 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,10 @@ require ( gopkg.in/karalabe/cookiejar.v1 v1.0.0-20141109175019-e1490cae028c ) -require golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b // indirect +require ( + github.com/go-restruct/restruct v1.2.0-alpha // indirect + golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b // indirect +) require ( github.com/beorn7/perks v1.0.1 // indirect diff --git a/go.sum b/go.sum index ef0656a3..61d9c56d 100644 --- a/go.sum +++ b/go.sum @@ -179,6 +179,8 @@ github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= +github.com/go-restruct/restruct v1.2.0-alpha h1:2Lp474S/9660+SJjpVxoKuWX09JsXHSrdV7Nv3/gkvc= +github.com/go-restruct/restruct v1.2.0-alpha/go.mod h1:KqrpKpn4M8OLznErihXTGLlsXFGeLxHUrLRRI/1YjGk= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -362,6 +364,8 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linxGnu/grocksdb v1.6.42 h1:nJLoXFuzwBwQQQrXTUgRGRz1QRm7y8pR6CNV/gwrbqs= github.com/linxGnu/grocksdb v1.6.42/go.mod h1:JcMMDBFaDNhRXFYcYXmgQwb/RarSld1PulTI7UzE+w0= +github.com/linxGnu/grocksdb v1.7.0 h1:UyFDykX0CUfxDN10cqlFho/rwt9K6KoDaLXL9Ej5z9g= +github.com/linxGnu/grocksdb v1.7.0/go.mod h1:JcMMDBFaDNhRXFYcYXmgQwb/RarSld1PulTI7UzE+w0= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5/go.mod h1:H0aPCWffGOaDcjkw1iB7W9DVLp6GXmfcJY/7YZCWPA4= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= diff --git a/testdata/c.csv b/testdata/c.csv new file mode 100644 index 00000000..58dc37eb --- /dev/null +++ b/testdata/c.csv @@ -0,0 +1,21 @@ +c, +631457da9061c90a8fd211994ba8e3701a76c43fa66937673f,e41d47b10d8b768793c75e4b2bb35784 +632de81b0213a1b6e390e4c1859dba94b2b0e9a74e360a2b9e,1326b6b9eb9ad8ecc591aa54c365dafa +63325af388b77d3ed3df8a5b1483b83fb0b5153ad51de15ac0,b3985bb638840f1c0c7aadaa32848fc1 +6339c7574004d908068b73e2f898a241dceaa19d2e4f5fd2c6,b55b277d1598b93cad3cbcdbdc796c04 +6363d895c26d023913ae5c84680d8acbf0c4b2dd6fa1842a2c,9c33af364b69814cc868fadc48547ef9 +637d45cd2b29ba27353f889660780d2c5edd0d490058c06dd1,6597a63fa0de8aaf717e031029830cc1 +637e1d5b825273eaf7457f40d97fc18ab2f99e25552e14e185,2c9e0e7145297d8eaee06f36567a529c +638151f59e498873ef82ef0271186f0b60b9ceeaa10aec120e,9b64b4276a1059e4ecf19560b566d503 +6384e22f9b0fc6f63c9a221786091ecf02b0df2925895b8132,e12f4a8a130f1419ff4ae3a9bb8a31ee +63a92ad4fe7abbf72db94f49092764329c4d9b5cf30115eb2a,152300368cecfaf42debe1e7cccba9cc +63ab7cc5574087640b78b46e9548cfbefabc581e479883eb70,1f8e2f0abf79e263c3bd3fa29085f454 
+63b7cceb793d1e8a3729c9f9bc7a580b7d3d1b42a3c13c5e99,fb5b20d556d3362da5e4e880b8feec7a
+63b9b943c661dfad86644fdf34d956273996a261692227d6a9,8b4aeb0ad6f6275025df1fb2a173c5a7
+63bba32a7015a47db0da6381c30f95200858637fb82cf367ee,83841279d3c9a345e87f02ba431479fe
+63beea81eeec6fadf422df5800d013278ccd351dc77cabf363,d3ea0bcc5e7a5453855d96220fc02e97
+63bf6872e4541eaa7ffe0659e11eff43520a6571a634576c56,d01ae01321c2617c17767446f624a348
+63cce2b1651ed5575052abbb75747d059b5a54e09c7a330b56,46a4dbf4d155da400b30038a0ccd3bdc
+63d5165b6b9c42249409c8e616fc17481bd296f69d0b4564f2,a18bff62b8cbe7aea8a46aa2e83432a3
+63e616d85d1425ea0686aa58438ff416db5176da015cef2eb3,8c1e763b02f9f3f1b4c6f0e5dd18cb19
+63f5476e70301ba6fdd6d0317b2c03d678e2623ee66fd4110a,f04df6c132e1d2d14feeb17ca34b65f3
diff --git a/testdata/d.csv b/testdata/d.csv
new file mode 100644
index 00000000..1ebf17f2
--- /dev/null
+++ b/testdata/d.csv
@@ -0,0 +1,21 @@
+d,
+64188d8e8e56c823919ba5eea5b60d0e2a27b313b314a83cd79ec882e042ba47d1,27f60d5852ab8e9538b5c35891ebd915c14b02a679607b01ae33e040a816685fba36f7e9918136dba9999c13cc
+64254b85d06da94e2c7723699a684dfcf38664bcadb4e6aa35541cd5b2975bbcb9,fbc9d8e21a2192182aba69c73a6e3f7f56ba2fac8a634ef1f0b16625a12db3757c27dbddd74c3e598005a7c529f13410d4ff3a02456164e973040dec661f78106441
+642984b5855a4a1894d881f82d3703f184e6c1b380daa5d09147c98c1b71bee9ea,3ff17d6d132128a85f8262399a6ee09401672ec20e668ff70fe63024753d8b9ecd915720e2fc4b52d857034b066c2e316ab2d2d3c77d20649bfdd1e86d7f0ffa1b44302989e1f103470aebbaf4
+64299c1c1b5dabf41bd83f3c91efce9eb5c0acd635dc6e669b42c3bf27cc4dc418,144ab7485a18bdfc8ed9543e1d5783941d602f9b012441da55f028b37d679f046173b4ab1c10e424
+6435d0497f800004c1a23d3471242dbcf8012eb45792621e2185d675b1c3a21021,a03bf241d35ac46c51aad53c83b2f445fc8e97654e843b0d83b0ba85b0d8130c9e7c7b13bb4d6157f5f73df8c80e4f4851d29c0501e8fcba518d3dbd80c0e87e94ec1bc781e0f6092fd0d4749c418afd
+644515ee2686c2e0410a965fae5a8ff3e707bab2ba3969d9557ab529aa219da650,662ce7d0284408744733f63ea84cb9db34413f261913c3fce59933a196458b3a1e9b52a636af1fb778a0edaedae51be1aedb09b9d605e1e7ef8c0da3e8eba9b99d723a9c1635473554b0bf45db5fb790a110f0d3f89cbe
+6458f48aa991fc0a2c6f79f138fcc758646b025fce9d02525ee077dbbb56c64043,a48b7d67a08ebf8a9298c7b6576a1daae2e0b8fcc35fc95bd7097c54fed39df5bab602e389e1378523688109525e8be4b23d
+645b00b38d41e9e74d7af8b88c6840deacd9af74a25de3f352440b0087a111af2e,0d6b55f6eae73445f41335666b345be2afc15989331f8478efd86f7c420d7f71cd6a23723a25c1da963dce93e5993a74529a4cddced9ca3a6ede21b597ba2c26d2
+645c00301ef63070ab0912e3378b2d59d19953a74143b584d686e59638ede0250c,16fa8a614ee7bc188c92772bd8f41311e518ea04a4063eae2e3f0ac6c86fcb34a821afe711c4cabe6a6b4245dec139
+645c241e29e0a3e406f4a908faa7d39df87c91190fb3e073b006d22f6695735873,84b2dd6db4cdd508d31f4fa0ca561f90d0cdffdb958cf8a5d297260d
+6468c52a1fbf769451bcd1c99021ee0b309ae67bb5f03e83ab50674bb959e5845c,ae39e4716dc15ece68c57794720d787193b28632e13dea5050e95f1f251674370ef3aa64
+646acbb4b11cfa5ead5a2c38515ace8f4fc87d39c3cf8866401900ee822e8ce238,c31db7d0ce2537e1fe0c6fc9cd4e84d5c9f73df537425f1035938fa49fb0f9334f86be59b8
+6478d257a7fd6779ad36b351e88cc9f34e55cf8d200bc3f095505168d31dafc21c,f8e3051555b19ecc5af92ba46f7db73190d9e1e0ecf84c259cad97371480ea3c7c5036157fad5c1d0d008bf1ab4ae558b78f4426a9303cc53401b9085b5c23966f48fbb1d76809ea3376e3d08a6d10b048d06da6a5ff32
+64b099e855102c54d054907e42637536b93f2b5c8482795a4d89bd420dff876fe3,19bfabe9d9633c1741bf051db2ba9b0d0b265a66ac9869ce
+64b567cd2cb2d61062b66aeb2364f7bf3fc706f67ecf34674fdfc0b793587c6e3b,ccfc02a82b2e0f925a53aff5c040e610af1eee11f2aba92a9ce57e975c1937fb7888e9da98712bc5be906f0ed4946077f4ecb7d5c2fd167d892a67
+64bfd045aaaeded94be7a756ca44bf3c3b1825c32ce8df02023ba5349aab3cae4e,2a890e23f7282e5d38f5575e83d72b369c365a4772b0f109ce
+64c3fbfe842cf0e183d79b9340da544ac8afeee1351f4d67ba407afd0db8dc20b7,df3b8fc3e4b169c0cbeeb701ddc8a50ea4dab3ce5a32553bc5be28e5cd1c65a76669fa71c141c639965f8a7d71ef93f2a193cf9025a67509ac7bae8152a6e36a3c283e3186dc35ed11de23810a1cbe13b0889f465b8e70dfc96671821a4504c0
+64c610888ad1cb913b13be9f52e51269bfa664862b213d102838cfa04350eb3431,7a065900bc937ec5426525b13375ccc7f07b1230a3369eb6a107ba5a253182a2660ebe7f45
+64d41e007768c674b134ff3f75b7c682a08fe673929673a445cd2e176b63d5aff5,9fd9c6ceee853474dbd77c73640befc524d8e3f3
+64ee07557244e772cf9384d37ace73921388c05a8cadcab8aa17e82935bd5b95a7,4f396aef717bd3b9f57ca99af6db26114794c059472b8951dfe0cf588f35c7c74a91dbbac4f26faa565c18fb5b7d0ddbef53ae92945bf74e3f81a453d6b16b03208dbf5ae310f0
diff --git a/testdata/e.csv b/testdata/e.csv
new file mode 100644
index 00000000..1e5afaa8
--- /dev/null
+++ b/testdata/e.csv
@@ -0,0 +1,21 @@
+e,
+6500f23ec1,7b471b15ac811403113bf4
+654b6af788,7c38d58c240503b936f4c1204a4ed317680f6fbc09c95c4d6ab2598f31d3e09e9a
+654dceae45,2b36ece4081037b0ec8136d4a41a667f9736548ff85892fb178ed0008ea17fe7582985b489d9d3c455d23b1b
+65673f9cef,8cc057ce0c7190316c9269a6e2807e63417637b5f82eef1f94762e584191166662f6a446199ab950a6b96a98
+656845f85a,4ef94f090853d39618c561f4d6b1dab800b3fd46b95c56641079f36f8e3d8c3d24126ef86be8d456e93a5d4c
+656fd477dc,08e664da615c0dd584b91e210848ea2949dc60c555bc
+6575c86b58,421fb2a0f544ae76b850b45af8749b65eb5880fca17f6ba9b70cc9f6746cf04632
+6585892310,c2043f7e7ff3b392d46c381682da2f60baf85c34ed6e9f5a2a5cced6f972b9847b
+659459b414,8f8a3713c0abe3c94ef3aa4b449693df448683aa6192395d4bd61c66ef71f69e89
+659839e3bd,6baddd761d7c6b8bbc8dce4f7a0240f4db5bbe19b9eb0874ff3b8c1d0fd5ba48ff
+65a0e881ac,c7ccd582382f46df2095dff1d484af80f40fff68a3a92397d413a9818260e18cd40d2d35b4072dea89eb0d08
+65b4164cd2,6b8bcfd57d29fb94128767b24e4b09f3f6fbf1773785
+65b8989fc8,7e712054cbb6dc0e292684
+65b9996832,997ed9e6c10df1c78f3e1f
+65d805f1ba,3af5fcf80e392d3daec547de5d9171d9c24a79c5e3cc5551ea432377c277f58aa0
+65edc9cdf2,7e37479e9bb38fc69e1b0d
+65ef0d9209,c88ffcfba33856508b4ba58c82b65cf60927ffaa45faf1f671b27965ab7e87fc4e
+65f2b2764b,2a5cc7a625a03a55170954202ba6a95675acbb79897a79256c6913deeb583918198769fe1e2e4c2802623315
+65f72d65f3,77ef24d0a1a6d1c17580a8612cccd8398148834ff341
+65ffbd56f8,2a015033fd5beb3320f748a4589a5eb81d9a5241ab3c561341f1ae2de993957dc29a273e6056c5676e5ebabc
diff --git a/testdata/f.csv b/testdata/f.csv
new file mode 100644
index 00000000..1708d07d
--- /dev/null
+++ b/testdata/f.csv
@@ -0,0 +1,21 @@
+f,
+660d649ba1defa4ab5ab71f8a977d7f7cedb11056e,919be5811844077f4660af66afa9a59a5ad17cf5c541524e780fe2137bfa250c
+6623c6895027f70a5330bbcb1153d635abcb4d5224,8dadcde1a6f676d4004eacd399f825006ddf136d1e92b1c92113377b3e1741b4
+664f095b24484ebce8f31fbf008e63cc4aa163d401,c0c4a751f569c1f9c01531f57ba674b2ad2338d9c08f9e9fc85b0209d15466b2
+665201a38de7d7243df717c9f9279cdd30105f0f77,d9293577cc0d51fe3a5bee78fea9b2b2222e6c2aa0d26a4ef4bfb7dd095587e8
+665328b2449e537b0ca4733f87ac5ebcdf033c5ebd,624f80a361e47c7eb1b815e8714a40f67b4f642a5546547a3fcb5bf5593d8fab
+665ec882021f55b1fbaa5fad00df5c5d07633b7af3,1e917fbc04385290d654f711bdef12773dd54b6b5ea26fe2a9d58ed051f2cb7f
+6671c131cd433750ba6d3908150ca4910841164b74,a2ebfbdf7a23024c340a45f201645aa46f48bc1fdd8d34ed83fcffbf1ee90523
+667fb93d9ae877ba11f337f21422b0679852580802,4710649e06619e13250754937e9c17c20b07434751171aac2f2f78b184aa0146
+668ed5f39a5db059dc3261377f2a47728f7a357d33,8dd8ca749b87f43e290904749a546fe319c9d53e765f065bb8beb234a117655e
+66951782f6ba94f2b71e46d0cc4a2411b14d81eb70,4f5c9434dd0886c57c2530991cebd973e1b50d5ba8fcfc019e54561217a49bbb
+66970565dfe2b01cad49b73a085a3c3f7a3be61c4c,f6ca0ae18c896d9bc97c5a9d0c3a06256485f59c77fb91780b213f933b80f48b
+669f6a30a6712062da0cc27181845c04d7430abf73,5c6604bfd63b871daceb7893dd618850458974fe4108871c1a1323fb8ae34e4e
+66a9a7b89b78553592acf3dfc417c1d7654dab3273,0561f28c3a5ea0027ecb3c53fa068772a6b7cb73d23104a14f9aba8cd1f070a2
+66aba81567ba48f001f843f01354d575c2e2687847,b0f6ae2c1db8263f7e11fc79423109e718d1f3c30bd123c4243401b5e4f1fee6
+66b569cc3d28be4466fb28d147f66d6d8769598964,ecee392ad8217f325508ba38d280436fb0a520b79a9627e5e18197bf55540885
+66d4662cd100d66055917d6342d48f49d948fcc255,5762a8ac767fa30d2ca76db7081f8a2e4f5da4f0bf92d29e1322da9a154cc3d6
+66d6fa6ac71d0255dd3f185de6480d5b4316b6b050,5fc193e5e51b3bd8e95f4eb9df63236da7abf678fc47c0b339ceb5c127d0f488
+66e5b6c7c231a02a32eedd8383a5750fd135244a03,58c70ffbfada12550f24bf7931cee06eb2e267dec3560e2e46843e383415f163
+66e673cce02c2163f756491ef05d7535ceb578e215,b8db43d1f6e62361e2e3b8fa765f79c08ddfb3035caa06f8250d6d1b063a7140
+66fc4ad75184e6029c805d9494eed4e81be770c002,fc7ac5e785f73732d95183d6bdc3423d41a074fc3f04b1304bae1efa652edde1
diff --git a/testdata/g.csv b/testdata/g.csv
new file mode 100644
index 00000000..54f355cf
--- /dev/null
+++ b/testdata/g.csv
@@ -0,0 +1,21 @@
+g,
+6702c124856d5168381a32971d8933440a1728fc41,575696fd653a4de2f9a8c1f580cf0c229631b0f5d95fceb354cda133e2eb2d34
+6707f1511e3a2cb28493f91b85e9e4a9d9d07c86a5,ba368e0f859ee36da8701df1c0b52cbf0c0f8a4b1a91f6d0db83a408f5a937d1
+6707fd4213cae8d5342a98ba49b255fa80b2a9a6e4,bd3a44d30f66444f8732119bc7e0cf0bb47f8f0ab2840987fc06b629f3e6d3f4
+6710294a5693224a6222404ba45fd38eb2e77979a4,de35a8ea0a26d17445e2f509db23188961b5cd1229b96d2411565adf63731b5c
+6716a9f84e02143b50d9034aec126b12d7f2708cc4,5823640ae4529f8df2dab20386c887d0a1ba1ffa4583b99dff761c01f670c2fa
+672e51bc65c9b97d482b0b720e6cb673c41fe7b5c5,0687df449bd8cb8d8f526f4189973d084d786ab0927d81c127f56b03c61aa955
+67682620db65932047689e5eaf392d6b85be801864,b262d40758edb28d1c04fa3a24d8268990516de6846ad94d002ce55640866239
+676e8c320dbbf5eebc2969a93fbc51dd7f6062a7d1,c9e2a8e7181a70e2a488b884c8baadb4043a075c6876cb012c67fbec5aa9f615
+6772e2ac48891ee3c2c727835702a374ad0cb70fd6,985a9c9ee7a0626d78dab431e663289762ce6959be314f91f7b08b1466097fd6
+67847dd1dac117b85d1e20d93580cdf42f00001a77,62e6b1b8c2961703a90276dcde6dad182b2d14e23f27dccc927cca7770b9890e
+678f49948c72b7295f12092a24d300eeff894f1dd7,2e7c456dac5206c5627736924e96ac016a09a88ec5f4835fbe0cf9e294611c88
+67948b9633ab2ec07d7525936254e66f8c957d026c,66b5c54b3a685de3ea18f9e69254eec065eb3207ac1f93494fdcd585e9a267a0
+679674c162db8d3bb57c434fe87825625c4d4daf63,05425880d80258f7441859b3494415a3fd7398c9e209a19674abd48372b283c6
+67a8d3f17df85502bd644a364721e6364d61635b73,1efce69a3a05c505e9f9cc5c2241d02099c043d934389b430fd8b185e6dfe6cb
+67bad7f4fb3c6828b6fc4624d43786fc8f55d6eb0f,04a1c0a7ffe7acbf974ca18cf3debbd8e1be3d6703f842f57ef14af6d4c336d3
+67c13fb0c65acca5520bc2f59bd91ca3482dbec156,7fdc6989cd778baad45cd98358ea060237b169a4aeaeb14da6ac4686b7858c9f
+67d4314588b4424b0ee026536b9bd7857f11cab2ee,c63fd7a85a533b8591577bab805104708ba5458fab0e343d46b3e24a28b92cb5
+67d734244f85f32a58e34e2d9cadf225a56973d32f,d19a6307c24470b3973973319770bdb896218bb58d1f2d07c7226266075057d0
+67d9c159c5d5e407e6b0a4cacf9d6fe62a55b0fedc,89cbdb903fdfe0b44e74b0a69eed3de7029f18c28f77e5509f8ace766ab86610
+67fafc73d674250f11e559ab08b287f5714e531761,1752ffbf9807bb2e4e480bf045b4bacc472befe755287384b5a526065a58c065
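
Note for reviewers (not part of the patch itself): all of the fixtures added here share one layout. The first record is a single-character prefix tag with a trailing empty field ("d,", "e,", "f,", "g,"), and every subsequent record is a hex-encoded key,value pair whose key begins with that tag's ASCII byte (0x64 through 0x67 in these files). Below is a minimal sketch of how such a file could be decoded in a test helper; readFixture and kvPair are hypothetical names for illustration and do not appear in this diff.

package fixtures

import (
	"encoding/csv"
	"encoding/hex"
	"fmt"
	"os"
)

// kvPair holds one decoded fixture row: a raw key and its raw value.
type kvPair struct {
	Key   []byte
	Value []byte
}

// readFixture parses a testdata CSV file of the shape shown above,
// returning the prefix tag byte and the decoded key/value rows.
func readFixture(path string) (prefix byte, rows []kvPair, err error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, nil, err
	}
	defer f.Close()

	r := csv.NewReader(f)
	r.FieldsPerRecord = -1 // the header row (e.g. "g,") has a trailing empty field
	records, err := r.ReadAll()
	if err != nil {
		return 0, nil, err
	}
	if len(records) == 0 || len(records[0][0]) != 1 {
		return 0, nil, fmt.Errorf("%s: missing one-byte prefix header", path)
	}
	prefix = records[0][0][0]

	for _, rec := range records[1:] {
		if len(rec) < 2 {
			return 0, nil, fmt.Errorf("%s: malformed row %v", path, rec)
		}
		key, err := hex.DecodeString(rec[0])
		if err != nil {
			return 0, nil, err
		}
		value, err := hex.DecodeString(rec[1])
		if err != nil {
			return 0, nil, err
		}
		// Invariant visible in the data: every key starts with the file's
		// prefix byte (e.g. 0x67 == 'g' for rows in g.csv).
		if len(key) == 0 || key[0] != prefix {
			return 0, nil, fmt.Errorf("%s: key %x does not start with prefix %q", path, key, prefix)
		}
		rows = append(rows, kvPair{Key: key, Value: value})
	}
	return prefix, rows, nil
}

Setting FieldsPerRecord to -1 lets encoding/csv accept the two-field header alongside the two-field data rows without a per-record count check; decoded rows would presumably be written into a scratch database before the iterator code under test is exercised.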