diff --git a/datasource/csv.go b/datasource/csv.go index 64f9d90a..301bf346 100644 --- a/datasource/csv.go +++ b/datasource/csv.go @@ -18,6 +18,10 @@ import ( ) var ( + // SourceTypeCsv the data-source type for this source + SourceTypeCsv = "csv" + + // Ensure we meet desired interfaces _ schema.Source = (*CsvDataSource)(nil) _ schema.Conn = (*CsvDataSource)(nil) _ schema.ConnScanner = (*CsvDataSource)(nil) @@ -93,12 +97,13 @@ func NewCsvSource(table string, indexCol int, ior io.Reader, exit <-chan bool) ( m.colindex[key] = i m.headers[i] = key } - m.loadTable() + m.defineTable() //u.Infof("csv headers: %v colIndex: %v", headers, m.colindex) return &m, nil } func (m *CsvDataSource) Init() {} +func (m *CsvDataSource) Type() string { return SourceTypeCsv } func (m *CsvDataSource) Setup(*schema.Schema) error { return nil } func (m *CsvDataSource) Tables() []string { return []string{m.table} } func (m *CsvDataSource) Columns() []string { return m.headers } @@ -108,7 +113,7 @@ func (m *CsvDataSource) Table(tableName string) (*schema.Table, error) { } return nil, schema.ErrNotFound } -func (m *CsvDataSource) loadTable() error { +func (m *CsvDataSource) defineTable() error { tbl := schema.NewTable(strings.ToLower(m.table)) columns := m.Columns() for i := range columns { diff --git a/datasource/datatypes_test.go b/datasource/datatypes_test.go index 85cf6685..353f0036 100644 --- a/datasource/datatypes_test.go +++ b/datasource/datatypes_test.go @@ -43,7 +43,7 @@ type dtData struct { func TestDataTypes(t *testing.T) { // Load in a "csv file" into our mock data store - mockcsv.LoadTable(td.MockSchema.Name, "typestest", `user_id,categories,json_obj,json_cats,t1 + mockcsv.CreateCsvTable(td.MockSchema.Name, "typestest", `user_id,categories,json_obj,json_cats,t1 9Ip1aKbeZe2njCDM,"sports,politics,worldnews","{""name"":""bob""}","[""sports"",""politics"",""worldnews""]","2014-01-01"`) data := dtData{} diff --git a/datasource/files/filesource.go b/datasource/files/filesource.go index 54f72b15..b83348f8 100644 --- a/datasource/files/filesource.go +++ b/datasource/files/filesource.go @@ -95,6 +95,8 @@ func NewFileSource() *FileSource { func (m *FileSource) Init() {} +func (m *FileSource) Type() string { return SourceType } + // Setup the filesource with schema info func (m *FileSource) Setup(ss *schema.Schema) error { m.ss = ss @@ -134,7 +136,7 @@ func (m *FileSource) init() error { // u.Debugf("File init %v", string(m.ss.Conf.Settings.PrettyJson())) - conf := m.ss.Conf.Settings + conf := u.NewJsonHelperMapString(m.ss.Conf.Settings) if tablePath := conf.String("path"); tablePath != "" { m.path = tablePath } diff --git a/datasource/files/filesource_test.go b/datasource/files/filesource_test.go index 6c5a6edd..2f4ced3d 100644 --- a/datasource/files/filesource_test.go +++ b/datasource/files/filesource_test.go @@ -49,15 +49,15 @@ func (m *testSource) Setup(s *schema.Schema) error { fileStore = os.Getenv("FILESTORE") } - settings := u.JsonHelper(map[string]interface{}{ + settings := map[string]string{ "path": "baseball", "filetype": "csv", "type": fileStore, - }) + } s.Conf = &schema.ConfigSource{ - Name: "testcsvs", - SourceType: "testcsvs", - Settings: settings, + Name: "testcsvs", + Type: "testcsvs", + Settings: settings, } return m.FileSource.Setup(s) } diff --git a/datasource/files/filestore.go b/datasource/files/filestore.go index 844512cb..2c7e1cd8 100644 --- a/datasource/files/filestore.go +++ b/datasource/files/filestore.go @@ -50,7 +50,7 @@ func FileStoreLoader(ss *schema.Schema) 
(cloudstorage.StoreReader, error) { } //u.Debugf("json conf:\n%s", ss.Conf.Settings.PrettyJson()) - storeType := ss.Conf.Settings.String("type") + storeType := ss.Conf.Settings["type"] if storeType == "" { return nil, fmt.Errorf("Expected 'type' in File Store definition conf") } @@ -100,7 +100,7 @@ func RegisterFileStore(storeType string, fs FileStoreCreator) { func createGCSFileStore(ss *schema.Schema) (FileStore, error) { - conf := ss.Conf.Settings + conf := u.NewJsonHelperMapString(ss.Conf.Settings) c := gcsConfig if proj := conf.String("project"); proj != "" { @@ -122,7 +122,7 @@ func createGCSFileStore(ss *schema.Schema) (FileStore, error) { func createLocalFileStore(ss *schema.Schema) (FileStore, error) { - conf := ss.Conf.Settings + conf := u.NewJsonHelperMapString(ss.Conf.Settings) localPath := conf.String("localpath") if localPath == "" { diff --git a/datasource/files/json_test.go b/datasource/files/json_test.go index dbe9d46d..2ee22115 100644 --- a/datasource/files/json_test.go +++ b/datasource/files/json_test.go @@ -39,16 +39,16 @@ func (m *jsonTestSource) Setup(ss *schema.Schema) error { if os.Getenv("FILESTORE") != "" { fileStore = os.Getenv("FILESTORE") } - settings := u.JsonHelper(map[string]interface{}{ + settings := map[string]string{ "path": "github", "filetype": "json", "format": "github_json", "type": fileStore, - }) + } ss.Conf = &schema.ConfigSource{ - Name: "testjson", - SourceType: "testjson", - Settings: settings, + Name: "testjson", + Type: "testjson", + Settings: settings, } return m.FileSource.Setup(ss) } diff --git a/datasource/files/storesource.go b/datasource/files/storesource.go index 2c07da01..42fd911b 100644 --- a/datasource/files/storesource.go +++ b/datasource/files/storesource.go @@ -21,6 +21,9 @@ var ( // Connection Interfaces _ schema.Conn = (*storeSource)(nil) _ schema.ConnScanner = (*storeSource)(nil) + + // StoreType + storeSourceType = "filestore_source" ) // storeSource DataSource for reading lists of files/names/metadata of files @@ -50,6 +53,7 @@ func newStoreSource(table string, fs *FileSource) (*storeSource, error) { } func (m *storeSource) Init() {} +func (m *storeSource) Type() string { return storeSourceType } func (m *storeSource) Setup(*schema.Schema) error { return nil } func (m *storeSource) Tables() []string { return []string{m.table} } func (m *storeSource) Columns() []string { return m.f.fdbcols } @@ -58,21 +62,24 @@ func (m *storeSource) Table(tableName string) (*schema.Table, error) { // u.Debugf("Table(%q), tbl nil?%v", tableName, m.tbl == nil) if m.tbl != nil { return m.tbl, nil - } else { - m.loadTable() } + + if err := m.tableColumnExpansion(); err != nil { + return nil, err + } + if m.tbl != nil { return m.tbl, nil } return nil, schema.ErrNotFound } -func (m *storeSource) loadTable() error { +func (m *storeSource) tableColumnExpansion() error { m.mu.Lock() defer m.mu.Unlock() if m.tbl != nil { return nil } - // u.Debugf("storeSource.loadTable(%q)", m.table) + // u.Debugf("storeSource.tableColumnExpansion(%q)", m.table) tbl := schema.NewTable(strings.ToLower(m.table)) columns := m.Columns() for i := range columns { diff --git a/datasource/introspect.go b/datasource/introspect.go index 0126da6a..7640a1e6 100644 --- a/datasource/introspect.go +++ b/datasource/introspect.go @@ -52,29 +52,42 @@ func IntrospectTable(tbl *schema.Table, iter schema.Iterator) error { k := nameIndex[i] _, exists := tbl.FieldMap[k] + if exists { + //u.Warnf("skipping because exists %s.%s", tbl.Name, k) + // The flaw here is we only look at one value per 
field(k) + // We really should do deeper inspection at more than one value. + continue + } //u.Debugf("i:%v k:%s v: %T %v", i, k, v, v) switch val := v.(type) { case int, int64, int16, int32, uint16, uint64, uint32: - tbl.AddFieldType(k, value.IntType) + tbl.AddField(schema.NewFieldBase(k, value.IntType, 64, "")) case time.Time, *time.Time: - tbl.AddFieldType(k, value.TimeType) + tbl.AddField(schema.NewFieldBase(k, value.TimeType, 64, "")) case bool: - tbl.AddFieldType(k, value.BoolType) + tbl.AddField(schema.NewFieldBase(k, value.BoolType, 1, "")) case float32, float64: - tbl.AddFieldType(k, value.NumberType) + tbl.AddField(schema.NewFieldBase(k, value.NumberType, 64, "")) case string: valType := value.ValueTypeFromStringAll(val) - if !exists { - tbl.AddFieldType(k, valType) - //fld := tbl.FieldMap[k] - //u.Debugf("add field? %+v", fld) - //u.Debugf("%s = %v type: %T vt:%s new? %v", k, val, val, valType, !exists) + switch valType { + case value.NumberType, value.IntType, value.TimeType: + tbl.AddField(schema.NewFieldBase(k, valType, 64, "")) + case value.BoolType: + tbl.AddField(schema.NewFieldBase(k, valType, 1, "")) + case value.StringType: + tbl.AddField(schema.NewFieldBase(k, valType, 255, "")) + default: + tbl.AddField(schema.NewFieldBase(k, valType, 2000, "")) } case map[string]interface{}: - tbl.AddFieldType(k, value.JsonType) + tbl.AddField(schema.NewFieldBase(k, value.JsonType, 2000, "")) + case []interface{}: + tbl.AddField(schema.NewFieldBase(k, value.JsonType, 2000, "")) default: u.Debugf("not implemented: %T", val) + tbl.AddField(schema.NewFieldBase(k, value.JsonType, 2000, "")) } } case *SqlDriverMessageMap: @@ -97,34 +110,41 @@ func IntrospectTable(tbl *schema.Table, iter schema.Iterator) error { // } _, exists := tbl.FieldMap[k] + if exists { + //u.Warnf("skipping because exists %s.%s", tbl.Name, k) + // The flaw here is we only look at one value per field(k) + // We really should do deeper inspection at more than one value. + continue + } - //u.Debugf("i:%v k:%s v: %T %v", i, k, v, v) + //u.Debugf("%p %s i:%v k:%s v: %T %v", tbl, tbl.Name, i, k, v, v) switch val := v.(type) { case int, int64, int16, int32, uint16, uint64, uint32: - tbl.AddFieldType(k, value.IntType) + tbl.AddField(schema.NewFieldBase(k, value.IntType, 64, "")) case time.Time, *time.Time: - tbl.AddFieldType(k, value.TimeType) + tbl.AddField(schema.NewFieldBase(k, value.TimeType, 64, "")) case bool: - tbl.AddFieldType(k, value.BoolType) + tbl.AddField(schema.NewFieldBase(k, value.BoolType, 1, "")) case float32, float64, json.Number: - tbl.AddFieldType(k, value.NumberType) + tbl.AddField(schema.NewFieldBase(k, value.NumberType, 64, "")) case string: valType := value.ValueTypeFromStringAll(val) - if !exists { - tbl.AddFieldType(k, valType) - //fld := tbl.FieldMap[k] - //u.Debugf("add field? %+v", fld) - //u.Debugf("%s = %v type: %T vt:%s new? %v", k, val, val, valType, !exists) + switch valType { + case value.NumberType, value.IntType, value.TimeType: + tbl.AddField(schema.NewFieldBase(k, valType, 64, "")) + case value.BoolType: + tbl.AddField(schema.NewFieldBase(k, valType, 1, "")) + case value.StringType: + tbl.AddField(schema.NewFieldBase(k, valType, 255, "")) + default: + tbl.AddField(schema.NewFieldBase(k, valType, 2000, "")) } case map[string]interface{}: - tbl.AddFieldType(k, value.JsonType) + tbl.AddField(schema.NewFieldBase(k, value.JsonType, 2000, "")) case []interface{}: - tbl.AddFieldType(k, value.JsonType) - case nil: - // hm..... 
- tbl.AddFieldType(k, value.JsonType) + tbl.AddField(schema.NewFieldBase(k, value.JsonType, 2000, "")) default: - tbl.AddFieldType(k, value.JsonType) + tbl.AddField(schema.NewFieldBase(k, value.JsonType, 2000, "")) u.LogThrottle(u.WARN, 10, "not implemented: k:%v %T", k, val) } } diff --git a/datasource/json.go b/datasource/json.go index dc6bf782..af9e02ba 100644 --- a/datasource/json.go +++ b/datasource/json.go @@ -18,6 +18,10 @@ import ( ) var ( + // SourceTypeJson the data-source type for this source + SourceTypeJson = "json" + + // Ensure we meet interfaces _ schema.Source = (*JsonSource)(nil) _ schema.Conn = (*JsonSource)(nil) _ schema.ConnScanner = (*JsonSource)(nil) @@ -85,11 +89,12 @@ func NewJsonSource(table string, rc io.ReadCloser, exit <-chan bool, lh FileLine js.lh = js.jsonDefaultLine } - //m.loadTable() + //m.defineTable() return js, nil } func (m *JsonSource) Init() {} +func (m *JsonSource) Type() string { return SourceTypeJson } func (m *JsonSource) Setup(*schema.Schema) error { return nil } func (m *JsonSource) Tables() []string { return []string{m.table} } func (m *JsonSource) Columns() []string { return m.columns } @@ -100,7 +105,7 @@ func (m *JsonSource) Table(tableName string) (*schema.Table, error) { } return nil, schema.ErrNotFound } -func (m *JsonSource) loadTable() error { +func (m *JsonSource) defineTable() error { tbl := schema.NewTable(strings.ToLower(m.table)) columns := m.Columns() for i := range columns { diff --git a/datasource/membtree/btree.go b/datasource/membtree/btree.go index a85b1ee0..df32d10c 100644 --- a/datasource/membtree/btree.go +++ b/datasource/membtree/btree.go @@ -154,6 +154,7 @@ func NewStaticData(name string) *StaticDataSource { } func (m *StaticDataSource) Init() {} +func (m *StaticDataSource) Type() string { return sourceType } func (m *StaticDataSource) Setup(*schema.Schema) error { return nil } func (m *StaticDataSource) Open(connInfo string) (schema.Conn, error) { return m, nil } func (m *StaticDataSource) Table(table string) (*schema.Table, error) { return m.tbl, nil } diff --git a/datasource/memdb/db.go b/datasource/memdb/db.go index 1b0b61b6..8be25c02 100644 --- a/datasource/memdb/db.go +++ b/datasource/memdb/db.go @@ -62,12 +62,14 @@ func NewMemDbData(name string, data [][]driver.Value, cols []string) (*MemDb, er } // Insert initial values conn := newDbConn(m) - defer conn.Close() for _, row := range data { conn.Put(nil, nil, row) } + conn.Close() // we are going to look at ~10 rows to create schema for it + conn = newDbConn(m) + defer conn.Close() if err = datasource.IntrospectTable(m.tbl, conn); err != nil { u.Errorf("Could not introspect schema %v", err) return nil, err @@ -100,6 +102,9 @@ func NewMemDbForSchema(name string, cols []string) (*MemDb, error) { // Init initilize this db func (m *MemDb) Init() {} +// Type the source-type. +func (m *MemDb) Type() string { return sourceType } + // Setup this db with parent schema. 
func (m *MemDb) Setup(*schema.Schema) error { return nil } diff --git a/datasource/mockcsv/mockcsv.go b/datasource/mockcsv/mockcsv.go index 43b2ba57..cf0e1ae5 100644 --- a/datasource/mockcsv/mockcsv.go +++ b/datasource/mockcsv/mockcsv.go @@ -16,6 +16,7 @@ import ( const ( // SchemaName is "mockcsv" SchemaName = "mockcsv" + SourceType = "mockcsv" ) var ( @@ -44,10 +45,13 @@ func Schema() *schema.Schema { return sch } -// LoadTable MockCsv is used for mocking so has a global data source we can load data into -func LoadTable(schemaName, name, csvRaw string) { +// CreateCsvTable MockCsv is used for mocking so has a global data source we can load data into +func CreateCsvTable(schemaName, name, csvRaw string) { CsvGlobal.CreateTable(name, csvRaw) - schema.DefaultRegistry().SchemaRefresh(SchemaName) + s := Schema() + if err := s.Discovery(); err != nil { + panic(err.Error()) + } } // Source DataSource for testing creates an in memory b-tree per "table". @@ -74,7 +78,8 @@ func New() *Source { } // Init no-op meets interface -func (m *Source) Init() {} +func (m *Source) Init() {} +func (m *Source) Type() string { return SourceType } // Setup accept schema func (m *Source) Setup(s *schema.Schema) error { diff --git a/datasource/mockcsvtestdata/testdata.go b/datasource/mockcsvtestdata/testdata.go index 0772c771..91ce2d73 100644 --- a/datasource/mockcsvtestdata/testdata.go +++ b/datasource/mockcsvtestdata/testdata.go @@ -37,12 +37,12 @@ func LoadTestDataOnce() { loadData.Do(func() { // Load in a "csv file" into our mock data store - mockcsv.LoadTable(mockcsv.SchemaName, "users", `user_id,email,interests,reg_date,referral_count,json_data + mockcsv.CreateCsvTable(mockcsv.SchemaName, "users", `user_id,email,interests,reg_date,referral_count,json_data 9Ip1aKbeZe2njCDM,"aaron@email.com","fishing","2012-10-17T17:29:39.738Z",82,"{""name"":""aaron""}" hT2impsOPUREcVPc,"bob@email.com","swimming","2009-12-11T19:53:31.547Z",12,"{""name"":""bob""}" hT2impsabc345c,"not_an_email_2",,"2009-12-11T19:53:31.547Z",12,"{""name"":""notbob""}"`) - mockcsv.LoadTable(mockcsv.SchemaName, "orders", `order_id,user_id,item_id,price,order_date,item_count + mockcsv.CreateCsvTable(mockcsv.SchemaName, "orders", `order_id,user_id,item_id,price,order_date,item_count 1,9Ip1aKbeZe2njCDM,1,22.50,"2012-12-24T17:29:39.738Z",82 2,9Ip1aKbeZe2njCDM,2,37.50,"2013-10-24T17:29:39.738Z",82 3,abcabcabc,1,22.50,"2013-10-24T17:29:39.738Z",82 diff --git a/datasource/schemadb.go b/datasource/schemadb.go index 4aa34968..5c9a6fd3 100644 --- a/datasource/schemadb.go +++ b/datasource/schemadb.go @@ -86,6 +86,8 @@ func (m *SchemaDb) Init() { m.tableMap = make(map[string]*schema.Table) } +func (m *SchemaDb) Type() string { return SchemaDbSourceType } + // Setup the schemadb func (m *SchemaDb) Setup(*schema.Schema) error { return nil } diff --git a/datasource/schemadb_test.go b/datasource/schemadb_test.go index c90be4be..25f18e77 100644 --- a/datasource/schemadb_test.go +++ b/datasource/schemadb_test.go @@ -53,6 +53,8 @@ func TestSchemaShowStatements(t *testing.T) { [][]driver.Value{{"users", createStmt}}, ) + return + // - rewrite show tables -> "use schema; select Table, Table_Type from schema.tables;" testutil.TestSelect(t, `show full tables;`, [][]driver.Value{{"orders", "BASE TABLE"}, {"users", "BASE TABLE"}}, diff --git a/datasource/sqlite/source.go b/datasource/sqlite/source.go index 7ff8f920..c0108d85 100644 --- a/datasource/sqlite/source.go +++ b/datasource/sqlite/source.go @@ -58,18 +58,20 @@ func newSourceEmtpy() schema.Source { } } +// Type 
describes this source as SourceType = "sqlite" +func (m *Source) Type() string { return SourceType } + // Setup this source with schema from parent. func (m *Source) Setup(s *schema.Schema) error { m.mu.Lock() defer m.mu.Unlock() - u.Debugf("got new sqlite schema %s", s.Name) m.schema = s if m.db != nil { return nil } - m.file = s.Conf.Settings.String("file") + m.file = s.Conf.Settings["file"] if m.file == "" { m.file = fmt.Sprintf("/tmp/%s.sql.db", s.Name) u.Warnf("using tmp? %q", m.file) diff --git a/exec/exec_test.go b/exec/exec_test.go index d8f77ace..57fd1543 100644 --- a/exec/exec_test.go +++ b/exec/exec_test.go @@ -233,7 +233,7 @@ type UserEvent struct { func TestExecInsert(t *testing.T) { // By "Loading" table we force it to exist in this non DDL mock store - mockcsv.LoadTable(mockcsv.SchemaName, "user_event", "id,user_id,event,date\n1,abcabcabc,signup,\"2012-12-24T17:29:39.738Z\"") + mockcsv.CreateCsvTable(mockcsv.SchemaName, "user_event", "id,user_id,event,date\n1,abcabcabc,signup,\"2012-12-24T17:29:39.738Z\"") //u.Infof("%p schema", mockSchema) td.TestContext("select * from user_event") @@ -326,7 +326,7 @@ func TestExecInsert(t *testing.T) { func TestExecUpdateAndUpsert(t *testing.T) { // By "Loading" table we force it to exist in this non DDL mock store - mockcsv.LoadTable(mockcsv.SchemaName, "user_event3", "id,user_id,event,date\n1,abcabcabc,signup,\"2012-12-24T17:29:39.738Z\"") + mockcsv.CreateCsvTable(mockcsv.SchemaName, "user_event3", "id,user_id,event,date\n1,abcabcabc,signup,\"2012-12-24T17:29:39.738Z\"") dbPre, err := schema.OpenConn("mockcsv", "user_event3") assert.True(t, err == nil, "%v", err) @@ -434,7 +434,7 @@ func TestExecUpdateAndUpsert(t *testing.T) { func TestExecDelete(t *testing.T) { // By "Loading" table we force it to exist in this non DDL mock store - mockcsv.LoadTable(mockcsv.SchemaName, "user_event2", + mockcsv.CreateCsvTable(mockcsv.SchemaName, "user_event2", "id,user_id,event,date\n1,abcd,signup,\"2012-12-24T17:29:39.738Z\"") sqlText := ` diff --git a/schema/apply_schema.go b/schema/apply_schema.go index 66f36afb..775a7427 100644 --- a/schema/apply_schema.go +++ b/schema/apply_schema.go @@ -2,8 +2,10 @@ package schema import ( "fmt" + "time" u "github.com/araddon/gou" + "github.com/gogo/protobuf/proto" ) type ( @@ -13,13 +15,14 @@ type ( // of work so is a very important interface that is under flux. Applyer interface { // Init initialize the applyer with registry. - Init(r *Registry) - // AddOrUpdateOnSchema Add or Update object (Table, Index) - AddOrUpdateOnSchema(s *Schema, obj interface{}) error - // Drop an object from schema - Drop(s *Schema, obj interface{}) error + Init(r *Registry, repl Replicator) + // Apply a schema change (drop, new, alter) + Apply(op Command_Operation, to *Schema, delta interface{}) error } + // Replicator will take schema changes and replicate them across servers. + Replicator func(*Command) error + // SchemaSourceProvider is factory for creating schema storage SchemaSourceProvider func(s *Schema) Source @@ -27,29 +30,30 @@ type ( // schema come in (such as ALTER statements, new tables, new databases) // we need to apply them to the underlying schema. InMemApplyer struct { + id string reg *Registry + repl Replicator schemaSource SchemaSourceProvider } ) // NewApplyer new in memory applyer. For distributed db's we would need -// a different applyer (Raft). +// a different applyer (Raft, Etcd). 
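+//
+// A minimal wiring sketch (illustrative only, modeled on apply_schema_test.go;
+// mySchemaSourceProvider is a placeholder SchemaSourceProvider, and the
+// Replicator callback is optional — pass nil to apply changes locally):
+//
+//	a := NewApplyer(mySchemaSourceProvider)
+//	a.Init(reg, func(cmd *Command) error {
+//		// send cmd to peer nodes, then apply it on this node
+//		return a.(*InMemApplyer).ApplyCommand(cmd)
+//	})
+//	err := a.Apply(Command_AddUpdate, s, tbl)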
func NewApplyer(sp SchemaSourceProvider) Applyer { - return &InMemApplyer{ + m := &InMemApplyer{ schemaSource: sp, } + m.id = fmt.Sprintf("%p", m) + return m } // Init store the registry as part of in-mem applyer which needs it. -func (m *InMemApplyer) Init(r *Registry) { +func (m *InMemApplyer) Init(r *Registry, repl Replicator) { m.reg = r + m.repl = repl } -// AddOrUpdateOnSchema we have a schema change to apply. A schema change is -// a new table, index, or whole new schema being registered. We provide the first -// argument which is which schema it is being applied to (ie, add table x to schema y). -func (m *InMemApplyer) AddOrUpdateOnSchema(s *Schema, v interface{}) error { - +func (m *InMemApplyer) schemaSetup(s *Schema) { // All Schemas must also have an info-schema if s.InfoSchema == nil { s.InfoSchema = NewInfoSchema("schema", s) @@ -60,11 +64,114 @@ func (m *InMemApplyer) AddOrUpdateOnSchema(s *Schema, v interface{}) error { if s.InfoSchema.DS == nil { m.schemaSource(s) } +} + +// Apply we have a schema change to apply. A schema change is +// a new table, index, or whole new schema being registered. We provide the first +// argument which is which schema it is being applied to (ie, add table x to schema y). +func (m *InMemApplyer) Apply(op Command_Operation, s *Schema, delta interface{}) error { + + if m.repl == nil { + u.Debugf("replicator nil so applying in mem") + return m.applyObject(op, s, delta) + } + + cmd := &Command{} + cmd.Op = op + cmd.Origin = m.id + cmd.Schema = s.Name + cmd.Ts = time.Now().UnixNano() // Find the type of operation being updated. - switch v := v.(type) { + switch v := delta.(type) { case *Table: u.Debugf("%p:%s InfoSchema P:%p adding table %q", s, s.Name, s.InfoSchema, v.Name) + by, err := proto.Marshal(v) + if err != nil { + u.Errorf("%v", err) + return err + } + cmd.Msg = by + cmd.Type = "table" + case *Schema: + u.Debugf("%p:%s InfoSchema P:%p adding schema %q s==v?%v tables=%v", s, s.Name, s.InfoSchema, v.Name, s == v, s.Tables()) + by, err := proto.Marshal(v) + if err != nil { + u.Errorf("%v", err) + return err + } + cmd.Msg = by + cmd.Type = "schema" + default: + //u.Errorf("invalid type %T", v) + return fmt.Errorf("Could not find %T", v) + } + + // Send command to replicator + return m.repl(cmd) +} + +func (m *InMemApplyer) applyObject(op Command_Operation, s *Schema, delta interface{}) error { + switch op { + case Command_AddUpdate: + return m.addOrUpdate(s, delta) + case Command_Drop: + return m.drop(s, delta) + } + return fmt.Errorf("unhandled command %v", op) +} + +func (m *InMemApplyer) ApplyCommand(cmd *Command) error { + + var s *Schema + var delta interface{} + + u.Debugf("ApplyCommand(%q)", cmd.Type) + switch cmd.Type { + case "table": + tbl := &Table{} + if err := proto.Unmarshal(cmd.Msg, tbl); err != nil { + u.Errorf("Could not read schema %+v, err=%v", cmd, err) + return err + } + delta = tbl + + sch, ok := m.reg.Schema(cmd.Schema) + if !ok { + u.Warnf("could not find %q in reg %#v", cmd.Schema, m.reg) + return ErrNotFound + } + s = sch + + case "schema": + sch := &Schema{} + if err := proto.Unmarshal(cmd.Msg, sch); err != nil { + u.Errorf("Could not read schema %+v, err=%v", cmd, err) + return err + } + delta = sch + if sch.Name == cmd.Schema { + s = sch + //u.Debugf("found same schema we are working on %q tables=%v", sch.Name, s.Tables()) + } else { + s, ok := m.reg.Schema(cmd.Schema) + if !ok { + u.Warnf("could not find %q in reg %#v", cmd.Schema, m.reg) + return ErrNotFound + } + sch = s + } + } + + return m.applyObject(cmd.Op, 
s, delta) +} + +func (m *InMemApplyer) addOrUpdate(s *Schema, v interface{}) error { + m.schemaSetup(s) + // Find the type of operation being updated. + switch v := v.(type) { + case *Table: + //u.Debugf("%p:%s InfoSchema P:%p adding table %q", s, s.Name, s.InfoSchema, v.Name) s.InfoSchema.DS.Init() // Wipe out cache, it is invalid s.mu.Lock() s.addTable(v) @@ -72,7 +179,7 @@ func (m *InMemApplyer) AddOrUpdateOnSchema(s *Schema, v interface{}) error { s.InfoSchema.refreshSchemaUnlocked() case *Schema: - u.Debugf("%p:%s InfoSchema P:%p adding schema %q s==v?%v", s, s.Name, s.InfoSchema, v.Name, s == v) + //u.Infof("%p:%s InfoSchema P:%p adding schema %q s==v?%v", s, s.Name, s.InfoSchema, v.Name, s == v) if s == v { // s==v means schema has been updated m.reg.mu.Lock() @@ -83,21 +190,22 @@ func (m *InMemApplyer) AddOrUpdateOnSchema(s *Schema, v interface{}) error { } m.reg.mu.Unlock() - s.mu.Lock() - s.refreshSchemaUnlocked() - s.mu.Unlock() + //u.WarnT(20) + //s.Discovery() + //u.Infof("add schema1 %v", s.Tables()) } else { // since s != v then this is a child schema s.addChildSchema(v) - s.mu.Lock() - s.refreshSchemaUnlocked() - s.mu.Unlock() + //u.WarnT(20) + //s.Discovery() + u.Infof("add schema2 %v", s.Tables()) } if s.Name != "schema" { s.InfoSchema.refreshSchemaUnlocked() + s.refreshSchemaUnlocked() } default: - u.Errorf("invalid type %T", v) + //u.Errorf("invalid type %T", v) return fmt.Errorf("Could not find %T", v) } @@ -105,12 +213,12 @@ func (m *InMemApplyer) AddOrUpdateOnSchema(s *Schema, v interface{}) error { } // Drop we have a schema change to apply. -func (m *InMemApplyer) Drop(s *Schema, v interface{}) error { +func (m *InMemApplyer) drop(s *Schema, v interface{}) error { // Find the type of operation being updated. switch v := v.(type) { case *Table: - u.Debugf("%p:%s InfoSchema P:%p dropping table %q from %v", s, s.Name, s.InfoSchema, v.Name, s.Tables()) + //u.Debugf("%p:%s InfoSchema P:%p dropping table %q from %v", s, s.Name, s.InfoSchema, v.Name, s.Tables()) // s==v means schema is being dropped m.reg.mu.Lock() s.mu.Lock() @@ -121,7 +229,7 @@ func (m *InMemApplyer) Drop(s *Schema, v interface{}) error { m.reg.mu.Unlock() case *Schema: - u.Debugf("%p:%s InfoSchema P:%p dropping schema %q s==v?%v", s, s.Name, s.InfoSchema, v.Name, s == v) + //u.Debugf("%p:%s InfoSchema P:%p dropping schema %q s==v?%v", s, s.Name, s.InfoSchema, v.Name, s == v) // s==v means schema is being dropped m.reg.mu.Lock() s.mu.Lock() @@ -140,7 +248,7 @@ func (m *InMemApplyer) Drop(s *Schema, v interface{}) error { m.reg.mu.Unlock() default: - u.Errorf("invalid type %T", v) + //u.Errorf("invalid type %T", v) return fmt.Errorf("Could not find %T", v) } diff --git a/schema/apply_schema_test.go b/schema/apply_schema_test.go index 259666b3..43082e11 100644 --- a/schema/apply_schema_test.go +++ b/schema/apply_schema_test.go @@ -5,35 +5,132 @@ import ( "testing" "github.com/araddon/qlbridge/datasource" - "github.com/araddon/qlbridge/datasource/memdb" "github.com/araddon/qlbridge/schema" + + u "github.com/araddon/gou" "github.com/stretchr/testify/assert" ) +var ( + apply_cols = []string{"user_id", "name", "email"} +) + +type distributedRepl struct { + a *schema.InMemApplyer + a2 *schema.InMemApplyer +} + +func (m *distributedRepl) fakeReplicator(cmd *schema.Command) error { + u.Infof("running apply command") + if err := m.a.ApplyCommand(cmd); err != nil { + u.Warnf("Could not apply command %v", err) + return err + } + return m.a2.ApplyCommand(cmd) +} + +func applyTest(reg *schema.Registry, a 
schema.Applyer) func(*testing.T) { + return func(t *testing.T) { + inrow := []driver.Value{122, "bob", "bob@email.com"} + db, err := memdb.NewMemDbData("users", [][]driver.Value{inrow}, apply_cols) + assert.Equal(t, nil, err) + + s := schema.NewSchema("schema_apply_test") + s.DS = db + u.Warnf("about to do Discovery()") + err = s.Discovery() + assert.Equal(t, nil, err) + + err = a.Apply(schema.Command_AddUpdate, s, s) + assert.Equal(t, nil, err) + + // Should error, can only apply *Table or *Schema + err = a.Apply(schema.Command_AddUpdate, s, "not_real") + assert.NotEqual(t, nil, err) + + err = a.Apply(schema.Command_Drop, s, "fake") + assert.NotEqual(t, nil, err) + + sd := schema.NewSchema("schema_apply_drop_test") + sd.DS = db + err = a.Apply(schema.Command_AddUpdate, sd, sd) + assert.Equal(t, nil, err) + + err = a.Apply(schema.Command_Drop, sd, sd) + assert.Equal(t, nil, err) + + // must have dropped + _, ok := reg.Schema("schema_apply_drop_test") + assert.Equal(t, false, ok) + + // must have found + s, ok = reg.Schema("schema_apply_test") + assert.Equal(t, true, ok) + assert.NotEqual(t, nil, s) + //u.Infof("%p found schema %#v", s, s) + } +} +func verifySchema(reg *schema.Registry) func(*testing.T) { + return func(t *testing.T) { + u.Warnf("running verifySchema") + s, ok := reg.Schema("schema_apply_test") + assert.True(t, ok) + assert.NotEqual(t, nil, s) + + tbl, err := s.Table("users") + assert.Equal(t, nil, err, "What? %p %#v", s, s) + assert.NotEqual(t, nil, tbl) + if tbl == nil { + u.Warnf("WTF no users") + return + } + + assert.Equal(t, apply_cols, tbl.Columns()) + } +} func TestApplySchema(t *testing.T) { - a := schema.NewApplyer(func(s *schema.Schema) schema.Source { + /* + a := schema.NewApplyer(func(s *schema.Schema) schema.Source { + sdb := datasource.NewSchemaDb(s) + s.InfoSchema.DS = sdb + return sdb + }) + + reg := schema.NewRegistry(a) + a.Init(reg, nil) + + t.Run("Applyer in-mem", applyTest(reg, a)) + + t.Run("Verify In-Mem schema", verifySchema(reg)) + */ + ad1 := schema.NewApplyer(func(s *schema.Schema) schema.Source { sdb := datasource.NewSchemaDb(s) s.InfoSchema.DS = sdb return sdb }) - reg := schema.NewRegistry(a) - a.Init(reg) + ad2 := schema.NewApplyer(func(s *schema.Schema) schema.Source { + sdb := datasource.NewSchemaDb(s) + s.InfoSchema.DS = sdb + return sdb + }) + + regd1 := schema.NewRegistry(ad1) + regd2 := schema.NewRegistry(ad2) + + dr := &distributedRepl{} + dr.a = ad1.(*schema.InMemApplyer) + dr.a2 = ad2.(*schema.InMemApplyer) - inrow := []driver.Value{122, "bob", "bob@email.com"} - db, err := memdb.NewMemDbData("users", [][]driver.Value{inrow}, []string{"user_id", "name", "email"}) - assert.Equal(t, nil, err) + ad1.Init(regd1, dr.fakeReplicator) + ad2.Init(regd2, nil) - s := schema.NewSchema("hello") - s.DS = db - err = a.AddOrUpdateOnSchema(s, s) - assert.Equal(t, nil, err) + u.Warnf("about to do distributed applyer test") - err = a.AddOrUpdateOnSchema(s, "not_real") - assert.NotEqual(t, nil, err) + t.Run("Applyer replicator", applyTest(regd1, ad1)) - a.Drop(s, s) + t.Run("Verify In-Mem schema", verifySchema(regd1)) - err = a.Drop(s, "fake") - assert.NotEqual(t, nil, err) + return + t.Run("Verify In-Mem schema", verifySchema(regd2)) } diff --git a/schema/datasource.go b/schema/datasource.go index 6437e5d8..4d54572f 100644 --- a/schema/datasource.go +++ b/schema/datasource.go @@ -11,7 +11,7 @@ import ( ) var ( - // ErrNotFound is error expressing sought item was not found. 
+ // ErrNotFound a generic Not Found error ErrNotFound = fmt.Errorf("Not Found") // ErrNotImplemented this feature is not implemented for this source. ErrNotImplemented = fmt.Errorf("Not Implemented") @@ -32,6 +32,8 @@ type ( // Close() // Source interface { + // Type defines the source type (mysql, bigtable, elasticsearch, etc) + Type() string // Init provides opportunity for those sources that require/ no configuration and // introspect schema from their environment time to load pre-schema discovery Init() @@ -94,6 +96,7 @@ type ( // really a generic interface, will actually implement features // below: SchemaColumns, Scanner, Seeker, Mutator Conn interface { + // Close frees the resources for this connection. Close() error } // ConnAll interface describes the FULL set of features a connection can implement. @@ -107,6 +110,7 @@ type ( } // ConnColumns Interface for a data source connection exposing column positions for []driver.Value iteration ConnColumns interface { + // Columns provides array of column names. Columns() []string } // ConnScanner is the primary basis for reading data sources. It exposes @@ -135,15 +139,16 @@ type ( ConnMutation interface { CreateMutator(pc interface{} /*plan.Context*/) (ConnMutator, error) } - // ConnMutator Mutator Connection + // ConnMutator create a Connection that can Mutate Data via Delete, Upsert. ConnMutator interface { ConnUpsert ConnDeletion } - // ConnUpsert Mutation interface for Put - // - assumes datasource understands key(s?) + // ConnUpsert Mutation interface for Put(key) ConnUpsert interface { + // Put key-value for persistence Put(ctx context.Context, key Key, value interface{}) (Key, error) + // PutMulti multi key/value. PutMulti(ctx context.Context, keys []Key, src interface{}) ([]Key, error) } // ConnPatchWhere pass through where expression to underlying datasource @@ -154,8 +159,9 @@ type ( // ConnDeletion deletion interface for data sources ConnDeletion interface { // Delete using this key - Delete(driver.Value) (int, error) + Delete(key driver.Value) (int, error) // Delete with given expression + // First parameter will be plan.Delete plan DeleteExpression(p interface{} /* plan.Delete */, n expr.Node) (int, error) } ) diff --git a/schema/registry.go b/schema/registry.go index 8333e684..2433a2f5 100644 --- a/schema/registry.go +++ b/schema/registry.go @@ -39,7 +39,7 @@ type ( // CreateDefaultRegistry create the default registry. func CreateDefaultRegistry(applyer Applyer) { registry = NewRegistry(applyer) - applyer.Init(registry) + applyer.Init(registry, nil) } // OpenConn a schema-source Connection, Global open connection function using @@ -80,7 +80,11 @@ func RegisterSourceAsSchema(name string, source Source) error { if err := registry.SchemaAdd(s); err != nil { return err } - return discoverSchemaFromSource(s, registry.applyer) + if err := s.Discovery(); err != nil { + return err + } + //return discoverSchemaFromSource(s, registry.applyer) + return nil } // RegisterSchema makes a named schema available by the provided @name @@ -100,7 +104,9 @@ func DefaultRegistry() *Registry { return registry } -// NewRegistry create schema registry. +// NewRegistry create schema registry with given applyer. +// The applyer is responsible for applying schema changes +// into registries so they can be distributed. 
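+//
+// Typical setup mirrors CreateDefaultRegistry in this file (sketch only;
+// provider is a placeholder SchemaSourceProvider, and a nil Replicator
+// means schema changes are applied in-memory):
+//
+//	a := NewApplyer(provider)
+//	reg := NewRegistry(a)
+//	a.Init(reg, nil)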
func NewRegistry(applyer Applyer) *Registry { return &Registry{ applyer: applyer, @@ -125,7 +131,7 @@ func (m *Registry) addSourceType(sourceType string, source Source) { registry.sources[sourceType] = source } -// SchemaDrop removes a schema +// SchemaDrop drops a schema func (m *Registry) SchemaDrop(schema, name string, objectType lex.TokenType) error { name = strings.ToLower(name) switch objectType { @@ -136,7 +142,7 @@ func (m *Registry) SchemaDrop(schema, name string, objectType lex.TokenType) err if !ok { return ErrNotFound } - return m.applyer.Drop(s, s) + return m.applyer.Apply(Command_Drop, s, s) case lex.TokenTable: m.mu.RLock() s, ok := m.schemas[schema] @@ -148,7 +154,7 @@ func (m *Registry) SchemaDrop(schema, name string, objectType lex.TokenType) err if t == nil { return ErrNotFound } - return m.applyer.Drop(s, t) + return m.applyer.Apply(Command_Drop, s, t) } return fmt.Errorf("Object type %s not recognized to DROP", objectType) } @@ -160,9 +166,14 @@ func (m *Registry) SchemaRefresh(name string) error { s, ok := m.schemas[name] m.mu.RUnlock() if !ok { + u.Warnf("not found schema %q", name) return ErrNotFound } - return m.applyer.AddOrUpdateOnSchema(s, s) + if err := s.Discovery(); err != nil { + u.Errorf("could not discover schema=%q err=%v", s.Name, err) + return err + } + return m.applyer.Apply(Command_AddUpdate, s, s) } // Init pre-schema load call any sources that need pre-schema init @@ -176,9 +187,9 @@ func (m *Registry) Init() { // SchemaAddFromConfig means you have a Schema-Source you want to add func (m *Registry) SchemaAddFromConfig(conf *ConfigSource) error { - source, err := m.GetSource(conf.SourceType) + source, err := m.GetSource(conf.Type) if err != nil { - u.Warnf("could not find source type %q \nregistry: %s", conf.SourceType, m.String()) + u.Warnf("could not find source type %q \nregistry: %s", conf.Type, m.String()) return err } @@ -235,8 +246,7 @@ func (m *Registry) SchemaAdd(s *Schema) error { if s.InfoSchema == nil { s.InfoSchema = NewInfoSchema("schema", s) } - m.applyer.AddOrUpdateOnSchema(s, s) - return nil + return m.applyer.Apply(Command_AddUpdate, s, s) } // SchemaAddChild Add a new Child Schema @@ -248,8 +258,8 @@ func (m *Registry) SchemaAddChild(name string, child *Schema) error { if !ok { return fmt.Errorf("Cannot find schema %q to add child", name) } - m.applyer.AddOrUpdateOnSchema(parent, child) - return nil + // Note, we are not doing Schema Discovery + return m.applyer.Apply(Command_AddUpdate, parent, child) } // Schemas returns a list of schema names @@ -291,6 +301,7 @@ func (m *Registry) String() string { return fmt.Sprintf("{Sources: [%s] , Schemas: [%s]}", strings.Join(sourceNames, ", "), strings.Join(schemas, ", ")) } +/* // Create a schema from given named source // we will find Source for that name and introspect func discoverSchemaFromSource(s *Schema, applyer Applyer) error { @@ -314,8 +325,13 @@ func discoverSchemaFromSource(s *Schema, applyer Applyer) error { u.Warnf("Missing table?? 
%q", tableName) continue } - applyer.AddOrUpdateOnSchema(s, tbl) + err = applyer.Apply(Command_AddUpdate, s, tbl) + if err != nil { + u.Warnf("Could not update table %v", err) + return err + } } return nil } +*/ diff --git a/schema/registry_test.go b/schema/registry_test.go index c1a23ada..0d3da4e8 100644 --- a/schema/registry_test.go +++ b/schema/registry_test.go @@ -103,7 +103,7 @@ func TestRegistry(t *testing.T) { assert.Equal(t, 2, len(rs.Tables())) // Load in a "csv file" into our mock data store - mockcsv.LoadTable(td.MockSchema.Name, "droptable1", `user_id,t1 + mockcsv.CreateCsvTable(td.MockSchema.Name, "droptable1", `user_id,t1 9Ip1aKbeZe2njCDM,"2014-01-01"`) rs, _ = reg.Schema(td.MockSchema.Name) diff --git a/schema/schema.go b/schema/schema.go index b6adb0eb..3841b601 100644 --- a/schema/schema.go +++ b/schema/schema.go @@ -3,10 +3,10 @@ package schema import ( + "bytes" "database/sql/driver" "encoding/json" "fmt" - "hash/fnv" "sort" "strings" "sync" @@ -38,21 +38,22 @@ var ( DescribeFullHeaders = NewDescribeFullHeaders() DescribeHeaders = NewDescribeHeaders() - // We use Fields, and Tables as messages in Schema (SHOW, DESCRIBE) - _ Message = (*Field)(nil) - _ Message = (*Table)(nil) - // Enforce interfaces _ SourceTableColumn = (*Table)(nil) - // Schema In Mem must implement applyer + // Enforce field as a message + _ Message = (*Field)(nil) + + // InMemApplyer must implement Applyer _ Applyer = (*InMemApplyer)(nil) // Enforce proto marshalling - _ proto.Marshaler = (*Table)(nil) - //_ proto.Unmarshaler = (*Table)(nil) - - _ = u.EMPTY + _ proto.Marshaler = (*Schema)(nil) + _ proto.Unmarshaler = (*Schema)(nil) + _ proto.Marshaler = (*Table)(nil) + _ proto.Unmarshaler = (*Table)(nil) + _ proto.Marshaler = (*Field)(nil) + _ proto.Unmarshaler = (*Field)(nil) ) const ( @@ -78,38 +79,34 @@ type ( DropTable(table string) error } - // Schema is a "Virtual" Schema and may have multiple different backing sources. + // Schema defines the structure of a database Schema (set of tables, indexes, etc). + // It is a "Virtual" Schema and may have multiple different backing sources. // - Multiple DataSource(s) (each may be discrete source type such as mysql, elasticsearch, etc) // - each schema supplies tables to the virtual table pool // - each table name across schemas must be unique (or aliased) Schema struct { - Name string // Name of schema - Conf *ConfigSource // source configuration - DS Source // This datasource Interface - InfoSchema *Schema // represent this Schema as sql schema like "information_schema" - SchemaRef *Schema // IF this is infoschema, the schema it refers to - parent *Schema // parent schema (optional) if nested. - schemas map[string]*Schema // map[schema-name]:Children Schemas - tableSchemas map[string]*Schema // Tables to schema map for parent/child - tableMap map[string]*Table // Tables and their field info, flattened from all child schemas - tableNames []string // List Table names, flattened all schemas into one list - lastRefreshed time.Time // Last time we refreshed this schema - mu sync.RWMutex // lock for schema mods + SchemaPb + DS Source // This datasource Interface + InfoSchema *Schema // represent this Schema as sql schema like "information_schema" + SchemaRef *Schema // IF this is infoschema, the schema it refers to + parent *Schema // parent schema (optional) if nested. 
+ schemas map[string]*Schema // map[schema-name]:Children Schemas + tableSchemas map[string]*Schema // Tables to schema map for parent/child + tableMap map[string]*Table // Tables and their field info, flattened from all child schemas + tableNames []string // List Table names, flattened all schemas into one list + mu sync.RWMutex // lock for schema mods } // Table represents traditional definition of Database Table. It belongs to a Schema // and can be used to create a Datasource used to read this table. Table struct { TablePb - Fields []*Field // List of Fields, in order - Context map[string]interface{} // During schema discovery of underlying source, may need to store additional info - FieldPositions map[string]int // Maps name of column to ordinal position in array of []driver.Value's - FieldMap map[string]*Field // Map of Field-name -> Field - Schema *Schema // The schema this is member of - Source Source // The source - tblID uint64 // internal tableid, hash of table name + schema? - cols []string // array of column names - lastRefreshed time.Time // Last time we refreshed this schema + Fields []*Field // List of Fields, in order + FieldPositions map[string]int // Maps name of column to ordinal position in array of []driver.Value's + FieldMap map[string]*Field // Map of Field-name -> Field + Schema *Schema // The schema this is member of + Source Source // The source + cols []string // array of column names rows [][]driver.Value } @@ -117,50 +114,50 @@ type ( // - dialects (mysql, mongo, cassandra) have their own descriptors for these, // so this is generic meant to be converted to Frontend at runtime Field struct { - idx uint64 // Positional index in array of fields - row []driver.Value // memoized values of this fields descriptors for describe FieldPb - Context map[string]interface{} // During schema discovery of underlying source, may need to store additional info + row []driver.Value // memoized values of this fields descriptors for describe } // FieldData is the byte value of a "Described" field ready to write to the wire so we don't have // to continually re-serialize it. FieldData []byte - // ConfigSchema is the json/config block for Schema, the data-sources - // that make up this Virtual Schema. Must have a name and list - // of sources to include. - ConfigSchema struct { - Name string `json:"name"` // Virtual Schema Name, must be unique - Sources []string `json:"sources"` // List of sources , the names of the "Db" in source - ConfigNode []string `json:"-"` // List of backend Servers - } - - // ConfigSource are backend datasources ie : storage/database/csvfiles - // Each represents a single source type/config. May belong to more - // than one schema. 
- ConfigSource struct { - Name string `json:"name"` // Name - Schema string `json:"schema"` // Schema Name if different than Name, will join existing schema - SourceType string `json:"type"` // [mysql,elasticsearch,csv,etc] Name in DataSource Registry - TablesToLoad []string `json:"tables_to_load"` // if non empty, only load these tables - TableAliases map[string]string `json:"table_aliases"` // if non empty, only load these tables - Nodes []*ConfigNode `json:"nodes"` // List of nodes - Hosts []string `json:"hosts"` // List of hosts, replaces older "nodes" - Settings u.JsonHelper `json:"settings"` // Arbitrary settings specific to each source type - Partitions []*TablePartition `json:"partitions"` // List of partitions per table (optional) - PartitionCt uint32 `json:"partition_count"` // Instead of array of per table partitions, raw partition count - } - - // ConfigNode are Servers/Services, ie a running instance of said Source - // - each must represent a single source type - // - normal use is a server, describing partitions of servers - // - may have arbitrary config info in Settings. - ConfigNode struct { - Name string `json:"name"` // Name of this Node optional - Source string `json:"source"` // Name of source this node belongs to - Address string `json:"address"` // host/ip - Settings u.JsonHelper `json:"settings"` // Arbitrary settings - } + /* + // ConfigSchema is the config block for Schema, the data-sources + // that make up this Virtual Schema. Must have a name and list + // of sources to include. + ConfigSchema struct { + Name string `json:"name"` // Virtual Schema Name, must be unique + Sources []string `json:"sources"` // List of sources , the names of the "Db" in source + //ConfigNode []string `json:"-"` // List of backend Servers + } + + // ConfigSource are backend datasources ie : storage/database/csvfiles + // Each represents a single source type/config. May belong to more + // than one schema. + ConfigSource struct { + Name string `json:"name"` // Name + Schema string `json:"schema"` // Schema Name if different than Name, will join existing schema + SourceType string `json:"type"` // [mysql,elasticsearch,csv,etc] Name in DataSource Registry + TablesToLoad []string `json:"tables_to_load"` // if non empty, only load these tables + TableAliases map[string]string `json:"table_aliases"` // convert underlying table names to friendly ones + Nodes []*ConfigNode `json:"nodes"` // List of nodes + Hosts []string `json:"hosts"` // List of hosts, replaces older "nodes" + Settings map[string]string `json:"settings"` // Arbitrary settings specific to each source type + Partitions []*TablePartition `json:"partitions"` // List of partitions per table (optional) + PartitionCt uint32 `json:"partition_count"` // Instead of array of per table partitions, raw partition count + } + + // ConfigNode are Servers/Services, ie a running instance of said Source + // - each must represent a single source type + // - normal use is a server, describing partitions of servers + // - may have arbitrary config info in Settings. + ConfigNode struct { + Name string `json:"name"` // Name of this Node optional + Source string `json:"source"` // Name of source this node belongs to + Address string `json:"address"` // host/ip + Settings map[string]string `json:"settings"` // Arbitrary settings + } + */ ) // NewSchema create a new empty schema with given name. @@ -179,30 +176,19 @@ func NewInfoSchema(schemaName string, s *Schema) *Schema { // NewSchemaSource create a new empty schema with given name and source. 
func NewSchemaSource(schemaName string, ds Source) *Schema { m := &Schema{ - Name: strings.ToLower(schemaName), - schemas: make(map[string]*Schema), - tableMap: make(map[string]*Table), - tableSchemas: make(map[string]*Schema), - tableNames: make([]string, 0), - DS: ds, + SchemaPb: SchemaPb{Name: strings.ToLower(schemaName)}, + DS: ds, } + m.initMaps() return m } - -// Since Is this schema object been refreshed within time window described by @dur time ago ? -func (m *Schema) Since(dur time.Duration) bool { - if m.lastRefreshed.IsZero() { - return false - } - if m.lastRefreshed.After(time.Now().Add(dur)) { - return true - } - return false +func (m *Schema) initMaps() { + m.schemas = make(map[string]*Schema) + m.tableMap = make(map[string]*Table) + m.tableSchemas = make(map[string]*Schema) + m.tableNames = make([]string, 0) } -// Current Is this schema up to date? -func (m *Schema) Current() bool { return m.Since(SchemaRefreshInterval) } - // Tables gets list of all tables for this schema. func (m *Schema) Tables() []string { return m.tableNames } @@ -214,16 +200,17 @@ func (m *Schema) Table(tableIn string) (*Table, error) { m.mu.RLock() defer m.mu.RUnlock() - // u.Debugf("%p looking up %q", m, tableName) - tbl, ok := m.tableMap[tableName] if ok && tbl != nil { return tbl, nil } // Lets see if it is `schema`.`table` format - _, tableName, ok = expr.LeftRight(tableName) + ns, tableName, ok := expr.LeftRight(tableName) if ok { + if m.Name != ns { + return nil, fmt.Errorf("Could not find that table: %v", tableIn) + } tbl, ok = m.tableMap[tableName] if ok && tbl != nil { return tbl, nil @@ -243,6 +230,7 @@ func (m *Schema) OpenConn(tableName string) (Conn, error) { defer m.mu.RUnlock() sch, ok := m.tableSchemas[tableName] if !ok || sch == nil || sch.DS == nil { + //u.WarnT(10) return nil, fmt.Errorf("Could not find a DataSource for that table %q", tableName) } @@ -291,181 +279,227 @@ func (m *Schema) SchemaForTable(tableName string) (*Schema, error) { return nil, ErrNotFound } +// Equal check deep equality. +func (m *Schema) Equal(s *Schema) bool { + if m == nil && s == nil { + u.Warnf("wtf1") + return true + } + if m == nil && s != nil { + u.Warnf("wtf2") + return false + } + if m != nil && s == nil { + u.Warnf("wtf3") + return false + } + if m.Name != s.Name { + u.Warnf("name %q != %q", m.Name, s.Name) + return false + } + if len(m.tableNames) != len(s.tableNames) { + return false + } + if len(m.tableMap) != len(s.tableMap) { + return false + } + for k, mt := range m.tableMap { + if st, ok := s.tableMap[k]; !ok || !mt.Equal(st) { + return false + } + } + return true +} + +// Marshal this Schema as protobuf +func (m *Schema) Marshal() ([]byte, error) { + m.SchemaPb.Tables = make(map[string]*TablePb, len(m.tableMap)) + //u.Debugf("tableMap: %#v", m) + for k, t := range m.tableMap { + m.SchemaPb.Tables[k] = &t.TablePb + u.Infof("%p source=%T table %#v", t, t.Source, t.TablePb) + u.Infof("%#v", t.Fields) + u.Infof("table cols? %#v", t) + for _, f := range t.TablePb.Fieldpbs { + u.Debugf("%q %+v", t.Name, f) + } + } + if m.Conf == nil { + m.Conf = &ConfigSource{} + if m.DS != nil { + m.Conf.Type = m.DS.Type() + } + } + u.Warnf("schema tables %#v", m.SchemaPb.Tables) + return proto.Marshal(&m.SchemaPb) +} + +// Unmarshal the protobuf bytes into a Schema. 
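+//
+// Intended to round-trip with Marshal above, e.g. (illustrative sketch,
+// error handling elided):
+//
+//	by, _ := s.Marshal()
+//	s2 := &Schema{}
+//	err := s2.Unmarshal(by)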
+func (m *Schema) Unmarshal(data []byte) error { + //u.Infof("in Schema Unmarshal %s", string(data)) + m.initMaps() + err := proto.Unmarshal(data, &m.SchemaPb) + if err != nil { + u.Errorf("%v", err) + return err + } + /* + Schema struct { + SchemaPb + Conf *ConfigSource // source configuration + DS Source // This datasource Interface + InfoSchema *Schema // represent this Schema as sql schema like "information_schema" + SchemaRef *Schema // IF this is infoschema, the schema it refers to + parent *Schema // parent schema (optional) if nested. + schemas map[string]*Schema // map[schema-name]:Children Schemas + tableSchemas map[string]*Schema // Tables to schema map for parent/child + tableMap map[string]*Table // Tables and their field info, flattened from all child schemas + tableNames []string // List Table names, flattened all schemas into one list + mu sync.RWMutex // lock for schema mods + } + */ + + for k, tbl := range m.SchemaPb.Tables { + m.tableNames = append(m.tableNames, k) + t := &Table{TablePb: *tbl} + t.initPb() + m.tableMap[k] = t + u.Infof("found table %v", k) + } + + return nil +} + +// Discovery is introspect tables in sources to create schema. +func (m *Schema) Discovery() error { + m.mu.Lock() + defer m.mu.Unlock() + return m.refreshSchemaUnlocked() +} + // addChildSchema add a child schema to this one. Schemas can be tree-in-nature // with schema of multiple backend datasources being combined into parent Schema, but each // child has their own unique defined schema. -func (m *Schema) addChildSchema(child *Schema) { +func (m *Schema) addChildSchema(child *Schema) error { m.mu.Lock() defer m.mu.Unlock() m.schemas[child.Name] = child child.parent = m child.mu.RLock() defer child.mu.RUnlock() - for tableName, tbl := range child.tableMap { - m.tableSchemas[tableName] = child - m.tableMap[tableName] = tbl + for _, tbl := range child.tableMap { + if err := m.addTable(tbl); err != nil { + return err + } } + return nil } -/* -// AddSchemaForTable add table. 
-func (m *Schema) addSchemaForTable(tableName string, ss *Schema) { - m.mu.Lock() - defer m.mu.Unlock() - m.addschemaForTableUnlocked(tableName, ss) -} -*/ -func (m *Schema) refreshSchemaUnlocked() { - - m.lastRefreshed = time.Now() +func (m *Schema) refreshSchemaUnlocked() error { if m.DS != nil { for _, tableName := range m.DS.Tables() { //u.Debugf("%p:%s DS T:%T table name %s", m, m.Name, m.DS, tableName) - m.addschemaForTableUnlocked(tableName, m) + if err := m.loadTable(tableName); err != nil { + if tableName == "columns" { + continue + } + u.Errorf("Could not load table %q err=%v", tableName, err) + return err + } } } for _, ss := range m.schemas { //u.Infof("schema %p:%s", ss, ss.Name) - ss.refreshSchemaUnlocked() - for _, tableName := range ss.Tables() { - //tbl := ss.tableMap[tableName] - //u.Debugf("s:%p ss:%p add table name %s tbl:%#v", m, ss, tableName, tbl) - m.addschemaForTableUnlocked(tableName, ss) - } - } -} - -func (m *Schema) dropTable(tbl *Table) error { - - // u.Warnf("%p drop %s %v", m, m.Name, m.Tables()) - //u.Infof("infoschema %#v", m.InfoSchema) - - tl := make([]string, 0, len(m.tableNames)) - for _, tn := range m.tableNames { - if tbl.Name != tn { - tl = append(tl, tn) + if err := ss.refreshSchemaUnlocked(); err != nil { + u.Errorf("Could not load schema %q err=%v", ss.Name, err) + return err } - } - - ts := m.tableSchemas[tbl.Name] - if ts != nil { - if as, ok := ts.DS.(Alter); ok { - if err := as.DropTable(tbl.Name); err != nil { - u.Errorf("could not drop table %v err=%v", tbl.Name, err) + for tableName, tbl := range ss.tableMap { + //u.Debugf("s:%p ss:%p add table name %s tbl:%#v", m, ss, tableName, tbl) + if err := m.addTable(tbl); err != nil { + if tableName == "columns" { + continue + } + u.Errorf("Could not load table %q err=%v", tableName, err) return err } } } - - delete(m.tableMap, tbl.Name) - delete(m.tableSchemas, tbl.Name) - m.tableNames = tl - - if salter, ok := m.InfoSchema.DS.(Alter); ok { - err := salter.DropTable(tbl.Name) - if err != nil { - u.Warnf("err %v", err) - return err - } - } - return nil } func (m *Schema) addTable(tbl *Table) error { - // u.Debugf("schema:%p AddTable %#v", m, tbl) - - // create consistent-hash-id of this table name, and or table+schema - hash := fnv.New64() - hash.Write([]byte(tbl.Name)) - tbl.tblID = hash.Sum64() + //u.Infof("table P %p add table: %v partitionct:%v conf:%+v cols:%v", tbl, tbl.Name, tbl.PartitionCt, m.Conf, tbl.Columns()) - // Assign partitions - if m.Conf != nil && m.Conf.PartitionCt > 0 { - tbl.PartitionCt = uint32(m.Conf.PartitionCt) - } else if m.Conf != nil { - for _, pt := range m.Conf.Partitions { - if tbl.Name == pt.Table && tbl.Partition == nil { - tbl.Partition = pt - } - } + if err := tbl.init(m); err != nil { + u.Warnf("could not init table %v err=%v", tbl, err) + return err } - //u.Infof("add table: %v partitionct:%v conf:%+v", tbl.Name, tbl.PartitionCt, m.Conf) - tbl.init(m) - - m.tableMap[tbl.Name] = tbl - - m.addschemaForTableUnlocked(tbl.Name, tbl.Schema) - return nil -} - -func (m *Schema) addschemaForTableUnlocked(tableName string, ss *Schema) { - found := false - for _, curTableName := range m.tableNames { - if tableName == curTableName { - found = true - } - } - if !found { - // u.Debugf("%p:%s Schema addschemaForTableUnlocked %q ", m, m.Name, tableName) - m.tableNames = append(m.tableNames, tableName) + if _, exists := m.tableMap[tbl.Name]; !exists { + m.tableNames = append(m.tableNames, tbl.Name) sort.Strings(m.tableNames) - tbl := ss.tableMap[tableName] - if tbl == nil { - if 
err := m.loadTable(tableName); err != nil { - switch tableName { - case "columns": - // ignoreable errors - default: - u.Debugf("could not load table %v", err) - } - return - } else { - tbl = ss.tableMap[tableName] - } - } - if _, ok := m.tableMap[tableName]; !ok { - m.tableSchemas[tableName] = ss - m.tableMap[tableName] = tbl - } } + m.tableMap[tbl.Name] = tbl + m.tableSchemas[tbl.Name] = m + return nil } func (m *Schema) loadTable(tableName string) error { - // u.Infof("%p schema.%v loadTable(%q)", m, m.Name, tableName) + //u.Infof("%p schema.%v loadTable(%q)", m, m.Name, tableName) if m.DS == nil { + u.Warnf("no DS for %q", tableName) return nil } + // Getting table from Source will ensure the table-schema is fresh/good tbl, err := m.DS.Table(tableName) if err != nil { - if tableName == "tables" { - return err - } + //u.Warnf("could not get table %q", tableName) return err } if tbl == nil { + u.Warnf("empty table %q", tableName) return ErrNotFound } - tbl.Schema = m + //u.Warnf("DS T: %T table=%q tablePB: %#v", m.DS, tbl.Name, tbl.TablePb.Fieldpbs) + return m.addTable(tbl) +} - // Add partitions - if m.Conf != nil { - for _, tp := range m.Conf.Partitions { - if tp.Table == tableName { - tbl.Partition = tp +func (m *Schema) dropTable(tbl *Table) error { + + ts := m.tableSchemas[tbl.Name] + if ts != nil { + if as, ok := ts.DS.(Alter); ok { + if err := as.DropTable(tbl.Name); err != nil { + u.Errorf("could not drop table %v err=%v", tbl.Name, err) + return err } } } - m.tableMap[tbl.Name] = tbl - m.tableSchemas[tbl.Name] = m + delete(m.tableMap, tbl.Name) + delete(m.tableSchemas, tbl.Name) + tl := make([]string, 0, len(m.tableNames)) + for tn, _ := range m.tableMap { + tl = append(tl, tn) + } + m.tableNames = tl + sort.Strings(m.tableNames) + + if salter, ok := m.InfoSchema.DS.(Alter); ok { + err := salter.DropTable(tbl.Name) + if err != nil { + u.Warnf("err %v", err) + return err + } + } + return nil } @@ -480,11 +514,25 @@ func NewTable(table string) *Table { Fields: make([]*Field, 0), FieldMap: make(map[string]*Field), } - t.init(nil) return t } -func (m *Table) init(s *Schema) { +func (m *Table) init(s *Schema) error { m.Schema = s + if s == nil { + u.Warnf("No Schema for table %q?", m.Name) + return nil + } + // Assign partitions + if s.Conf != nil && s.Conf.PartitionCt > 0 { + m.PartitionCt = uint32(s.Conf.PartitionCt) + } else if s.Conf != nil { + for _, pt := range s.Conf.Partitions { + if m.Name == pt.Table && m.Partition == nil { + m.Partition = pt + } + } + } + return nil } // HasField does this table have given field/column? 
@@ -505,12 +553,6 @@ func (m *Table) FieldsAsMessages() []Message { return msgs } -// Id satisifieds Message Interface -func (m *Table) Id() uint64 { return m.tblID } - -// Body satisifies Message Interface -func (m *Table) Body() interface{} { return m } - // AddField register a new field func (m *Table) AddField(fld *Field) { found := false @@ -522,15 +564,20 @@ func (m *Table) AddField(fld *Field) { } } if !found { - fld.idx = uint64(len(m.Fields)) + fld.Position = uint64(len(m.Fields)) m.Fields = append(m.Fields, fld) + m.Fieldpbs = append(m.Fieldpbs, &fld.FieldPb) } m.FieldMap[fld.Name] = fld + // Fieldpbs + } // AddFieldType describe and register a new column func (m *Table) AddFieldType(name string, valType value.ValueType) { - m.AddField(&Field{FieldPb: FieldPb{Type: uint32(valType), Name: name}}) + // NewFieldBase(name string, valType value.ValueType, size int, desc string) + // &Field{FieldPb: FieldPb{Type: uint32(valType), Name: name}} + m.AddField(NewFieldBase(name, valType, 255, name)) } // Column get the Underlying data type. @@ -592,36 +639,498 @@ func (m *Table) SetRows(rows [][]driver.Value) { // FieldNamesPositions List of Field Names and ordinal position in Column list func (m *Table) FieldNamesPositions() map[string]int { return m.FieldPositions } -// Current Is this schema object current? ie, have we refreshed it from -// source since refresh interval. -func (m *Table) Current() bool { return m.Since(SchemaRefreshInterval) } - -// SetRefreshed update the refreshed date to now. -func (m *Table) SetRefreshed() { m.lastRefreshed = time.Now() } +// AddContext add key/value pairs to context (settings, metatadata). +func (m *Table) AddContext(key, value string) { + if len(m.Context) == 0 { + m.Context = make(map[string]string) + } + m.Context[key] = value +} -// Since Is this schema object within time window described by @dur time ago ? -func (m *Table) Since(dur time.Duration) bool { - if m.lastRefreshed.IsZero() { +// Equal deep equality check for Table. 
+func (m *Table) Equal(t *Table) bool { + if m == nil && t == nil { + u.Warnf("wtf1") + return true + } + if m == nil && t != nil { + u.Warnf("wtf2") return false } - if m.lastRefreshed.After(time.Now().Add(dur)) { - return true + if m != nil && t == nil { + u.Warnf("wtf3") + return false } - return false + if len(m.cols) != len(t.cols) { + u.Warnf("wtf4") + return false + } + for i, col := range m.cols { + if t.cols[i] != col { + u.Warnf("wtf4b") + return false + } + } + if (m.Source != nil && t.Source == nil) || (m.Source == nil && t.Source != nil) { + if fmt.Sprintf("%T", m.Source) != fmt.Sprintf("%T", t.Source) { + u.Warnf("wtf5 source type") + } + u.Warnf("wtf5") + return false + } + /* + Table struct { + TablePb + Fields []*Field // List of Fields, in order + FieldPositions map[string]int // Maps name of column to ordinal position in array of []driver.Value's + FieldMap map[string]*Field // Map of Field-name -> Field + Schema *Schema // The schema this is member of + Source Source // The source + cols []string // array of column names + rows [][]driver.Value + } + */ + if len(m.Fields) != len(t.Fields) { + u.Warnf("wtf8") + return false + } + for i, f := range m.Fields { + if !f.Equal(t.Fields[i]) { + u.Warnf("wtf8b") + return false + } + } + if len(m.FieldPositions) != len(t.FieldPositions) { + u.Warnf("wtf9") + return false + } + for k, v := range m.FieldPositions { + if t.FieldPositions[k] != v { + u.Warnf("wtf9b") + return false + } + } + if len(m.FieldMap) != len(t.FieldMap) { + u.Warnf("wtf10") + return false + } + if !m.TablePb.Equal(&t.TablePb) { + return false + } + return true } -// AddContext add key/value pairs to context (settings, metatadata). -func (m *Table) AddContext(key string, value interface{}) { - if len(m.Context) == 0 { - m.Context = make(map[string]interface{}) +// Equal deep equality check for TablePb. 
+func (m *TablePb) Equal(t *TablePb) bool { + if m == nil && t == nil { + u.Warnf("wtf1") + return true } - m.Context[key] = value + if m == nil && t != nil { + u.Warnf("wtf2") + return false + } + if m != nil && t == nil { + u.Warnf("wtf3") + return false + } + if m.Name != t.Name { + u.Warnf("name %q != %q", m.Name, t.Name) + return false + } + if m.NameOriginal != t.NameOriginal { + u.Warnf("NameOriginal %q != %q", m.NameOriginal, t.NameOriginal) + return false + } + if m.Parent != t.Parent { + u.Warnf("Parent %q != %q", m.Parent, t.Parent) + return false + } + if m.Charset != t.Charset { + u.Warnf("Charset %q != %q", m.Charset, t.Charset) + return false + } + if !m.Partition.Equal(t.Partition) { + u.Warnf("partion") + return false + } + if m.Charset != t.Charset { + u.Warnf("Charset %q != %q", m.Charset, t.Charset) + return false + } + if m.PartitionCt != t.PartitionCt { + u.Warnf("PartitionCt %q != %q", m.PartitionCt, t.PartitionCt) + return false + } + if len(m.Indexes) != len(t.Indexes) { + return false + } + for i, idx := range m.Indexes { + if !idx.Equal(t.Indexes[i]) { + return false + } + } + if len(m.Context) != len(t.Context) { + return false + } + for k, mv := range m.Context { + if tv, ok := t.Context[k]; !ok || mv != tv { + return false + } + } + /* + type TablePb struct { + // Name of table lowercased + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Name of table (not lowercased) + NameOriginal string `protobuf:"bytes,2,opt,name=nameOriginal,proto3" json:"nameOriginal,omitempty"` + // some dbs are more hiearchical (table-column-family) + Parent string `protobuf:"bytes,3,opt,name=parent,proto3" json:"parent,omitempty"` + // Character set, default = utf8 + Charset uint32 `protobuf:"varint,4,opt,name=charset,proto3" json:"charset,omitempty"` + // Partitions in this table, optional may be empty + Partition *TablePartition `protobuf:"bytes,5,opt,name=partition" json:"partition,omitempty"` + // Partition Count + PartitionCt uint32 `protobuf:"varint,6,opt,name=PartitionCt,proto3" json:"PartitionCt,omitempty"` + // List of indexes for this table + Indexes []*Index `protobuf:"bytes,7,rep,name=indexes" json:"indexes,omitempty"` + // context is additional arbitrary map values + Context map[string]string `protobuf:"bytes,8,rep,name=context" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // List of Fields, in order + Fieldpbs []*FieldPb `protobuf:"bytes,9,rep,name=fieldpbs" json:"fieldpbs,omitempty"` + } + */ + if len(m.Fieldpbs) != len(t.Fieldpbs) { + u.Warnf("Fieldpbs") + return false + } + for i, f := range m.Fieldpbs { + if !f.Equal(t.Fieldpbs[i]) { + return false + } + } + return true } +// Marshal this Table as protobuf func (m *Table) Marshal() ([]byte, error) { + sourceType := "" + if m.Source != nil { + sourceType = m.Source.Type() + } + u.Warnf("Table.Marshal() %q source.Type=%q fieldpbct=%v cols=%v", m.Name, sourceType, len(m.TablePb.Fieldpbs), m.cols) return proto.Marshal(&m.TablePb) } +// Unmarshal this protbuf bytes into a Table +func (m *Table) Unmarshal(data []byte) error { + if err := proto.Unmarshal(data, &m.TablePb); err != nil { + return err + } + return m.initPb() +} + +func (m *Table) initPb() error { + /* + Table struct { + TablePb + Fields []*Field // List of Fields, in order + FieldPositions map[string]int // Maps name of column to ordinal position in array of []driver.Value's + FieldMap map[string]*Field // Map of Field-name -> Field + Schema *Schema // The 
schema this is member of + Source Source // The source + cols []string // array of column names + rows [][]driver.Value + } + */ + m.cols = make([]string, len(m.Fieldpbs)) + m.Fields = make([]*Field, len(m.Fieldpbs)) + m.FieldPositions = make(map[string]int, len(m.Fieldpbs)) + m.FieldMap = make(map[string]*Field, len(m.Fieldpbs)) + u.Warnf("initpb unmarshal %v", len(m.Fieldpbs)) + for i, f := range m.Fieldpbs { + m.Fields[i] = &Field{FieldPb: *f} + m.FieldPositions[f.Name] = int(f.Position) + m.FieldMap[f.Name] = m.Fields[i] + m.cols[int(f.Position)] = f.Name + } + + return nil +} +func (m *Table) initSchema(s *Schema) error { + /* + Table struct { + TablePb + Fields []*Field // List of Fields, in order + FieldPositions map[string]int // Maps name of column to ordinal position in array of []driver.Value's + FieldMap map[string]*Field // Map of Field-name -> Field + Schema *Schema // The schema this is member of + Source Source // The source + cols []string // array of column names + rows [][]driver.Value + } + */ + return nil +} + +/* +type TablePartition struct { + Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` + Keys []string `protobuf:"bytes,2,rep,name=keys" json:"keys,omitempty"` + Partitions []*Partition `protobuf:"bytes,3,rep,name=partitions" json:"partitions,omitempty"` +} +*/ +// Equal deep equality check for TablePartition. +func (m *TablePartition) Equal(t *TablePartition) bool { + if m == nil && t == nil { + return true + } + if m == nil && t != nil { + u.Warnf("wtf2") + return false + } + if m != nil && t == nil { + u.Warnf("wtf3") + return false + } + if m.Table != t.Table { + u.Warnf("Table %q != %q", m.Table, t.Table) + return false + } + if len(m.Keys) != len(t.Keys) { + u.Warnf("Keys") + return false + } + for i, k := range m.Keys { + if t.Keys[i] != k { + u.Warnf("keys %d != %v", i, k) + return false + } + } + if len(m.Partitions) != len(t.Partitions) { + return false + } + for i, p := range m.Partitions { + if !p.Equal(t.Partitions[i]) { + return false + } + } + return true +} + +/* +type Partition struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Left string `protobuf:"bytes,2,opt,name=left,proto3" json:"left,omitempty"` + Right string `protobuf:"bytes,3,opt,name=right,proto3" json:"right,omitempty"` +} +*/ +// Equal deep equality check for Partition. +func (m *Partition) Equal(t *Partition) bool { + if m == nil && t == nil { + return true + } + if m == nil && t != nil { + u.Warnf("wtf2") + return false + } + if m != nil && t == nil { + u.Warnf("wtf3") + return false + } + if m.Id != t.Id { + u.Warnf("Id %q != %q", m.Id, t.Id) + return false + } + if m.Left != t.Left { + u.Warnf("Left %q != %q", m.Left, t.Left) + return false + } + if m.Right != t.Right { + u.Warnf("Right %q != %q", m.Right, t.Right) + return false + } + return true +} + +/* +type Index struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Fields []string `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"` + PrimaryKey bool `protobuf:"varint,3,opt,name=primaryKey,proto3" json:"primaryKey,omitempty"` + HashPartition []string `protobuf:"bytes,4,rep,name=hashPartition" json:"hashPartition,omitempty"` + PartitionSize int32 `protobuf:"varint,5,opt,name=partitionSize,proto3" json:"partitionSize,omitempty"` +} +*/ +// Equal deep equality check for Partition. 
+func (m *Index) Equal(t *Index) bool { + if m == nil && t == nil { + return true + } + if m == nil && t != nil { + u.Warnf("wtf2") + return false + } + if m != nil && t == nil { + u.Warnf("wtf3") + return false + } + if m.Name != t.Name { + u.Warnf("Name %q != %q", m.Name, t.Name) + return false + } + if m.PrimaryKey != t.PrimaryKey { + u.Warnf("PrimaryKey %v != %v", m.PrimaryKey, t.PrimaryKey) + return false + } + if m.PartitionSize != t.PartitionSize { + u.Warnf("PartitionSize %v != %v", m.PartitionSize, t.PartitionSize) + return false + } + if len(m.Fields) != len(t.Fields) { + u.Warnf("Fields") + return false + } + for i, k := range m.Fields { + if t.Fields[i] != k { + u.Warnf("Fields %d != %v", i, k) + return false + } + } + if len(m.HashPartition) != len(t.HashPartition) { + u.Warnf("HashPartition") + return false + } + for i, k := range m.HashPartition { + if t.HashPartition[i] != k { + u.Warnf("HashPartition %d != %v", i, k) + return false + } + } + return true +} + +func (m *FieldPb) Equal(f *FieldPb) bool { + /* + type FieldPb struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Extra string `protobuf:"bytes,4,opt,name=extra,proto3" json:"extra,omitempty"` + Data string `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` + Length uint32 `protobuf:"varint,6,opt,name=length,proto3" json:"length,omitempty"` + Type uint32 `protobuf:"varint,7,opt,name=type,proto3" json:"type,omitempty"` + NativeType uint32 `protobuf:"varint,8,opt,name=nativeType,proto3" json:"nativeType,omitempty"` + DefLength uint64 `protobuf:"varint,9,opt,name=defLength,proto3" json:"defLength,omitempty"` + DefVal []byte `protobuf:"bytes,11,opt,name=defVal,proto3" json:"defVal,omitempty"` + Indexed bool `protobuf:"varint,13,opt,name=indexed,proto3" json:"indexed,omitempty"` + NoNulls bool `protobuf:"varint,14,opt,name=noNulls,proto3" json:"noNulls,omitempty"` + Collation string `protobuf:"bytes,15,opt,name=collation,proto3" json:"collation,omitempty"` + Roles []string `protobuf:"bytes,16,rep,name=roles" json:"roles,omitempty"` + Indexes []*Index `protobuf:"bytes,17,rep,name=indexes" json:"indexes,omitempty"` + // context is additional arbitrary map values + Context map[string]string `protobuf:"bytes,18,rep,name=context" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Position uint64 `protobuf:"varint,19,opt,name=position,proto3" json:"position,omitempty"` + } + */ + if m == nil && f == nil { + u.Warnf("wtf nil fields?") + return true + } + if m == nil && f != nil { + u.Warnf("wtf2") + return false + } + if m != nil && f == nil { + u.Warnf("wtf3") + return false + } + if m.Name != f.Name { + u.Warnf("name %q != %q", m.Name, f.Name) + return false + } + if m.Description != f.Description { + u.Warnf("Description") + return false + } + if m.Key != f.Key { + u.Warnf("Key") + return false + } + if m.Extra != f.Extra { + u.Warnf("Type") + return false + } + if m.Data != f.Data { + u.Warnf("Data") + return false + } + if m.Length != f.Length { + u.Warnf("Length") + return false + } + if m.Type != f.Type { + u.Warnf("Type") + return false + } + if m.NativeType != f.NativeType { + u.Warnf("NativeType") + return false + } + if m.DefLength != f.DefLength { + u.Warnf("DefLength") + return false + } + if !bytes.Equal(m.DefVal, f.DefVal) { + 
u.Warnf("DefVal") + return false + } + if m.Indexed != f.Indexed { + u.Warnf("Indexed") + return false + } + if m.NoNulls != f.NoNulls { + u.Warnf("NoNulls") + return false + } + if m.Collation != f.Collation { + u.Warnf("Collation") + return false + } + if len(m.Roles) != len(f.Roles) { + u.Warnf("Roles") + return false + } + for i, k := range m.Roles { + if f.Roles[i] != k { + u.Warnf("Roles %d != %v", i, k) + return false + } + } + if len(m.Indexes) != len(f.Indexes) { + return false + } + for i, idx := range m.Indexes { + if !idx.Equal(f.Indexes[i]) { + return false + } + } + if len(m.Context) != len(f.Context) { + return false + } + for k, mv := range m.Context { + if fv, ok := f.Context[k]; !ok || mv != fv { + return false + } + } + if m.Position != f.Position { + u.Warnf("Position") + return false + } + return true +} + +// NewFieldBase create a new field with base attributes. func NewFieldBase(name string, valType value.ValueType, size int, desc string) *Field { f := FieldPb{ Name: name, @@ -632,6 +1141,8 @@ func NewFieldBase(name string, valType value.ValueType, size int, desc string) * } return &Field{FieldPb: f} } + +// NewField creates new field with more attributes. func NewField(name string, valType value.ValueType, size int, allowNulls bool, defaultVal driver.Value, key, collation, description string) *Field { jb, _ := json.Marshal(defaultVal) f := FieldPb{ @@ -651,7 +1162,7 @@ func NewField(name string, valType value.ValueType, size int, allowNulls bool, d } } func (m *Field) ValueType() value.ValueType { return value.ValueType(m.Type) } -func (m *Field) Id() uint64 { return m.idx } +func (m *Field) Id() uint64 { return m.Position } func (m *Field) Body() interface{} { return m } func (m *Field) AsRow() []driver.Value { if len(m.row) > 0 { @@ -670,12 +1181,40 @@ func (m *Field) AsRow() []driver.Value { m.row[8] = m.Description // should we put native type in here? return m.row } -func (m *Field) AddContext(key string, value interface{}) { +func (m *Field) AddContext(key, value string) { if len(m.Context) == 0 { - m.Context = make(map[string]interface{}) + m.Context = make(map[string]string) } m.Context[key] = value } +func (m *Field) Equal(f *Field) bool { + if m == nil && f == nil { + u.Warnf("wtf1") + return true + } + if m == nil && f != nil { + u.Warnf("wtf2") + return false + } + if m != nil && f == nil { + u.Warnf("wtf3") + return false + } + if !m.FieldPb.Equal(&f.FieldPb) { + return false + } + return true +} +func (m *Field) Marshal() ([]byte, error) { + return proto.Marshal(&m.FieldPb) +} +func (m *Field) Unmarshal(data []byte) error { + err := proto.Unmarshal(data, &m.FieldPb) + if err != nil { + return err + } + return nil +} func (m *Field) String() string { return fmt.Sprintf("%s type=%s", m.Name, value.ValueType(m.Type).String()) } @@ -708,11 +1247,7 @@ func NewDescribeHeaders() []*Field { func NewSourceConfig(name, sourceType string) *ConfigSource { return &ConfigSource{ - Name: name, - SourceType: sourceType, + Name: name, + Type: sourceType, } } - -func (m *ConfigSource) String() string { - return fmt.Sprintf(``, m.Name, m.SourceType, m.Settings) -} diff --git a/schema/schema.pb.go b/schema/schema.pb.go index b0481a01..bb883dd4 100644 --- a/schema/schema.pb.go +++ b/schema/schema.pb.go @@ -1,24 +1,32 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: schema.proto /* -Package schema is a generated protocol buffer package. + Package schema is a generated protocol buffer package. 
-It is generated from these files: - schema.proto + It is generated from these files: + schema.proto -It has these top-level messages: - TablePartition - Partition - TablePb - FieldPb - Index + It has these top-level messages: + Command + SchemaPb + TablePartition + Partition + TablePb + FieldPb + Index + ConfigSchema + ConfigSource + ConfigNode */ package schema import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -31,12 +39,140 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +type Command_Operation int32 + +const ( + Command_Unknown Command_Operation = 0 + Command_AddUpdate Command_Operation = 1 + Command_Drop Command_Operation = 2 +) + +var Command_Operation_name = map[int32]string{ + 0: "Unknown", + 1: "AddUpdate", + 2: "Drop", +} +var Command_Operation_value = map[string]int32{ + "Unknown": 0, + "AddUpdate": 1, + "Drop": 2, +} + +func (x Command_Operation) String() string { + return proto.EnumName(Command_Operation_name, int32(x)) +} +func (Command_Operation) EnumDescriptor() ([]byte, []int) { return fileDescriptorSchema, []int{0, 0} } + +// Command defines a Schema Replication command/message such as +// Drop, Alter, Create-Schema, etc. Used to replicate schema changes +// across servers. +type Command struct { + Op Command_Operation `protobuf:"varint,1,opt,name=op,proto3,enum=schema.Command_Operation" json:"op,omitempty"` + Origin string `protobuf:"bytes,2,opt,name=origin,proto3" json:"origin,omitempty"` + Schema string `protobuf:"bytes,3,opt,name=schema,proto3" json:"schema,omitempty"` + Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` + Index uint64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` + Ts int64 `protobuf:"varint,6,opt,name=ts,proto3" json:"ts,omitempty"` + Msg []byte `protobuf:"bytes,7,opt,name=msg,proto3" json:"msg,omitempty"` +} + +func (m *Command) Reset() { *m = Command{} } +func (m *Command) String() string { return proto.CompactTextString(m) } +func (*Command) ProtoMessage() {} +func (*Command) Descriptor() ([]byte, []int) { return fileDescriptorSchema, []int{0} } + +func (m *Command) GetOp() Command_Operation { + if m != nil { + return m.Op + } + return Command_Unknown +} + +func (m *Command) GetOrigin() string { + if m != nil { + return m.Origin + } + return "" +} + +func (m *Command) GetSchema() string { + if m != nil { + return m.Schema + } + return "" +} + +func (m *Command) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Command) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *Command) GetTs() int64 { + if m != nil { + return m.Ts + } + return 0 +} + +func (m *Command) GetMsg() []byte { + if m != nil { + return m.Msg + } + return nil +} + +// SchemaPb defines the fields that define schema attributes, and +// can be serialized. 
+type SchemaPb struct { + // Name of schema lowercased + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Tables is map of tables + Tables map[string]*TablePb `protobuf:"bytes,2,rep,name=tables" json:"tables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + // source configuration + Conf *ConfigSource `protobuf:"bytes,3,opt,name=conf" json:"conf,omitempty"` +} + +func (m *SchemaPb) Reset() { *m = SchemaPb{} } +func (m *SchemaPb) String() string { return proto.CompactTextString(m) } +func (*SchemaPb) ProtoMessage() {} +func (*SchemaPb) Descriptor() ([]byte, []int) { return fileDescriptorSchema, []int{1} } + +func (m *SchemaPb) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SchemaPb) GetTables() map[string]*TablePb { + if m != nil { + return m.Tables + } + return nil +} + +func (m *SchemaPb) GetConf() *ConfigSource { + if m != nil { + return m.Conf + } + return nil +} + // Partition describes a range of data (in a Table). // left-key is contained in this partition // right key is not contained in this partition, in the next partition. // So any value >= left-key, and < right-key is contained herein. type TablePartition struct { - Table string `protobuf:"bytes,1,opt,name=table" json:"table,omitempty"` + Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` Keys []string `protobuf:"bytes,2,rep,name=keys" json:"keys,omitempty"` Partitions []*Partition `protobuf:"bytes,3,rep,name=partitions" json:"partitions,omitempty"` } @@ -44,7 +180,7 @@ type TablePartition struct { func (m *TablePartition) Reset() { *m = TablePartition{} } func (m *TablePartition) String() string { return proto.CompactTextString(m) } func (*TablePartition) ProtoMessage() {} -func (*TablePartition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*TablePartition) Descriptor() ([]byte, []int) { return fileDescriptorSchema, []int{2} } func (m *TablePartition) GetTable() string { if m != nil { @@ -71,15 +207,15 @@ func (m *TablePartition) GetPartitions() []*Partition { // the left-key is contained in this partition // the right key is not contained in this partition, in the next one type Partition struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Left string `protobuf:"bytes,2,opt,name=left" json:"left,omitempty"` - Right string `protobuf:"bytes,3,opt,name=right" json:"right,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Left string `protobuf:"bytes,2,opt,name=left,proto3" json:"left,omitempty"` + Right string `protobuf:"bytes,3,opt,name=right,proto3" json:"right,omitempty"` } func (m *Partition) Reset() { *m = Partition{} } func (m *Partition) String() string { return proto.CompactTextString(m) } func (*Partition) ProtoMessage() {} -func (*Partition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*Partition) Descriptor() ([]byte, []int) { return fileDescriptorSchema, []int{3} } func (m *Partition) GetId() string { if m != nil { @@ -102,23 +238,25 @@ func (m *Partition) GetRight() string { return "" } +// TablePb defines the fields that define table attributes, and +// can be serialized. 
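// Example (illustrative sketch, not part of this patch): a Table embeds a
// TablePb, so it can be proto-encoded and rebuilt; Unmarshal repopulates the
// derived Fields/FieldMap/column slices via initPb, and Equal gives a deep
// compare of the result:
//
//	buf, err := tbl.Marshal()
//	if err == nil {
//		t2 := &Table{}
//		_ = t2.Unmarshal(buf)
//		_ = tbl.Equal(t2)
//	}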
type TablePb struct { // Name of table lowercased - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Name of table (not lowercased) - NameOriginal string `protobuf:"bytes,2,opt,name=nameOriginal" json:"nameOriginal,omitempty"` + NameOriginal string `protobuf:"bytes,2,opt,name=nameOriginal,proto3" json:"nameOriginal,omitempty"` // some dbs are more hiearchical (table-column-family) - Parent string `protobuf:"bytes,3,opt,name=parent" json:"parent,omitempty"` + Parent string `protobuf:"bytes,3,opt,name=parent,proto3" json:"parent,omitempty"` // Character set, default = utf8 - Charset uint32 `protobuf:"varint,4,opt,name=Charset" json:"Charset,omitempty"` + Charset uint32 `protobuf:"varint,4,opt,name=charset,proto3" json:"charset,omitempty"` // Partitions in this table, optional may be empty Partition *TablePartition `protobuf:"bytes,5,opt,name=partition" json:"partition,omitempty"` // Partition Count - PartitionCt uint32 `protobuf:"varint,6,opt,name=PartitionCt" json:"PartitionCt,omitempty"` + PartitionCt uint32 `protobuf:"varint,6,opt,name=PartitionCt,proto3" json:"PartitionCt,omitempty"` // List of indexes for this table Indexes []*Index `protobuf:"bytes,7,rep,name=indexes" json:"indexes,omitempty"` - // context json bytes - ContextJson []byte `protobuf:"bytes,8,opt,name=contextJson,proto3" json:"contextJson,omitempty"` + // context is additional arbitrary map values + Context map[string]string `protobuf:"bytes,8,rep,name=context" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // List of Fields, in order Fieldpbs []*FieldPb `protobuf:"bytes,9,rep,name=fieldpbs" json:"fieldpbs,omitempty"` } @@ -126,7 +264,7 @@ type TablePb struct { func (m *TablePb) Reset() { *m = TablePb{} } func (m *TablePb) String() string { return proto.CompactTextString(m) } func (*TablePb) ProtoMessage() {} -func (*TablePb) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*TablePb) Descriptor() ([]byte, []int) { return fileDescriptorSchema, []int{4} } func (m *TablePb) GetName() string { if m != nil { @@ -177,9 +315,9 @@ func (m *TablePb) GetIndexes() []*Index { return nil } -func (m *TablePb) GetContextJson() []byte { +func (m *TablePb) GetContext() map[string]string { if m != nil { - return m.ContextJson + return m.Context } return nil } @@ -191,29 +329,33 @@ func (m *TablePb) GetFieldpbs() []*FieldPb { return nil } +// FieldPb defines attributes of a field/column that can +// be serialized and transported. 
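// Example (illustrative sketch, not part of this patch): fields are normally
// built through the schema package constructors rather than struct literals:
//
//	f := NewFieldBase("user_id", value.StringType, 255, "user id")
//	tbl.AddField(f)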
type FieldPb struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - Key string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"` - Extra string `protobuf:"bytes,4,opt,name=extra" json:"extra,omitempty"` - Data string `protobuf:"bytes,5,opt,name=data" json:"data,omitempty"` - Length uint32 `protobuf:"varint,6,opt,name=length" json:"length,omitempty"` - Type uint32 `protobuf:"varint,7,opt,name=type" json:"type,omitempty"` - NativeType uint32 `protobuf:"varint,8,opt,name=nativeType" json:"nativeType,omitempty"` - DefLength uint64 `protobuf:"varint,9,opt,name=defLength" json:"defLength,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Extra string `protobuf:"bytes,4,opt,name=extra,proto3" json:"extra,omitempty"` + Data string `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` + Length uint32 `protobuf:"varint,6,opt,name=length,proto3" json:"length,omitempty"` + Type uint32 `protobuf:"varint,7,opt,name=type,proto3" json:"type,omitempty"` + NativeType uint32 `protobuf:"varint,8,opt,name=nativeType,proto3" json:"nativeType,omitempty"` + DefLength uint64 `protobuf:"varint,9,opt,name=defLength,proto3" json:"defLength,omitempty"` DefVal []byte `protobuf:"bytes,11,opt,name=defVal,proto3" json:"defVal,omitempty"` - Indexed bool `protobuf:"varint,13,opt,name=indexed" json:"indexed,omitempty"` - NoNulls bool `protobuf:"varint,14,opt,name=noNulls" json:"noNulls,omitempty"` - Collation string `protobuf:"bytes,15,opt,name=collation" json:"collation,omitempty"` + Indexed bool `protobuf:"varint,13,opt,name=indexed,proto3" json:"indexed,omitempty"` + NoNulls bool `protobuf:"varint,14,opt,name=noNulls,proto3" json:"noNulls,omitempty"` + Collation string `protobuf:"bytes,15,opt,name=collation,proto3" json:"collation,omitempty"` Roles []string `protobuf:"bytes,16,rep,name=roles" json:"roles,omitempty"` Indexes []*Index `protobuf:"bytes,17,rep,name=indexes" json:"indexes,omitempty"` - ContextJson []byte `protobuf:"bytes,18,opt,name=contextJson,proto3" json:"contextJson,omitempty"` + // context is additional arbitrary map values + Context map[string]string `protobuf:"bytes,18,rep,name=context" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Position uint64 `protobuf:"varint,19,opt,name=position,proto3" json:"position,omitempty"` } func (m *FieldPb) Reset() { *m = FieldPb{} } func (m *FieldPb) String() string { return proto.CompactTextString(m) } func (*FieldPb) ProtoMessage() {} -func (*FieldPb) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*FieldPb) Descriptor() ([]byte, []int) { return fileDescriptorSchema, []int{5} } func (m *FieldPb) GetName() string { if m != nil { @@ -320,26 +462,33 @@ func (m *FieldPb) GetIndexes() []*Index { return nil } -func (m *FieldPb) GetContextJson() []byte { +func (m *FieldPb) GetContext() map[string]string { if m != nil { - return m.ContextJson + return m.Context } return nil } +func (m *FieldPb) GetPosition() uint64 { + if m != nil { + return m.Position + } + return 0 +} + // Index a description of how field(s) should be indexed for a table. 
type Index struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Fields []string `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"` - PrimaryKey bool `protobuf:"varint,3,opt,name=primaryKey" json:"primaryKey,omitempty"` + PrimaryKey bool `protobuf:"varint,3,opt,name=primaryKey,proto3" json:"primaryKey,omitempty"` HashPartition []string `protobuf:"bytes,4,rep,name=hashPartition" json:"hashPartition,omitempty"` - PartitionSize int32 `protobuf:"varint,5,opt,name=partitionSize" json:"partitionSize,omitempty"` + PartitionSize int32 `protobuf:"varint,5,opt,name=partitionSize,proto3" json:"partitionSize,omitempty"` } func (m *Index) Reset() { *m = Index{} } func (m *Index) String() string { return proto.CompactTextString(m) } func (*Index) ProtoMessage() {} -func (*Index) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*Index) Descriptor() ([]byte, []int) { return fileDescriptorSchema, []int{6} } func (m *Index) GetName() string { if m != nil { @@ -376,51 +525,4168 @@ func (m *Index) GetPartitionSize() int32 { return 0 } +// ConfigSchema is the config block for Schema, the data-sources +// that make up this Virtual Schema. Must have a name and list +// of sources to include. +type ConfigSchema struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Sources []string `protobuf:"bytes,2,rep,name=sources" json:"sources,omitempty"` +} + +func (m *ConfigSchema) Reset() { *m = ConfigSchema{} } +func (m *ConfigSchema) String() string { return proto.CompactTextString(m) } +func (*ConfigSchema) ProtoMessage() {} +func (*ConfigSchema) Descriptor() ([]byte, []int) { return fileDescriptorSchema, []int{7} } + +func (m *ConfigSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ConfigSchema) GetSources() []string { + if m != nil { + return m.Sources + } + return nil +} + +// ConfigSource are backend datasources ie : storage/database/csvfiles +// Each represents a single source type/config. May belong to more +// than one schema. +type ConfigSource struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Schema string `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + // TablesToLoad If not all tables from are going to be loaded. 
+ TablesToLoad []string `protobuf:"bytes,4,rep,name=TablesToLoad" json:"tables_to_load"` + // convert underlying table names to friendly ones + TableAliases map[string]string `protobuf:"bytes,5,rep,name=tableAliases" json:"table_aliases" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Hosts []string `protobuf:"bytes,7,rep,name=hosts" json:"hosts,omitempty"` + Settings map[string]string `protobuf:"bytes,8,rep,name=settings" json:"settings,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Partitions []*TablePartition `protobuf:"bytes,9,rep,name=partitions" json:"partitions,omitempty"` + PartitionCt uint32 `protobuf:"varint,10,opt,name=partitionCt,proto3" json:"partition_count"` +} + +func (m *ConfigSource) Reset() { *m = ConfigSource{} } +func (m *ConfigSource) String() string { return proto.CompactTextString(m) } +func (*ConfigSource) ProtoMessage() {} +func (*ConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorSchema, []int{8} } + +func (m *ConfigSource) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ConfigSource) GetSchema() string { + if m != nil { + return m.Schema + } + return "" +} + +func (m *ConfigSource) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ConfigSource) GetTablesToLoad() []string { + if m != nil { + return m.TablesToLoad + } + return nil +} + +func (m *ConfigSource) GetTableAliases() map[string]string { + if m != nil { + return m.TableAliases + } + return nil +} + +func (m *ConfigSource) GetHosts() []string { + if m != nil { + return m.Hosts + } + return nil +} + +func (m *ConfigSource) GetSettings() map[string]string { + if m != nil { + return m.Settings + } + return nil +} + +func (m *ConfigSource) GetPartitions() []*TablePartition { + if m != nil { + return m.Partitions + } + return nil +} + +func (m *ConfigSource) GetPartitionCt() uint32 { + if m != nil { + return m.PartitionCt + } + return 0 +} + +// ConfigNode are Servers/Services, ie a running instance of said Source +// - each must represent a single source type +// - normal use is a server, describing partitions of servers +// - may have arbitrary config info in Settings. 
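// Example (illustrative sketch, not part of this patch; values are hypothetical):
//
//	node := &ConfigNode{
//		Name:    "es-node-1",
//		Source:  "es",
//		Address: "http://localhost:9200",
//		Context: map[string]string{"zone": "us-east-1"},
//	}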
+type ConfigNode struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + Context map[string]string `protobuf:"bytes,4,rep,name=context" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *ConfigNode) Reset() { *m = ConfigNode{} } +func (m *ConfigNode) String() string { return proto.CompactTextString(m) } +func (*ConfigNode) ProtoMessage() {} +func (*ConfigNode) Descriptor() ([]byte, []int) { return fileDescriptorSchema, []int{9} } + +func (m *ConfigNode) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ConfigNode) GetSource() string { + if m != nil { + return m.Source + } + return "" +} + +func (m *ConfigNode) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *ConfigNode) GetContext() map[string]string { + if m != nil { + return m.Context + } + return nil +} + func init() { + proto.RegisterType((*Command)(nil), "schema.Command") + proto.RegisterType((*SchemaPb)(nil), "schema.SchemaPb") proto.RegisterType((*TablePartition)(nil), "schema.TablePartition") proto.RegisterType((*Partition)(nil), "schema.Partition") proto.RegisterType((*TablePb)(nil), "schema.TablePb") proto.RegisterType((*FieldPb)(nil), "schema.FieldPb") proto.RegisterType((*Index)(nil), "schema.Index") + proto.RegisterType((*ConfigSchema)(nil), "schema.ConfigSchema") + proto.RegisterType((*ConfigSource)(nil), "schema.ConfigSource") + proto.RegisterType((*ConfigNode)(nil), "schema.ConfigNode") + proto.RegisterEnum("schema.Command_Operation", Command_Operation_name, Command_Operation_value) +} +func (m *Command) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Command) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Op != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.Op)) + } + if len(m.Origin) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Origin))) + i += copy(dAtA[i:], m.Origin) + } + if len(m.Schema) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Schema))) + i += copy(dAtA[i:], m.Schema) + } + if len(m.Type) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if m.Index != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.Index)) + } + if m.Ts != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.Ts)) + } + if len(m.Msg) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Msg))) + i += copy(dAtA[i:], m.Msg) + } + return i, nil +} + +func (m *SchemaPb) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchemaPb) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Tables) > 0 { + for k, _ := range m.Tables { + dAtA[i] = 0x12 + i++ + v := m.Tables[k] + msgSize := 0 + if v != nil { + msgSize = 
v.Size() + msgSize += 1 + sovSchema(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovSchema(uint64(len(k))) + msgSize + i = encodeVarintSchema(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchema(dAtA, i, uint64(v.Size())) + n1, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + } + } + if m.Conf != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.Conf.Size())) + n2, err := m.Conf.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *TablePartition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func init() { proto.RegisterFile("schema.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 546 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x5b, 0x8a, 0xdb, 0x30, - 0x14, 0xc5, 0x71, 0x12, 0xc7, 0x37, 0x8f, 0x99, 0x11, 0x25, 0xe8, 0xa3, 0x14, 0x63, 0x0a, 0x35, - 0x14, 0x06, 0x3a, 0xed, 0x0e, 0x86, 0x16, 0xfa, 0xa0, 0x1d, 0xd4, 0xa1, 0xff, 0x72, 0xac, 0xc4, - 0x62, 0x14, 0xdb, 0x58, 0x6a, 0x49, 0xba, 0x99, 0x6e, 0xa1, 0x7b, 0xe8, 0xc6, 0x8a, 0xae, 0x25, - 0xc7, 0xa1, 0xf3, 0xd3, 0xaf, 0xdc, 0x73, 0x74, 0x75, 0xae, 0x74, 0x8e, 0x1c, 0x58, 0xe8, 0x4d, - 0x29, 0xf6, 0xfc, 0xba, 0x69, 0x6b, 0x53, 0x93, 0x69, 0x87, 0xd2, 0x3d, 0xac, 0xee, 0x79, 0xae, - 0xc4, 0x1d, 0x6f, 0x8d, 0x34, 0xb2, 0xae, 0xc8, 0x13, 0x98, 0x18, 0xcb, 0xd0, 0x20, 0x09, 0xb2, - 0x98, 0x75, 0x80, 0x10, 0x18, 0x3f, 0x88, 0xa3, 0xa6, 0xa3, 0x24, 0xcc, 0x62, 0x86, 0x35, 0x79, - 0x05, 0xd0, 0xf8, 0x6d, 0x9a, 0x86, 0x49, 0x98, 0xcd, 0x6f, 0xae, 0xae, 0xdd, 0x98, 0x5e, 0x90, - 0x0d, 0x9a, 0xd2, 0xb7, 0x10, 0x9f, 0x26, 0xad, 0x60, 0x24, 0x0b, 0x37, 0x66, 0x24, 0x0b, 0x3b, - 0x43, 0x89, 0xad, 0xa1, 0x23, 0x64, 0xb0, 0xb6, 0xa7, 0x69, 0xe5, 0xae, 0x34, 0x34, 0xec, 0x4e, - 0x83, 0x20, 0xfd, 0x33, 0x82, 0xa8, 0x3b, 0x76, 0x6e, 0x77, 0x55, 0x7c, 0xef, 0x8f, 0x8b, 0x35, - 0x49, 0x61, 0x61, 0x7f, 0xbf, 0xb4, 0x72, 0x27, 0x2b, 0xae, 0x9c, 0xe2, 0x19, 0x47, 0xd6, 0x30, - 0x6d, 0x78, 0x2b, 0x2a, 0x2f, 0xed, 0x10, 0xa1, 0x10, 0xdd, 0x96, 0xbc, 0xd5, 0xc2, 0xd0, 0x71, - 0x12, 0x64, 0x4b, 0xe6, 0x21, 0x79, 0x03, 0x71, 0x7f, 0x15, 0x3a, 0x49, 0x82, 0x6c, 0x7e, 0xb3, - 0xf6, 0xd7, 0x3d, 0x37, 0x91, 0x9d, 0x1a, 0x49, 0x02, 0xf3, 0x9e, 0xbf, 0x35, 0x74, 0x8a, 0x9a, - 0x43, 0x8a, 0xbc, 0x80, 0x48, 0x56, 0x85, 0x38, 0x08, 0x4d, 0x23, 0x34, 0x71, 0xe9, 0x55, 0xdf, - 0x5b, 0x9a, 0xf9, 0x55, 0x2b, 0xb5, 0xa9, 0x2b, 0x23, 0x0e, 0xe6, 0x83, 0xae, 0x2b, 0x3a, 0x4b, - 0x82, 0x6c, 0xc1, 0x86, 0x14, 0x79, 0x09, 0xb3, 0xad, 0x14, 0xaa, 0x68, 0x72, 0x4d, 0x63, 0xd4, - 0xba, 0xf0, 0x5a, 0xef, 0x2c, 0x7f, 0x97, 0xb3, 0xbe, 0x21, 0xfd, 0x1d, 0x42, 0xe4, 0xd8, 0x47, - 0x5d, 0x4c, 0x60, 0x5e, 0x08, 0xbd, 0x69, 0x65, 0x83, 0x37, 0xee, 0x4c, 0x1c, 0x52, 0xe4, 0x12, - 0xc2, 0x07, 0x71, 0x74, 0x06, 0xda, 0xd2, 0xe6, 0x25, 0x0e, 0xa6, 0xe5, 0xe8, 0x5d, 0xcc, 0x3a, - 0x60, 0xd5, 0x0b, 0x6e, 0x38, 0x9a, 0x16, 0x33, 0xac, 0xad, 0xff, 0x4a, 0x54, 0x3b, 0x53, 0x3a, - 0x4b, 0x1c, 0xb2, 0xbd, 0xe6, 0xd8, 0x08, 0x1a, 0x21, 0x8b, 0x35, 0x79, 0x06, 0x50, 0x71, 0x23, - 0x7f, 0x88, 0x7b, 0xbb, 0x32, 0xc3, 0x95, 0x01, 0x43, 0x9e, 0x42, 0x5c, 0x88, 0xed, 0xa7, 0x4e, - 0x2e, 0x4e, 0x82, 0x6c, 0xcc, 0x4e, 0x84, 0x9d, 0x54, 0x88, 0xed, 0x37, 0xae, 
0xe8, 0x1c, 0x1d, - 0x73, 0xc8, 0x26, 0xdd, 0x39, 0x5b, 0xd0, 0x65, 0x12, 0x64, 0x33, 0x6f, 0x74, 0x61, 0x57, 0xaa, - 0xfa, 0xf3, 0x77, 0xa5, 0x34, 0x5d, 0x75, 0x2b, 0x0e, 0xda, 0x49, 0x9b, 0x5a, 0x29, 0x8e, 0x8e, - 0x5c, 0xe0, 0x75, 0x4e, 0x04, 0xbe, 0xd6, 0x5a, 0x09, 0x4d, 0x2f, 0xf1, 0x33, 0xe9, 0xc0, 0x30, - 0xdf, 0xab, 0xff, 0xc9, 0x97, 0xfc, 0x93, 0x6f, 0xfa, 0x2b, 0x80, 0x09, 0x6e, 0x7a, 0x34, 0xb0, - 0x35, 0x4c, 0x31, 0x5c, 0xff, 0x99, 0x3a, 0x64, 0xed, 0x6b, 0x5a, 0xb9, 0xe7, 0xed, 0xf1, 0xa3, - 0x4b, 0x6b, 0xc6, 0x06, 0x0c, 0x79, 0x0e, 0xcb, 0x92, 0xeb, 0xb2, 0x7f, 0x93, 0x74, 0x8c, 0xdb, - 0xcf, 0x49, 0xdb, 0xd5, 0xbf, 0xea, 0xaf, 0xf2, 0xa7, 0xc0, 0x34, 0x27, 0xec, 0x9c, 0xcc, 0xa7, - 0xf8, 0xff, 0xf2, 0xfa, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x29, 0x27, 0x0d, 0x6f, 0x04, - 0x00, 0x00, +func (m *TablePartition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Table) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Table))) + i += copy(dAtA[i:], m.Table) + } + if len(m.Keys) > 0 { + for _, s := range m.Keys { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Partitions) > 0 { + for _, msg := range m.Partitions { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchema(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Partition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Partition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Id) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Id))) + i += copy(dAtA[i:], m.Id) + } + if len(m.Left) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Left))) + i += copy(dAtA[i:], m.Left) + } + if len(m.Right) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Right))) + i += copy(dAtA[i:], m.Right) + } + return i, nil +} + +func (m *TablePb) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TablePb) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.NameOriginal) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.NameOriginal))) + i += copy(dAtA[i:], m.NameOriginal) + } + if len(m.Parent) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Parent))) + i += copy(dAtA[i:], m.Parent) + } + if m.Charset != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.Charset)) + } + if m.Partition != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.Partition.Size())) + n3, err := m.Partition.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.PartitionCt != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.PartitionCt)) + } + if len(m.Indexes) > 0 { + for _, msg := range m.Indexes { + dAtA[i] = 0x3a + i++ + i = encodeVarintSchema(dAtA, i, uint64(msg.Size())) + 
n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Context) > 0 { + for k, _ := range m.Context { + dAtA[i] = 0x42 + i++ + v := m.Context[k] + mapSize := 1 + len(k) + sovSchema(uint64(len(k))) + 1 + len(v) + sovSchema(uint64(len(v))) + i = encodeVarintSchema(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Fieldpbs) > 0 { + for _, msg := range m.Fieldpbs { + dAtA[i] = 0x4a + i++ + i = encodeVarintSchema(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *FieldPb) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldPb) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Key) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Extra) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Extra))) + i += copy(dAtA[i:], m.Extra) + } + if len(m.Data) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.Length != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.Length)) + } + if m.Type != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.Type)) + } + if m.NativeType != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.NativeType)) + } + if m.DefLength != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.DefLength)) + } + if len(m.DefVal) > 0 { + dAtA[i] = 0x5a + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.DefVal))) + i += copy(dAtA[i:], m.DefVal) + } + if m.Indexed { + dAtA[i] = 0x68 + i++ + if m.Indexed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.NoNulls { + dAtA[i] = 0x70 + i++ + if m.NoNulls { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Collation) > 0 { + dAtA[i] = 0x7a + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Collation))) + i += copy(dAtA[i:], m.Collation) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Indexes) > 0 { + for _, msg := range m.Indexes { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSchema(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Context) > 0 { + for k, _ := range m.Context { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + v := m.Context[k] + mapSize := 1 + len(k) + sovSchema(uint64(len(k))) + 1 + len(v) + sovSchema(uint64(len(v))) + i = encodeVarintSchema(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(k))) + i += 
copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.Position != 0 { + dAtA[i] = 0x98 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.Position)) + } + return i, nil +} + +func (m *Index) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Index) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Fields) > 0 { + for _, s := range m.Fields { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.PrimaryKey { + dAtA[i] = 0x18 + i++ + if m.PrimaryKey { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.HashPartition) > 0 { + for _, s := range m.HashPartition { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.PartitionSize != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintSchema(dAtA, i, uint64(m.PartitionSize)) + } + return i, nil +} + +func (m *ConfigSchema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigSchema) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Sources) > 0 { + for _, s := range m.Sources { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ConfigSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Schema) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Schema))) + i += copy(dAtA[i:], m.Schema) + } + if len(m.Type) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.TablesToLoad) > 0 { + for _, s := range m.TablesToLoad { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.TableAliases) > 0 { + for k, _ := range m.TableAliases { + dAtA[i] = 0x2a + i++ + v := m.TableAliases[k] + mapSize := 1 + len(k) + sovSchema(uint64(len(k))) + 1 + len(v) + sovSchema(uint64(len(v))) + i = encodeVarintSchema(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintSchema(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if 
[Generated code for schema.proto, elided: the remainder of this generated file adds the tail of ConfigSource.MarshalTo (Hosts, Settings, Partitions, PartitionCt), Marshal and MarshalTo for ConfigNode, the varint writer encodeVarintSchema, Size and Unmarshal methods for Command, SchemaPb, TablePartition, Partition, TablePb, FieldPb, Index, ConfigSchema, ConfigSource, and ConfigNode, the varint sizing helpers sovSchema and sozSchema, and the unknown-field skipper skipSchema. The Settings and TableAliases fields of ConfigSource, and the Context fields of TablePb, FieldPb, and ConfigNode, are (de)serialized as map<string, string> entries, each a length-delimited message holding a tag-1 key string and a tag-2 value string. The file ends with its error values, proto registration, and gzipped descriptor:]
+
+var (
+	ErrInvalidLengthSchema = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowSchema   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("schema.proto", fileDescriptorSchema) }
+
+var fileDescriptorSchema = []byte{
+	// 1088 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0x1b, 0x45,
+	0x14, 0xee, 0xfa, 0x6f, 0xbd, 0xc7, 0x3f, 0x71, 0xa6, 0x51, 0xb4, 0x58, 0x55, 0x62, 0x59, 0xfc,
+	0x18, 0x21, 0x52, 0x35, 0x94, 0x08, 0x0a, 0x02, 0x35, 0xa5, 0x48, 0x40, 0xd5, 0x46, 0x93, 0x94,
+	0x3b, 0x14, 0x8d, 0xbd, 0x63, 0x7b, 0x95, 0xf5, 0xce, 0x6a, 0x67, 0x5c, 0x12, 0xde, 0x03, 0x89,
+	0x2b, 0xb8, 0xe5, 0x51, 0x80, 0x2b, 0x9e, 0x20, 0x42, 0xe9, 0x5d, 0x9e, 0x02, 0xcd, 0x99, 0xd9,
+	0xf5, 0xae, 0xea, 0x48, 0xad, 0x7a, 0xe5, 0x39, 0xdf, 0xf9, 0x9b, 0x73, 0xce, 0x37, 0x67, 0x0d,
+	0x6d, 0x39, 0x99, 0xf3, 0x05, 0xdb, 0x4b, 0x52, 0xa1, 0x04, 0x69, 0x18, 0xa9, 0xff, 0xf1, 0x2c,
+	0x54, 0xf3, 0xe5, 0x78, 0x6f, 0x22, 0x16, 0x77, 0x67, 0x62, 0x26, 0xee, 0xa2, 0x7a, 0xbc, 0x9c,
+	0xa2, 0x84, 0x02, 0x9e, 0x8c, 0xdb, 0xf0, 0xa5, 0x03, 0xee, 0x23, 0xb1, 0x58, 0xb0, 0x38, 0x20,
+	0x1f, 0x42, 0x45, 0x24, 0xbe, 0x33, 0x70, 0x46, 0xdd, 0xfd, 0x77, 0xf6, 0x6c, 0x74, 0xab, 0xdc,
+	0x7b, 0x96, 0xf0, 0x94, 0xa9, 0x50, 0xc4, 0xb4, 0x22, 0x12, 0xb2, 0x0d, 0x0d, 0x91, 0x86, 0xb3,
+	0x30, 0xf6, 0x2b, 0x03, 0x67, 0xe4, 0x51, 0x2b, 0x69, 0xdc, 0xf8, 0xf9, 0x55, 0x83, 0x1b, 0x89,
+	0x10, 0xa8, 0xa9, 0x8b, 0x84, 0xfb, 0x35, 0x44, 0xf1, 0x4c, 0xb6, 0xa0, 0x1e, 0xc6, 0x01, 0x3f,
+	0xf7, 0xeb, 0x03, 0x67, 0x54, 0xa3, 0x46, 0x20, 0x5d, 0xa8, 0x28, 0xe9, 0x37, 0x06, 0xce, 0xa8,
+	0x4a, 0x2b, 0x4a, 0x92, 0x1e, 0x54, 0x17, 0x72, 0xe6, 0xbb, 0x03, 0x67, 0xd4, 0xa6, 0xfa, 0x38,
+	0xbc, 0x07, 0x5e, 0x7e, 0x19, 0xd2, 0x02, 0xf7, 0x79, 0x7c, 0x16, 0x8b, 0x9f, 0xe3, 0xde, 0x2d,
+	0xd2, 0x01, 0xef, 0x61, 0x10, 0x3c, 0x4f, 0x02, 0xa6, 0x78, 0xcf, 0x21, 0x4d, 0xa8, 0x7d, 0x93,
+	0x8a, 0xa4, 0x57, 0x19, 0xfe, 0xe3, 0x40, 0xf3, 0x18, 0x6f, 0x72, 0x34, 0xd6, 0x77, 0x89, 0xd9,
+	0x82, 0x63, 0xa1, 0x1e, 0xc5, 0x33, 0xb9, 0x0f, 0x0d, 0xc5, 0xc6,
0x11, 0x97, 0x7e, 0x65, 0x50, + 0x1d, 0xb5, 0xf6, 0xef, 0x64, 0xe5, 0x67, 0x5e, 0x7b, 0x27, 0xa8, 0x7e, 0x1c, 0xab, 0xf4, 0x82, + 0x5a, 0x5b, 0x32, 0x82, 0xda, 0x44, 0xc4, 0x53, 0xac, 0xb5, 0xb5, 0xbf, 0xb5, 0x6a, 0x59, 0x3c, + 0x0d, 0x67, 0xc7, 0x62, 0x99, 0x4e, 0x38, 0x45, 0x8b, 0xfe, 0xf7, 0xd0, 0x2a, 0x04, 0xd0, 0x45, + 0x9d, 0xf1, 0x0b, 0x7b, 0x03, 0x7d, 0x24, 0xef, 0x41, 0xfd, 0x05, 0x8b, 0x96, 0x1c, 0xfb, 0xd9, + 0xda, 0xdf, 0xc8, 0x62, 0xa1, 0xd7, 0xd1, 0x98, 0x1a, 0xed, 0x83, 0xca, 0x67, 0xce, 0x70, 0x01, + 0x5d, 0x83, 0xb2, 0x54, 0x85, 0xd8, 0x84, 0x2d, 0xa8, 0xe3, 0x8d, 0x6c, 0x40, 0x23, 0xe8, 0x3a, + 0xcf, 0xf8, 0x85, 0xa9, 0xc8, 0xa3, 0x78, 0x26, 0xf7, 0x00, 0x92, 0xcc, 0x4d, 0xfa, 0x55, 0xac, + 0x75, 0x33, 0xcb, 0x95, 0x07, 0xa4, 0x05, 0xa3, 0xe1, 0x63, 0xf0, 0x56, 0x99, 0xba, 0x50, 0x09, + 0x03, 0x9b, 0xa6, 0x12, 0x06, 0x3a, 0x47, 0xc4, 0xa7, 0xca, 0xb2, 0x00, 0xcf, 0xfa, 0x36, 0x69, + 0x38, 0x9b, 0x2b, 0x4b, 0x01, 0x23, 0x0c, 0x7f, 0xad, 0x82, 0x6b, 0x8b, 0x59, 0x3b, 0x81, 0x21, + 0xb4, 0xf5, 0xef, 0x33, 0xe4, 0x11, 0x8b, 0x6c, 0xc4, 0x12, 0xa6, 0xd9, 0x95, 0xb0, 0x94, 0xc7, + 0x59, 0x68, 0x2b, 0x11, 0x1f, 0xdc, 0xc9, 0x9c, 0xa5, 0x92, 0x2b, 0x24, 0x58, 0x87, 0x66, 0x22, + 0xb9, 0x0f, 0x5e, 0x5e, 0x0a, 0xf2, 0xac, 0xb5, 0xbf, 0x5d, 0x6e, 0x6d, 0x5e, 0xf3, 0xca, 0x90, + 0x0c, 0xa0, 0x95, 0xe3, 0x8f, 0x14, 0x92, 0xb1, 0x43, 0x8b, 0x10, 0xf9, 0x00, 0x5c, 0xa4, 0x2b, + 0x97, 0xbe, 0x8b, 0x4d, 0xec, 0x64, 0x51, 0xbf, 0xd3, 0x30, 0xcd, 0xb4, 0xe4, 0x00, 0xdc, 0x89, + 0x88, 0x15, 0x3f, 0x57, 0x7e, 0xb3, 0xcc, 0x2c, 0xdb, 0x0c, 0xcd, 0x16, 0xad, 0x36, 0xcc, 0xca, + 0x8c, 0xc9, 0x47, 0xd0, 0x9c, 0x86, 0x3c, 0x0a, 0x92, 0xb1, 0xf4, 0x3d, 0x74, 0xcc, 0x29, 0xf1, + 0xad, 0xc6, 0x8f, 0xc6, 0x34, 0x37, 0xe8, 0x3f, 0x80, 0x76, 0x31, 0xca, 0x1a, 0x7a, 0x6d, 0x15, + 0xe9, 0xe5, 0x15, 0xd9, 0xf4, 0x7b, 0x0d, 0x5c, 0x1b, 0x71, 0xed, 0x5c, 0x06, 0xd0, 0x0a, 0xb8, + 0x9c, 0xa4, 0x61, 0x82, 0x3d, 0x34, 0xfe, 0x45, 0x28, 0xcb, 0x56, 0x2d, 0x65, 0xe3, 0xe7, 0x2a, + 0x65, 0xf6, 0xb9, 0x1b, 0x41, 0x47, 0x0f, 0x98, 0x62, 0x38, 0x06, 0x8f, 0xe2, 0x59, 0x4f, 0x34, + 0xe2, 0xf1, 0x4c, 0xcd, 0x6d, 0x93, 0xad, 0x94, 0xef, 0x0b, 0x17, 0x51, 0xb3, 0x2f, 0x76, 0x00, + 0x62, 0xa6, 0xc2, 0x17, 0xfc, 0x44, 0x6b, 0x9a, 0xa8, 0x29, 0x20, 0xe4, 0x0e, 0x78, 0x01, 0x9f, + 0x3e, 0x31, 0xe1, 0x3c, 0xdc, 0x29, 0x2b, 0x40, 0x67, 0x0a, 0xf8, 0xf4, 0x47, 0x16, 0xf9, 0x2d, + 0x5c, 0x25, 0x56, 0xd2, 0xdc, 0x31, 0xb3, 0x0a, 0xfc, 0xce, 0xc0, 0x19, 0x35, 0xb3, 0xd1, 0x05, + 0x5a, 0x13, 0x8b, 0xa7, 0xcb, 0x28, 0x92, 0x7e, 0xd7, 0x68, 0xac, 0xa8, 0x33, 0x4d, 0x44, 0x14, + 0xe1, 0x06, 0xf2, 0x37, 0xb0, 0x9c, 0x15, 0x80, 0xfc, 0x17, 0x7a, 0x95, 0xf4, 0xf0, 0xe1, 0x19, + 0xa1, 0xc8, 0x98, 0xcd, 0xd7, 0x65, 0x0c, 0x29, 0x33, 0xc6, 0x8e, 0xe9, 0x06, 0xc6, 0xf4, 0xa1, + 0x99, 0x08, 0x69, 0x98, 0x7e, 0x1b, 0xab, 0xcf, 0xe5, 0xb7, 0x22, 0xc8, 0x1f, 0x0e, 0xd4, 0xf1, + 0x8a, 0x6b, 0xe9, 0xb1, 0x0d, 0x0d, 0xa4, 0x61, 0xb6, 0x66, 0xac, 0xa4, 0x87, 0x95, 0xa4, 0xe1, + 0x82, 0xa5, 0x17, 0x3f, 0x58, 0x6e, 0x34, 0x69, 0x01, 0x21, 0xef, 0x42, 0x67, 0xce, 0xe4, 0x3c, + 0x7f, 0x53, 0x7e, 0x0d, 0xdd, 0xcb, 0xa0, 0xb6, 0xca, 0x5f, 0xe5, 0x71, 0xf8, 0x0b, 0x47, 0xee, + 0xd4, 0x69, 0x19, 0x1c, 0x7e, 0x89, 0xd5, 0xe9, 0x95, 0x9b, 0x7f, 0x6c, 0x5e, 0xb9, 0xa7, 0x0f, + 0xae, 0xc4, 0x85, 0x9c, 0x5d, 0x34, 0x13, 0x87, 0x7f, 0xd6, 0x72, 0x77, 0x44, 0x6e, 0x2a, 0xd3, + 0x7e, 0xd7, 0x2a, 0x6b, 0xbf, 0x6b, 0xd5, 0xc2, 0x77, 0xed, 0x00, 0xda, 0x66, 0xd7, 0x9f, 0x88, + 0x27, 0x82, 0x05, 0xa6, 0xb2, 0x43, 0x72, 0x7d, 0xb9, 0xdb, 0x35, 0xdf, 0x8d, 0x53, 0x25, 
0x4e,
+	0x23, 0xc1, 0x02, 0x5a, 0xb2, 0x23, 0x3f, 0x41, 0x1b, 0xf5, 0x0f, 0xa3, 0x90, 0x49, 0x2e, 0xfd,
+	0x3a, 0x4e, 0xff, 0xfd, 0x75, 0x5f, 0x15, 0xb3, 0x3c, 0xac, 0x21, 0x8e, 0xf4, 0x70, 0xf3, 0xfa,
+	0x72, 0xb7, 0x83, 0xfe, 0xa7, 0xcc, 0xe0, 0xb4, 0x14, 0x4e, 0x4f, 0x78, 0x2e, 0xa4, 0x32, 0x0b,
+	0xcb, 0xa3, 0x46, 0x20, 0x5f, 0x41, 0x53, 0x72, 0xa5, 0xc2, 0x78, 0x26, 0xed, 0x82, 0x1a, 0xae,
+	0x4d, 0x78, 0x6c, 0x8d, 0x0c, 0xe9, 0x72, 0x1f, 0x72, 0x50, 0xfa, 0xa0, 0x98, 0x4d, 0x75, 0xd3,
+	0x86, 0x2d, 0x58, 0x92, 0x4f, 0xa1, 0x95, 0x14, 0x56, 0x2c, 0xe8, 0xd7, 0x7c, 0x78, 0xfb, 0xfa,
+	0x72, 0xb7, 0x23, 0x87, 0x4f, 0x27, 0x62, 0x19, 0x2b, 0x5a, 0xb4, 0xeb, 0x7f, 0x0d, 0x9b, 0xaf,
+	0x94, 0xfe, 0x26, 0x6c, 0xee, 0x7f, 0x01, 0x9d, 0x52, 0x29, 0x6f, 0xf4, 0x14, 0xfe, 0x76, 0x00,
+	0x4c, 0x57, 0x9e, 0x8a, 0xe0, 0x66, 0xa2, 0x60, 0xc7, 0x72, 0xa2, 0x18, 0x52, 0xf9, 0xe0, 0xb2,
+	0x20, 0x48, 0xb9, 0x94, 0x96, 0x2b, 0x99, 0x48, 0x3e, 0x5f, 0xbd, 0xf7, 0x1a, 0xb6, 0x6f, 0xb7,
+	0x3c, 0x00, 0x9d, 0x6a, 0xfd, 0x93, 0x7f, 0x9b, 0x67, 0x7d, 0xd8, 0xfb, 0xeb, 0x6a, 0xc7, 0xf9,
+	0xf7, 0x6a, 0xc7, 0xf9, 0xef, 0x6a, 0xc7, 0xf9, 0xed, 0xe5, 0xce, 0xad, 0x71, 0x03, 0xff, 0x11,
+	0x7e, 0xf2, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x55, 0xf8, 0x81, 0x58, 0x0a, 0x00, 0x00,
+}
diff --git a/schema/schema.proto b/schema/schema.proto
index 57918dd7..0a1feb2e 100644
--- a/schema/schema.proto
+++ b/schema/schema.proto
@@ -2,13 +2,42 @@ syntax = "proto3";
 
 package schema;
 
-// protoc --go_out=. *.proto
+// protoc --proto_path=$GOPATH/src:$GOPATH/src/github.com/gogo/protobuf/protobuf:. --gofast_out=. *.proto
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 
+// Notation if we do import other types
 //import "github.com/araddon/qlbridge/rel/sql.proto";
 //import "github.com/araddon/qlbridge/expr/node.proto";
-//import "google/protobuf/any.proto";
 
+// Command defines a Schema Replication command/message such as
+// Drop, Alter, Create-Schema, etc. Used to replicate schema changes
+// across servers.
+message Command {
+  enum Operation {
+    Unknown   = 0;
+    AddUpdate = 1;
+    Drop      = 2;
+  }
+  Operation op = 1;
+  string origin = 2;
+  string schema = 3;
+  string type = 4;    // [schema, table]
+  uint64 index = 5;
+  int64 ts = 6;
+  bytes msg = 7;
+}
+
+// SchemaPb defines the fields that define schema attributes, and
+// can be serialized.
+message SchemaPb {
+  // Name of schema lowercased
+  string name = 1;
+  // Tables is map of tables
+  map<string, TablePb> tables = 2;
+  // source configuration
+  ConfigSource conf = 3;
+}
 
 // Partition describes a range of data (in a Table).
 // left-key is contained in this partition
@@ -20,8 +49,6 @@ message TablePartition {
   repeated Partition partitions = 3;
 }
 
-
-
 // Partition describes a range of data
 // the left-key is contained in this partition
 // the right key is not contained in this partition, in the next one
@@ -31,6 +58,8 @@ message Partition {
   string right = 3;
 }
 
+// TablePb defines the fields that define table attributes, and
+// can be serialized.
 message TablePb {
   // Name of table lowercased
   string name = 1;
@@ -39,36 +68,40 @@ message TablePb {
   // some dbs are more hiearchical (table-column-family)
   string parent = 3;
   // Character set, default = utf8
-  uint32 Charset = 4;
+  uint32 charset = 4;
   // Partitions in this table, optional may be empty
   TablePartition partition = 5;
   // Partition Count
   uint32 PartitionCt = 6;
   // List of indexes for this table
   repeated Index indexes = 7;
-  // context json bytes
-  bytes contextJson = 8;
+  // context is additional arbitrary map values
+  map<string, string> context = 8;
   // List of Fields, in order
   repeated FieldPb fieldpbs = 9;
 }
 
+// FieldPb defines attributes of a field/column that can
+// be serialized and transported.
 message FieldPb {
-  string name = 1;
-  string description = 2;
-  string key = 3;
-  string extra = 4;
-  string data = 5;
-  uint32 length = 6;
-  uint32 type = 7;
-  uint32 nativeType = 8;
-  uint64 defLength = 9;
-  bytes defVal = 11;
-  bool indexed = 13;
-  bool noNulls = 14;
-  string collation = 15;
+  string name        = 1;
+  string description = 2;
+  string key         = 3;
+  string extra       = 4;
+  string data        = 5;
+  uint32 length      = 6;
+  uint32 type        = 7;
+  uint32 nativeType  = 8;
+  uint64 defLength   = 9;
+  bytes  defVal      = 11;
+  bool   indexed     = 13;
+  bool   noNulls     = 14;
+  string collation   = 15;
   repeated string roles = 16;
-  repeated Index indexes = 17;
-  bytes contextJson = 18;
+  repeated Index indexes = 17;
+  // context is additional arbitrary map values
+  map<string, string> context = 18;
+  uint64 position = 19;   // Positional index in list of fields for table
 }
 
 // Index a description of how field(s) should be indexed for a table.
@@ -78,4 +111,41 @@ message Index {
   bool primaryKey = 3;
   repeated string hashPartition = 4;
   int32 partitionSize = 5;
-}
\ No newline at end of file
+}
+
+// ConfigSchema is the config block for Schema, the data-sources
+// that make up this Virtual Schema. Must have a name and list
+// of sources to include.
+message ConfigSchema {
+  string name = 1;
+  repeated string sources = 2;
+}
+
+// ConfigSource are backend datasources ie : storage/database/csvfiles
+// Each represents a single source type/config. May belong to more
+// than one schema.
+message ConfigSource {
+  string name = 1;
+  string schema = 2;
+  string type = 3;
+  // TablesToLoad If not all tables from are going to be loaded.
+  repeated string TablesToLoad = 4 [(gogoproto.jsontag) = "tables_to_load"];
+  // convert underlying table names to friendly ones
+  map<string, string> tableAliases = 5 [(gogoproto.jsontag) = "table_aliases"];
+  repeated string hosts = 7;
+  map<string, string> settings = 8;
+  repeated TablePartition partitions = 9;
+  uint32 partitionCt = 10 [(gogoproto.jsontag) = "partition_count"];
+}
+
+// ConfigNode are Servers/Services, ie a running instance of said Source
+// - each must represent a single source type
+// - normal use is a server, describing partitions of servers
+// - may have arbitrary config info in Settings.
+message ConfigNode {
+  string name = 1;                   // Name of this Node optional
+  string source = 2;                 // Name of source this node belongs to
+  string address = 3;                // host/ip
+  map<string, string> context = 4;   // Arbitrary settings
+}
+
diff --git a/schema/schema_test.go b/schema/schema_test.go
index 856c0c00..cd34cfc8 100644
--- a/schema/schema_test.go
+++ b/schema/schema_test.go
@@ -5,9 +5,9 @@ import (
 	"encoding/json"
 	"sort"
 	"testing"
-	"time"
 
 	"github.com/araddon/qlbridge/lex"
+	"github.com/gogo/protobuf/proto"
 
 	u "github.com/araddon/gou"
 	"github.com/stretchr/testify/assert"
@@ -63,6 +63,10 @@ func TestRegisterSchema(t *testing.T) {
 	db.Init()
 	db.Setup(nil)
 	childSchema := schema.NewSchemaSource("user_child", db2)
+
+	err = childSchema.Discovery()
+	assert.Equal(t, nil, err)
+
 	err = reg.SchemaAddChild("user_csv", childSchema)
 	assert.Equal(t, nil, err)
 
@@ -140,7 +144,7 @@ func TestAddSchemaFromConfig(t *testing.T) {
 	err = reg.SchemaDrop("schema_parent", "schema_parent", lex.TokenSchema)
 	assert.Equal(t, nil, err)
 
-	sourceConf.SourceType = "never-gonna-happen-x"
+	sourceConf.Type = "never-gonna-happen-x"
 	err = reg.SchemaAddFromConfig(sourceConf)
 	assert.NotEqual(t, nil, err)
 }
@@ -152,7 +156,7 @@ func TestSchema(t *testing.T) {
 		return sdb
 	})
 	reg := schema.NewRegistry(a)
-	a.Init(reg)
+	a.Init(reg, nil)
 
 	inrow := []driver.Value{122, "bob", "bob@email.com"}
 	cols := []string{"user_id", "name", "email"}
@@ -162,18 +166,19 @@ func TestSchema(t *testing.T) {
 	assert.Equal(t, []string{"users"}, db.Tables())
 
 	s := schema.NewSchema("user_csv2")
-	assert.Equal(t, false, s.Current())
 	s.DS = db
+	err = s.Discovery()
+	assert.Equal(t, nil, err)
 	err = reg.SchemaAdd(s)
 	assert.Equal(t, nil, err)
 
 	s, ok := reg.Schema("user_csv2")
 	assert.Equal(t, true, ok)
-	assert.Equal(t, true, s.Current())
-	reg.SchemaDrop("user_csv2", "user_csv2", lex.TokenSchema)
-
-	tbl, err := s.Table("use_csv2.users")
+	tbl, err := s.Table("bad_namespace.users")
+	assert.NotEqual(t, nil, err)
+	assert.True(t, nil == tbl)
+	tbl, err = s.Table("user_csv2.users")
 	assert.Equal(t, nil, err)
 	assert.NotEqual(t, nil, tbl)
 
@@ -182,18 +187,16 @@ func TestSchema(t *testing.T) {
 	_, err = s.SchemaForTable("not_a_table")
 	assert.NotEqual(t, nil, err)
+
+	reg.SchemaDrop("user_csv2", "user_csv2", lex.TokenSchema)
+
+	_, ok = reg.Schema("user_csv2")
+	assert.Equal(t, false, ok)
 }
 
 func TestTable(t *testing.T) {
 	tbl := schema.NewTable("users")
 	assert.Equal(t, "users", tbl.Name)
-	assert.Equal(t, uint64(0), tbl.Id())
-	assert.Equal(t, false, tbl.Current())
-	tbl.SetRefreshed()
-	assert.Equal(t, true, tbl.Current())
-	schema.SchemaRefreshInterval = time.Minute * 5
-	tbl.SetRefreshed()
-	assert.Equal(t, false, tbl.Current())
 
 	f := schema.NewFieldBase("first_name", value.StringType, 255, "string")
 	tbl.AddField(f)
@@ -228,13 +231,17 @@ func TestTable(t *testing.T) {
 	assert.Equal(t, 2, len(tbl.AsRows()))
 	assert.Equal(t, 2, len(tbl.AsRows()))
-	assert.NotEqual(t, nil, tbl.Body())
-	assert.Equal(t, uint64(0), tbl.Id())
+	by, err := tbl.Marshal()
+	assert.Equal(t, nil, err)
+	tbl2 := &schema.Table{}
+	err = tbl2.Unmarshal(by)
+	assert.Equal(t, nil, err)
+	assert.True(t, tbl.Equal(tbl2))
 }
 
 func TestFields(t *testing.T) {
-	f := schema.NewFieldBase("Field", value.StringType, 64, "string")
+	f := schema.NewFieldBase("user_id", value.StringType, 64, "string")
 	assert.NotEqual(t, nil, f)
-	assert.Equal(t, "Field", f.Name)
+	assert.Equal(t, "user_id", f.Name)
 	r := f.AsRow()
 	assert.Equal(t, 9, len(r))
 	r = f.AsRow()
@@ -243,6 +250,13 @@ func TestFields(t *testing.T) {
 	f.AddContext("hello", "world")
 	assert.Equal(t, 1, len(f.Context))
+	by, err := proto.Marshal(f)
+	assert.Equal(t, nil, err)
+	f2 := &schema.Field{}
+	err = proto.Unmarshal(by, f2)
+	assert.Equal(t, nil, err)
+	assert.True(t, f.Equal(f2))
+
 
 	// NewField(name string, valType value.ValueType, size int, allowNulls bool, defaultVal driver.Value, key, collation, description string)
 	f = schema.NewField("Field", value.StringType, 64, false, "world", "Key", "utf-8", "this is a description")
 	r = f.AsRow()
diff --git a/updateglock.sh b/updateglock.sh
index a34434bf..b1367f5e 100755
--- a/updateglock.sh
+++ b/updateglock.sh
@@ -1,6 +1,5 @@
 #! /bin/sh
 
-
 cd $GOPATH/src/github.com/araddon/dateparse && git checkout master && git pull
 cd $GOPATH/src/github.com/araddon/gou && git checkout master && git pull
 cd $GOPATH/src/github.com/couchbaselabs/goforestdb && git checkout master && git pull
@@ -26,7 +25,9 @@ cd $GOPATH/src/github.com/lytics/confl && git checkout master && git pull
 cd $GOPATH/src/github.com/lytics/datemath && git checkout master && git pull
 cd $GOPATH/src/github.com/mb0/glob && git checkout master && git pull
 cd $GOPATH/src/github.com/mssola/user_agent && git checkout master && git pull
+cd $GOPATH/src/github.com/petar/GoLLRB && git checkout master && git pull
 cd $GOPATH/src/github.com/pborman/uuid && git checkout master && git pull
+cd $GOPATH/src/github.com/pmezard/go-difflib && git checkout master && git pull
 cd $GOPATH/src/github.com/rcrowley/go-metrics && git checkout master && git pull
 cd $GOPATH/src/github.com/stretchr/testify && git checkout master && git pull
 cd $GOPATH/src/github.com/go.opencensus.io && git checkout master && git pull
@@ -44,8 +45,3 @@ cd $GOPATH/src/google.golang.org/genproto && git checkout master && git pull
 cd $GOPATH/src/google.golang.org/grpc && git checkout master && git pull
 cd $GOPATH/src/cloud.google.com/go/ && git checkout master && git pull
 
-
-#go get -u -v ./...
-
-#glock save github.com/araddon/qlbridge
-
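
Note (not part of the patch): a minimal sketch of how the newly generated ConfigNode message might be round-tripped with gogo/protobuf, following the same Marshal/Unmarshal pattern the patch adds to schema_test.go for schema.Field and schema.Table. The field names (Name, Source, Address, Context) are taken from the generated ConfigNode.Unmarshal above; the package main wrapper and the sample values are illustrative assumptions only.

// confignode_roundtrip.go - illustrative sketch, not included in this diff.
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	"github.com/araddon/qlbridge/schema"
)

func main() {
	// Build a ConfigNode, including the new map<string, string> context field.
	n := &schema.ConfigNode{
		Name:    "node1",            // hypothetical values for illustration
		Source:  "testcsvs",
		Address: "127.0.0.1:4000",
		Context: map[string]string{"region": "us-east"},
	}

	// Serialize with gogo/protobuf; assumes ConfigNode implements proto.Message,
	// as gofast-generated types do.
	by, err := proto.Marshal(n)
	if err != nil {
		panic(err)
	}

	// Unmarshal into a fresh struct and confirm the context map survived.
	n2 := &schema.ConfigNode{}
	if err := proto.Unmarshal(by, n2); err != nil {
		panic(err)
	}
	fmt.Println(n2.Name, n2.Context["region"])
}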