From 9659a6da06852ede9bf6b87c0e39a543e88a5034 Mon Sep 17 00:00:00 2001
From: Christopher Speller
Date: Tue, 27 Jun 2017 08:02:08 -0700
Subject: Stage 1 of caching layer. Framework (#6693)

---
 store/layered_store.go               | 186 +++++
 store/layered_store_hints.go         |  11 +
 store/layered_store_supplier.go      |  29 +
 store/sql_audit_store.go             |   4 +-
 store/sql_channel_store.go           |   4 +-
 store/sql_cluster_discovery_store.go |   4 +-
 store/sql_command_store.go           |   4 +-
 store/sql_compliance_store.go        |   4 +-
 store/sql_emoji_store.go             |   4 +-
 store/sql_file_info_store.go         |   4 +-
 store/sql_job_status_store.go        |   4 +-
 store/sql_license_store.go           |   4 +-
 store/sql_oauth_store.go             |   4 +-
 store/sql_post_store.go              |   4 +-
 store/sql_preference_store.go        |   4 +-
 store/sql_preference_store_test.go   |   9 +-
 store/sql_reaction_store.go          |   4 +-
 store/sql_session_store.go           |   4 +-
 store/sql_status_store.go            |   4 +-
 store/sql_store.go                   | 997 +++--------------------------------
 store/sql_store_test.go              |  43 +-
 store/sql_supplier.go                | 825 +++++++++++++++++++++++++++++
 store/sql_supplier_reactions.go      |  35 ++
 store/sql_system_store.go            |   4 +-
 store/sql_team_store.go              |   4 +-
 store/sql_tokens_store.go            |   4 +-
 store/sql_upgrade.go                 |  42 +-
 store/sql_upgrade_test.go            |  14 +-
 store/sql_user_store.go              |   4 +-
 store/sql_webhook_store.go           |   4 +-
 30 files changed, 1235 insertions(+), 1036 deletions(-)
 create mode 100644 store/layered_store.go
 create mode 100644 store/layered_store_hints.go
 create mode 100644 store/layered_store_supplier.go
 create mode 100644 store/sql_supplier.go
 create mode 100644 store/sql_supplier_reactions.go
(limited to 'store')

diff --git a/store/layered_store.go b/store/layered_store.go
new file mode 100644
index 000000000..58c9e5ca1
--- /dev/null
+++ b/store/layered_store.go
@@ -0,0 +1,186 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+ +package store + +import ( + "context" + + "github.com/mattermost/platform/model" +) + +type LayeredStore struct { + TmpContext context.Context + ReactionStore ReactionStore + DatabaseLayer *SqlSupplier +} + +func NewLayeredStore() Store { + return &LayeredStore{ + TmpContext: context.TODO(), + ReactionStore: &LayeredReactionStore{}, + DatabaseLayer: NewSqlSupplier(), + } +} + +type QueryFunction func(LayeredStoreSupplier) LayeredStoreSupplierResult + +func (s *LayeredStore) RunQuery(queryFunction QueryFunction) StoreChannel { + storeChannel := make(StoreChannel) + + go func() { + finalResult := StoreResult{} + // Logic for determining what layers to run + if result := queryFunction(s.DatabaseLayer); result.Err == nil { + finalResult.Data = result.Result + } else { + finalResult.Err = result.Err + } + + storeChannel <- finalResult + }() + + return storeChannel +} + +func (s *LayeredStore) Team() TeamStore { + return s.DatabaseLayer.Team() +} + +func (s *LayeredStore) Channel() ChannelStore { + return s.DatabaseLayer.Channel() +} + +func (s *LayeredStore) Post() PostStore { + return s.DatabaseLayer.Post() +} + +func (s *LayeredStore) User() UserStore { + return s.DatabaseLayer.User() +} + +func (s *LayeredStore) Audit() AuditStore { + return s.DatabaseLayer.Audit() +} + +func (s *LayeredStore) ClusterDiscovery() ClusterDiscoveryStore { + return s.DatabaseLayer.ClusterDiscovery() +} + +func (s *LayeredStore) Compliance() ComplianceStore { + return s.DatabaseLayer.Compliance() +} + +func (s *LayeredStore) Session() SessionStore { + return s.DatabaseLayer.Session() +} + +func (s *LayeredStore) OAuth() OAuthStore { + return s.DatabaseLayer.OAuth() +} + +func (s *LayeredStore) System() SystemStore { + return s.DatabaseLayer.System() +} + +func (s *LayeredStore) Webhook() WebhookStore { + return s.DatabaseLayer.Webhook() +} + +func (s *LayeredStore) Command() CommandStore { + return s.DatabaseLayer.Command() +} + +func (s *LayeredStore) Preference() PreferenceStore { + return s.DatabaseLayer.Preference() +} + +func (s *LayeredStore) License() LicenseStore { + return s.DatabaseLayer.License() +} + +func (s *LayeredStore) Token() TokenStore { + return s.DatabaseLayer.Token() +} + +func (s *LayeredStore) Emoji() EmojiStore { + return s.DatabaseLayer.Emoji() +} + +func (s *LayeredStore) Status() StatusStore { + return s.DatabaseLayer.Status() +} + +func (s *LayeredStore) FileInfo() FileInfoStore { + return s.DatabaseLayer.FileInfo() +} + +func (s *LayeredStore) Reaction() ReactionStore { + return s.DatabaseLayer.Reaction() +} + +func (s *LayeredStore) JobStatus() JobStatusStore { + return s.DatabaseLayer.JobStatus() +} + +func (s *LayeredStore) MarkSystemRanUnitTests() { + s.DatabaseLayer.MarkSystemRanUnitTests() +} + +func (s *LayeredStore) Close() { + s.DatabaseLayer.Close() +} + +func (s *LayeredStore) DropAllTables() { + s.DatabaseLayer.DropAllTables() +} + +func (s *LayeredStore) TotalMasterDbConnections() int { + return s.DatabaseLayer.TotalMasterDbConnections() +} + +func (s *LayeredStore) TotalReadDbConnections() int { + return s.DatabaseLayer.TotalReadDbConnections() +} + +func (s *LayeredStore) TotalSearchDbConnections() int { + return s.DatabaseLayer.TotalSearchDbConnections() +} + +type LayeredReactionStore struct { + *LayeredStore +} + +func (s *LayeredReactionStore) Save(reaction *model.Reaction) StoreChannel { + return s.RunQuery(func(supplier LayeredStoreSupplier) LayeredStoreSupplierResult { + return supplier.ReactionSave(s.TmpContext, reaction) + }) +} + +func (s 
*LayeredReactionStore) Delete(reaction *model.Reaction) StoreChannel { + return s.RunQuery(func(supplier LayeredStoreSupplier) LayeredStoreSupplierResult { + return supplier.ReactionDelete(s.TmpContext, reaction) + }) +} + +// TODO: DELETE ME +func (s *LayeredReactionStore) InvalidateCacheForPost(postId string) { + return +} + +// TODO: DELETE ME +func (s *LayeredReactionStore) InvalidateCache() { + return +} + +func (s *LayeredReactionStore) GetForPost(postId string, allowFromCache bool) StoreChannel { + return s.RunQuery(func(supplier LayeredStoreSupplier) LayeredStoreSupplierResult { + return supplier.ReactionGetForPost(s.TmpContext, postId) + }) +} + +func (s *LayeredReactionStore) DeleteAllWithEmojiName(emojiName string) StoreChannel { + return s.RunQuery(func(supplier LayeredStoreSupplier) LayeredStoreSupplierResult { + return supplier.ReactionDeleteAllWithEmojiName(s.TmpContext, emojiName) + }) +} diff --git a/store/layered_store_hints.go b/store/layered_store_hints.go new file mode 100644 index 000000000..6154af7c9 --- /dev/null +++ b/store/layered_store_hints.go @@ -0,0 +1,11 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package store + +type LayeredStoreHint int + +const ( + LSH_NO_CACHE LayeredStoreHint = iota + LSH_MASTER_ONLY +) diff --git a/store/layered_store_supplier.go b/store/layered_store_supplier.go new file mode 100644 index 000000000..7b7da5710 --- /dev/null +++ b/store/layered_store_supplier.go @@ -0,0 +1,29 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package store + +import "github.com/mattermost/platform/model" +import "context" + +type LayeredStoreSupplierResult struct { + Result StoreResult + Err *model.AppError +} + +func NewSupplierResult() LayeredStoreSupplierResult { + return LayeredStoreSupplierResult{ + Result: StoreResult{}, + Err: nil, + } +} + +type LayeredStoreSupplier interface { + // + // Reactions + //), hints ...LayeredStoreHint) + ReactionSave(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) LayeredStoreSupplierResult + ReactionDelete(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) LayeredStoreSupplierResult + ReactionGetForPost(ctx context.Context, postId string, hints ...LayeredStoreHint) LayeredStoreSupplierResult + ReactionDeleteAllWithEmojiName(ctx context.Context, emojiName string, hints ...LayeredStoreHint) LayeredStoreSupplierResult +} diff --git a/store/sql_audit_store.go b/store/sql_audit_store.go index c87b5fe6c..49becb2ca 100644 --- a/store/sql_audit_store.go +++ b/store/sql_audit_store.go @@ -8,10 +8,10 @@ import ( ) type SqlAuditStore struct { - *SqlStore + SqlStore } -func NewSqlAuditStore(sqlStore *SqlStore) AuditStore { +func NewSqlAuditStore(sqlStore SqlStore) AuditStore { s := &SqlAuditStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_channel_store.go b/store/sql_channel_store.go index e25387f50..c009d64d3 100644 --- a/store/sql_channel_store.go +++ b/store/sql_channel_store.go @@ -35,7 +35,7 @@ const ( ) type SqlChannelStore struct { - *SqlStore + SqlStore } var channelMemberCountsCache = utils.NewLru(CHANNEL_MEMBERS_COUNTS_CACHE_SIZE) @@ -52,7 +52,7 @@ func ClearChannelCaches() { channelByNameCache.Purge() } -func NewSqlChannelStore(sqlStore *SqlStore) ChannelStore { +func NewSqlChannelStore(sqlStore SqlStore) ChannelStore { s := &SqlChannelStore{sqlStore} for _, db := range sqlStore.GetAllConns() 
{ diff --git a/store/sql_cluster_discovery_store.go b/store/sql_cluster_discovery_store.go index 81d3d6e11..1fbfec0c0 100644 --- a/store/sql_cluster_discovery_store.go +++ b/store/sql_cluster_discovery_store.go @@ -8,10 +8,10 @@ import ( ) type sqlClusterDiscoveryStore struct { - *SqlStore + SqlStore } -func NewSqlClusterDiscoveryStore(sqlStore *SqlStore) ClusterDiscoveryStore { +func NewSqlClusterDiscoveryStore(sqlStore SqlStore) ClusterDiscoveryStore { s := &sqlClusterDiscoveryStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_command_store.go b/store/sql_command_store.go index 334c6daa7..a80d1dbf4 100644 --- a/store/sql_command_store.go +++ b/store/sql_command_store.go @@ -8,10 +8,10 @@ import ( ) type SqlCommandStore struct { - *SqlStore + SqlStore } -func NewSqlCommandStore(sqlStore *SqlStore) CommandStore { +func NewSqlCommandStore(sqlStore SqlStore) CommandStore { s := &SqlCommandStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_compliance_store.go b/store/sql_compliance_store.go index ad502a795..4ffbcf113 100644 --- a/store/sql_compliance_store.go +++ b/store/sql_compliance_store.go @@ -11,10 +11,10 @@ import ( ) type SqlComplianceStore struct { - *SqlStore + SqlStore } -func NewSqlComplianceStore(sqlStore *SqlStore) ComplianceStore { +func NewSqlComplianceStore(sqlStore SqlStore) ComplianceStore { s := &SqlComplianceStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_emoji_store.go b/store/sql_emoji_store.go index 2b7f0d0b3..a51020bba 100644 --- a/store/sql_emoji_store.go +++ b/store/sql_emoji_store.go @@ -17,10 +17,10 @@ const ( var emojiCache *utils.Cache = utils.NewLru(EMOJI_CACHE_SIZE) type SqlEmojiStore struct { - *SqlStore + SqlStore } -func NewSqlEmojiStore(sqlStore *SqlStore) EmojiStore { +func NewSqlEmojiStore(sqlStore SqlStore) EmojiStore { s := &SqlEmojiStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_file_info_store.go b/store/sql_file_info_store.go index bd0362db0..3fe62121c 100644 --- a/store/sql_file_info_store.go +++ b/store/sql_file_info_store.go @@ -12,7 +12,7 @@ import ( ) type SqlFileInfoStore struct { - *SqlStore + SqlStore } const ( @@ -26,7 +26,7 @@ func ClearFileCaches() { fileInfoCache.Purge() } -func NewSqlFileInfoStore(sqlStore *SqlStore) FileInfoStore { +func NewSqlFileInfoStore(sqlStore SqlStore) FileInfoStore { s := &SqlFileInfoStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_job_status_store.go b/store/sql_job_status_store.go index ef039d99a..a87b8267b 100644 --- a/store/sql_job_status_store.go +++ b/store/sql_job_status_store.go @@ -11,10 +11,10 @@ import ( ) type SqlJobStatusStore struct { - *SqlStore + SqlStore } -func NewSqlJobStatusStore(sqlStore *SqlStore) JobStatusStore { +func NewSqlJobStatusStore(sqlStore SqlStore) JobStatusStore { s := &SqlJobStatusStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_license_store.go b/store/sql_license_store.go index 315659c1e..6f40cb685 100644 --- a/store/sql_license_store.go +++ b/store/sql_license_store.go @@ -8,10 +8,10 @@ import ( ) type SqlLicenseStore struct { - *SqlStore + SqlStore } -func NewSqlLicenseStore(sqlStore *SqlStore) LicenseStore { +func NewSqlLicenseStore(sqlStore SqlStore) LicenseStore { ls := &SqlLicenseStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_oauth_store.go b/store/sql_oauth_store.go index 0849353a0..8637055ae 100644 --- a/store/sql_oauth_store.go +++ 
b/store/sql_oauth_store.go @@ -12,10 +12,10 @@ import ( ) type SqlOAuthStore struct { - *SqlStore + SqlStore } -func NewSqlOAuthStore(sqlStore *SqlStore) OAuthStore { +func NewSqlOAuthStore(sqlStore SqlStore) OAuthStore { as := &SqlOAuthStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_post_store.go b/store/sql_post_store.go index 388414cd9..6db2d5992 100644 --- a/store/sql_post_store.go +++ b/store/sql_post_store.go @@ -17,7 +17,7 @@ import ( ) type SqlPostStore struct { - *SqlStore + SqlStore } const ( @@ -36,7 +36,7 @@ func ClearPostCaches() { lastPostsCache.Purge() } -func NewSqlPostStore(sqlStore *SqlStore) PostStore { +func NewSqlPostStore(sqlStore SqlStore) PostStore { s := &SqlPostStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_preference_store.go b/store/sql_preference_store.go index 231da069a..5d94f2477 100644 --- a/store/sql_preference_store.go +++ b/store/sql_preference_store.go @@ -11,14 +11,14 @@ import ( ) type SqlPreferenceStore struct { - *SqlStore + SqlStore } const ( FEATURE_TOGGLE_PREFIX = "feature_enabled_" ) -func NewSqlPreferenceStore(sqlStore *SqlStore) PreferenceStore { +func NewSqlPreferenceStore(sqlStore SqlStore) PreferenceStore { s := &SqlPreferenceStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_preference_store_test.go b/store/sql_preference_store_test.go index c0df7a85b..b94a39707 100644 --- a/store/sql_preference_store_test.go +++ b/store/sql_preference_store_test.go @@ -4,8 +4,9 @@ package store import ( - "github.com/mattermost/platform/model" "testing" + + "github.com/mattermost/platform/model" ) func TestPreferenceSave(t *testing.T) { @@ -343,10 +344,10 @@ func TestDeleteUnusedFeatures(t *testing.T) { Must(store.Preference().Save(&features)) - store.(*SqlStore).preference.(*SqlPreferenceStore).DeleteUnusedFeatures() + store.Preference().(*SqlPreferenceStore).DeleteUnusedFeatures() //make sure features with value "false" have actually been deleted from the database - if val, err := store.(*SqlStore).preference.(*SqlPreferenceStore).GetReplica().SelectInt(`SELECT COUNT(*) + if val, err := store.Preference().(*SqlPreferenceStore).GetReplica().SelectInt(`SELECT COUNT(*) FROM Preferences WHERE Category = :Category AND Value = :Val @@ -357,7 +358,7 @@ func TestDeleteUnusedFeatures(t *testing.T) { } // // make sure features with value "true" remain saved - if val, err := store.(*SqlStore).preference.(*SqlPreferenceStore).GetReplica().SelectInt(`SELECT COUNT(*) + if val, err := store.Preference().(*SqlPreferenceStore).GetReplica().SelectInt(`SELECT COUNT(*) FROM Preferences WHERE Category = :Category AND Value = :Val diff --git a/store/sql_reaction_store.go b/store/sql_reaction_store.go index 1b927c106..87845421e 100644 --- a/store/sql_reaction_store.go +++ b/store/sql_reaction_store.go @@ -20,10 +20,10 @@ const ( var reactionCache *utils.Cache = utils.NewLru(REACTION_CACHE_SIZE) type SqlReactionStore struct { - *SqlStore + SqlStore } -func NewSqlReactionStore(sqlStore *SqlStore) ReactionStore { +func NewSqlReactionStore(sqlStore SqlStore) ReactionStore { s := &SqlReactionStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_session_store.go b/store/sql_session_store.go index d7ea7073b..e9310fc36 100644 --- a/store/sql_session_store.go +++ b/store/sql_session_store.go @@ -10,10 +10,10 @@ import ( ) type SqlSessionStore struct { - *SqlStore + SqlStore } -func NewSqlSessionStore(sqlStore *SqlStore) SessionStore { +func 
NewSqlSessionStore(sqlStore SqlStore) SessionStore { us := &SqlSessionStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_status_store.go b/store/sql_status_store.go index d57a70f46..005b858c6 100644 --- a/store/sql_status_store.go +++ b/store/sql_status_store.go @@ -15,10 +15,10 @@ const ( ) type SqlStatusStore struct { - *SqlStore + SqlStore } -func NewSqlStatusStore(sqlStore *SqlStore) StatusStore { +func NewSqlStatusStore(sqlStore SqlStore) StatusStore { s := &SqlStatusStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_store.go b/store/sql_store.go index ee2c678f6..dc3b51d0c 100644 --- a/store/sql_store.go +++ b/store/sql_store.go @@ -4,936 +4,79 @@ package store import ( - "context" - "crypto/aes" - "crypto/cipher" - "crypto/hmac" - crand "crypto/rand" - "crypto/sha256" - "crypto/sha512" - dbsql "database/sql" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - sqltrace "log" - "os" - "strings" - "sync/atomic" - "time" - - l4g "github.com/alecthomas/log4go" - _ "github.com/go-sql-driver/mysql" _ "github.com/lib/pq" "github.com/mattermost/gorp" - "github.com/mattermost/platform/model" - "github.com/mattermost/platform/utils" -) - -const ( - INDEX_TYPE_FULL_TEXT = "full_text" - INDEX_TYPE_DEFAULT = "default" - MAX_DB_CONN_LIFETIME = 60 - DB_PING_ATTEMPTS = 18 - DB_PING_TIMEOUT_SECS = 10 ) -const ( - EXIT_CREATE_TABLE = 100 - EXIT_DB_OPEN = 101 - EXIT_PING = 102 - EXIT_NO_DRIVER = 103 - EXIT_TABLE_EXISTS = 104 - EXIT_TABLE_EXISTS_MYSQL = 105 - EXIT_COLUMN_EXISTS = 106 - EXIT_DOES_COLUMN_EXISTS_POSTGRES = 107 - EXIT_DOES_COLUMN_EXISTS_MYSQL = 108 - EXIT_DOES_COLUMN_EXISTS_MISSING = 109 - EXIT_CREATE_COLUMN_POSTGRES = 110 - EXIT_CREATE_COLUMN_MYSQL = 111 - EXIT_CREATE_COLUMN_MISSING = 112 - EXIT_REMOVE_COLUMN = 113 - EXIT_RENAME_COLUMN = 114 - EXIT_MAX_COLUMN = 115 - EXIT_ALTER_COLUMN = 116 - EXIT_CREATE_INDEX_POSTGRES = 117 - EXIT_CREATE_INDEX_MYSQL = 118 - EXIT_CREATE_INDEX_FULL_MYSQL = 119 - EXIT_CREATE_INDEX_MISSING = 120 - EXIT_REMOVE_INDEX_POSTGRES = 121 - EXIT_REMOVE_INDEX_MYSQL = 122 - EXIT_REMOVE_INDEX_MISSING = 123 - EXIT_REMOVE_TABLE = 134 -) - -type SqlStore struct { - master *gorp.DbMap - replicas []*gorp.DbMap - searchReplicas []*gorp.DbMap - team TeamStore - channel ChannelStore - post PostStore - user UserStore - audit AuditStore - clusterDiscovery ClusterDiscoveryStore - compliance ComplianceStore - session SessionStore - oauth OAuthStore - system SystemStore - webhook WebhookStore - command CommandStore - preference PreferenceStore - license LicenseStore - token TokenStore - emoji EmojiStore - status StatusStore - fileInfo FileInfoStore - reaction ReactionStore - jobStatus JobStatusStore - SchemaVersion string - rrCounter int64 - srCounter int64 -} - -func initConnection() *SqlStore { - sqlStore := &SqlStore{ - rrCounter: 0, - srCounter: 0, - } - - sqlStore.master = setupConnection("master", utils.Cfg.SqlSettings.DriverName, - utils.Cfg.SqlSettings.DataSource, utils.Cfg.SqlSettings.MaxIdleConns, - utils.Cfg.SqlSettings.MaxOpenConns, utils.Cfg.SqlSettings.Trace) - - if len(utils.Cfg.SqlSettings.DataSourceReplicas) == 0 { - sqlStore.replicas = make([]*gorp.DbMap, 1) - sqlStore.replicas[0] = sqlStore.master - } else { - sqlStore.replicas = make([]*gorp.DbMap, len(utils.Cfg.SqlSettings.DataSourceReplicas)) - for i, replica := range utils.Cfg.SqlSettings.DataSourceReplicas { - sqlStore.replicas[i] = setupConnection(fmt.Sprintf("replica-%v", i), utils.Cfg.SqlSettings.DriverName, replica, - 
utils.Cfg.SqlSettings.MaxIdleConns, utils.Cfg.SqlSettings.MaxOpenConns, - utils.Cfg.SqlSettings.Trace) - } - } - - if len(utils.Cfg.SqlSettings.DataSourceSearchReplicas) == 0 { - sqlStore.searchReplicas = sqlStore.replicas - } else { - sqlStore.searchReplicas = make([]*gorp.DbMap, len(utils.Cfg.SqlSettings.DataSourceSearchReplicas)) - for i, replica := range utils.Cfg.SqlSettings.DataSourceSearchReplicas { - sqlStore.searchReplicas[i] = setupConnection(fmt.Sprintf("search-replica-%v", i), utils.Cfg.SqlSettings.DriverName, replica, - utils.Cfg.SqlSettings.MaxIdleConns, utils.Cfg.SqlSettings.MaxOpenConns, - utils.Cfg.SqlSettings.Trace) - } - } - - sqlStore.SchemaVersion = sqlStore.GetCurrentSchemaVersion() - return sqlStore -} - -func NewSqlStore() Store { - - sqlStore := initConnection() - - sqlStore.team = NewSqlTeamStore(sqlStore) - sqlStore.channel = NewSqlChannelStore(sqlStore) - sqlStore.post = NewSqlPostStore(sqlStore) - sqlStore.user = NewSqlUserStore(sqlStore) - sqlStore.audit = NewSqlAuditStore(sqlStore) - sqlStore.clusterDiscovery = NewSqlClusterDiscoveryStore(sqlStore) - sqlStore.compliance = NewSqlComplianceStore(sqlStore) - sqlStore.session = NewSqlSessionStore(sqlStore) - sqlStore.oauth = NewSqlOAuthStore(sqlStore) - sqlStore.system = NewSqlSystemStore(sqlStore) - sqlStore.webhook = NewSqlWebhookStore(sqlStore) - sqlStore.command = NewSqlCommandStore(sqlStore) - sqlStore.preference = NewSqlPreferenceStore(sqlStore) - sqlStore.license = NewSqlLicenseStore(sqlStore) - sqlStore.token = NewSqlTokenStore(sqlStore) - sqlStore.emoji = NewSqlEmojiStore(sqlStore) - sqlStore.status = NewSqlStatusStore(sqlStore) - sqlStore.fileInfo = NewSqlFileInfoStore(sqlStore) - sqlStore.reaction = NewSqlReactionStore(sqlStore) - sqlStore.jobStatus = NewSqlJobStatusStore(sqlStore) - - err := sqlStore.master.CreateTablesIfNotExists() - if err != nil { - l4g.Critical(utils.T("store.sql.creating_tables.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_CREATE_TABLE) - } - - UpgradeDatabase(sqlStore) - - sqlStore.team.(*SqlTeamStore).CreateIndexesIfNotExists() - sqlStore.channel.(*SqlChannelStore).CreateIndexesIfNotExists() - sqlStore.post.(*SqlPostStore).CreateIndexesIfNotExists() - sqlStore.user.(*SqlUserStore).CreateIndexesIfNotExists() - sqlStore.audit.(*SqlAuditStore).CreateIndexesIfNotExists() - sqlStore.compliance.(*SqlComplianceStore).CreateIndexesIfNotExists() - sqlStore.session.(*SqlSessionStore).CreateIndexesIfNotExists() - sqlStore.oauth.(*SqlOAuthStore).CreateIndexesIfNotExists() - sqlStore.system.(*SqlSystemStore).CreateIndexesIfNotExists() - sqlStore.webhook.(*SqlWebhookStore).CreateIndexesIfNotExists() - sqlStore.command.(*SqlCommandStore).CreateIndexesIfNotExists() - sqlStore.preference.(*SqlPreferenceStore).CreateIndexesIfNotExists() - sqlStore.license.(*SqlLicenseStore).CreateIndexesIfNotExists() - sqlStore.token.(*SqlTokenStore).CreateIndexesIfNotExists() - sqlStore.emoji.(*SqlEmojiStore).CreateIndexesIfNotExists() - sqlStore.status.(*SqlStatusStore).CreateIndexesIfNotExists() - sqlStore.fileInfo.(*SqlFileInfoStore).CreateIndexesIfNotExists() - sqlStore.reaction.(*SqlReactionStore).CreateIndexesIfNotExists() - sqlStore.jobStatus.(*SqlJobStatusStore).CreateIndexesIfNotExists() - - sqlStore.preference.(*SqlPreferenceStore).DeleteUnusedFeatures() - - return sqlStore -} - -func setupConnection(con_type string, driver string, dataSource string, maxIdle int, maxOpen int, trace bool) *gorp.DbMap { - - db, err := dbsql.Open(driver, dataSource) - if err != nil { - 
l4g.Critical(utils.T("store.sql.open_conn.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_DB_OPEN) - } - - for i := 0; i < DB_PING_ATTEMPTS; i++ { - l4g.Info("Pinging SQL %v database", con_type) - ctx, cancel := context.WithTimeout(context.Background(), DB_PING_TIMEOUT_SECS*time.Second) - defer cancel() - err = db.PingContext(ctx) - if err == nil { - break - } else { - if i == DB_PING_ATTEMPTS-1 { - l4g.Critical("Failed to ping DB, server will exit err=%v", err) - time.Sleep(time.Second) - os.Exit(EXIT_PING) - } else { - l4g.Error("Failed to ping DB retrying in %v seconds err=%v", DB_PING_TIMEOUT_SECS, err) - time.Sleep(DB_PING_TIMEOUT_SECS * time.Second) - } - } - } - - db.SetMaxIdleConns(maxIdle) - db.SetMaxOpenConns(maxOpen) - db.SetConnMaxLifetime(time.Duration(MAX_DB_CONN_LIFETIME) * time.Minute) - - var dbmap *gorp.DbMap - - connectionTimeout := time.Duration(*utils.Cfg.SqlSettings.QueryTimeout) * time.Second - - if driver == "sqlite3" { - dbmap = &gorp.DbMap{Db: db, TypeConverter: mattermConverter{}, Dialect: gorp.SqliteDialect{}, QueryTimeout: connectionTimeout} - } else if driver == model.DATABASE_DRIVER_MYSQL { - dbmap = &gorp.DbMap{Db: db, TypeConverter: mattermConverter{}, Dialect: gorp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8MB4"}, QueryTimeout: connectionTimeout} - } else if driver == model.DATABASE_DRIVER_POSTGRES { - dbmap = &gorp.DbMap{Db: db, TypeConverter: mattermConverter{}, Dialect: gorp.PostgresDialect{}, QueryTimeout: connectionTimeout} - } else { - l4g.Critical(utils.T("store.sql.dialect_driver.critical")) - time.Sleep(time.Second) - os.Exit(EXIT_NO_DRIVER) - } - - if trace { - dbmap.TraceOn("", sqltrace.New(os.Stdout, "sql-trace:", sqltrace.Lmicroseconds)) - } - - return dbmap -} - -func (ss *SqlStore) TotalMasterDbConnections() int { - return ss.GetMaster().Db.Stats().OpenConnections -} - -func (ss *SqlStore) TotalReadDbConnections() int { - - if len(utils.Cfg.SqlSettings.DataSourceReplicas) == 0 { - return 0 - } - - count := 0 - for _, db := range ss.replicas { - count = count + db.Db.Stats().OpenConnections - } - - return count -} - -func (ss *SqlStore) TotalSearchDbConnections() int { - if len(utils.Cfg.SqlSettings.DataSourceSearchReplicas) == 0 { - return 0 - } - - count := 0 - for _, db := range ss.searchReplicas { - count = count + db.Db.Stats().OpenConnections - } - - return count -} - -func (ss *SqlStore) GetCurrentSchemaVersion() string { - version, _ := ss.GetMaster().SelectStr("SELECT Value FROM Systems WHERE Name='Version'") - return version -} - -func (ss *SqlStore) MarkSystemRanUnitTests() { - if result := <-ss.System().Get(); result.Err == nil { - props := result.Data.(model.StringMap) - unitTests := props[model.SYSTEM_RAN_UNIT_TESTS] - if len(unitTests) == 0 { - systemTests := &model.System{Name: model.SYSTEM_RAN_UNIT_TESTS, Value: "1"} - <-ss.System().Save(systemTests) - } - } -} - -func (ss *SqlStore) DoesTableExist(tableName string) bool { - if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { - count, err := ss.GetMaster().SelectInt( - `SELECT count(relname) FROM pg_class WHERE relname=$1`, - strings.ToLower(tableName), - ) - - if err != nil { - l4g.Critical(utils.T("store.sql.table_exists.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_TABLE_EXISTS) - } - - return count > 0 - - } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { - - count, err := ss.GetMaster().SelectInt( - `SELECT - COUNT(0) AS table_exists - FROM - information_schema.TABLES - WHERE - TABLE_SCHEMA = 
DATABASE() - AND TABLE_NAME = ? - `, - tableName, - ) - - if err != nil { - l4g.Critical(utils.T("store.sql.table_exists.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_TABLE_EXISTS_MYSQL) - } - - return count > 0 - - } else { - l4g.Critical(utils.T("store.sql.column_exists_missing_driver.critical")) - time.Sleep(time.Second) - os.Exit(EXIT_COLUMN_EXISTS) - return false - } -} - -func (ss *SqlStore) DoesColumnExist(tableName string, columnName string) bool { - if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { - count, err := ss.GetMaster().SelectInt( - `SELECT COUNT(0) - FROM pg_attribute - WHERE attrelid = $1::regclass - AND attname = $2 - AND NOT attisdropped`, - strings.ToLower(tableName), - strings.ToLower(columnName), - ) - - if err != nil { - if err.Error() == "pq: relation \""+strings.ToLower(tableName)+"\" does not exist" { - return false - } - - l4g.Critical(utils.T("store.sql.column_exists.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_DOES_COLUMN_EXISTS_POSTGRES) - } - - return count > 0 - - } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { - - count, err := ss.GetMaster().SelectInt( - `SELECT - COUNT(0) AS column_exists - FROM - information_schema.COLUMNS - WHERE - TABLE_SCHEMA = DATABASE() - AND TABLE_NAME = ? - AND COLUMN_NAME = ?`, - tableName, - columnName, - ) - - if err != nil { - l4g.Critical(utils.T("store.sql.column_exists.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_DOES_COLUMN_EXISTS_MYSQL) - } - - return count > 0 - - } else { - l4g.Critical(utils.T("store.sql.column_exists_missing_driver.critical")) - time.Sleep(time.Second) - os.Exit(EXIT_DOES_COLUMN_EXISTS_MISSING) - return false - } -} - -func (ss *SqlStore) CreateColumnIfNotExists(tableName string, columnName string, mySqlColType string, postgresColType string, defaultValue string) bool { - - if ss.DoesColumnExist(tableName, columnName) { - return false - } - - if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { - _, err := ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " ADD " + columnName + " " + postgresColType + " DEFAULT '" + defaultValue + "'") - if err != nil { - l4g.Critical(utils.T("store.sql.create_column.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_CREATE_COLUMN_POSTGRES) - } - - return true - - } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { - _, err := ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " ADD " + columnName + " " + mySqlColType + " DEFAULT '" + defaultValue + "'") - if err != nil { - l4g.Critical(utils.T("store.sql.create_column.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_CREATE_COLUMN_MYSQL) - } - - return true - - } else { - l4g.Critical(utils.T("store.sql.create_column_missing_driver.critical")) - time.Sleep(time.Second) - os.Exit(EXIT_CREATE_COLUMN_MISSING) - return false - } -} - -func (ss *SqlStore) RemoveColumnIfExists(tableName string, columnName string) bool { - - if !ss.DoesColumnExist(tableName, columnName) { - return false - } - - _, err := ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " DROP COLUMN " + columnName) - if err != nil { - l4g.Critical("Failed to drop column %v", err) - time.Sleep(time.Second) - os.Exit(EXIT_REMOVE_COLUMN) - } - - return true -} - -func (ss *SqlStore) RemoveTableIfExists(tableName string) bool { - if !ss.DoesTableExist(tableName) { - return false - } - - _, err := ss.GetMaster().ExecNoTimeout("DROP TABLE " + tableName) - if err != nil { - l4g.Critical("Failed 
to drop table %v", err) - time.Sleep(time.Second) - os.Exit(EXIT_REMOVE_TABLE) - } - - return true -} - -func (ss *SqlStore) RenameColumnIfExists(tableName string, oldColumnName string, newColumnName string, colType string) bool { - if !ss.DoesColumnExist(tableName, oldColumnName) { - return false - } - - var err error - if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { - _, err = ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " CHANGE " + oldColumnName + " " + newColumnName + " " + colType) - } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { - _, err = ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " RENAME COLUMN " + oldColumnName + " TO " + newColumnName) - } - - if err != nil { - l4g.Critical(utils.T("store.sql.rename_column.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_RENAME_COLUMN) - } - - return true -} - -func (ss *SqlStore) GetMaxLengthOfColumnIfExists(tableName string, columnName string) string { - if !ss.DoesColumnExist(tableName, columnName) { - return "" - } - - var result string - var err error - if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { - result, err = ss.GetMaster().SelectStr("SELECT CHARACTER_MAXIMUM_LENGTH FROM information_schema.columns WHERE table_name = '" + tableName + "' AND COLUMN_NAME = '" + columnName + "'") - } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { - result, err = ss.GetMaster().SelectStr("SELECT character_maximum_length FROM information_schema.columns WHERE table_name = '" + strings.ToLower(tableName) + "' AND column_name = '" + strings.ToLower(columnName) + "'") - } - - if err != nil { - l4g.Critical(utils.T("store.sql.maxlength_column.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_MAX_COLUMN) - } - - return result -} - -func (ss *SqlStore) AlterColumnTypeIfExists(tableName string, columnName string, mySqlColType string, postgresColType string) bool { - if !ss.DoesColumnExist(tableName, columnName) { - return false - } - - var err error - if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { - _, err = ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " MODIFY " + columnName + " " + mySqlColType) - } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { - _, err = ss.GetMaster().ExecNoTimeout("ALTER TABLE " + strings.ToLower(tableName) + " ALTER COLUMN " + strings.ToLower(columnName) + " TYPE " + postgresColType) - } - - if err != nil { - l4g.Critical(utils.T("store.sql.alter_column_type.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_ALTER_COLUMN) - } - - return true -} - -func (ss *SqlStore) CreateUniqueIndexIfNotExists(indexName string, tableName string, columnName string) bool { - return ss.createIndexIfNotExists(indexName, tableName, columnName, INDEX_TYPE_DEFAULT, true) -} - -func (ss *SqlStore) CreateIndexIfNotExists(indexName string, tableName string, columnName string) bool { - return ss.createIndexIfNotExists(indexName, tableName, columnName, INDEX_TYPE_DEFAULT, false) -} - -func (ss *SqlStore) CreateFullTextIndexIfNotExists(indexName string, tableName string, columnName string) bool { - return ss.createIndexIfNotExists(indexName, tableName, columnName, INDEX_TYPE_FULL_TEXT, false) -} - -func (ss *SqlStore) createIndexIfNotExists(indexName string, tableName string, columnName string, indexType string, unique bool) bool { - - uniqueStr := "" - if unique { - uniqueStr = "UNIQUE " - } - - if utils.Cfg.SqlSettings.DriverName == 
model.DATABASE_DRIVER_POSTGRES { - _, err := ss.GetMaster().SelectStr("SELECT $1::regclass", indexName) - // It should fail if the index does not exist - if err == nil { - return false - } - - query := "" - if indexType == INDEX_TYPE_FULL_TEXT { - postgresColumnNames := convertMySQLFullTextColumnsToPostgres(columnName) - query = "CREATE INDEX " + indexName + " ON " + tableName + " USING gin(to_tsvector('english', " + postgresColumnNames + "))" - } else { - query = "CREATE " + uniqueStr + "INDEX " + indexName + " ON " + tableName + " (" + columnName + ")" - } - - _, err = ss.GetMaster().ExecNoTimeout(query) - if err != nil { - l4g.Critical(utils.T("store.sql.create_index.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_CREATE_INDEX_POSTGRES) - } - } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { - - count, err := ss.GetMaster().SelectInt("SELECT COUNT(0) AS index_exists FROM information_schema.statistics WHERE TABLE_SCHEMA = DATABASE() and table_name = ? AND index_name = ?", tableName, indexName) - if err != nil { - l4g.Critical(utils.T("store.sql.check_index.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_CREATE_INDEX_MYSQL) - } - - if count > 0 { - return false - } - - fullTextIndex := "" - if indexType == INDEX_TYPE_FULL_TEXT { - fullTextIndex = " FULLTEXT " - } - - _, err = ss.GetMaster().ExecNoTimeout("CREATE " + uniqueStr + fullTextIndex + " INDEX " + indexName + " ON " + tableName + " (" + columnName + ")") - if err != nil { - l4g.Critical(utils.T("store.sql.create_index.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_CREATE_INDEX_FULL_MYSQL) - } - } else { - l4g.Critical(utils.T("store.sql.create_index_missing_driver.critical")) - time.Sleep(time.Second) - os.Exit(EXIT_CREATE_INDEX_MISSING) - } - - return true -} - -func (ss *SqlStore) RemoveIndexIfExists(indexName string, tableName string) bool { - - if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { - _, err := ss.GetMaster().SelectStr("SELECT $1::regclass", indexName) - // It should fail if the index does not exist - if err != nil { - return false - } - - _, err = ss.GetMaster().ExecNoTimeout("DROP INDEX " + indexName) - if err != nil { - l4g.Critical(utils.T("store.sql.remove_index.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_REMOVE_INDEX_POSTGRES) - } - - return true - } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { - - count, err := ss.GetMaster().SelectInt("SELECT COUNT(0) AS index_exists FROM information_schema.statistics WHERE TABLE_SCHEMA = DATABASE() and table_name = ? 
AND index_name = ?", tableName, indexName) - if err != nil { - l4g.Critical(utils.T("store.sql.check_index.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_REMOVE_INDEX_MYSQL) - } - - if count <= 0 { - return false - } - - _, err = ss.GetMaster().ExecNoTimeout("DROP INDEX " + indexName + " ON " + tableName) - if err != nil { - l4g.Critical(utils.T("store.sql.remove_index.critical"), err) - time.Sleep(time.Second) - os.Exit(EXIT_REMOVE_INDEX_MYSQL) - } - } else { - l4g.Critical(utils.T("store.sql.create_index_missing_driver.critical")) - time.Sleep(time.Second) - os.Exit(EXIT_REMOVE_INDEX_MISSING) - } - - return true -} - -func IsUniqueConstraintError(err string, indexName []string) bool { - unique := strings.Contains(err, "unique constraint") || strings.Contains(err, "Duplicate entry") - field := false - for _, contain := range indexName { - if strings.Contains(err, contain) { - field = true - break - } - } - - return unique && field -} - -func (ss *SqlStore) GetMaster() *gorp.DbMap { - return ss.master -} - -func (ss *SqlStore) GetSearchReplica() *gorp.DbMap { - rrNum := atomic.AddInt64(&ss.srCounter, 1) % int64(len(ss.searchReplicas)) - return ss.searchReplicas[rrNum] -} - -func (ss *SqlStore) GetReplica() *gorp.DbMap { - rrNum := atomic.AddInt64(&ss.rrCounter, 1) % int64(len(ss.replicas)) - return ss.replicas[rrNum] -} - -func (ss *SqlStore) GetAllConns() []*gorp.DbMap { - all := make([]*gorp.DbMap, len(ss.replicas)+1) - copy(all, ss.replicas) - all[len(ss.replicas)] = ss.master - return all -} - -func (ss *SqlStore) Close() { - l4g.Info(utils.T("store.sql.closing.info")) - ss.master.Db.Close() - for _, replica := range ss.replicas { - replica.Db.Close() - } -} - -func (ss *SqlStore) Team() TeamStore { - return ss.team -} - -func (ss *SqlStore) Channel() ChannelStore { - return ss.channel -} - -func (ss *SqlStore) Post() PostStore { - return ss.post -} - -func (ss *SqlStore) User() UserStore { - return ss.user -} - -func (ss *SqlStore) Session() SessionStore { - return ss.session -} - -func (ss *SqlStore) Audit() AuditStore { - return ss.audit -} - -func (ss *SqlStore) ClusterDiscovery() ClusterDiscoveryStore { - return ss.clusterDiscovery -} - -func (ss *SqlStore) Compliance() ComplianceStore { - return ss.compliance -} - -func (ss *SqlStore) OAuth() OAuthStore { - return ss.oauth -} - -func (ss *SqlStore) System() SystemStore { - return ss.system -} - -func (ss *SqlStore) Webhook() WebhookStore { - return ss.webhook -} - -func (ss *SqlStore) Command() CommandStore { - return ss.command -} - -func (ss *SqlStore) Preference() PreferenceStore { - return ss.preference -} - -func (ss *SqlStore) License() LicenseStore { - return ss.license -} - -func (ss *SqlStore) Token() TokenStore { - return ss.token -} - -func (ss *SqlStore) Emoji() EmojiStore { - return ss.emoji -} - -func (ss *SqlStore) Status() StatusStore { - return ss.status -} - -func (ss *SqlStore) FileInfo() FileInfoStore { - return ss.fileInfo -} - -func (ss *SqlStore) Reaction() ReactionStore { - return ss.reaction -} - -func (ss *SqlStore) JobStatus() JobStatusStore { - return ss.jobStatus -} - -func (ss *SqlStore) DropAllTables() { - ss.master.TruncateTables() -} - -type mattermConverter struct{} - -func (me mattermConverter) ToDb(val interface{}) (interface{}, error) { - - switch t := val.(type) { - case model.StringMap: - return model.MapToJson(t), nil - case model.StringArray: - return model.ArrayToJson(t), nil - case model.EncryptStringMap: - return encrypt([]byte(utils.Cfg.SqlSettings.AtRestEncryptKey), 
model.MapToJson(t)) - case model.StringInterface: - return model.StringInterfaceToJson(t), nil - case map[string]interface{}: - return model.StringInterfaceToJson(model.StringInterface(t)), nil - } - - return val, nil -} - -func (me mattermConverter) FromDb(target interface{}) (gorp.CustomScanner, bool) { - switch target.(type) { - case *model.StringMap: - binder := func(holder, target interface{}) error { - s, ok := holder.(*string) - if !ok { - return errors.New(utils.T("store.sql.convert_string_map")) - } - b := []byte(*s) - return json.Unmarshal(b, target) - } - return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true - case *model.StringArray: - binder := func(holder, target interface{}) error { - s, ok := holder.(*string) - if !ok { - return errors.New(utils.T("store.sql.convert_string_array")) - } - b := []byte(*s) - return json.Unmarshal(b, target) - } - return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true - case *model.EncryptStringMap: - binder := func(holder, target interface{}) error { - s, ok := holder.(*string) - if !ok { - return errors.New(utils.T("store.sql.convert_encrypt_string_map")) - } - - ue, err := decrypt([]byte(utils.Cfg.SqlSettings.AtRestEncryptKey), *s) - if err != nil { - return err - } - - b := []byte(ue) - return json.Unmarshal(b, target) - } - return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true - case *model.StringInterface: - binder := func(holder, target interface{}) error { - s, ok := holder.(*string) - if !ok { - return errors.New(utils.T("store.sql.convert_string_interface")) - } - b := []byte(*s) - return json.Unmarshal(b, target) - } - return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true - case *map[string]interface{}: - binder := func(holder, target interface{}) error { - s, ok := holder.(*string) - if !ok { - return errors.New(utils.T("store.sql.convert_string_interface")) - } - b := []byte(*s) - return json.Unmarshal(b, target) - } - return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true - } - - return gorp.CustomScanner{}, false -} - -func convertMySQLFullTextColumnsToPostgres(columnNames string) string { - columns := strings.Split(columnNames, ", ") - concatenatedColumnNames := "" - for i, c := range columns { - concatenatedColumnNames += c - if i < len(columns)-1 { - concatenatedColumnNames += " || ' ' || " - } - } - - return concatenatedColumnNames -} - -func encrypt(key []byte, text string) (string, error) { - - if text == "" || text == "{}" { - return "", nil - } - - plaintext := []byte(text) - skey := sha512.Sum512(key) - ekey, akey := skey[:32], skey[32:] - - block, err := aes.NewCipher(ekey) - if err != nil { - return "", err - } - - macfn := hmac.New(sha256.New, akey) - ciphertext := make([]byte, aes.BlockSize+macfn.Size()+len(plaintext)) - iv := ciphertext[:aes.BlockSize] - if _, err := io.ReadFull(crand.Reader, iv); err != nil { - return "", err - } - - stream := cipher.NewCFBEncrypter(block, iv) - stream.XORKeyStream(ciphertext[aes.BlockSize+macfn.Size():], plaintext) - macfn.Write(ciphertext[aes.BlockSize+macfn.Size():]) - mac := macfn.Sum(nil) - copy(ciphertext[aes.BlockSize:aes.BlockSize+macfn.Size()], mac) - - return base64.URLEncoding.EncodeToString(ciphertext), nil -} - -func decrypt(key []byte, cryptoText string) (string, error) { - - if cryptoText == "" || cryptoText == "{}" { - return "{}", nil - } - - ciphertext, err := base64.URLEncoding.DecodeString(cryptoText) - if err 
!= nil { - return "", err - } - - skey := sha512.Sum512(key) - ekey, akey := skey[:32], skey[32:] - macfn := hmac.New(sha256.New, akey) - if len(ciphertext) < aes.BlockSize+macfn.Size() { - return "", errors.New(utils.T("store.sql.short_ciphertext")) - } - - macfn.Write(ciphertext[aes.BlockSize+macfn.Size():]) - expectedMac := macfn.Sum(nil) - mac := ciphertext[aes.BlockSize : aes.BlockSize+macfn.Size()] - if hmac.Equal(expectedMac, mac) != true { - return "", errors.New(utils.T("store.sql.incorrect_mac")) - } - - block, err := aes.NewCipher(ekey) - if err != nil { - return "", err - } - - if len(ciphertext) < aes.BlockSize { - return "", errors.New(utils.T("store.sql.too_short_ciphertext")) - } - iv := ciphertext[:aes.BlockSize] - ciphertext = ciphertext[aes.BlockSize+macfn.Size():] - - stream := cipher.NewCFBDecrypter(block, iv) - - stream.XORKeyStream(ciphertext, ciphertext) - - return fmt.Sprintf("%s", ciphertext), nil +/*type SqlStore struct { + master *gorp.DbMap + replicas []*gorp.DbMap + searchReplicas []*gorp.DbMap + team TeamStore + channel ChannelStore + post PostStore + user UserStore + audit AuditStore + compliance ComplianceStore + session SessionStore + oauth OAuthStore + system SystemStore + webhook WebhookStore + command CommandStore + preference PreferenceStore + license LicenseStore + token TokenStore + emoji EmojiStore + status StatusStore + fileInfo FileInfoStore + reaction ReactionStore + jobStatus JobStatusStore + SchemaVersion string + rrCounter int64 + srCounter int64 +}*/ + +type SqlStore interface { + GetCurrentSchemaVersion() string + GetMaster() *gorp.DbMap + GetSearchReplica() *gorp.DbMap + GetReplica() *gorp.DbMap + TotalMasterDbConnections() int + TotalReadDbConnections() int + TotalSearchDbConnections() int + MarkSystemRanUnitTests() + DoesTableExist(tablename string) bool + DoesColumnExist(tableName string, columName string) bool + CreateColumnIfNotExists(tableName string, columnName string, mySqlColType string, postgresColType string, defaultValue string) bool + RemoveColumnIfExists(tableName string, columnName string) bool + RemoveTableIfExists(tableName string) bool + RenameColumnIfExists(tableName string, oldColumnName string, newColumnName string, colType string) bool + GetMaxLengthOfColumnIfExists(tableName string, columnName string) string + AlterColumnTypeIfExists(tableName string, columnName string, mySqlColType string, postgresColType string) bool + CreateUniqueIndexIfNotExists(indexName string, tableName string, columnName string) bool + CreateIndexIfNotExists(indexName string, tableName string, columnName string) bool + CreateFullTextIndexIfNotExists(indexName string, tableName string, columnName string) bool + RemoveIndexIfExists(indexName string, tableName string) bool + GetAllConns() []*gorp.DbMap + Close() + Team() TeamStore + Channel() ChannelStore + Post() PostStore + User() UserStore + Audit() AuditStore + ClusterDiscovery() ClusterDiscoveryStore + Compliance() ComplianceStore + Session() SessionStore + OAuth() OAuthStore + System() SystemStore + Webhook() WebhookStore + Command() CommandStore + Preference() PreferenceStore + License() LicenseStore + Token() TokenStore + Emoji() EmojiStore + Status() StatusStore + FileInfo() FileInfoStore + Reaction() ReactionStore } diff --git a/store/sql_store_test.go b/store/sql_store_test.go index 3711d5fec..2885a7b13 100644 --- a/store/sql_store_test.go +++ b/store/sql_store_test.go @@ -3,13 +3,7 @@ package store -import ( - "strings" - "testing" - - "github.com/mattermost/platform/model" - 
"github.com/mattermost/platform/utils" -) +import "github.com/mattermost/platform/utils" var store Store @@ -18,12 +12,13 @@ func Setup() { utils.TranslationsPreInit() utils.LoadConfig("config.json") utils.InitTranslations(utils.Cfg.LocalizationSettings) - store = NewSqlStore() + store = NewLayeredStore() store.MarkSystemRanUnitTests() } } +/* func TestSqlStore1(t *testing.T) { utils.TranslationsPreInit() utils.LoadConfig("config.json") @@ -42,35 +37,10 @@ func TestSqlStore1(t *testing.T) { utils.LoadConfig("config.json") } -func TestEncrypt(t *testing.T) { - m := make(map[string]string) - - key := []byte("IPc17oYK9NAj6WfJeCqm5AxIBF6WBNuN") // AES-256 - - originalText1 := model.MapToJson(m) - cryptoText1, _ := encrypt(key, originalText1) - text1, _ := decrypt(key, cryptoText1) - rm1 := model.MapFromJson(strings.NewReader(text1)) - - if len(rm1) != 0 { - t.Fatal("error in encrypt") - } - - m["key"] = "value" - originalText2 := model.MapToJson(m) - cryptoText2, _ := encrypt(key, originalText2) - text2, _ := decrypt(key, cryptoText2) - rm2 := model.MapFromJson(strings.NewReader(text2)) - - if rm2["key"] != "value" { - t.Fatal("error in encrypt") - } -} - func TestAlertDbCmds(t *testing.T) { Setup() - sqlStore := store.(*SqlStore) + sqlStore := store.(SqlStore) if !sqlStore.DoesTableExist("Systems") { t.Fatal("Failed table exists") @@ -130,7 +100,7 @@ func TestAlertDbCmds(t *testing.T) { func TestCreateIndexIfNotExists(t *testing.T) { Setup() - sqlStore := store.(*SqlStore) + sqlStore := store.(SqlStore) defer sqlStore.RemoveColumnIfExists("Systems", "Test") if !sqlStore.CreateColumnIfNotExists("Systems", "Test", "VARCHAR(50)", "VARCHAR(50)", "") { @@ -150,7 +120,7 @@ func TestCreateIndexIfNotExists(t *testing.T) { func TestRemoveIndexIfExists(t *testing.T) { Setup() - sqlStore := store.(*SqlStore) + sqlStore := store.(SqlStore) defer sqlStore.RemoveColumnIfExists("Systems", "Test") if !sqlStore.CreateColumnIfNotExists("Systems", "Test", "VARCHAR(50)", "VARCHAR(50)", "") { @@ -174,3 +144,4 @@ func TestRemoveIndexIfExists(t *testing.T) { t.Fatal("Should've failed to remove index that was already removed") } } +*/ diff --git a/store/sql_supplier.go b/store/sql_supplier.go new file mode 100644 index 000000000..c062d0ff0 --- /dev/null +++ b/store/sql_supplier.go @@ -0,0 +1,825 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package store + +import ( + dbsql "database/sql" + "encoding/json" + "errors" + "fmt" + sqltrace "log" + "os" + "strings" + "sync/atomic" + "time" + + l4g "github.com/alecthomas/log4go" + "github.com/mattermost/gorp" + "github.com/mattermost/platform/model" + "github.com/mattermost/platform/utils" +) + +const ( + INDEX_TYPE_FULL_TEXT = "full_text" + INDEX_TYPE_DEFAULT = "default" + MAX_DB_CONN_LIFETIME = 60 +) + +const ( + EXIT_CREATE_TABLE = 100 + EXIT_DB_OPEN = 101 + EXIT_PING = 102 + EXIT_NO_DRIVER = 103 + EXIT_TABLE_EXISTS = 104 + EXIT_TABLE_EXISTS_MYSQL = 105 + EXIT_COLUMN_EXISTS = 106 + EXIT_DOES_COLUMN_EXISTS_POSTGRES = 107 + EXIT_DOES_COLUMN_EXISTS_MYSQL = 108 + EXIT_DOES_COLUMN_EXISTS_MISSING = 109 + EXIT_CREATE_COLUMN_POSTGRES = 110 + EXIT_CREATE_COLUMN_MYSQL = 111 + EXIT_CREATE_COLUMN_MISSING = 112 + EXIT_REMOVE_COLUMN = 113 + EXIT_RENAME_COLUMN = 114 + EXIT_MAX_COLUMN = 115 + EXIT_ALTER_COLUMN = 116 + EXIT_CREATE_INDEX_POSTGRES = 117 + EXIT_CREATE_INDEX_MYSQL = 118 + EXIT_CREATE_INDEX_FULL_MYSQL = 119 + EXIT_CREATE_INDEX_MISSING = 120 + EXIT_REMOVE_INDEX_POSTGRES = 121 + EXIT_REMOVE_INDEX_MYSQL = 122 + EXIT_REMOVE_INDEX_MISSING = 123 + EXIT_REMOVE_TABLE = 134 +) + +type SqlSupplierResult struct { + Err model.AppError + Result interface{} +} + +type SqlSupplierOldStores struct { + team TeamStore + channel ChannelStore + post PostStore + user UserStore + audit AuditStore + cluster ClusterDiscoveryStore + compliance ComplianceStore + session SessionStore + oauth OAuthStore + system SystemStore + webhook WebhookStore + command CommandStore + preference PreferenceStore + license LicenseStore + token TokenStore + emoji EmojiStore + status StatusStore + fileInfo FileInfoStore + reaction ReactionStore + jobStatus JobStatusStore +} + +type SqlSupplier struct { + master *gorp.DbMap + replicas []*gorp.DbMap + searchReplicas []*gorp.DbMap + rrCounter int64 + srCounter int64 + oldStores SqlSupplierOldStores +} + +func NewSqlSupplier() *SqlSupplier { + supplier := &SqlSupplier{ + rrCounter: 0, + srCounter: 0, + } + + supplier.initConnection() + + supplier.oldStores.team = NewSqlTeamStore(supplier) + supplier.oldStores.channel = NewSqlChannelStore(supplier) + supplier.oldStores.post = NewSqlPostStore(supplier) + supplier.oldStores.user = NewSqlUserStore(supplier) + supplier.oldStores.audit = NewSqlAuditStore(supplier) + supplier.oldStores.cluster = NewSqlClusterDiscoveryStore(supplier) + supplier.oldStores.compliance = NewSqlComplianceStore(supplier) + supplier.oldStores.session = NewSqlSessionStore(supplier) + supplier.oldStores.oauth = NewSqlOAuthStore(supplier) + supplier.oldStores.system = NewSqlSystemStore(supplier) + supplier.oldStores.webhook = NewSqlWebhookStore(supplier) + supplier.oldStores.command = NewSqlCommandStore(supplier) + supplier.oldStores.preference = NewSqlPreferenceStore(supplier) + supplier.oldStores.license = NewSqlLicenseStore(supplier) + supplier.oldStores.token = NewSqlTokenStore(supplier) + supplier.oldStores.emoji = NewSqlEmojiStore(supplier) + supplier.oldStores.status = NewSqlStatusStore(supplier) + supplier.oldStores.fileInfo = NewSqlFileInfoStore(supplier) + supplier.oldStores.reaction = NewSqlReactionStore(supplier) + supplier.oldStores.jobStatus = NewSqlJobStatusStore(supplier) + + err := supplier.GetMaster().CreateTablesIfNotExists() + if err != nil { + l4g.Critical(utils.T("store.sql.creating_tables.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_CREATE_TABLE) + } + + UpgradeDatabase(supplier) + + 
supplier.oldStores.team.(*SqlTeamStore).CreateIndexesIfNotExists() + supplier.oldStores.channel.(*SqlChannelStore).CreateIndexesIfNotExists() + supplier.oldStores.post.(*SqlPostStore).CreateIndexesIfNotExists() + supplier.oldStores.user.(*SqlUserStore).CreateIndexesIfNotExists() + supplier.oldStores.audit.(*SqlAuditStore).CreateIndexesIfNotExists() + supplier.oldStores.compliance.(*SqlComplianceStore).CreateIndexesIfNotExists() + supplier.oldStores.session.(*SqlSessionStore).CreateIndexesIfNotExists() + supplier.oldStores.oauth.(*SqlOAuthStore).CreateIndexesIfNotExists() + supplier.oldStores.system.(*SqlSystemStore).CreateIndexesIfNotExists() + supplier.oldStores.webhook.(*SqlWebhookStore).CreateIndexesIfNotExists() + supplier.oldStores.command.(*SqlCommandStore).CreateIndexesIfNotExists() + supplier.oldStores.preference.(*SqlPreferenceStore).CreateIndexesIfNotExists() + supplier.oldStores.license.(*SqlLicenseStore).CreateIndexesIfNotExists() + supplier.oldStores.token.(*SqlTokenStore).CreateIndexesIfNotExists() + supplier.oldStores.emoji.(*SqlEmojiStore).CreateIndexesIfNotExists() + supplier.oldStores.status.(*SqlStatusStore).CreateIndexesIfNotExists() + supplier.oldStores.fileInfo.(*SqlFileInfoStore).CreateIndexesIfNotExists() + supplier.oldStores.reaction.(*SqlReactionStore).CreateIndexesIfNotExists() + supplier.oldStores.jobStatus.(*SqlJobStatusStore).CreateIndexesIfNotExists() + + supplier.oldStores.preference.(*SqlPreferenceStore).DeleteUnusedFeatures() + + return supplier +} + +func setupConnection(con_type string, driver string, dataSource string, maxIdle int, maxOpen int, trace bool) *gorp.DbMap { + db, err := dbsql.Open(driver, dataSource) + if err != nil { + l4g.Critical(utils.T("store.sql.open_conn.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_DB_OPEN) + } + + l4g.Info(utils.T("store.sql.pinging.info"), con_type) + err = db.Ping() + if err != nil { + l4g.Critical(utils.T("store.sql.ping.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_PING) + } + + db.SetMaxIdleConns(maxIdle) + db.SetMaxOpenConns(maxOpen) + db.SetConnMaxLifetime(time.Duration(MAX_DB_CONN_LIFETIME) * time.Minute) + + var dbmap *gorp.DbMap + + connectionTimeout := time.Duration(*utils.Cfg.SqlSettings.QueryTimeout) * time.Second + + if driver == "sqlite3" { + dbmap = &gorp.DbMap{Db: db, TypeConverter: mattermConverter{}, Dialect: gorp.SqliteDialect{}, QueryTimeout: connectionTimeout} + } else if driver == model.DATABASE_DRIVER_MYSQL { + dbmap = &gorp.DbMap{Db: db, TypeConverter: mattermConverter{}, Dialect: gorp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8MB4"}, QueryTimeout: connectionTimeout} + } else if driver == model.DATABASE_DRIVER_POSTGRES { + dbmap = &gorp.DbMap{Db: db, TypeConverter: mattermConverter{}, Dialect: gorp.PostgresDialect{}, QueryTimeout: connectionTimeout} + } else { + l4g.Critical(utils.T("store.sql.dialect_driver.critical")) + time.Sleep(time.Second) + os.Exit(EXIT_NO_DRIVER) + } + + if trace { + dbmap.TraceOn("", sqltrace.New(os.Stdout, "sql-trace:", sqltrace.Lmicroseconds)) + } + + return dbmap +} + +func (s *SqlSupplier) initConnection() { + s.master = setupConnection("master", utils.Cfg.SqlSettings.DriverName, + utils.Cfg.SqlSettings.DataSource, utils.Cfg.SqlSettings.MaxIdleConns, + utils.Cfg.SqlSettings.MaxOpenConns, utils.Cfg.SqlSettings.Trace) + + if len(utils.Cfg.SqlSettings.DataSourceReplicas) == 0 { + s.replicas = make([]*gorp.DbMap, 1) + s.replicas[0] = s.master + } else { + s.replicas = make([]*gorp.DbMap, len(utils.Cfg.SqlSettings.DataSourceReplicas)) + 
for i, replica := range utils.Cfg.SqlSettings.DataSourceReplicas { + s.replicas[i] = setupConnection(fmt.Sprintf("replica-%v", i), utils.Cfg.SqlSettings.DriverName, replica, + utils.Cfg.SqlSettings.MaxIdleConns, utils.Cfg.SqlSettings.MaxOpenConns, + utils.Cfg.SqlSettings.Trace) + } + } + + if len(utils.Cfg.SqlSettings.DataSourceSearchReplicas) == 0 { + s.searchReplicas = s.replicas + } else { + s.searchReplicas = make([]*gorp.DbMap, len(utils.Cfg.SqlSettings.DataSourceSearchReplicas)) + for i, replica := range utils.Cfg.SqlSettings.DataSourceSearchReplicas { + s.searchReplicas[i] = setupConnection(fmt.Sprintf("search-replica-%v", i), utils.Cfg.SqlSettings.DriverName, replica, + utils.Cfg.SqlSettings.MaxIdleConns, utils.Cfg.SqlSettings.MaxOpenConns, + utils.Cfg.SqlSettings.Trace) + } + } +} + +func (ss *SqlSupplier) GetCurrentSchemaVersion() string { + version, _ := ss.GetMaster().SelectStr("SELECT Value FROM Systems WHERE Name='Version'") + return version +} + +func (ss *SqlSupplier) GetMaster() *gorp.DbMap { + return ss.master +} + +func (ss *SqlSupplier) GetSearchReplica() *gorp.DbMap { + rrNum := atomic.AddInt64(&ss.srCounter, 1) % int64(len(ss.searchReplicas)) + return ss.searchReplicas[rrNum] +} + +func (ss *SqlSupplier) GetReplica() *gorp.DbMap { + rrNum := atomic.AddInt64(&ss.rrCounter, 1) % int64(len(ss.replicas)) + return ss.replicas[rrNum] +} + +func (ss *SqlSupplier) TotalMasterDbConnections() int { + return ss.GetMaster().Db.Stats().OpenConnections +} + +func (ss *SqlSupplier) TotalReadDbConnections() int { + + if len(utils.Cfg.SqlSettings.DataSourceReplicas) == 0 { + return 0 + } + + count := 0 + for _, db := range ss.replicas { + count = count + db.Db.Stats().OpenConnections + } + + return count +} + +func (ss *SqlSupplier) TotalSearchDbConnections() int { + if len(utils.Cfg.SqlSettings.DataSourceSearchReplicas) == 0 { + return 0 + } + + count := 0 + for _, db := range ss.searchReplicas { + count = count + db.Db.Stats().OpenConnections + } + + return count +} + +func (ss *SqlSupplier) MarkSystemRanUnitTests() { + if result := <-ss.System().Get(); result.Err == nil { + props := result.Data.(model.StringMap) + unitTests := props[model.SYSTEM_RAN_UNIT_TESTS] + if len(unitTests) == 0 { + systemTests := &model.System{Name: model.SYSTEM_RAN_UNIT_TESTS, Value: "1"} + <-ss.System().Save(systemTests) + } + } +} + +func (ss *SqlSupplier) DoesTableExist(tableName string) bool { + if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { + count, err := ss.GetMaster().SelectInt( + `SELECT count(relname) FROM pg_class WHERE relname=$1`, + strings.ToLower(tableName), + ) + + if err != nil { + l4g.Critical(utils.T("store.sql.table_exists.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_TABLE_EXISTS) + } + + return count > 0 + + } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { + + count, err := ss.GetMaster().SelectInt( + `SELECT + COUNT(0) AS table_exists + FROM + information_schema.TABLES + WHERE + TABLE_SCHEMA = DATABASE() + AND TABLE_NAME = ? 
+ `, + tableName, + ) + + if err != nil { + l4g.Critical(utils.T("store.sql.table_exists.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_TABLE_EXISTS_MYSQL) + } + + return count > 0 + + } else { + l4g.Critical(utils.T("store.sql.column_exists_missing_driver.critical")) + time.Sleep(time.Second) + os.Exit(EXIT_COLUMN_EXISTS) + return false + } +} + +func (ss *SqlSupplier) DoesColumnExist(tableName string, columnName string) bool { + if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { + count, err := ss.GetMaster().SelectInt( + `SELECT COUNT(0) + FROM pg_attribute + WHERE attrelid = $1::regclass + AND attname = $2 + AND NOT attisdropped`, + strings.ToLower(tableName), + strings.ToLower(columnName), + ) + + if err != nil { + if err.Error() == "pq: relation \""+strings.ToLower(tableName)+"\" does not exist" { + return false + } + + l4g.Critical(utils.T("store.sql.column_exists.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_DOES_COLUMN_EXISTS_POSTGRES) + } + + return count > 0 + + } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { + + count, err := ss.GetMaster().SelectInt( + `SELECT + COUNT(0) AS column_exists + FROM + information_schema.COLUMNS + WHERE + TABLE_SCHEMA = DATABASE() + AND TABLE_NAME = ? + AND COLUMN_NAME = ?`, + tableName, + columnName, + ) + + if err != nil { + l4g.Critical(utils.T("store.sql.column_exists.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_DOES_COLUMN_EXISTS_MYSQL) + } + + return count > 0 + + } else { + l4g.Critical(utils.T("store.sql.column_exists_missing_driver.critical")) + time.Sleep(time.Second) + os.Exit(EXIT_DOES_COLUMN_EXISTS_MISSING) + return false + } +} + +func (ss *SqlSupplier) CreateColumnIfNotExists(tableName string, columnName string, mySqlColType string, postgresColType string, defaultValue string) bool { + + if ss.DoesColumnExist(tableName, columnName) { + return false + } + + if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { + _, err := ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " ADD " + columnName + " " + postgresColType + " DEFAULT '" + defaultValue + "'") + if err != nil { + l4g.Critical(utils.T("store.sql.create_column.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_CREATE_COLUMN_POSTGRES) + } + + return true + + } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { + _, err := ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " ADD " + columnName + " " + mySqlColType + " DEFAULT '" + defaultValue + "'") + if err != nil { + l4g.Critical(utils.T("store.sql.create_column.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_CREATE_COLUMN_MYSQL) + } + + return true + + } else { + l4g.Critical(utils.T("store.sql.create_column_missing_driver.critical")) + time.Sleep(time.Second) + os.Exit(EXIT_CREATE_COLUMN_MISSING) + return false + } +} + +func (ss *SqlSupplier) RemoveColumnIfExists(tableName string, columnName string) bool { + + if !ss.DoesColumnExist(tableName, columnName) { + return false + } + + _, err := ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " DROP COLUMN " + columnName) + if err != nil { + l4g.Critical("Failed to drop column %v", err) + time.Sleep(time.Second) + os.Exit(EXIT_REMOVE_COLUMN) + } + + return true +} + +func (ss *SqlSupplier) RemoveTableIfExists(tableName string) bool { + if !ss.DoesTableExist(tableName) { + return false + } + + _, err := ss.GetMaster().ExecNoTimeout("DROP TABLE " + tableName) + if err != nil { + l4g.Critical("Failed to drop table %v", 
err) + time.Sleep(time.Second) + os.Exit(EXIT_REMOVE_TABLE) + } + + return true +} + +func (ss *SqlSupplier) RenameColumnIfExists(tableName string, oldColumnName string, newColumnName string, colType string) bool { + if !ss.DoesColumnExist(tableName, oldColumnName) { + return false + } + + var err error + if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { + _, err = ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " CHANGE " + oldColumnName + " " + newColumnName + " " + colType) + } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { + _, err = ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " RENAME COLUMN " + oldColumnName + " TO " + newColumnName) + } + + if err != nil { + l4g.Critical(utils.T("store.sql.rename_column.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_RENAME_COLUMN) + } + + return true +} + +func (ss *SqlSupplier) GetMaxLengthOfColumnIfExists(tableName string, columnName string) string { + if !ss.DoesColumnExist(tableName, columnName) { + return "" + } + + var result string + var err error + if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { + result, err = ss.GetMaster().SelectStr("SELECT CHARACTER_MAXIMUM_LENGTH FROM information_schema.columns WHERE table_name = '" + tableName + "' AND COLUMN_NAME = '" + columnName + "'") + } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { + result, err = ss.GetMaster().SelectStr("SELECT character_maximum_length FROM information_schema.columns WHERE table_name = '" + strings.ToLower(tableName) + "' AND column_name = '" + strings.ToLower(columnName) + "'") + } + + if err != nil { + l4g.Critical(utils.T("store.sql.maxlength_column.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_MAX_COLUMN) + } + + return result +} + +func (ss *SqlSupplier) AlterColumnTypeIfExists(tableName string, columnName string, mySqlColType string, postgresColType string) bool { + if !ss.DoesColumnExist(tableName, columnName) { + return false + } + + var err error + if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { + _, err = ss.GetMaster().ExecNoTimeout("ALTER TABLE " + tableName + " MODIFY " + columnName + " " + mySqlColType) + } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { + _, err = ss.GetMaster().ExecNoTimeout("ALTER TABLE " + strings.ToLower(tableName) + " ALTER COLUMN " + strings.ToLower(columnName) + " TYPE " + postgresColType) + } + + if err != nil { + l4g.Critical(utils.T("store.sql.alter_column_type.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_ALTER_COLUMN) + } + + return true +} + +func (ss *SqlSupplier) CreateUniqueIndexIfNotExists(indexName string, tableName string, columnName string) bool { + return ss.createIndexIfNotExists(indexName, tableName, columnName, INDEX_TYPE_DEFAULT, true) +} + +func (ss *SqlSupplier) CreateIndexIfNotExists(indexName string, tableName string, columnName string) bool { + return ss.createIndexIfNotExists(indexName, tableName, columnName, INDEX_TYPE_DEFAULT, false) +} + +func (ss *SqlSupplier) CreateFullTextIndexIfNotExists(indexName string, tableName string, columnName string) bool { + return ss.createIndexIfNotExists(indexName, tableName, columnName, INDEX_TYPE_FULL_TEXT, false) +} + +func (ss *SqlSupplier) createIndexIfNotExists(indexName string, tableName string, columnName string, indexType string, unique bool) bool { + + uniqueStr := "" + if unique { + uniqueStr = "UNIQUE " + } + + if utils.Cfg.SqlSettings.DriverName == 
model.DATABASE_DRIVER_POSTGRES { + _, err := ss.GetMaster().SelectStr("SELECT $1::regclass", indexName) + // It should fail if the index does not exist + if err == nil { + return false + } + + query := "" + if indexType == INDEX_TYPE_FULL_TEXT { + postgresColumnNames := convertMySQLFullTextColumnsToPostgres(columnName) + query = "CREATE INDEX " + indexName + " ON " + tableName + " USING gin(to_tsvector('english', " + postgresColumnNames + "))" + } else { + query = "CREATE " + uniqueStr + "INDEX " + indexName + " ON " + tableName + " (" + columnName + ")" + } + + _, err = ss.GetMaster().ExecNoTimeout(query) + if err != nil { + l4g.Critical(utils.T("store.sql.create_index.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_CREATE_INDEX_POSTGRES) + } + } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { + + count, err := ss.GetMaster().SelectInt("SELECT COUNT(0) AS index_exists FROM information_schema.statistics WHERE TABLE_SCHEMA = DATABASE() and table_name = ? AND index_name = ?", tableName, indexName) + if err != nil { + l4g.Critical(utils.T("store.sql.check_index.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_CREATE_INDEX_MYSQL) + } + + if count > 0 { + return false + } + + fullTextIndex := "" + if indexType == INDEX_TYPE_FULL_TEXT { + fullTextIndex = " FULLTEXT " + } + + _, err = ss.GetMaster().ExecNoTimeout("CREATE " + uniqueStr + fullTextIndex + " INDEX " + indexName + " ON " + tableName + " (" + columnName + ")") + if err != nil { + l4g.Critical(utils.T("store.sql.create_index.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_CREATE_INDEX_FULL_MYSQL) + } + } else { + l4g.Critical(utils.T("store.sql.create_index_missing_driver.critical")) + time.Sleep(time.Second) + os.Exit(EXIT_CREATE_INDEX_MISSING) + } + + return true +} + +func (ss *SqlSupplier) RemoveIndexIfExists(indexName string, tableName string) bool { + + if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_POSTGRES { + _, err := ss.GetMaster().SelectStr("SELECT $1::regclass", indexName) + // It should fail if the index does not exist + if err != nil { + return false + } + + _, err = ss.GetMaster().ExecNoTimeout("DROP INDEX " + indexName) + if err != nil { + l4g.Critical(utils.T("store.sql.remove_index.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_REMOVE_INDEX_POSTGRES) + } + + return true + } else if utils.Cfg.SqlSettings.DriverName == model.DATABASE_DRIVER_MYSQL { + + count, err := ss.GetMaster().SelectInt("SELECT COUNT(0) AS index_exists FROM information_schema.statistics WHERE TABLE_SCHEMA = DATABASE() and table_name = ? 
AND index_name = ?", tableName, indexName) + if err != nil { + l4g.Critical(utils.T("store.sql.check_index.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_REMOVE_INDEX_MYSQL) + } + + if count <= 0 { + return false + } + + _, err = ss.GetMaster().ExecNoTimeout("DROP INDEX " + indexName + " ON " + tableName) + if err != nil { + l4g.Critical(utils.T("store.sql.remove_index.critical"), err) + time.Sleep(time.Second) + os.Exit(EXIT_REMOVE_INDEX_MYSQL) + } + } else { + l4g.Critical(utils.T("store.sql.create_index_missing_driver.critical")) + time.Sleep(time.Second) + os.Exit(EXIT_REMOVE_INDEX_MISSING) + } + + return true +} + +func IsUniqueConstraintError(err string, indexName []string) bool { + unique := strings.Contains(err, "unique constraint") || strings.Contains(err, "Duplicate entry") + field := false + for _, contain := range indexName { + if strings.Contains(err, contain) { + field = true + break + } + } + + return unique && field +} + +func (ss *SqlSupplier) GetAllConns() []*gorp.DbMap { + all := make([]*gorp.DbMap, len(ss.replicas)+1) + copy(all, ss.replicas) + all[len(ss.replicas)] = ss.master + return all +} + +func (ss *SqlSupplier) Close() { + l4g.Info(utils.T("store.sql.closing.info")) + ss.master.Db.Close() + for _, replica := range ss.replicas { + replica.Db.Close() + } +} + +func (ss *SqlSupplier) Team() TeamStore { + return ss.oldStores.team +} + +func (ss *SqlSupplier) Channel() ChannelStore { + return ss.oldStores.channel +} + +func (ss *SqlSupplier) Post() PostStore { + return ss.oldStores.post +} + +func (ss *SqlSupplier) User() UserStore { + return ss.oldStores.user +} + +func (ss *SqlSupplier) Session() SessionStore { + return ss.oldStores.session +} + +func (ss *SqlSupplier) Audit() AuditStore { + return ss.oldStores.audit +} + +func (ss *SqlSupplier) ClusterDiscovery() ClusterDiscoveryStore { + return ss.oldStores.cluster +} + +func (ss *SqlSupplier) Compliance() ComplianceStore { + return ss.oldStores.compliance +} + +func (ss *SqlSupplier) OAuth() OAuthStore { + return ss.oldStores.oauth +} + +func (ss *SqlSupplier) System() SystemStore { + return ss.oldStores.system +} + +func (ss *SqlSupplier) Webhook() WebhookStore { + return ss.oldStores.webhook +} + +func (ss *SqlSupplier) Command() CommandStore { + return ss.oldStores.command +} + +func (ss *SqlSupplier) Preference() PreferenceStore { + return ss.oldStores.preference +} + +func (ss *SqlSupplier) License() LicenseStore { + return ss.oldStores.license +} + +func (ss *SqlSupplier) Token() TokenStore { + return ss.oldStores.token +} + +func (ss *SqlSupplier) Emoji() EmojiStore { + return ss.oldStores.emoji +} + +func (ss *SqlSupplier) Status() StatusStore { + return ss.oldStores.status +} + +func (ss *SqlSupplier) FileInfo() FileInfoStore { + return ss.oldStores.fileInfo +} + +func (ss *SqlSupplier) Reaction() ReactionStore { + return ss.oldStores.reaction +} + +func (ss *SqlSupplier) JobStatus() JobStatusStore { + return ss.oldStores.jobStatus +} + +func (ss *SqlSupplier) DropAllTables() { + ss.master.TruncateTables() +} + +type mattermConverter struct{} + +func (me mattermConverter) ToDb(val interface{}) (interface{}, error) { + + switch t := val.(type) { + case model.StringMap: + return model.MapToJson(t), nil + case model.StringArray: + return model.ArrayToJson(t), nil + case model.StringInterface: + return model.StringInterfaceToJson(t), nil + case map[string]interface{}: + return model.StringInterfaceToJson(model.StringInterface(t)), nil + } + + return val, nil +} + +func (me mattermConverter) 
FromDb(target interface{}) (gorp.CustomScanner, bool) { + switch target.(type) { + case *model.StringMap: + binder := func(holder, target interface{}) error { + s, ok := holder.(*string) + if !ok { + return errors.New(utils.T("store.sql.convert_string_map")) + } + b := []byte(*s) + return json.Unmarshal(b, target) + } + return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true + case *model.StringArray: + binder := func(holder, target interface{}) error { + s, ok := holder.(*string) + if !ok { + return errors.New(utils.T("store.sql.convert_string_array")) + } + b := []byte(*s) + return json.Unmarshal(b, target) + } + return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true + case *model.StringInterface: + binder := func(holder, target interface{}) error { + s, ok := holder.(*string) + if !ok { + return errors.New(utils.T("store.sql.convert_string_interface")) + } + b := []byte(*s) + return json.Unmarshal(b, target) + } + return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true + case *map[string]interface{}: + binder := func(holder, target interface{}) error { + s, ok := holder.(*string) + if !ok { + return errors.New(utils.T("store.sql.convert_string_interface")) + } + b := []byte(*s) + return json.Unmarshal(b, target) + } + return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true + } + + return gorp.CustomScanner{}, false +} + +func convertMySQLFullTextColumnsToPostgres(columnNames string) string { + columns := strings.Split(columnNames, ", ") + concatenatedColumnNames := "" + for i, c := range columns { + concatenatedColumnNames += c + if i < len(columns)-1 { + concatenatedColumnNames += " || ' ' || " + } + } + + return concatenatedColumnNames +} diff --git a/store/sql_supplier_reactions.go b/store/sql_supplier_reactions.go new file mode 100644 index 000000000..14f13cce6 --- /dev/null +++ b/store/sql_supplier_reactions.go @@ -0,0 +1,35 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
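// sql_supplier_reactions.go below registers the Reactions table with gorp on every
// connection (composite key UserId/PostId/EmojiName, with bounded column sizes) and
// declares the four supplier-level reaction operations; at this stage of the
// caching-layer framework they still panic with "not implemented".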
+ +package store + +import ( + "context" + + "github.com/mattermost/platform/model" +) + +func initSqlSupplierReactions(sqlStore SqlStore) { + for _, db := range sqlStore.GetAllConns() { + table := db.AddTableWithName(model.Reaction{}, "Reactions").SetKeys(false, "UserId", "PostId", "EmojiName") + table.ColMap("UserId").SetMaxSize(26) + table.ColMap("PostId").SetMaxSize(26) + table.ColMap("EmojiName").SetMaxSize(64) + } +} + +func (s *SqlSupplier) ReactionSave(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) LayeredStoreSupplierResult { + panic("not implemented") +} + +func (s *SqlSupplier) ReactionDelete(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) LayeredStoreSupplierResult { + panic("not implemented") +} + +func (s *SqlSupplier) ReactionGetForPost(ctx context.Context, postId string, hints ...LayeredStoreHint) LayeredStoreSupplierResult { + panic("not implemented") +} + +func (s *SqlSupplier) ReactionDeleteAllWithEmojiName(ctx context.Context, emojiName string, hints ...LayeredStoreHint) LayeredStoreSupplierResult { + panic("not implemented") +} diff --git a/store/sql_system_store.go b/store/sql_system_store.go index d9a64790e..ecb7f8469 100644 --- a/store/sql_system_store.go +++ b/store/sql_system_store.go @@ -8,10 +8,10 @@ import ( ) type SqlSystemStore struct { - *SqlStore + SqlStore } -func NewSqlSystemStore(sqlStore *SqlStore) SystemStore { +func NewSqlSystemStore(sqlStore SqlStore) SystemStore { s := &SqlSystemStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_team_store.go b/store/sql_team_store.go index ae06ec743..86e1fed62 100644 --- a/store/sql_team_store.go +++ b/store/sql_team_store.go @@ -17,10 +17,10 @@ const ( ) type SqlTeamStore struct { - *SqlStore + SqlStore } -func NewSqlTeamStore(sqlStore *SqlStore) TeamStore { +func NewSqlTeamStore(sqlStore SqlStore) TeamStore { s := &SqlTeamStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_tokens_store.go b/store/sql_tokens_store.go index 591a5498e..0850f1308 100644 --- a/store/sql_tokens_store.go +++ b/store/sql_tokens_store.go @@ -13,10 +13,10 @@ import ( ) type SqlTokenStore struct { - *SqlStore + SqlStore } -func NewSqlTokenStore(sqlStore *SqlStore) TokenStore { +func NewSqlTokenStore(sqlStore SqlStore) TokenStore { s := &SqlTokenStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_upgrade.go b/store/sql_upgrade.go index 11b726712..ef255c5c2 100644 --- a/store/sql_upgrade.go +++ b/store/sql_upgrade.go @@ -36,7 +36,7 @@ const ( EXIT_THEME_MIGRATION = 1004 ) -func UpgradeDatabase(sqlStore *SqlStore) { +func UpgradeDatabase(sqlStore SqlStore) { UpgradeDatabaseToVersion31(sqlStore) UpgradeDatabaseToVersion32(sqlStore) @@ -52,38 +52,36 @@ func UpgradeDatabase(sqlStore *SqlStore) { // If the SchemaVersion is empty this this is the first time it has ran // so lets set it to the current version. 
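// The hunk below drops the SchemaVersion field that the store used to cache in
// memory; version checks now go through GetCurrentSchemaVersion(), which reads the
// value from the Systems table on demand, and the upgrade helpers take the new
// SqlStore interface rather than *SqlStore.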
- if sqlStore.SchemaVersion == "" { - if result := <-sqlStore.system.SaveOrUpdate(&model.System{Name: "Version", Value: model.CurrentVersion}); result.Err != nil { + if sqlStore.GetCurrentSchemaVersion() == "" { + if result := <-sqlStore.System().SaveOrUpdate(&model.System{Name: "Version", Value: model.CurrentVersion}); result.Err != nil { l4g.Critical(result.Err.Error()) time.Sleep(time.Second) os.Exit(EXIT_VERSION_SAVE_MISSING) } - sqlStore.SchemaVersion = model.CurrentVersion l4g.Info(utils.T("store.sql.schema_set.info"), model.CurrentVersion) } // If we're not on the current version then it's too old to be upgraded - if sqlStore.SchemaVersion != model.CurrentVersion { - l4g.Critical(utils.T("store.sql.schema_version.critical"), sqlStore.SchemaVersion) + if sqlStore.GetCurrentSchemaVersion() != model.CurrentVersion { + l4g.Critical(utils.T("store.sql.schema_version.critical"), sqlStore.GetCurrentSchemaVersion()) time.Sleep(time.Second) os.Exit(EXIT_TOO_OLD) } } -func saveSchemaVersion(sqlStore *SqlStore, version string) { - if result := <-sqlStore.system.Update(&model.System{Name: "Version", Value: version}); result.Err != nil { +func saveSchemaVersion(sqlStore SqlStore, version string) { + if result := <-sqlStore.System().Update(&model.System{Name: "Version", Value: version}); result.Err != nil { l4g.Critical(result.Err.Error()) time.Sleep(time.Second) os.Exit(EXIT_VERSION_SAVE) } - sqlStore.SchemaVersion = version l4g.Warn(utils.T("store.sql.upgraded.warn"), version) } -func shouldPerformUpgrade(sqlStore *SqlStore, currentSchemaVersion string, expectedSchemaVersion string) bool { - if sqlStore.SchemaVersion == currentSchemaVersion { +func shouldPerformUpgrade(sqlStore SqlStore, currentSchemaVersion string, expectedSchemaVersion string) bool { + if sqlStore.GetCurrentSchemaVersion() == currentSchemaVersion { l4g.Warn(utils.T("store.sql.schema_out_of_date.warn"), currentSchemaVersion) l4g.Warn(utils.T("store.sql.schema_upgrade_attempt.warn"), expectedSchemaVersion) @@ -93,14 +91,14 @@ func shouldPerformUpgrade(sqlStore *SqlStore, currentSchemaVersion string, expec return false } -func UpgradeDatabaseToVersion31(sqlStore *SqlStore) { +func UpgradeDatabaseToVersion31(sqlStore SqlStore) { if shouldPerformUpgrade(sqlStore, VERSION_3_0_0, VERSION_3_1_0) { sqlStore.CreateColumnIfNotExists("OutgoingWebhooks", "ContentType", "varchar(128)", "varchar(128)", "") saveSchemaVersion(sqlStore, VERSION_3_1_0) } } -func UpgradeDatabaseToVersion32(sqlStore *SqlStore) { +func UpgradeDatabaseToVersion32(sqlStore SqlStore) { if shouldPerformUpgrade(sqlStore, VERSION_3_1_0, VERSION_3_2_0) { sqlStore.CreateColumnIfNotExists("TeamMembers", "DeleteAt", "bigint(20)", "bigint", "0") @@ -114,7 +112,7 @@ func themeMigrationFailed(err error) { os.Exit(EXIT_THEME_MIGRATION) } -func UpgradeDatabaseToVersion33(sqlStore *SqlStore) { +func UpgradeDatabaseToVersion33(sqlStore SqlStore) { if shouldPerformUpgrade(sqlStore, VERSION_3_2_0, VERSION_3_3_0) { if sqlStore.DoesColumnExist("Users", "ThemeProps") { params := map[string]interface{}{ @@ -191,7 +189,7 @@ func UpgradeDatabaseToVersion33(sqlStore *SqlStore) { } } -func UpgradeDatabaseToVersion34(sqlStore *SqlStore) { +func UpgradeDatabaseToVersion34(sqlStore SqlStore) { if shouldPerformUpgrade(sqlStore, VERSION_3_3_0, VERSION_3_4_0) { sqlStore.CreateColumnIfNotExists("Status", "Manual", "BOOLEAN", "BOOLEAN", "0") sqlStore.CreateColumnIfNotExists("Status", "ActiveChannel", "varchar(26)", "varchar(26)", "") @@ -200,7 +198,7 @@ func UpgradeDatabaseToVersion34(sqlStore 
*SqlStore) { } } -func UpgradeDatabaseToVersion35(sqlStore *SqlStore) { +func UpgradeDatabaseToVersion35(sqlStore SqlStore) { if shouldPerformUpgrade(sqlStore, VERSION_3_4_0, VERSION_3_5_0) { sqlStore.GetMaster().Exec("UPDATE Users SET Roles = 'system_user' WHERE Roles = ''") sqlStore.GetMaster().Exec("UPDATE Users SET Roles = 'system_user system_admin' WHERE Roles = 'system_admin'") @@ -223,7 +221,7 @@ func UpgradeDatabaseToVersion35(sqlStore *SqlStore) { } } -func UpgradeDatabaseToVersion36(sqlStore *SqlStore) { +func UpgradeDatabaseToVersion36(sqlStore SqlStore) { if shouldPerformUpgrade(sqlStore, VERSION_3_5_0, VERSION_3_6_0) { sqlStore.CreateColumnIfNotExists("Posts", "HasReactions", "tinyint", "boolean", "0") @@ -240,7 +238,7 @@ func UpgradeDatabaseToVersion36(sqlStore *SqlStore) { } } -func UpgradeDatabaseToVersion37(sqlStore *SqlStore) { +func UpgradeDatabaseToVersion37(sqlStore SqlStore) { if shouldPerformUpgrade(sqlStore, VERSION_3_6_0, VERSION_3_7_0) { // Add EditAt column to Posts sqlStore.CreateColumnIfNotExists("Posts", "EditAt", " bigint", " bigint", "0") @@ -249,7 +247,7 @@ func UpgradeDatabaseToVersion37(sqlStore *SqlStore) { } } -func UpgradeDatabaseToVersion38(sqlStore *SqlStore) { +func UpgradeDatabaseToVersion38(sqlStore SqlStore) { if shouldPerformUpgrade(sqlStore, VERSION_3_7_0, VERSION_3_8_0) { // Add the IsPinned column to posts. sqlStore.CreateColumnIfNotExists("Posts", "IsPinned", "boolean", "boolean", "0") @@ -258,7 +256,7 @@ func UpgradeDatabaseToVersion38(sqlStore *SqlStore) { } } -func UpgradeDatabaseToVersion39(sqlStore *SqlStore) { +func UpgradeDatabaseToVersion39(sqlStore SqlStore) { if shouldPerformUpgrade(sqlStore, VERSION_3_8_0, VERSION_3_9_0) { sqlStore.CreateColumnIfNotExists("OAuthAccessData", "Scope", "varchar(128)", "varchar(128)", model.DEFAULT_SCOPE) sqlStore.RemoveTableIfExists("PasswordRecovery") @@ -267,13 +265,13 @@ func UpgradeDatabaseToVersion39(sqlStore *SqlStore) { } } -func UpgradeDatabaseToVersion310(sqlStore *SqlStore) { +func UpgradeDatabaseToVersion310(sqlStore SqlStore) { if shouldPerformUpgrade(sqlStore, VERSION_3_9_0, VERSION_3_10_0) { saveSchemaVersion(sqlStore, VERSION_3_10_0) } } -func UpgradeDatabaseToVersion40(sqlStore *SqlStore) { +func UpgradeDatabaseToVersion40(sqlStore SqlStore) { // TODO: Uncomment following condition when version 4.0.0 is released //if shouldPerformUpgrade(sqlStore, VERSION_3_10_0, VERSION_4_0_0) { diff --git a/store/sql_upgrade_test.go b/store/sql_upgrade_test.go index 7a65e5b81..c2d28c90b 100644 --- a/store/sql_upgrade_test.go +++ b/store/sql_upgrade_test.go @@ -12,17 +12,17 @@ import ( func TestStoreUpgrade(t *testing.T) { Setup() - saveSchemaVersion(store.(*SqlStore), VERSION_3_0_0) - UpgradeDatabase(store.(*SqlStore)) + saveSchemaVersion(store.(*LayeredStore).DatabaseLayer, VERSION_3_0_0) + UpgradeDatabase(store.(*LayeredStore).DatabaseLayer) - store.(*SqlStore).SchemaVersion = "" - UpgradeDatabase(store.(*SqlStore)) + saveSchemaVersion(store.(*LayeredStore).DatabaseLayer, "") + UpgradeDatabase(store.(*LayeredStore).DatabaseLayer) } func TestSaveSchemaVersion(t *testing.T) { Setup() - saveSchemaVersion(store.(*SqlStore), VERSION_3_0_0) + saveSchemaVersion(store.(*LayeredStore).DatabaseLayer, VERSION_3_0_0) if result := <-store.System().Get(); result.Err != nil { t.Fatal(result.Err) } else { @@ -32,9 +32,9 @@ func TestSaveSchemaVersion(t *testing.T) { } } - if store.(*SqlStore).SchemaVersion != VERSION_3_0_0 { + if store.(*LayeredStore).DatabaseLayer.GetCurrentSchemaVersion() != VERSION_3_0_0 { 
t.Fatal("version not updated") } - saveSchemaVersion(store.(*SqlStore), model.CurrentVersion) + saveSchemaVersion(store.(*LayeredStore).DatabaseLayer, model.CurrentVersion) } diff --git a/store/sql_user_store.go b/store/sql_user_store.go index e6054365c..4aa6f6cfe 100644 --- a/store/sql_user_store.go +++ b/store/sql_user_store.go @@ -33,7 +33,7 @@ const ( ) type SqlUserStore struct { - *SqlStore + SqlStore } var profilesInChannelCache *utils.Cache = utils.NewLru(PROFILES_IN_CHANNEL_CACHE_SIZE) @@ -48,7 +48,7 @@ func (us SqlUserStore) InvalidatProfileCacheForUser(userId string) { profileByIdsCache.Remove(userId) } -func NewSqlUserStore(sqlStore *SqlStore) UserStore { +func NewSqlUserStore(sqlStore SqlStore) UserStore { us := &SqlUserStore{sqlStore} for _, db := range sqlStore.GetAllConns() { diff --git a/store/sql_webhook_store.go b/store/sql_webhook_store.go index 1d750efac..d59d7e03a 100644 --- a/store/sql_webhook_store.go +++ b/store/sql_webhook_store.go @@ -14,7 +14,7 @@ import ( ) type SqlWebhookStore struct { - *SqlStore + SqlStore } const ( @@ -28,7 +28,7 @@ func ClearWebhookCaches() { webhookCache.Purge() } -func NewSqlWebhookStore(sqlStore *SqlStore) WebhookStore { +func NewSqlWebhookStore(sqlStore SqlStore) WebhookStore { s := &SqlWebhookStore{sqlStore} for _, db := range sqlStore.GetAllConns() { -- cgit v1.2.3-1-g7c22
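For orientation, here is a sketch of how an upgrade step drives the helpers the refactored SqlStore interface exposes above (shouldPerformUpgrade, CreateColumnIfNotExists, CreateIndexIfNotExists, saveSchemaVersion). The function name, column, and index names are illustrative placeholders, not part of this commit.

// Assumes package store, alongside the code in this patch; "ExampleCol" and
// "idx_posts_example_col" are made-up names used purely for illustration.
func upgradeDatabaseToVersionExample(sqlStore SqlStore) {
	if shouldPerformUpgrade(sqlStore, VERSION_3_9_0, VERSION_3_10_0) {
		// Column types are supplied per driver: MySQL type first, then Postgres.
		sqlStore.CreateColumnIfNotExists("Posts", "ExampleCol", "varchar(64)", "varchar(64)", "")
		sqlStore.CreateIndexIfNotExists("idx_posts_example_col", "Posts", "ExampleCol")
		saveSchemaVersion(sqlStore, VERSION_3_10_0)
	}
}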