Upgrade default templates on launch (#2167)

* store ImportArchive

* fix db call

* blockModifier

* fix template cards appearing

* remove old templates

* disable max-lines eslint rule

* undo eslint rule change

* update default templates

* Update server/services/store/sqlstore/archive.go

Co-authored-by: Mattermod <mattermod@users.noreply.github.com>
Doug Lauder 2022-02-01 13:36:12 -05:00 committed by GitHub
parent e3d42c6663
commit 42c6ec5b61
10 changed files with 685 additions and 165 deletions


@ -108,13 +108,25 @@ type BlockPatchBatch struct {
BlockPatches []BlockPatch `json:"block_patches"`
}
// Archive is an import / export archive.
type Archive struct {
Version int64 `json:"version"`
Date int64 `json:"date"`
Blocks []Block `json:"blocks"`
}
// ArchiveHeader is the first line of any archive file.
type ArchiveHeader struct {
Version int `json:"version"`
Date int64 `json:"date"`
}
// ArchiveLine is any non-header line in an archive.
type ArchiveLine struct {
Type string `json:"type"`
Data json.RawMessage `json:"data"`
}
// BlockModifier is a callback that can modify each block during an import.
// A cache of arbitrary data will be passed for each call and any changes
// to the cache will be preserved for the next call.
// Return true to import the block or false to skip import.
type BlockModifier func(block *Block, cache map[string]interface{}) bool
// BlocksFromJSON creates a slice of blocks from a JSON stream, ignoring any errors.
func BlocksFromJSON(data io.Reader) []Block {
var blocks []Block
_ = json.NewDecoder(data).Decode(&blocks)
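
For orientation, a minimal sketch of a custom BlockModifier as a caller might write one; the "chart" block type and the skipCharts name are purely illustrative and not part of this change:

// skipCharts skips every block of type "chart" and records the skipped IDs
// in the cache so that children of a skipped block are skipped as well.
func skipCharts(block *model.Block, cache map[string]interface{}) bool {
	if _, parentSkipped := cache[block.ParentID]; parentSkipped {
		cache[block.ID] = struct{}{}
		return false
	}
	if block.Type == "chart" {
		cache[block.ID] = struct{}{}
		return false
	}
	return true
}

Such a function would be passed as the mod argument of the new ImportArchive store method, e.g. store.ImportArchive(container, reader, userID, skipCharts).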


@ -14,6 +14,7 @@ package sqlstore
import (
"context"
"io"
"time"
"github.com/mattermost/focalboard/server/model"


@ -5,6 +5,7 @@
package mockstore
import (
io "io"
reflect "reflect"
time "time"
@ -661,6 +662,20 @@ func (mr *MockStoreMockRecorder) HasWorkspaceAccess(arg0, arg1 interface{}) *gom
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasWorkspaceAccess", reflect.TypeOf((*MockStore)(nil).HasWorkspaceAccess), arg0, arg1)
}
// ImportArchive mocks base method.
func (m *MockStore) ImportArchive(arg0 store.Container, arg1 io.Reader, arg2 string, arg3 model.BlockModifier) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImportArchive", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// ImportArchive indicates an expected call of ImportArchive.
func (mr *MockStoreMockRecorder) ImportArchive(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportArchive", reflect.TypeOf((*MockStore)(nil).ImportArchive), arg0, arg1, arg2, arg3)
}
// InsertBlock mocks base method.
func (m *MockStore) InsertBlock(arg0 store.Container, arg1 *model.Block, arg2 string) error {
m.ctrl.T.Helper()
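
In unit tests, the generated mock can be primed for the new method in the usual gomock fashion; a sketch, assuming a *testing.T named t and the NewMockStore constructor that gomock generates for this package:

ctrl := gomock.NewController(t)
defer ctrl.Finish()

mockStore := mockstore.NewMockStore(ctrl)
mockStore.EXPECT().
	ImportArchive(gomock.Any(), gomock.Any(), "system", gomock.Any()).
	Return(nil)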


@ -0,0 +1,130 @@
package sqlstore
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
sq "github.com/Masterminds/squirrel"
"github.com/mattermost/focalboard/server/model"
"github.com/mattermost/focalboard/server/services/store"
"github.com/mattermost/mattermost-server/v6/shared/mlog"
)
type blockModifierInfo struct {
modifier model.BlockModifier
cache map[string]interface{}
}
var (
ErrUnsupportedLineType = errors.New("unsupported line type")
)
// importArchive reads a JSONL archive from r and imports each line, passing every block through the optional BlockModifier before insert.
func (s *SQLStore) importArchive(db sq.BaseRunner, container store.Container, r io.Reader, userID string, mod model.BlockModifier) error {
s.logger.Debug("importArchive")
// archives are stored in JSONL format so we must read them
// line by line.
reader := bufio.NewReader(r)
// first line should be the archive header
line, err := readLine(reader)
if err != nil {
return fmt.Errorf("error reading archive header: %w", err)
}
var header model.ArchiveHeader
err = json.Unmarshal(line, &header)
if err != nil {
return err
}
modInfo := blockModifierInfo{
modifier: mod,
cache: make(map[string]interface{}),
}
args := importArchiveLineArgs{
db: db,
container: container,
userID: userID,
modInfo: modInfo,
}
lineNum := 1
for {
line, errRead := readLine(reader)
if len(line) != 0 {
var archiveLine model.ArchiveLine
err = json.Unmarshal(line, &archiveLine)
if err != nil {
return fmt.Errorf("error parsing archive line %d: %w", lineNum, err)
}
if err2 := s.importArchiveLine(&archiveLine, args); err2 != nil {
return fmt.Errorf("error importing archive line %d: %w", lineNum, err2)
}
}
if errRead != nil {
if errors.Is(errRead, io.EOF) {
break
}
return fmt.Errorf("error reading archive line %d: %w", lineNum, errRead)
}
lineNum++
}
return nil
}
type importArchiveLineArgs struct {
db sq.BaseRunner
container store.Container
userID string
modInfo blockModifierInfo
}
// importArchiveLine parses a single line from an archive and imports it to the database.
func (s *SQLStore) importArchiveLine(line *model.ArchiveLine, args importArchiveLineArgs) error {
switch line.Type {
case "block":
var block model.Block
err := json.Unmarshal(line.Data, &block)
if err != nil {
return err
}
if args.modInfo.modifier != nil {
if !args.modInfo.modifier(&block, args.modInfo.cache) {
s.logger.Trace("skipping insert block per block modifier",
mlog.String("blockID", block.ID),
mlog.String("block_type", block.Type.String()),
mlog.String("block_title", block.Title),
)
return nil
}
}
s.logger.Trace("insert block",
mlog.String("blockID", block.ID),
mlog.String("block_type", block.Type.String()),
mlog.String("block_title", block.Title),
)
if err := s.insertBlock(args.db, args.container, &block, args.userID); err != nil {
return err
}
default:
return fmt.Errorf("%w (%s)", ErrUnsupportedLineType, line.Type)
}
return nil
}
func readLine(r *bufio.Reader) ([]byte, error) {
line, err := r.ReadBytes('\n')
line = bytes.TrimSpace(line)
return line, err
}
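
As a sketch of the stream importArchive consumes: the first line is an ArchiveHeader, and every later line is one ArchiveLine per entity. The header values and the keys inside the block payloads below are illustrative, not taken from this diff:

// sampleArchive shows the JSONL layout; block payload field names are assumptions.
const sampleArchive = `{"version":1,"date":1643673600000}
{"type":"block","data":{"id":"board1","rootId":"board1","type":"board","title":"Example board"}}
{"type":"block","data":{"id":"card1","parentId":"board1","rootId":"board1","type":"card","title":"Example card"}}`

// A nil BlockModifier is allowed; importArchiveLine only calls the modifier
// when one is provided:
// err := store.ImportArchive(container, strings.NewReader(sampleArchive), "system", nil)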

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -1,7 +1,10 @@
package sqlstore
import (
"encoding/json"
"bytes"
"errors"
"fmt"
"strings"
sq "github.com/Masterminds/squirrel"
"github.com/mattermost/focalboard/server/model"
@ -11,65 +14,145 @@ import (
"github.com/mattermost/mattermost-server/v6/shared/mlog"
)
const (
defaultTemplateVersion = 2
)
var (
ErrUnsupportedDatabaseType = errors.New("database type is unsupported")
)
// InitializeTemplates imports default templates if the blocks table contains none, or if the existing defaults are from an older template version.
func (s *SQLStore) InitializeTemplates() error {
isNeeded, err := s.isInitializationNeeded()
blocks, err := s.getDefaultTemplateBlocks()
if err != nil {
return err
return fmt.Errorf("cannot initialize templates: %w", err)
}
if isNeeded {
return s.importInitialTemplates()
isNeeded, reason := s.isInitializationNeeded(blocks)
if !isNeeded {
s.logger.Debug("Template import not needed, skipping")
return nil
}
return nil
}
s.logger.Debug("Importing new default templates", mlog.String("reason", reason))
func (s *SQLStore) importInitialTemplates() error {
s.logger.Debug("importInitialTemplates")
blocksJSON := initializations.MustAsset("templates.json")
var archive model.Archive
err := json.Unmarshal(blocksJSON, &archive)
if err != nil {
return err
if err := s.removeDefaultTemplates(blocks); err != nil {
return fmt.Errorf("cannot remove old templates: %w", err)
}
blocksJSONL := initializations.MustAsset("templates.json")
globalContainer := store.Container{
WorkspaceID: "0",
}
s.logger.Debug("Inserting blocks", mlog.Int("block_count", len(archive.Blocks)))
for i := range archive.Blocks {
s.logger.Trace("insert block",
mlog.String("blockID", archive.Blocks[i].ID),
mlog.String("block_type", archive.Blocks[i].Type.String()),
mlog.String("block_title", archive.Blocks[i].Title),
)
err := s.InsertBlock(globalContainer, &archive.Blocks[i], "system")
if err != nil {
return err
return s.ImportArchive(globalContainer, bytes.NewReader(blocksJSONL), "system", fixTemplateBlock)
}
// removeDefaultTemplates deletes all the default templates and their children.
func (s *SQLStore) removeDefaultTemplates(blocks []model.Block) error {
count := 0
for _, block := range blocks {
// default template deletion does not need to go to blocks_history
deleteQuery := s.getQueryBuilder(s.db).
Delete(s.tablePrefix + "blocks").
Where(sq.Or{
sq.Eq{"id": block.ID},
sq.Eq{"parent_id": block.ID},
sq.Eq{"root_id": block.ID},
})
if _, err := deleteQuery.Exec(); err != nil {
return fmt.Errorf("cannot delete default template %s: %w", block.ID, err)
}
s.logger.Trace("removed default template block",
mlog.String("block_id", block.ID),
mlog.String("block_type", string(block.Type)),
)
count++
}
s.logger.Debug("Removed default templates", mlog.Int("count", count))
return nil
}
// isInitializationNeeded returns true if the blocks table is empty.
func (s *SQLStore) isInitializationNeeded() (bool, error) {
// getDefaultTemplateBlocks fetches all default template blocks created by the system user.
func (s *SQLStore) getDefaultTemplateBlocks() ([]model.Block, error) {
query := s.getQueryBuilder(s.db).
Select("count(*)").
Select(s.blockFields()...).
From(s.tablePrefix + "blocks").
Where(sq.Eq{"COALESCE(workspace_id, '0')": "0"})
Where(sq.Eq{"coalesce(workspace_id, '0')": "0"}).
Where(sq.Eq{"created_by": "system"})
row := query.QueryRow()
var count int
err := row.Scan(&count)
if err != nil {
s.logger.Error("isInitializationNeeded", mlog.Err(err))
return false, err
switch s.dbType {
case sqliteDBType:
query = query.Where(s.tablePrefix + "blocks.fields LIKE '%\"isTemplate\":true%'")
case mysqlDBType:
query = query.Where(s.tablePrefix + "blocks.fields LIKE '%\"isTemplate\":true%'")
case postgresDBType:
query = query.Where(s.tablePrefix + "blocks.fields ->> 'isTemplate' = 'true'")
default:
return nil, fmt.Errorf("cannot get default template blocks for database type %s: %w", s.dbType, ErrUnsupportedDatabaseType)
}
return (count == 0), nil
rows, err := query.Query()
if err != nil {
s.logger.Error("getDefaultTemplateBlocks error", mlog.Err(err))
return nil, err
}
defer s.CloseRows(rows)
return s.blocksFromRows(rows)
}
// isInitializationNeeded returns true if the blocks table contains no default templates,
// or contains at least one default template with an old version number.
func (s *SQLStore) isInitializationNeeded(blocks []model.Block) (bool, string) {
if len(blocks) == 0 {
return true, "no default templates found"
}
// look for any template blocks with the wrong version number (or no version at all).
for _, block := range blocks {
v, ok := block.Fields["templateVer"]
if !ok {
return true, "block missing templateVer"
}
version, ok := v.(float64)
if !ok {
return true, "templateVer NaN"
}
if version < defaultTemplateVersion {
return true, "templateVer too old"
}
}
return false, ""
}
// fixTemplateBlock fixes a block to be inserted as part of a template.
func fixTemplateBlock(block *model.Block, cache map[string]interface{}) bool {
// cache contains ids of skipped blocks. Ensure their children are skipped as well.
if _, ok := cache[block.ParentID]; ok {
cache[block.ID] = struct{}{}
return false
}
// filter out template blocks; we only want the non-template
// blocks which we will turn into default template blocks.
if b, ok := block.Fields["isTemplate"]; ok {
if val, ok := b.(bool); ok && val {
cache[block.ID] = struct{}{}
return false
}
}
// remove '(NEW)' from title & force template flag
if block.Type == "board" {
block.Title = strings.ReplaceAll(block.Title, "(NEW)", "")
block.Fields["isTemplate"] = true
block.Fields["templateVer"] = defaultTemplateVersion
}
return true
}
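
To make the modifier's effect concrete, a small sketch from inside the sqlstore package; the block values are made up, and the field names mirror the ones used elsewhere in this diff:

block := &model.Block{
	ID:     "board1",
	Type:   "board",
	Title:  "Project Tasks (NEW)",
	Fields: map[string]interface{}{},
}
cache := map[string]interface{}{}

keep := fixTemplateBlock(block, cache)
// keep is true; the "(NEW)" marker is stripped from the title, and Fields now
// carries "isTemplate": true and "templateVer": defaultTemplateVersion.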


@ -1,4 +1,4 @@
// Code generated for package migrations by go-bindata DO NOT EDIT. (@generated)
// Code generated by go-bindata.
// sources:
// migrations_files/000001_init.down.sql
// migrations_files/000001_init.up.sql
@ -32,6 +32,8 @@
// migrations_files/000015_blocks_history_no_nulls.up.sql
// migrations_files/000016_subscriptions_table.down.sql
// migrations_files/000016_subscriptions_table.up.sql
// DO NOT EDIT!
package migrations
import (
@ -78,32 +80,21 @@ type bindataFileInfo struct {
modTime time.Time
}
// Name return file name
func (fi bindataFileInfo) Name() string {
return fi.name
}
// Size return file size
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
// Mode return file mode
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
// Mode return file modify time
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
// IsDir return file whether a directory
func (fi bindataFileInfo) IsDir() bool {
return fi.mode&os.ModeDir != 0
return false
}
// Sys return file is sys mode
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
@ -123,7 +114,7 @@ func _000001_initDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000001_init.down.sql", size: 30, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000001_init.down.sql", size: 30, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -143,7 +134,7 @@ func _000001_initUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000001_init.up.sql", size: 588, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000001_init.up.sql", size: 588, mode: os.FileMode(436), modTime: time.Unix(1631562120, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -163,7 +154,7 @@ func _000002_system_settings_tableDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000002_system_settings_table.down.sql", size: 39, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000002_system_settings_table.down.sql", size: 39, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -183,7 +174,7 @@ func _000002_system_settings_tableUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000002_system_settings_table.up.sql", size: 158, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000002_system_settings_table.up.sql", size: 158, mode: os.FileMode(436), modTime: time.Unix(1631562120, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -203,7 +194,7 @@ func _000003_blocks_rootidDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000003_blocks_rootid.down.sql", size: 51, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000003_blocks_rootid.down.sql", size: 51, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -223,7 +214,7 @@ func _000003_blocks_rootidUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000003_blocks_rootid.up.sql", size: 62, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000003_blocks_rootid.up.sql", size: 62, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -243,7 +234,7 @@ func _000004_auth_tableDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000004_auth_table.down.sql", size: 61, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000004_auth_table.down.sql", size: 61, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -263,7 +254,7 @@ func _000004_auth_tableUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000004_auth_table.up.sql", size: 683, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000004_auth_table.up.sql", size: 683, mode: os.FileMode(436), modTime: time.Unix(1631562120, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -283,7 +274,7 @@ func _000005_blocks_modifiedbyDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000005_blocks_modifiedby.down.sql", size: 55, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000005_blocks_modifiedby.down.sql", size: 55, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -303,7 +294,7 @@ func _000005_blocks_modifiedbyUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000005_blocks_modifiedby.up.sql", size: 66, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000005_blocks_modifiedby.up.sql", size: 66, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -323,7 +314,7 @@ func _000006_sharing_tableDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000006_sharing_table.down.sql", size: 31, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000006_sharing_table.down.sql", size: 31, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -343,7 +334,7 @@ func _000006_sharing_tableUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000006_sharing_table.up.sql", size: 220, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000006_sharing_table.up.sql", size: 220, mode: os.FileMode(436), modTime: time.Unix(1631562120, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -363,7 +354,7 @@ func _000007_workspaces_tableDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000007_workspaces_table.down.sql", size: 34, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000007_workspaces_table.down.sql", size: 34, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -383,7 +374,7 @@ func _000007_workspaces_tableUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000007_workspaces_table.up.sql", size: 272, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000007_workspaces_table.up.sql", size: 272, mode: os.FileMode(436), modTime: time.Unix(1631562120, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -403,7 +394,7 @@ func _000008_teamsDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000008_teams.down.sql", size: 173, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000008_teams.down.sql", size: 173, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -423,7 +414,7 @@ func _000008_teamsUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000008_teams.up.sql", size: 304, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000008_teams.up.sql", size: 304, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -443,7 +434,7 @@ func _000009_blocks_historyDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000009_blocks_history.down.sql", size: 97, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000009_blocks_history.down.sql", size: 97, mode: os.FileMode(436), modTime: time.Unix(1621275260, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -463,7 +454,7 @@ func _000009_blocks_historyUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000009_blocks_history.up.sql", size: 1183, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000009_blocks_history.up.sql", size: 1183, mode: os.FileMode(436), modTime: time.Unix(1631562120, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -483,7 +474,7 @@ func _000010_blocks_created_byDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000010_blocks_created_by.down.sql", size: 2537, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000010_blocks_created_by.down.sql", size: 2537, mode: os.FileMode(436), modTime: time.Unix(1631562120, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -503,7 +494,7 @@ func _000010_blocks_created_byUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000010_blocks_created_by.up.sql", size: 381, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000010_blocks_created_by.up.sql", size: 381, mode: os.FileMode(436), modTime: time.Unix(1625794710, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -523,7 +514,7 @@ func _000011_match_collationDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000011_match_collation.down.sql", size: 78, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000011_match_collation.down.sql", size: 78, mode: os.FileMode(436), modTime: time.Unix(1631562120, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -543,7 +534,7 @@ func _000011_match_collationUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000011_match_collation.up.sql", size: 1959, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000011_match_collation.up.sql", size: 1959, mode: os.FileMode(436), modTime: time.Unix(1631562120, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -563,7 +554,7 @@ func _000012_match_column_collationDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000012_match_column_collation.down.sql", size: 78, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000012_match_column_collation.down.sql", size: 78, mode: os.FileMode(436), modTime: time.Unix(1633447591, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -583,7 +574,7 @@ func _000012_match_column_collationUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000012_match_column_collation.up.sql", size: 2610, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000012_match_column_collation.up.sql", size: 2610, mode: os.FileMode(436), modTime: time.Unix(1633447591, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -603,7 +594,7 @@ func _000013_millisecond_timestampsDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000013_millisecond_timestamps.down.sql", size: 756, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000013_millisecond_timestamps.down.sql", size: 756, mode: os.FileMode(436), modTime: time.Unix(1633610888, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -623,7 +614,7 @@ func _000013_millisecond_timestampsUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000013_millisecond_timestamps.up.sql", size: 756, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000013_millisecond_timestamps.up.sql", size: 756, mode: os.FileMode(436), modTime: time.Unix(1633610888, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -643,7 +634,7 @@ func _000014_add_not_null_constraintDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000014_add_not_null_constraint.down.sql", size: 310, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000014_add_not_null_constraint.down.sql", size: 310, mode: os.FileMode(436), modTime: time.Unix(1635256817, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -663,7 +654,7 @@ func _000014_add_not_null_constraintUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000014_add_not_null_constraint.up.sql", size: 483, mode: os.FileMode(436), modTime: time.Unix(1641808881, 0)}
info := bindataFileInfo{name: "000014_add_not_null_constraint.up.sql", size: 483, mode: os.FileMode(436), modTime: time.Unix(1635256817, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -683,7 +674,7 @@ func _000015_blocks_history_no_nullsDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000015_blocks_history_no_nulls.down.sql", size: 214, mode: os.FileMode(436), modTime: time.Unix(1642270860, 0)}
info := bindataFileInfo{name: "000015_blocks_history_no_nulls.down.sql", size: 214, mode: os.FileMode(436), modTime: time.Unix(1641495576, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -703,7 +694,7 @@ func _000015_blocks_history_no_nullsUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000015_blocks_history_no_nulls.up.sql", size: 3142, mode: os.FileMode(436), modTime: time.Unix(1642272514, 0)}
info := bindataFileInfo{name: "000015_blocks_history_no_nulls.up.sql", size: 3142, mode: os.FileMode(436), modTime: time.Unix(1642530526, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -723,7 +714,7 @@ func _000016_subscriptions_tableDownSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000016_subscriptions_table.down.sql", size: 79, mode: os.FileMode(436), modTime: time.Unix(1642270860, 0)}
info := bindataFileInfo{name: "000016_subscriptions_table.down.sql", size: 79, mode: os.FileMode(436), modTime: time.Unix(1641495576, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -743,7 +734,7 @@ func _000016_subscriptions_tableUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "000016_subscriptions_table.up.sql", size: 618, mode: os.FileMode(436), modTime: time.Unix(1642270860, 0)}
info := bindataFileInfo{name: "000016_subscriptions_table.up.sql", size: 618, mode: os.FileMode(436), modTime: time.Unix(1641495576, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -800,38 +791,38 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"000001_init.down.sql": _000001_initDownSql,
"000001_init.up.sql": _000001_initUpSql,
"000002_system_settings_table.down.sql": _000002_system_settings_tableDownSql,
"000002_system_settings_table.up.sql": _000002_system_settings_tableUpSql,
"000003_blocks_rootid.down.sql": _000003_blocks_rootidDownSql,
"000003_blocks_rootid.up.sql": _000003_blocks_rootidUpSql,
"000004_auth_table.down.sql": _000004_auth_tableDownSql,
"000004_auth_table.up.sql": _000004_auth_tableUpSql,
"000005_blocks_modifiedby.down.sql": _000005_blocks_modifiedbyDownSql,
"000005_blocks_modifiedby.up.sql": _000005_blocks_modifiedbyUpSql,
"000006_sharing_table.down.sql": _000006_sharing_tableDownSql,
"000006_sharing_table.up.sql": _000006_sharing_tableUpSql,
"000007_workspaces_table.down.sql": _000007_workspaces_tableDownSql,
"000007_workspaces_table.up.sql": _000007_workspaces_tableUpSql,
"000008_teams.down.sql": _000008_teamsDownSql,
"000008_teams.up.sql": _000008_teamsUpSql,
"000009_blocks_history.down.sql": _000009_blocks_historyDownSql,
"000009_blocks_history.up.sql": _000009_blocks_historyUpSql,
"000010_blocks_created_by.down.sql": _000010_blocks_created_byDownSql,
"000010_blocks_created_by.up.sql": _000010_blocks_created_byUpSql,
"000011_match_collation.down.sql": _000011_match_collationDownSql,
"000011_match_collation.up.sql": _000011_match_collationUpSql,
"000012_match_column_collation.down.sql": _000012_match_column_collationDownSql,
"000012_match_column_collation.up.sql": _000012_match_column_collationUpSql,
"000013_millisecond_timestamps.down.sql": _000013_millisecond_timestampsDownSql,
"000013_millisecond_timestamps.up.sql": _000013_millisecond_timestampsUpSql,
"000001_init.down.sql": _000001_initDownSql,
"000001_init.up.sql": _000001_initUpSql,
"000002_system_settings_table.down.sql": _000002_system_settings_tableDownSql,
"000002_system_settings_table.up.sql": _000002_system_settings_tableUpSql,
"000003_blocks_rootid.down.sql": _000003_blocks_rootidDownSql,
"000003_blocks_rootid.up.sql": _000003_blocks_rootidUpSql,
"000004_auth_table.down.sql": _000004_auth_tableDownSql,
"000004_auth_table.up.sql": _000004_auth_tableUpSql,
"000005_blocks_modifiedby.down.sql": _000005_blocks_modifiedbyDownSql,
"000005_blocks_modifiedby.up.sql": _000005_blocks_modifiedbyUpSql,
"000006_sharing_table.down.sql": _000006_sharing_tableDownSql,
"000006_sharing_table.up.sql": _000006_sharing_tableUpSql,
"000007_workspaces_table.down.sql": _000007_workspaces_tableDownSql,
"000007_workspaces_table.up.sql": _000007_workspaces_tableUpSql,
"000008_teams.down.sql": _000008_teamsDownSql,
"000008_teams.up.sql": _000008_teamsUpSql,
"000009_blocks_history.down.sql": _000009_blocks_historyDownSql,
"000009_blocks_history.up.sql": _000009_blocks_historyUpSql,
"000010_blocks_created_by.down.sql": _000010_blocks_created_byDownSql,
"000010_blocks_created_by.up.sql": _000010_blocks_created_byUpSql,
"000011_match_collation.down.sql": _000011_match_collationDownSql,
"000011_match_collation.up.sql": _000011_match_collationUpSql,
"000012_match_column_collation.down.sql": _000012_match_column_collationDownSql,
"000012_match_column_collation.up.sql": _000012_match_column_collationUpSql,
"000013_millisecond_timestamps.down.sql": _000013_millisecond_timestampsDownSql,
"000013_millisecond_timestamps.up.sql": _000013_millisecond_timestampsUpSql,
"000014_add_not_null_constraint.down.sql": _000014_add_not_null_constraintDownSql,
"000014_add_not_null_constraint.up.sql": _000014_add_not_null_constraintUpSql,
"000014_add_not_null_constraint.up.sql": _000014_add_not_null_constraintUpSql,
"000015_blocks_history_no_nulls.down.sql": _000015_blocks_history_no_nullsDownSql,
"000015_blocks_history_no_nulls.up.sql": _000015_blocks_history_no_nullsUpSql,
"000016_subscriptions_table.down.sql": _000016_subscriptions_tableDownSql,
"000016_subscriptions_table.up.sql": _000016_subscriptions_tableUpSql,
"000015_blocks_history_no_nulls.up.sql": _000015_blocks_history_no_nullsUpSql,
"000016_subscriptions_table.down.sql": _000016_subscriptions_tableDownSql,
"000016_subscriptions_table.up.sql": _000016_subscriptions_tableUpSql,
}
// AssetDir returns the file names below a certain
@ -873,40 +864,39 @@ type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"000001_init.down.sql": &bintree{_000001_initDownSql, map[string]*bintree{}},
"000001_init.up.sql": &bintree{_000001_initUpSql, map[string]*bintree{}},
"000002_system_settings_table.down.sql": &bintree{_000002_system_settings_tableDownSql, map[string]*bintree{}},
"000002_system_settings_table.up.sql": &bintree{_000002_system_settings_tableUpSql, map[string]*bintree{}},
"000003_blocks_rootid.down.sql": &bintree{_000003_blocks_rootidDownSql, map[string]*bintree{}},
"000003_blocks_rootid.up.sql": &bintree{_000003_blocks_rootidUpSql, map[string]*bintree{}},
"000004_auth_table.down.sql": &bintree{_000004_auth_tableDownSql, map[string]*bintree{}},
"000004_auth_table.up.sql": &bintree{_000004_auth_tableUpSql, map[string]*bintree{}},
"000005_blocks_modifiedby.down.sql": &bintree{_000005_blocks_modifiedbyDownSql, map[string]*bintree{}},
"000005_blocks_modifiedby.up.sql": &bintree{_000005_blocks_modifiedbyUpSql, map[string]*bintree{}},
"000006_sharing_table.down.sql": &bintree{_000006_sharing_tableDownSql, map[string]*bintree{}},
"000006_sharing_table.up.sql": &bintree{_000006_sharing_tableUpSql, map[string]*bintree{}},
"000007_workspaces_table.down.sql": &bintree{_000007_workspaces_tableDownSql, map[string]*bintree{}},
"000007_workspaces_table.up.sql": &bintree{_000007_workspaces_tableUpSql, map[string]*bintree{}},
"000008_teams.down.sql": &bintree{_000008_teamsDownSql, map[string]*bintree{}},
"000008_teams.up.sql": &bintree{_000008_teamsUpSql, map[string]*bintree{}},
"000009_blocks_history.down.sql": &bintree{_000009_blocks_historyDownSql, map[string]*bintree{}},
"000009_blocks_history.up.sql": &bintree{_000009_blocks_historyUpSql, map[string]*bintree{}},
"000010_blocks_created_by.down.sql": &bintree{_000010_blocks_created_byDownSql, map[string]*bintree{}},
"000010_blocks_created_by.up.sql": &bintree{_000010_blocks_created_byUpSql, map[string]*bintree{}},
"000011_match_collation.down.sql": &bintree{_000011_match_collationDownSql, map[string]*bintree{}},
"000011_match_collation.up.sql": &bintree{_000011_match_collationUpSql, map[string]*bintree{}},
"000012_match_column_collation.down.sql": &bintree{_000012_match_column_collationDownSql, map[string]*bintree{}},
"000012_match_column_collation.up.sql": &bintree{_000012_match_column_collationUpSql, map[string]*bintree{}},
"000013_millisecond_timestamps.down.sql": &bintree{_000013_millisecond_timestampsDownSql, map[string]*bintree{}},
"000013_millisecond_timestamps.up.sql": &bintree{_000013_millisecond_timestampsUpSql, map[string]*bintree{}},
"000001_init.down.sql": &bintree{_000001_initDownSql, map[string]*bintree{}},
"000001_init.up.sql": &bintree{_000001_initUpSql, map[string]*bintree{}},
"000002_system_settings_table.down.sql": &bintree{_000002_system_settings_tableDownSql, map[string]*bintree{}},
"000002_system_settings_table.up.sql": &bintree{_000002_system_settings_tableUpSql, map[string]*bintree{}},
"000003_blocks_rootid.down.sql": &bintree{_000003_blocks_rootidDownSql, map[string]*bintree{}},
"000003_blocks_rootid.up.sql": &bintree{_000003_blocks_rootidUpSql, map[string]*bintree{}},
"000004_auth_table.down.sql": &bintree{_000004_auth_tableDownSql, map[string]*bintree{}},
"000004_auth_table.up.sql": &bintree{_000004_auth_tableUpSql, map[string]*bintree{}},
"000005_blocks_modifiedby.down.sql": &bintree{_000005_blocks_modifiedbyDownSql, map[string]*bintree{}},
"000005_blocks_modifiedby.up.sql": &bintree{_000005_blocks_modifiedbyUpSql, map[string]*bintree{}},
"000006_sharing_table.down.sql": &bintree{_000006_sharing_tableDownSql, map[string]*bintree{}},
"000006_sharing_table.up.sql": &bintree{_000006_sharing_tableUpSql, map[string]*bintree{}},
"000007_workspaces_table.down.sql": &bintree{_000007_workspaces_tableDownSql, map[string]*bintree{}},
"000007_workspaces_table.up.sql": &bintree{_000007_workspaces_tableUpSql, map[string]*bintree{}},
"000008_teams.down.sql": &bintree{_000008_teamsDownSql, map[string]*bintree{}},
"000008_teams.up.sql": &bintree{_000008_teamsUpSql, map[string]*bintree{}},
"000009_blocks_history.down.sql": &bintree{_000009_blocks_historyDownSql, map[string]*bintree{}},
"000009_blocks_history.up.sql": &bintree{_000009_blocks_historyUpSql, map[string]*bintree{}},
"000010_blocks_created_by.down.sql": &bintree{_000010_blocks_created_byDownSql, map[string]*bintree{}},
"000010_blocks_created_by.up.sql": &bintree{_000010_blocks_created_byUpSql, map[string]*bintree{}},
"000011_match_collation.down.sql": &bintree{_000011_match_collationDownSql, map[string]*bintree{}},
"000011_match_collation.up.sql": &bintree{_000011_match_collationUpSql, map[string]*bintree{}},
"000012_match_column_collation.down.sql": &bintree{_000012_match_column_collationDownSql, map[string]*bintree{}},
"000012_match_column_collation.up.sql": &bintree{_000012_match_column_collationUpSql, map[string]*bintree{}},
"000013_millisecond_timestamps.down.sql": &bintree{_000013_millisecond_timestampsDownSql, map[string]*bintree{}},
"000013_millisecond_timestamps.up.sql": &bintree{_000013_millisecond_timestampsUpSql, map[string]*bintree{}},
"000014_add_not_null_constraint.down.sql": &bintree{_000014_add_not_null_constraintDownSql, map[string]*bintree{}},
"000014_add_not_null_constraint.up.sql": &bintree{_000014_add_not_null_constraintUpSql, map[string]*bintree{}},
"000014_add_not_null_constraint.up.sql": &bintree{_000014_add_not_null_constraintUpSql, map[string]*bintree{}},
"000015_blocks_history_no_nulls.down.sql": &bintree{_000015_blocks_history_no_nullsDownSql, map[string]*bintree{}},
"000015_blocks_history_no_nulls.up.sql": &bintree{_000015_blocks_history_no_nullsUpSql, map[string]*bintree{}},
"000016_subscriptions_table.down.sql": &bintree{_000016_subscriptions_tableDownSql, map[string]*bintree{}},
"000016_subscriptions_table.up.sql": &bintree{_000016_subscriptions_tableUpSql, map[string]*bintree{}},
"000015_blocks_history_no_nulls.up.sql": &bintree{_000015_blocks_history_no_nullsUpSql, map[string]*bintree{}},
"000016_subscriptions_table.down.sql": &bintree{_000016_subscriptions_tableDownSql, map[string]*bintree{}},
"000016_subscriptions_table.up.sql": &bintree{_000016_subscriptions_tableUpSql, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory
@ -955,3 +945,4 @@ func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}


@ -14,6 +14,7 @@ package sqlstore
import (
"context"
"io"
"time"
"github.com/mattermost/focalboard/server/model"
@ -248,6 +249,11 @@ func (s *SQLStore) HasWorkspaceAccess(userID string, workspaceID string) (bool,
}
func (s *SQLStore) ImportArchive(container store.Container, r io.Reader, userID string, mod model.BlockModifier) error {
return s.importArchive(s.db, container, r, userID, mod)
}
func (s *SQLStore) InsertBlock(c store.Container, block *model.Block, userID string) error {
tx, txErr := s.db.BeginTx(context.Background(), nil)
if txErr != nil {


@ -5,6 +5,7 @@ package store
import (
"errors"
"fmt"
"io"
"time"
"github.com/mattermost/focalboard/server/model"
@ -90,6 +91,8 @@ type Store interface {
GetNotificationHint(c Container, blockID string) (*model.NotificationHint, error)
GetNextNotificationHint(remove bool) (*model.NotificationHint, error)
ImportArchive(container Container, r io.Reader, userID string, mod model.BlockModifier) error
IsErrNotFound(err error) bool
}