starting to write the manager library
@@ -86,9 +86,9 @@ func decompressDelta(compressedData []byte) ([]byte, error) {
 }
 
 func applyPatchToFile(originalbytes, delta []byte) ([]byte, error) {
-	if patchedBytes, err := fdelta.Apply(originalbytes, delta); err != nil {
+	patchedBytes, err := fdelta.Apply(originalbytes, delta)
+	if err != nil {
 		return []byte{}, err
-	} else {
-		return patchedBytes, nil
 	}
+	return patchedBytes, nil
 }
common/engine/filesystem.go (new file, 140 lines)
@@ -0,0 +1,140 @@
package engine

import (
	"bytes"
	"compress/gzip"
	"encoding/binary"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	logger "github.com/apsdehal/go-logger"
)

var log *logger.Logger

func init() {
	var err error
	log, err = logger.New("utilities logger", 1, os.Stdout)
	if err != nil {
		panic(err)
	}
	log.SetFormat("[%{module}] [%{level}] %{message}")
	log.Info("Utilities logger Created")
}

// CompressIntArray compresses an array of integers into a buffer
func CompressIntArray(arry []int64, compressionBuffer *bytes.Buffer) (float64, error) {
	buf := new(bytes.Buffer)
	err := binary.Write(buf, binary.LittleEndian, arry)
	if err != nil {
		return 0, err
	}
	//now compress it
	compressor := gzip.NewWriter(compressionBuffer)
	// if err != nil {
	// 	fmt.Println("writer level failed to set compression level")
	// }
	if _, err := compressor.Write(buf.Bytes()); err != nil {
		return 0, err
	}
	if err := compressor.Close(); err != nil {
		return 0, err
	}
	ratio := float64(len(compressionBuffer.Bytes())) / float64(len(buf.Bytes()))
	return ratio, nil
}

// ExpandToIntArray firstly unzips the byte array, then it
// converts the byte array back into an int array for use
func ExpandToIntArray(length int64, arry []byte, intArray *[]int64) error {
	buf := bytes.NewBuffer(arry)
	if reader, err := gzip.NewReader(buf); err != nil {
		fmt.Println("gzip failed ", err)
		return err
	} else {
		*intArray = make([]int64, length) //you must know the length of the original data if you are to do it this way.
		err := binary.Read(reader, binary.LittleEndian, intArray)
		if err != nil {
			fmt.Println("read failed ", err)
		}
		return nil
	}
}

// VerifySrcFile checks to see that the file is a regular file
// that the OS has meta information about and that can be read by
// the os.
func VerifySrcFile(src string) (string, error) {
	_, fileName := filepath.Split(src) //dirPath
	sourceFileStat, err := os.Stat(src)
	if err != nil {
		return fileName, errors.New("error on os.Stat " + err.Error())
	}

	if !sourceFileStat.Mode().IsRegular() {
		return fileName, fmt.Errorf("%s is not a regular file", src)
	}
	return fileName, nil
}

func InitiateDirectory(directory string) {
	// For the keys-folder we need to check if the folder exists...
	checkDir, err := IsDirectory(directory)
	if err != nil {
		log.ErrorF("Error checking for "+directory+" directory: %s\r\n", err)
		panic(err)
	}

	if checkDir {
		log.Warning(directory + " already exists")
	} else {
		// Create the directory.
		log.Info("Creating " + directory)
		err = CreateDirectory(directory)
		if err != nil {
			log.ErrorF("Error creating the folder %s\r\n", err)
		}
	}
}

func IsDirectory(path string) (bool, error) {

	s, err := os.Stat(path) // returns an error if the path does not exist.
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err // Different error...?
	}

	if s.IsDir() {
		return true, nil
	}

	return false, nil // Redundancy

}

func CreateDirectory(path string) error {
	// Assumes checks have been done on if the directory exists...
	err := os.MkdirAll(path, os.ModePerm)
	if err != nil {
		return err
	}
	return nil // Redundancy

}

func DeleteDirectory(path string) error {
	err := os.RemoveAll(path)
	return err

}

//TODO: Most likely can be done with the filepath command so replace this everywhere
func StripFilePathBase(pathToFile, base string) string {
	return strings.Replace(pathToFile, base, "", -1)
}
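For context, here is a minimal sketch of how the new filesystem helpers round-trip an int64 slice through gzip. The main function, the module path github.com/deranjer/gvc/common/engine (mirroring the common/database import later in this commit), and the sample data are illustrative assumptions, not part of the commit:

package main

import (
	"bytes"
	"fmt"

	"github.com/deranjer/gvc/common/engine" // assumed module path
)

func main() {
	original := []int64{1, 2, 3, 5, 8, 13}

	// Compress the slice; the first return value is the compressed/original size ratio.
	var compressed bytes.Buffer
	ratio, err := engine.CompressIntArray(original, &compressed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("compression ratio: %.2f\n", ratio)

	// Expand it back; the caller must supply the original element count.
	var restored []int64
	if err := engine.ExpandToIntArray(int64(len(original)), compressed.Bytes(), &restored); err != nil {
		panic(err)
	}
	fmt.Println(restored) // [1 2 3 5 8 13]
}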
@@ -3,7 +3,7 @@ package engine
 import (
 	"fmt"
 
-	logger "github.com/apsdehal/go-logger"
+	"github.com/rs/zerolog"
 )
 
 // The watcher is responsible for not only seeing when a file changes,
@@ -12,7 +12,7 @@ import (
 // * copying any versions and keeping them safe (even if temporary)
 // * creating the diff of the file, in both directions if necessary
 // * storing the details in the database
-func NewPatcher(logger *logger.Logger, KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER string) (Patcher, error) {
+func NewPatcher(logger *zerolog.Logger, KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER string) (Patcher, error) {
 	p := Patcher{
 		logger,
 		KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER,
@@ -25,28 +25,26 @@ func NewPatcher(logger *logger.Logger, KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, TH
 // last save is the file you want to get.
 func (p *Patcher) PatchFromFile(filePath, patchPath, restorePath string) error {
 	if subject, err := openFile(filePath); err != nil {
-		return fmt.Errorf("error on subject file: ", err)
+		return fmt.Errorf("error on subject file: %s", err)
 	} else if patch, err := openFile(patchPath); err != nil {
-		return fmt.Errorf("error on patch file: ", err)
+		return fmt.Errorf("error on patch file: %s", err)
 	} else {
 		return p.applyPatch(subject, patch, restorePath)
 	}
 	return nil
 }
 
 //applyPatch actively applies the patch to the subject. This could eventually
 // be upgraded for different patching algorithms
 func (p *Patcher) applyPatch(subject, patch []byte, restorePath string) error {
 	if delta, err := decompressDelta(patch); err != nil {
-		return fmt.Errorf("error decompressing delta", err)
+		return fmt.Errorf("error decompressing delta %s", err)
 	} else {
 		if appliedBytes, err := applyPatchToFile(subject, delta); err != nil {
-			return fmt.Errorf("error applying delta to original file", err)
+			return fmt.Errorf("error applying delta to original file %s", err)
 		} else if err := writeFile(restorePath, appliedBytes); err != nil {
-			return fmt.Errorf("error writing patchedFile", err)
-		} else {
-			return nil
+			return fmt.Errorf("error writing patchedFile %s", err)
 		}
 		return nil
 	}
 	return nil
 
 }
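As a rough usage sketch of the new signature, a *zerolog.Logger is now passed into NewPatcher. The folder names, file paths, and error handling below are placeholders, not taken from the repository:

package main

import (
	"os"

	"github.com/deranjer/gvc/common/engine" // assumed module path
	"github.com/rs/zerolog"
)

func main() {
	// zerolog replaces go-logger throughout this commit.
	log := zerolog.New(os.Stdout).With().Timestamp().Logger()

	p, err := engine.NewPatcher(&log, ".keys", ".downloads", ".sync", ".thumbs", ".diffs")
	if err != nil {
		log.Fatal().Err(err).Msg("could not create patcher")
	}

	// Rebuild a file by applying a stored delta to the last saved base.
	if err := p.PatchFromFile("base.bin", "base.bin.diff", "restored.bin"); err != nil {
		log.Err(err).Msg("patch failed")
	}
}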
@@ -1,13 +1,13 @@
 package engine
 
 import (
-	logger "github.com/apsdehal/go-logger"
 	watcher "github.com/radovskyb/watcher"
+	"github.com/rs/zerolog"
 )
 
 type FileWatcher struct {
 	*watcher.Watcher
-	*logger.Logger
+	*zerolog.Logger
 	Enabled bool
 	KEYFOLDER string
 	DOWNLOADFOLDER string
@@ -16,7 +16,7 @@ type FileWatcher struct {
 	DIFFFOLDER string
 }
 type Patcher struct {
-	*logger.Logger
+	*zerolog.Logger
 	KEYFOLDER string
 	DOWNLOADFOLDER string
 	SYNCFOLDER string
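Because FileWatcher and Patcher embed *zerolog.Logger where they previously embedded *logger.Logger, the logger's methods are promoted onto the structs; that is why calls such as fw.Infof(...) in the next file become fw.Info().Msgf(...). A standalone illustration of that promotion follows (this trimmed-down struct is not the project's real type):

package main

import (
	"os"

	"github.com/rs/zerolog"
)

// Embedding the logger pointer promotes its methods onto the outer type.
type FileWatcher struct {
	*zerolog.Logger
	Enabled bool
}

func main() {
	log := zerolog.New(os.Stdout)
	fw := FileWatcher{Logger: &log, Enabled: true}

	// Promoted method: shorthand for fw.Logger.Info().Msg(...).
	fw.Info().Msg("watcher ready")
}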
@@ -4,9 +4,9 @@ import (
 	"context"
 	"sync"
 
-	logger "github.com/apsdehal/go-logger"
 	"github.com/deranjer/gvc/common/database"
 	watcher "github.com/radovskyb/watcher"
+	"github.com/rs/zerolog"
 )
 
 type key string
@@ -22,7 +22,7 @@ type Event struct {
 // * copying any versions and keeping them safe (even if temporary)
 // * creating the diff of the file, in both directions if necessary
 // * storing the details in the database
-func NewWatcher(logger *logger.Logger, KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER string) (FileWatcher, error) {
+func NewWatcher(logger *zerolog.Logger, KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER string) (FileWatcher, error) {
 	w := FileWatcher{
 		watcher.New(),
 		logger,
@@ -59,11 +59,11 @@ func (fw *FileWatcher) BeginWatcherRoutine(ctx context.Context, wg *sync.WaitGro
 		// we have filtered already on the [Op]erations we want to listen for so no need to check here
 		case event := <-fw.Watcher.Event:
 			if !fw.IsEnabled() {
-				fw.Infof("ignoring event and reenabling the watcher %s\r\n", event)
+				fw.Info().Msgf("ignoring event and reenabling the watcher %s\r\n", event)
 				fw.Enable()
 				continue
 			}
-			fw.Infof("event fired ", event)
+			fw.Info().Msgf("event fired ", event)
 			//this is currently slow as it does a db lookup on the path.
 			//TODO: On load (or whenever a file is added to the watcher, the db information for files being watched, could be cached in memory. This would be much faster)
 			fileInfo, err := onFileChanged(event.Path) //could return the 'Event' object here
@@ -74,7 +74,7 @@ func (fw *FileWatcher) BeginWatcherRoutine(ctx context.Context, wg *sync.WaitGro
 			//we need the hash of the current base, not the hash of the original file
 			// fileHash := fileInfo.CurrentHash //hash needs to come from
 			if err != nil {
-				fw.ErrorF("path was not returned to sync path", err)
+				fw.Err(err).Msg("path was not returned to sync path")
 				continue
 			}
 			//cancel the event if it indeed is running...
@@ -105,12 +105,12 @@ func (fw *FileWatcher) BeginWatcherRoutine(ctx context.Context, wg *sync.WaitGro
 			eventContext := context.WithValue(cancelContext, key(event.Path), e)
 			if err := manageFileDiffing(eventContext, event.Path, syncFilePath, fw.DIFFFOLDER, true, diffChannel, wg); err != nil {
 				// I don't think this can be reached...
-				fw.WarningF("Error managing the diffing process %s", err)
+				fw.Warn().Msgf("Error managing the diffing process %s", err)
 			}
 		case err := <-fw.Watcher.Error:
-			fw.Errorf("%s\r\n", err)
+			fw.Err(err)
 		case <-fw.Watcher.Closed:
-			fw.Notice("radovskyb closed")
+			//fw.Notice("radovskyb closed")
 			return
 		}
 	}
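Finally, a rough sketch of constructing the zerolog-backed watcher with the new NewWatcher signature. The module path and folder names are assumptions, and BeginWatcherRoutine is omitted here because its full signature is not visible in this diff:

package main

import (
	"os"

	"github.com/deranjer/gvc/common/engine" // assumed module path
	"github.com/rs/zerolog"
)

func main() {
	log := zerolog.New(os.Stdout).With().Timestamp().Logger()

	// Same folder parameters as NewPatcher; the values are placeholders.
	fw, err := engine.NewWatcher(&log, ".keys", ".downloads", ".sync", ".thumbs", ".diffs")
	if err != nil {
		log.Fatal().Err(err).Msg("could not create watcher")
	}
	defer fw.Watcher.Close()

	// The promoted zerolog methods replace the old go-logger calls.
	fw.Info().Msg("file watcher created")
}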