dumping all of the database and utilities into the project, will work on the managers next

This commit is contained in:
2020-06-11 13:53:51 -04:00
parent 2cbdf21a81
commit 0ecb0b96ce
10 changed files with 741 additions and 0 deletions


@@ -0,0 +1,70 @@
package engine

import (
	"bytes"
	"compress/gzip"
	"encoding/gob"
	"fmt"
	"io"
)

type SomeStruct struct {
	A string
	B int64
	C float64
}

// 1. StructToBytes gob-encodes the struct into a buffer.
func StructToBytes(obj SomeStruct) (bytes.Buffer, error) {
	var indexBuffer bytes.Buffer
	encoder := gob.NewEncoder(&indexBuffer)
	if err := encoder.Encode(obj); err != nil {
		return indexBuffer, err
	}
	return indexBuffer, nil
}

// 2. BytesToGob gob-encodes a raw byte slice, so that GobToBytes
// can decode it back to []byte on the other side of the pipeline.
func BytesToGob(obj []byte) (bytes.Buffer, error) {
	var indexBuffer bytes.Buffer
	encoder := gob.NewEncoder(&indexBuffer)
	if err := encoder.Encode(obj); err != nil {
		return indexBuffer, err
	}
	return indexBuffer, nil
}

// 3. CompressBinary gzips the contents of binaryBuffer.
func CompressBinary(binaryBuffer *bytes.Buffer) (bytes.Buffer, error) {
	var compressionBuffer bytes.Buffer
	compressor := gzip.NewWriter(&compressionBuffer)
	if _, err := compressor.Write(binaryBuffer.Bytes()); err != nil {
		return compressionBuffer, err
	}
	// Close flushes the remaining data and writes the gzip footer.
	return compressionBuffer, compressor.Close()
}

// 4. DecompressBinary wraps the compressed bytes in a gzip.Reader.
// The caller must close the returned reader once it has been read;
// closing it here, before anything has been read, was a bug.
func DecompressBinary(compressionBuffer bytes.Buffer) (*gzip.Reader, error) {
	dataReader := bytes.NewReader(compressionBuffer.Bytes())
	reader, err := gzip.NewReader(dataReader)
	if err != nil {
		fmt.Println("gzip failed ", err)
		return nil, err
	}
	return reader, nil
}

// 5. GobToBytes decodes a gob stream produced by BytesToGob back
// into the original byte slice.
func GobToBytes(binaryBytes io.Reader) ([]byte, error) {
	decoder := gob.NewDecoder(binaryBytes)
	var tmp []byte
	if err := decoder.Decode(&tmp); err != nil {
		return tmp, err
	}
	return tmp, nil
}
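
Taken together, these helpers form a small encode, compress, decompress, decode pipeline. A minimal round-trip sketch of the intended call order, using only the functions defined above (the helper itself is illustrative, not part of the commit):

// roundTripExample is an illustrative helper (not part of the commit)
// showing the intended order of the calls above.
func roundTripExample() ([]byte, error) {
	structBuf, err := StructToBytes(SomeStruct{A: "a", B: 1, C: 2.5})
	if err != nil {
		return nil, err
	}
	gobBuf, err := BytesToGob(structBuf.Bytes())
	if err != nil {
		return nil, err
	}
	compressed, err := CompressBinary(&gobBuf)
	if err != nil {
		return nil, err
	}
	reader, err := DecompressBinary(compressed)
	if err != nil {
		return nil, err
	}
	defer reader.Close()
	// the decoded bytes equal structBuf.Bytes(), the gob of the struct
	return GobToBytes(reader)
}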


@@ -0,0 +1,51 @@
package engine

import (
	"fmt"
	"testing"
)

var testStruct = SomeStruct{
	A: "alex walker",
	B: 24,
	C: 1.234,
}

// TestMain is reserved by the testing package for func(*testing.M),
// so the round-trip test gets a regular name instead.
func TestGobRoundTrip(t *testing.T) {
	//use an assert library to check for similarity, or a require library to check something exists
	t.Run("check that we can 'gob' the data correctly", func(t *testing.T) {
		if structBytes, err := StructToBytes(testStruct); err != nil {
			t.Fatal("failed to create the bytes from the struct provided ", err)
		} else if binaryToGobBuffer, err := BytesToGob(structBytes.Bytes()); err != nil {
			t.Fatal("failed to create the gob from the bytes provided ", err)
		} else if compressedData, err := CompressBinary(&binaryToGobBuffer); err != nil {
			t.Fatal("failed to compress the gob data ", err)
		} else if decompressionReader, err := DecompressBinary(compressedData); err != nil {
			t.Fatal("failed to decompress the binary data ", err)
		} else if res, err := GobToBytes(decompressionReader); err != nil {
			t.Fatal("failed to decode the gob back to bytes ", err)
		} else {
			fmt.Printf("result %+v\r\n", res)
		}
	})
}

// openFile and writeFile are defined in fdelta.go; redeclaring them
// here would not compile, since this test shares package engine.

common/engine/fdelta.go Normal file

@@ -0,0 +1,94 @@
package engine

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/amlwwalker/fdelta"
)

// var originalFile string
// var newFile string
// var patchFile string
// var appliedFile string

// openReader drains an io.Reader into a byte slice.
func openReader(file io.Reader) ([]byte, error) {
	buffer, err := ioutil.ReadAll(file)
	return buffer, err
}

// openFile reads a file from disk in one go.
func openFile(path string) ([]byte, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		fmt.Println("File reading error", err)
	}
	return data, err
}

// writeFile writes data to disk, creating the file if necessary.
func writeFile(path string, data []byte) error {
	return ioutil.WriteFile(path, data, 0644)
}

// getOriginalBytes loads the base version of a file that deltas are
// computed against.
func getOriginalBytes(originalFile string) ([]byte, error) {
	originalBytes, err := openFile(originalFile)
	if err != nil {
		return []byte{}, err
	}
	return originalBytes, nil
}

// createDelta produces an fdelta diff between the original bytes and
// the new version of the file on disk.
func createDelta(newFile string, originalBytes []byte) ([]byte, error) {
	newBytes, err := openFile(newFile)
	if err != nil {
		return []byte{}, err
	}
	delta := fdelta.Create(originalBytes, newBytes)
	fmt.Println("size of delta ", len(delta))
	return delta, nil
}

// compressDelta gob-encodes and gzips a delta. The compressor helpers
// live in this package, so they are called directly (there is no
// imported `compressor` package).
func compressDelta(delta []byte) ([]byte, error) {
	if binaryToGobBuffer, err := BytesToGob(delta); err != nil {
		return []byte{}, err
	} else if compressedData, err := CompressBinary(&binaryToGobBuffer); err != nil {
		return []byte{}, err
	} else {
		return compressedData.Bytes(), nil
	}
}

// storeDelta compresses a delta and persists it as a patch file.
func storeDelta(patchFile string, delta []byte) ([]byte, error) {
	if compressedData, err := compressDelta(delta); err != nil {
		return []byte{}, err
	} else if err := writeFile(patchFile, compressedData); err != nil {
		return []byte{}, err
	} else {
		return compressedData, nil
	}
}

// retrieveDelta reads a stored (still compressed) patch file back off disk.
func retrieveDelta(patchFile string) ([]byte, error) {
	compressedData, err := openFile(patchFile)
	if err != nil {
		return []byte{}, err
	}
	return compressedData, nil
}

// decompressDelta reverses compressDelta: gunzip, then gob-decode.
func decompressDelta(compressedData []byte) ([]byte, error) {
	var compressedBuffer bytes.Buffer
	compressedBuffer.Write(compressedData)
	if decompressionReader, err := DecompressBinary(compressedBuffer); err != nil {
		return []byte{}, err
	} else if res, err := GobToBytes(decompressionReader); err != nil {
		return []byte{}, err
	} else {
		return res, nil
	}
}

// applyPatchToFile applies a decompressed delta to the original bytes,
// yielding the patched file contents.
func applyPatchToFile(originalBytes, delta []byte) ([]byte, error) {
	if patchedBytes, err := fdelta.Apply(originalBytes, delta); err != nil {
		return []byte{}, err
	} else {
		return patchedBytes, nil
	}
}


@@ -0,0 +1,33 @@
package engine

import (
	"fmt"
	"os"

	"github.com/amlwwalker/fdelta"
)

// a scratch driver for the delta pipeline; the file paths are placeholders
func main() {
	originalFile := "original.txt"
	newFile := "new.txt"
	patchFile := "patch.gz"
	appliedFile := "applied.txt"
	originalBytes, err := getOriginalBytes(originalFile)
	if err != nil {
		panic(err)
	}
	delta, err := createDelta(newFile, originalBytes)
	if err != nil {
		panic(err)
	}
	if _, err := storeDelta(patchFile, delta); err != nil {
		panic(err)
	}
	//test loading the delta from disk; the stored patch is compressed,
	//so it has to be decompressed before it can be applied
	compressed, err := retrieveDelta(patchFile)
	if err != nil {
		panic(err)
	}
	retrievedDelta, err := decompressDelta(compressed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("res : `%d` bytes\n", len(retrievedDelta))
	appliedBytes, err := fdelta.Apply(originalBytes, retrievedDelta)
	if err != nil {
		panic(err)
	}
	fmt.Println("exporting delta")
	if err := writeFile(appliedFile, appliedBytes); err != nil {
		fmt.Println("error writing bytes [3]", err)
		os.Exit(1)
	}
	fmt.Printf("Origin : `%s`\n", originalFile)
	fmt.Printf("Target : `%d` bytes\n", len(appliedBytes))
	fmt.Printf("Delta  : `%d` bytes\n", len(delta))
	fmt.Printf("Result : `%s`\n", appliedFile)
}

common/engine/patcher.go Normal file

@@ -0,0 +1,52 @@
package engine

import (
	logger "github.com/apsdehal/go-logger"
)

// The patcher is responsible for restoring files from stored diffs:
// given a backed-up base version of a file and a patch, it reconstructs
// the later version and writes it back to disk.
func NewPatcher(logger *logger.Logger, KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER string) (Patcher, error) {
	p := Patcher{
		logger,
		KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER,
	}
	return p, nil
}

// PatchFromFile takes the version of the file that was backed up
// and applies the specified patch to it, to get the latest file.
// This covers the case where the last save is the file you want back.
func (p *Patcher) PatchFromFile(filePath, patchPath, restorePath string) error {
	if subject, err := openFile(filePath); err != nil {
		p.ErrorF("error on subject file: %v", err)
		return err
	} else if patch, err := openFile(patchPath); err != nil {
		p.ErrorF("error on patch file: %v", err)
		return err
	} else {
		return p.applyPatch(subject, patch, restorePath)
	}
}

// applyPatch actively applies the patch to the subject. This could
// eventually be upgraded to support different patching algorithms.
func (p *Patcher) applyPatch(subject, patch []byte, restorePath string) error {
	delta, err := decompressDelta(patch)
	if err != nil {
		p.ErrorF("error decompressing delta: %v", err)
		return err
	}
	if appliedBytes, err := applyPatchToFile(subject, delta); err != nil {
		p.ErrorF("error applying delta to original file: %v", err)
		return err
	} else if err := writeFile(restorePath, appliedBytes); err != nil {
		p.ErrorF("error writing patched file: %v", err)
		return err
	}
	return nil
}
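
A minimal usage sketch of the patcher, assuming the logger setup from the go-logger README; the import path for this package, the folder arguments, and the file paths are all illustrative, not from the commit:

package main

import (
	"os"

	logger "github.com/apsdehal/go-logger"
	// assumes the engine package above is importable; path is illustrative
	"example.com/project/common/engine"
)

func main() {
	// logger construction follows the go-logger README
	log, err := logger.New("engine", 1, os.Stdout)
	if err != nil {
		panic(err)
	}
	// folder arguments here are placeholders
	p, err := engine.NewPatcher(log, "keys", "downloads", "sync", "thumbs", "diffs")
	if err != nil {
		panic(err)
	}
	// restore latest.txt by applying patch.gz to the backed-up base.txt
	if err := p.PatchFromFile("base.txt", "patch.gz", "latest.txt"); err != nil {
		log.ErrorF("restore failed: %v", err)
	}
}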


@@ -0,0 +1,25 @@
package engine

import (
	logger "github.com/apsdehal/go-logger"
	radovskyb "github.com/radovskyb/watcher"
)

// Watcher wraps the radovskyb file watcher together with a logger,
// an enable flag, and the folder layout the engine works against.
type Watcher struct {
	*radovskyb.Watcher
	*logger.Logger
	Enabled        bool
	KEYFOLDER      string
	DOWNLOADFOLDER string
	SYNCFOLDER     string
	THUMBFOLDER    string
	DIFFFOLDER     string
}

// Patcher carries the logger and the same folder layout, and is used
// to restore files from stored diffs.
type Patcher struct {
	*logger.Logger
	KEYFOLDER      string
	DOWNLOADFOLDER string
	SYNCFOLDER     string
	THUMBFOLDER    string
	DIFFFOLDER     string
}
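
Both structs embed *logger.Logger, which is why calls like p.ErrorF and w.Infof elsewhere in this commit resolve without naming a field. A miniature, self-contained sketch of that method promotion (the types here are stand-ins, not from the commit):

package main

import "fmt"

// miniature stand-in for *logger.Logger
type miniLogger struct{}

func (l *miniLogger) ErrorF(format string, args ...interface{}) {
	fmt.Printf(format+"\n", args...)
}

// embedding promotes the logger's methods onto the outer struct
type miniPatcher struct {
	*miniLogger
}

func main() {
	p := miniPatcher{&miniLogger{}}
	p.ErrorF("promoted call: %v", "works") // resolves to p.miniLogger.ErrorF
}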

common/engine/watcher.go Normal file

@@ -0,0 +1,132 @@
package engine

import (
	"context"
	"path/filepath"
	"sync"
	"time"

	logger "github.com/apsdehal/go-logger"
	radovskyb "github.com/radovskyb/watcher"
	// NOTE: this file also depends on a sibling `utilities` package
	// (DiffObject, File, ScreenshotWrapper); its import path is not
	// part of this diff.
)

// key is a private context-key type; using it instead of a raw string
// avoids collisions with context values set by other packages.
type key string

type Event struct {
	Name     string
	Progress int
	Total    int
}

// The watcher is responsible for not only seeing when a file changes,
// but also keeping track of
// * the file hash, so that if the file changes again any modifications can be handled
// * copying any versions and keeping them safe (even if temporary)
// * creating the diff of the file, in both directions if necessary
// * storing the details in the database
func NewWatcher(logger *logger.Logger, KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER string) (Watcher, error) {
	w := Watcher{
		radovskyb.New(),
		logger,
		true, //used to temporarily ignore events if necessary
		KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER,
	}
	return w, nil
}

// Ignore disables the watcher so the next event is skipped.
func (w *Watcher) Ignore() bool {
	w.Enabled = false
	return w.Enabled
}

// Enable turns event handling back on.
func (w *Watcher) Enable() bool {
	w.Enabled = true
	return w.Enabled
}

// IsEnabled reports whether events are currently being handled.
func (w *Watcher) IsEnabled() bool {
	return w.Enabled
}

// BeginWatcherRoutine kicks off the watcher. When the watcher notices a
// file change, this routine handles the resulting event (or error) as it
// occurs. Any behaviour that must run on a change is injected via the
// manager's lambda functions (onFileChanged).
// TODO: should return an error
func (w *Watcher) BeginWatcherRoutine(ctx context.Context, wg *sync.WaitGroup, diffChannel chan utilities.DiffObject, onFileChanged func(string) (utilities.File, error)) {
	// one cancel function per path: if a file fires again while a diff
	// for it is still running, the old diff can be cancelled first
	cancelFunctions := make(map[string]func())
	for {
		select {
		// the [Op]erations to listen for were filtered when the watcher
		// was configured, so no need to check them here
		case event := <-w.Event:
			if !w.IsEnabled() {
				w.Infof("ignoring event and re-enabling the watcher %s\r\n", event)
				w.Enable()
				continue
			}
			w.Infof("event fired %s", event)
			//this is currently slow as it does a db lookup on the path.
			//TODO: on load (or whenever a file is added to the watcher) the db
			//information for watched files could be cached in memory, which
			//would be much faster
			fileInfo, err := onFileChanged(event.Path) //could return the 'Event' object here
			if err != nil {
				w.ErrorF("path was not returned to sync path %v", err)
				continue
			}
			syncFilePath := fileInfo.CurrentBase
			uniqueName := fileInfo.Unique
			//begin taking a screenshot, if we are supposed to
			screenshotChannel := make(chan utilities.ScreenshotWrapper)
			go func(ssChannel chan utilities.ScreenshotWrapper) {
				w.Infof("beginning taking screenshot at %s", time.Now())
				var ssStruct utilities.ScreenshotWrapper
				if screenshotFileName, err := takeScreenShot(w.THUMBFOLDER, uniqueName); err != nil {
					w.WarningF("could not take screenshot %v", err)
					ssStruct.ScreenshotError = err
				} else {
					ssStruct.Screenshot = filepath.Join(w.THUMBFOLDER, screenshotFileName)
					w.Infof("screenshot recorded %s at %s", ssStruct.Screenshot, time.Now())
				}
				ssChannel <- ssStruct
			}(screenshotChannel)
			// fileID := fileInfo.ID
			//we need the hash of the current base, not the hash of the original file
			// fileHash := fileInfo.CurrentHash
			//cancel the previous diff for this path if one is still running
			if cancelFunctions[event.Path] != nil {
				cancelFunctions[event.Path]()
				delete(cancelFunctions, event.Path)
			}
			//context for the current event: calling cancel kills the routines
			//spawned for it. The cancel function is stored in a map keyed by
			//path, so a later event for the same file can cancel the in-flight
			//diff; cancelling from elsewhere would need a cancel channel that
			//in turn calls the equivalent cancel function.
			cancelContext, cancel := context.WithCancel(ctx)
			cancelFunctions[event.Path] = cancel
			//the local `key` type keeps context keys from conflicting across
			//namespaces. TODO: too many things here are called an "event";
			//the naming needs a tidy-up
			e := Event{
				Name:     event.Path,
				Progress: 0,
				Total:    100,
			}
			eventContext := context.WithValue(cancelContext, key(event.Path), e)
			if err := manageFileDiffing(eventContext, event.Path, syncFilePath, w.DIFFFOLDER, true, screenshotChannel, diffChannel, wg); err != nil {
				// I don't think this can be reached...
				w.WarningF("Error managing the diffing process %s", err)
			}
		case err := <-w.Watcher.Error:
			w.ErrorF("%s\r\n", err)
		case <-w.Closed:
			w.Notice("radovskyb closed")
			return
		}
	}
}
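
The per-path cancellation pattern in BeginWatcherRoutine is easier to see in isolation. A minimal, self-contained sketch of the same idea (names and timings here are illustrative, not from the commit):

package main

import (
	"context"
	"fmt"
	"time"
)

// one cancel func per path: a new event for the same path cancels the
// in-flight work before starting again
func main() {
	cancels := make(map[string]context.CancelFunc)
	events := []string{"a.txt", "b.txt", "a.txt"} // a.txt fires twice

	for _, path := range events {
		if cancel, ok := cancels[path]; ok {
			cancel() // kill the previous diff for this path
			delete(cancels, path)
		}
		ctx, cancel := context.WithCancel(context.Background())
		cancels[path] = cancel
		go func(ctx context.Context, path string) {
			select {
			case <-time.After(100 * time.Millisecond): // stand-in for the diff work
				fmt.Println("diff finished for", path)
			case <-ctx.Done():
				fmt.Println("diff cancelled for", path)
			}
		}(ctx, path)
	}
	time.Sleep(200 * time.Millisecond)
}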