making changes to the common library, starting to integrate database functions
@@ -19,7 +19,7 @@ func (db *DB) ConfigureDB(dbPath string) error {
 		db.ErrorF("Error finding file by path %s", err)
 		return err
 	}
-	db.WarningF("No file found. initialising the database")
+	db.WarningF("No existing database found. initialising new database")
 	file.Name = "-- All Files --"
 	//file.Ignore = true //this is currently not used however could result in this file being ignored when file watching (etc) starts
 	if err := db.Save(&file); err != nil {
@@ -53,11 +53,11 @@ type DiffObject struct {
 	DiffPath string //path of the diff/patch
 	//Label string //store a comment if the user wants to (user written)
 	//Screenshot string //path to the screenshot when the diff was made
 	Fs bool //whether it was written directly to the filesystem
 	Description string //record of forward or backward (just a quick helper)
 	E error //a record of the error when it was created. May be able to optimize this out later
-	//Diff *[]byte //the diff itself (in case we want to store it in memory) - unused as of now
+	Diff *[]byte //the diff itself (in case we want to store it in memory) - unused as of now
 	DiffSize int64 //the size of the diff in bytes
 	StartTime time.Time //when was the diff created (can take a while to create)
 	Message string //any message we want to store against the diff while it's created
 }
@@ -37,7 +37,7 @@ func TestMain(t *testing.T) {
 	})
 }
 
-func openFile(path string) ([]byte, error) {
+func testOpenFile(path string) ([]byte, error) {
 	data, err := ioutil.ReadFile(path)
 	if err != nil {
 		fmt.Println("File reading error", err)
@@ -45,7 +45,7 @@ func openFile(path string) ([]byte, error) {
 	return data, err
 }
 
-func writeFile(path string, data []byte) error {
+func testWriteFile(path string, data []byte) error {
 	err := ioutil.WriteFile(path, data, 0644)
 	return err
 }
common/engine/diff.go (new file, 136 lines)
@@ -0,0 +1,136 @@
package engine

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"sync"
	"time"

	"github.com/deranjer/gvc/common/database"
)

// manageFileDiffing handles creating the diffs on the background routines and creating the information
// about each diff that is made.
//
// TODO: fs works, however it takes a while to write the diffs to disk. It may be a better idea to keep the diffs
// in memory (although they could get huge??) and then write them to disk at a later point in time.
// In any event, this works now.
//
// TODO: Be able to cancel a diff creation (for instance if the user resaves). Does this work? Should we block
// creating diffs within 5 minutes of creating one? Cancelling is probably better at this point.
// It might be nice to inform the user when diffs build up.
func manageFileDiffing(ctx context.Context, subject, object, diffStorageLocation string, fs bool, diffChannel chan database.DiffObject, wg *sync.WaitGroup) error {

	var subjectHash, objectHash [16]byte
	var err error
	if subjectHash, err = UniqueFileHash(subject); err != nil {
		return err
	}
	if objectHash, err = UniqueFileHash(object); err != nil {
		return err
	}

	diffTime := time.Now()
	wg.Add(1)
	go func(messages chan<- database.DiffObject) {
		defer wg.Done()

		var dO database.DiffObject
		//doing this on a routine so as not to lose any time... does it change anything?
		dO.Description = ""
		dO.Subject = object
		dO.Object = subject
		dO.StartTime = diffTime
		dO.SubjectHash = objectHash //TODO: these being the wrong way round is a legacy thing. Swapping them needs testing, but should be fine
		dO.ObjectHash = subjectHash
		fmt.Println("creating diff object now")
		if diff, err := binaryDiff(ctx, &dO, diffStorageLocation, fs); err != nil { //binaryDiff
			fmt.Println("error from binary diff ", err)
			dO.E = err
		} else {
			dO.Diff = &diff
		}
		// ssStruct := <-ssChannel
		// fmt.Printf("received over ssChannel %+v\r\n", ssStruct)
		// if ssStruct.ScreenshotError != nil {
		// 	fmt.Println("screenshot failed, ", ssStruct.ScreenshotError)
		// } else {
		// 	fmt.Println("diff received screenshot ", ssStruct.Screenshot)
		// 	dO.Screenshot = ssStruct.Screenshot
		// }
		elapsed := time.Since(diffTime)
		dO.Message = "elapsed time:" + elapsed.String()
		messages <- dO
	}(diffChannel)
	return nil
}

//dryrun can be run instead of binaryDiff to turn diffing off
func dryrun(ctx context.Context, dO *database.DiffObject, diffStorageLocation string, fs bool) ([]byte, error) {
	return []byte{}, nil
}

// binaryDiff manages the creation of the diffs but doesn't actually create the diffs itself.
// Sources are file system sources in this case and an array of diffs (io.Writers) are returned.
// 1. This handles whether to save the diffs directly to the drive, and if so, will save to the
//    specified location. If so, it will return the diffs.
// 2. Whether to save diffs in both directions.
// 3. Creates a diff object that contains any necessary metadata about the diff files.
// subject is the file that changed, object is the file on record.
func binaryDiff(ctx context.Context, dO *database.DiffObject, diffStorageLocation string, fs bool) ([]byte, error) {
	var fileName string
	_, fileName = filepath.Split(dO.Subject) // dirPath
	dO.Watching = fileName
	// var sub io.Reader
	// if sub, err = os.Open(dO.Subject); err != nil {
	// 	return []byte{}, err
	// }
	// var obj io.Reader
	// if obj, err = os.Open(dO.Object); err != nil {
	// 	return []byte{}, err
	// }
	startTime := strconv.FormatInt(dO.StartTime.Unix(), 10)
	if fs { //if the wish is to store to the filesystem
		dO.DiffPath = filepath.Join(diffStorageLocation, fileName+"_"+startTime+"_"+dO.Description) + "_diff.patch"
		if writeDiff, err := os.Create(dO.DiffPath); err != nil {
			return []byte{}, err
		} else if deltaBytes, err := fdeltaDiff(ctx, dO.Subject, dO.Object); err != nil {
			return []byte{}, err
		} else {
			if bytesWritten, err := writeDiff.Write(deltaBytes); err != nil {
				return []byte{}, err
			} else {
				dO.DiffSize = int64(bytesWritten)
				return []byte{}, nil
			}
		}
	} else { //if we actually want the bytes we have to set fs to false (can do this above.)
		if deltaBytes, err := fdeltaDiff(ctx, dO.Subject, dO.Object); err != nil {
			return []byte{}, err
		} else {
			dO.DiffSize = int64(len(deltaBytes))
			return deltaBytes, nil
		}
	}
}

//sub is the original
func fdeltaDiff(ctx context.Context, sub, obj string) ([]byte, error) {
	//now follow what is found in fdelta to retrieve the bytes and get back a delta
	//you can use the gob/compression code to save the files according to where in pickle it they are saved
	//TODO: currently the code is used to compress the bsdiff index, but we don't need that, we just need to store the
	// delta on disk. This is currently already done somewhere, so we can possibly add/swap out the delta and compressor code
	// so that it uses the new code.
	if originalBytes, err := getOriginalBytes(sub); err != nil {
		return []byte{}, err
	} else if deltaBytes, err := createDelta(obj, originalBytes); err != nil {
		return []byte{}, err
	} else if compressedDelta, err := compressDelta(deltaBytes); err != nil {
		return []byte{}, err
	} else {
		return compressedDelta, nil
	}
}
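For orientation, the DiffObject values sent into diffChannel above still have to be drained and persisted somewhere; that wiring is not part of this commit. A minimal consumer sketch, assuming a store callback along the lines of the db.Save call shown in ConfigureDB (the drainDiffs name and the callback signature are illustrative only):

package engine

import (
	"fmt"
	"sync"

	"github.com/deranjer/gvc/common/database"
)

// drainDiffs reads completed DiffObjects off the channel and hands them to store.
// store is a placeholder for whatever database call ends up persisting the record.
func drainDiffs(diffChannel chan database.DiffObject, wg *sync.WaitGroup, store func(*database.DiffObject) error) {
	go func() {
		for dO := range diffChannel {
			if dO.E != nil {
				fmt.Println("diff failed for", dO.Subject, ":", dO.E)
				continue
			}
			if err := store(&dO); err != nil {
				fmt.Println("could not record diff:", err)
			}
		}
	}()
	wg.Wait()          // every manageFileDiffing goroutine has sent its result
	close(diffChannel) // no further diffs will arrive
}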
@@ -48,9 +48,9 @@ func createDelta(newFile string, originalBytes []byte) ([]byte, error) {
 	return delta, nil
 }
 func compressDelta(delta []byte) ([]byte, error) {
-	if binaryToGobBuffer, err := compressor.BytesToGob(delta); err != nil {
+	if binaryToGobBuffer, err := BytesToGob(delta); err != nil {
 		return []byte{}, err
-	} else if compressedData, err := compressor.CompressBinary(&binaryToGobBuffer); err != nil {
+	} else if compressedData, err := CompressBinary(&binaryToGobBuffer); err != nil {
 		return []byte{}, err
 	} else {
 		return compressedData.Bytes(), nil
@@ -76,9 +76,9 @@ func retrieveDelta(patchFile string) ([]byte, error) {
 func decompressDelta(compressedData []byte) ([]byte, error) {
 	var compressedBuffer bytes.Buffer
 	compressedBuffer.Write(compressedData)
-	if decompressionReader, err := compressor.DecompressBinary(compressedBuffer); err != nil {
+	if decompressionReader, err := DecompressBinary(compressedBuffer); err != nil {
 		return []byte{}, err
-	} else if res, err := compressor.GobToBytes(decompressionReader); err != nil {
+	} else if res, err := GobToBytes(decompressionReader); err != nil {
 		return []byte{}, err
 	} else {
 		return res, nil
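compressDelta and decompressDelta are intended to be inverses: the delta is gob-encoded and compressed on the way in, and decoded back to the original bytes on the way out. A small round-trip check could look like this (a sketch only; the sample payload and test name are made up):

package engine

import (
	"bytes"
	"testing"
)

func TestDeltaCompressionRoundTrip(t *testing.T) {
	delta := []byte("example delta payload")
	compressed, err := compressDelta(delta)
	if err != nil {
		t.Fatal(err)
	}
	restored, err := decompressDelta(compressed)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(delta, restored) {
		t.Fatalf("round trip mismatch: got %q, want %q", restored, delta)
	}
}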
@@ -1,33 +1,26 @@
 package engine
 
-import (
-	"fmt"
-	"os"
-
-	"github.com/amlwwalker/fdelta"
-)
-
-func main() {
-	DefineFiles()
-	originalBytes := GetOriginalBytes()
-	delta := CreateDelta(originalBytes)
-	StoreDelta(delta)
-	retrievedDelta := RetrieveDelta()
-	// var deltaBytes []byte
-	fmt.Printf("res : `%s`\n", len(retrievedDelta))
-	//test loading the delta from disk
-	appliedBytes, err := fdelta.Apply(originalBytes, retrievedDelta)
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println("exporting delta")
-	err = writeFile(appliedFile, appliedBytes)
-	if err != nil {
-		fmt.Println("error reading bytes [3]", err)
-		os.Exit(1)
-	}
-	fmt.Printf("Origin : `%s`\n", originalFile)
-	fmt.Printf("Target : `%s`\n", len(appliedBytes))
-	fmt.Printf("Delta : `%s`\n", len(delta))
-	fmt.Printf("Result: `%s`\n", appliedFile)
-}
+// func main() {
+// 	DefineFiles()
+// 	originalBytes := GetOriginalBytes()
+// 	delta := CreateDelta(originalBytes)
+// 	StoreDelta(delta)
+// 	retrievedDelta := RetrieveDelta()
+// 	// var deltaBytes []byte
+// 	fmt.Printf("res : `%s`\n", len(retrievedDelta))
+// 	//test loading the delta from disk
+// 	appliedBytes, err := fdelta.Apply(originalBytes, retrievedDelta)
+// 	if err != nil {
+// 		panic(err)
+// 	}
+// 	fmt.Println("exporting delta")
+// 	err = writeFile(appliedFile, appliedBytes)
+// 	if err != nil {
+// 		fmt.Println("error reading bytes [3]", err)
+// 		os.Exit(1)
+// 	}
+// 	fmt.Printf("Origin : `%s`\n", originalFile)
+// 	fmt.Printf("Target : `%s`\n", len(appliedBytes))
+// 	fmt.Printf("Delta : `%s`\n", len(delta))
+// 	fmt.Printf("Result: `%s`\n", appliedFile)
+// }
common/engine/filehashing.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package engine

import (
	"github.com/kalafut/imohash"
)

// UniqueFileHash creates a fast hash of a file. It's not bulletproof (it could cause a collision, but in practice that's unlikely), but it's fast.
func UniqueFileHash(src string) ([16]byte, error) {
	hash, err := imohash.SumFile(src)
	if err != nil {
		return [16]byte{}, err
	}
	return hash, nil
}
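Since manageFileDiffing already hashes both the changed file and the file on record, the same helper could be used to skip a diff when nothing has actually changed. A sketch of that pre-check (the filesDiffer helper and the skip-if-equal behaviour are suggestions, not something this commit does):

package engine

// filesDiffer reports whether two files hash differently; [16]byte values compare directly.
func filesDiffer(subject, object string) (bool, error) {
	subjectHash, err := UniqueFileHash(subject)
	if err != nil {
		return false, err
	}
	objectHash, err := UniqueFileHash(object)
	if err != nil {
		return false, err
	}
	return subjectHash != objectHash, nil
}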
@@ -1,6 +1,8 @@
 package engine
 
 import (
+	"fmt"
+
 	logger "github.com/apsdehal/go-logger"
 )
 
@@ -23,9 +25,9 @@ func NewPatcher(logger *logger.Logger, KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, TH
 // last save is the file you want to get.
 func (p *Patcher) PatchFromFile(filePath, patchPath, restorePath string) error {
 	if subject, err := openFile(filePath); err != nil {
-		p.ErrorF("error on subject file: ", err)
+		return fmt.Errorf("error on subject file: %v", err)
 	} else if patch, err := openFile(patchPath); err != nil {
-		p.ErrorF("error on patch file: ", err)
+		return fmt.Errorf("error on patch file: %v", err)
 	} else {
 		return p.applyPatch(subject, patch, restorePath)
 	}
@@ -36,14 +38,12 @@ func (p *Patcher) PatchFromFile(filePath, patchPath, restorePath string) error {
 // be upgraded for different patching algorithms
 func (p *Patcher) applyPatch(subject, patch []byte, restorePath string) error {
 	if delta, err := decompressDelta(patch); err != nil {
-		p.ErrorF("error decompressing delta", err)
+		return fmt.Errorf("error decompressing delta: %v", err)
 	} else {
 		if appliedBytes, err := applyPatchToFile(subject, delta); err != nil {
-			p.ErrorF("error applying delta to original file", err)
-			return err
+			return fmt.Errorf("error applying delta to original file: %v", err)
 		} else if err := writeFile(restorePath, appliedBytes); err != nil {
-			p.ErrorF("error writing patchedFile", err)
-			return err
+			return fmt.Errorf("error writing patchedFile: %v", err)
 		} else {
 			return nil
 		}
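Restoring a version is then a single call on the patcher: the current file plus a .patch written by binaryDiff yields the reconstructed file at restorePath. A usage sketch (the restoreVersion wrapper and its error text are illustrative; how the *Patcher is built from NewPatcher is not shown in these hunks):

package engine

import "fmt"

// restoreVersion applies a stored patch to the current file and writes the result to restorePath.
func restoreVersion(p *Patcher, currentPath, patchPath, restorePath string) error {
	if err := p.PatchFromFile(currentPath, patchPath, restorePath); err != nil {
		return fmt.Errorf("restore failed: %v", err)
	}
	return nil
}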
@@ -2,11 +2,11 @@ package engine
 
 import (
 	logger "github.com/apsdehal/go-logger"
-	radovskyb "github.com/radovskyb/watcher"
+	watcher "github.com/radovskyb/watcher"
 )
 
-type Watcher struct {
-	*radovskyb.Watcher
+type FileWatcher struct {
+	*watcher.Watcher
 	*logger.Logger
 	Enabled bool
 	KEYFOLDER string
@@ -2,12 +2,11 @@ package engine
 
 import (
 	"context"
-	"path/filepath"
 	"sync"
-	"time"
 
 	logger "github.com/apsdehal/go-logger"
-	radovskyb "github.com/radovskyb/watcher"
+	"github.com/deranjer/gvc/common/database"
+	watcher "github.com/radovskyb/watcher"
 )
 
 type key string
@@ -23,9 +22,9 @@ type Event struct {
 // * copying any versions and keeping them safe (even if temporary)
 // * creating the diff of the file, in both directions if necessary
 // * storing the details in the database
-func NewWatcher(logger *logger.Logger, KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER string) (Watcher, error) {
-	w := Watcher{
-		radovskyb.New(),
+func NewWatcher(logger *logger.Logger, KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER string) (FileWatcher, error) {
+	w := FileWatcher{
+		watcher.New(),
 		logger,
 		true, //used to temporarily ignore events if necessary
 		KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, THUMBFOLDER, DIFFFOLDER,
@@ -33,16 +32,16 @@ func NewWatcher(logger *logger.Logger, KEYFOLDER, DOWNLOADFOLDER, SYNCFOLDER, TH
 	return w, nil
 }
 
-func (w *Watcher) Ignore() bool {
-	w.Enabled = false
-	return w.Enabled
+func (fw *FileWatcher) Ignore() bool {
+	fw.Enabled = false
+	return fw.Enabled
 }
-func (w *Watcher) Enable() bool {
-	w.Enabled = true
-	return w.Enabled
+func (fw *FileWatcher) Enable() bool {
+	fw.Enabled = true
+	return fw.Enabled
 }
-func (w *Watcher) IsEnabled() bool {
-	return w.Enabled
+func (fw *FileWatcher) IsEnabled() bool {
+	return fw.Enabled
 }
 
 // BeginWatcherRoutine kicks off the watcher. When the watcher notices a file change,
@@ -51,45 +50,31 @@ func (w *Watcher) IsEnabled() bool {
 // If certain functions need to be called then this will
 // need to be specified as part of the manager's lambda functions
 // TODO: Should return an error
-func (w *Watcher) BeginWatcherRoutine(ctx context.Context, wg *sync.WaitGroup, diffChannel chan utilities.DiffObject, onFileChanged func(string) (utilities.File, error)) {
+func (fw *FileWatcher) BeginWatcherRoutine(ctx context.Context, wg *sync.WaitGroup, diffChannel chan database.DiffObject, onFileChanged func(string) (database.File, error)) {
 
 	//seems a bit barking, but we can now cancel any diff that is occurring on a file when it fires again
 	cancelFunctions := make(map[string]func())
 	for {
 		select {
 		// we have filtered already on the [Op]erations we want to listen for so no need to check here
-		case event := <-w.Event:
-			if !w.IsEnabled() {
-				w.Infof("ignoring event and reenabling the watcher %s\r\n", event)
-				w.Enable()
+		case event := <-fw.Watcher.Event:
+			if !fw.IsEnabled() {
+				fw.Infof("ignoring event and reenabling the watcher %s\r\n", event)
+				fw.Enable()
 				continue
 			}
-			w.Infof("event fired ", event)
+			fw.Infof("event fired ", event)
 			//this is currently slow as it does a db lookup on the path.
 			//TODO: On load (or whenever a file is added to the watcher), the db information for files being watched could be cached in memory. This would be much faster.
 			fileInfo, err := onFileChanged(event.Path) //could return the 'Event' object here
 			syncFilePath := fileInfo.CurrentBase
-			uniqueName := fileInfo.Unique
-			// //begin taking screenshot if we are supposed to
-			screenshotChannel := make(chan utilities.ScreenshotWrapper)
-			go func(ssChannel chan utilities.ScreenshotWrapper) {
-				w.Infof("beginning taking screenshot at ", time.Now())
-				var ssStruct utilities.ScreenshotWrapper
-				if screenshotFileName, err := takeScreenShot(w.THUMBFOLDER, uniqueName); err != nil {
-					w.WarningF("could not take screenshot", err)
-					ssStruct.ScreenshotError = err
-				} else {
-					ssStruct.Screenshot = filepath.Join(w.THUMBFOLDER, screenshotFileName)
-					w.Infof("screenshot recorded ", ssStruct.Screenshot, " at ", time.Now())
-				}
-				ssChannel <- ssStruct
-			}(screenshotChannel)
+			//uniqueName := fileInfo.Unique
 
 			// fileID := fileInfo.ID
 			//we need the hash of the current base, not the hash of the original file
 			// fileHash := fileInfo.CurrentHash //hash needs to come from
 			if err != nil {
-				w.ErrorF("path was not returned to sync path", err)
+				fw.ErrorF("path was not returned to sync path", err)
 				continue
 			}
 			//cancel the event if it indeed is running...
@@ -118,14 +103,14 @@ func (w *Watcher) BeginWatcherRoutine(ctx context.Context, wg *sync.WaitGroup, d
 				Total: 100,
 			}
 			eventContext := context.WithValue(cancelContext, key(event.Path), e)
-			if err := manageFileDiffing(eventContext, event.Path, syncFilePath, w.DIFFFOLDER, true, screenshotChannel, diffChannel, wg); err != nil {
+			if err := manageFileDiffing(eventContext, event.Path, syncFilePath, fw.DIFFFOLDER, true, diffChannel, wg); err != nil {
 				// I don't think this can be reached...
-				w.WarningF("Error managing the diffing process %s", err)
+				fw.WarningF("Error managing the diffing process %s", err)
 			}
-		case err := <-w.Watcher.Error:
-			w.ErrorF("%s\r\n", err)
-		case <-w.Closed:
-			w.Notice("radovskyb closed")
+		case err := <-fw.Watcher.Error:
+			fw.Errorf("%s\r\n", err)
+		case <-fw.Watcher.Closed:
+			fw.Notice("radovskyb closed")
 			return
 		}
 	}
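The cancelFunctions map in BeginWatcherRoutine is what lets an in-flight diff be abandoned when the same file fires again. The pattern in isolation looks roughly like this (the cancelAndReplace helper is illustrative; the real wiring lives inside the select loop above):

package engine

import "context"

// cancelAndReplace cancels any diff still running for path and registers a fresh
// cancellable context for the new event.
func cancelAndReplace(parent context.Context, cancelFunctions map[string]func(), path string) context.Context {
	if cancel, ok := cancelFunctions[path]; ok {
		cancel() // abandon the previous, still-running diff for this file
	}
	ctx, cancel := context.WithCancel(parent)
	cancelFunctions[path] = cancel
	return ctx
}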