25 Commits

SHA1 Message Date
5b262e6651 fixed fatal linux error with listenaddr incorrectly duplicated 2019-03-22 16:22:48 -04:00
496e255884 updating dependencies, adding go mod for versioning 2019-03-21 22:09:58 -04:00
0560c2f3fe Fixed error msg about 64-bit port integer (did not actually cause fatal errors), tested with updated go bin 2018-12-27 15:24:07 -05:00
01a976ab2f updating logrus configurations 2018-12-08 21:58:29 -05:00
d6341c9844 finished core rewrite for stability, just needs extensive testing, still need queue rewrite 2018-12-07 19:50:48 -05:00
1fac8757d0 Pulled in latest version of libraries, added Socks5 config 2018-11-15 17:19:15 -05:00
aba7382113 Fixing Queue issues, start/stop torrent issues 2018-09-13 19:34:30 -04:00
a5e9b6745f Moving Queue and Ratio checks to cron (fixes failure to stop on ratio) 2018-09-10 15:18:30 -04:00
224e7892ef Updating torrent library to latest, fixing breaking changes to torrent library api 2018-09-01 20:24:03 -04:00
6e5ba2c755 Ready for new release with new engine, will start bugfixes 2018-06-08 18:19:51 -04:00
cbfcba4cbc Adding a few settings to webui, cleanup of unneeded func 2018-05-27 17:45:29 -04:00
d15bb9752a Finished Engine re-write, awaiting testing 2018-05-27 17:34:14 -04:00
35a5ac37eb Engine rewrite about 80% done, but a ton of bugs and a few new features to add, almost no testing done 2018-05-17 13:52:47 -04:00
4909429390 starting to redo the core to do queuing and better downloading 2018-04-18 22:41:32 -04:00
0fdc926cc4 adding ability to generate API keys 2018-04-07 13:07:47 -04:00
3280360d47 fixing sorting in webui 2018-04-05 16:30:44 -04:00
f69ec5b9f2 Changing permissions to walk the entire structure 2018-04-03 21:39:22 -04:00
aee3516682 fix folder permissions in donetorrentactions 2018-04-02 21:21:41 -04:00
a7881a14c7 fixing path issue with starting torrent 2018-03-30 20:05:18 -04:00
128ec774bd changing how the start API command works to start torrents 2018-03-27 15:39:02 -04:00
bc612bf5e4 removing symlink option only copy for now 2018-03-26 21:06:14 -04:00
3f1f9e7104 separate thread for torrent list 2018-03-25 23:07:22 -04:00
eeb6e102f1 fixing log not writing to file 2018-03-25 21:23:15 -04:00
0a0f0cd577 fixing notification issue, parallelizing startTorrent, verifying torrent after move 2018-03-25 09:34:32 -04:00
10399cc6e5 updating readme with new documentation link 2018-03-23 15:17:35 -04:00
25 changed files with 1420 additions and 493 deletions

.gitignore (vendored) · 2 changes

@@ -2,6 +2,7 @@ downloads/
downloading/
downloaded/
uploadedTorrents/
boltBrowser/
storage.db.lock
storage.db
storage.db.old
@@ -24,3 +25,4 @@ config.toml.old
/public/static/js/kickwebsocket-generated.js
clientAuth.txt
dist
debScripts/

Dockerfile (new file) · 3 lines

@@ -0,0 +1,3 @@
FROM scratch
COPY goTorrent /
ENTRYPOINT [ "/goTorrent" ]

View File

@@ -65,7 +65,7 @@ Image of the frontend UI
# Documentation
-All the documentation is available [here](https://deranjer.github.io/)
+All the documentation is available [here](https://deranjer.github.io/goTorrentDocs/)
# Special Thanks

View File

@@ -1,9 +1,9 @@
[serverConfig]
-ServerPort = ":8000" #leave format as is it expects a string with colon
-ServerAddr = "192.168.1.100" #Put in the IP address you want to bind to
-LogLevel = "Info" # Options = Debug, Info, Warn, Error, Fatal, Panic
-LogOutput = "stdout" #Options = file, stdout #file will print it to logs/server.log
+ServerPort = "8000" #Required to input as string
+ServerAddr = "192.168.1.8" #Put in the IP address you want to bind to as string
+LogLevel = "Debug" # Options = Debug, Info, Warn, Error, Fatal, Panic
+LogOutput = "file" #Options = file, stdout #file will print it to logs/server.log
SeedRatioStop = 1.50 #automatically stops the torrent after it reaches this seeding ratio
@@ -15,6 +15,8 @@
#Low = ~.05MB/s, Medium = ~.5MB/s, High = ~1.5MB/s
UploadRateLimit = "Unlimited" #Options are "Low", "Medium", "High", "Unlimited" #Unlimited is default
DownloadRateLimit = "Unlimited"
#Maximum number of allowed active torrents, the rest will be queued
MaxActiveTorrents = 5
[goTorrentWebUI]
#Basic goTorrentWebUI authentication (not terribly secure, implemented in JS, password is hashed to SHA256, not salted, basically don't depend on this if you require very good security)
@@ -33,6 +35,13 @@
#URL is CASE SENSITIVE
BaseURL = "domain.com/subroute/" # MUST be in the format (if you have a subdomain, and must have trailing slash) "yoursubdomain.domain.org/subroute/"
[socksProxy]
SocksProxyEnabled = false #bool, either false or true
# Sets usage of Socks5 Proxy. Authentication should be included in the url if needed.
# Examples: socks5://demo:demo@192.168.99.100:1080
# http://proxy.domain.com:3128
SocksProxyURL = ""
[EncryptionPolicy]
DisableEncryption = false
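
Editor's note: the new [socksProxy] section only stores a URL string such as socks5://demo:demo@192.168.99.100:1080; this diff does not show how the value is consumed inside the client. As a rough, hypothetical sketch of turning such a URL into a dialer, using the standard net/url package and golang.org/x/net/proxy (an assumption for illustration, not necessarily what goTorrent does internally):

    package main

    import (
        "fmt"
        "log"
        "net/url"

        "golang.org/x/net/proxy"
    )

    // buildSocksDialer is a hypothetical helper: it parses a SocksProxyURL-style
    // value and returns a dialer that routes TCP connections through the proxy.
    // Credentials, if present, are taken from the user:password part of the URL.
    func buildSocksDialer(rawURL string) (proxy.Dialer, error) {
        u, err := url.Parse(rawURL)
        if err != nil {
            return nil, err
        }
        var auth *proxy.Auth
        if u.User != nil {
            pass, _ := u.User.Password()
            auth = &proxy.Auth{User: u.User.Username(), Password: pass}
        }
        return proxy.SOCKS5("tcp", u.Host, auth, proxy.Direct)
    }

    func main() {
        dialer, err := buildSocksDialer("socks5://demo:demo@192.168.99.100:1080")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("dialer ready: %T\n", dialer) // would be handed to the client's networking layer
    }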

config.toml.bk (new file) · 122 lines

@@ -0,0 +1,122 @@
[serverConfig]
ServerPort = ":8000" #leave format as is it expects a string with colon
ServerAddr = "192.168.1.8" #Put in the IP address you want to bind to
LogLevel = "Info" # Options = Debug, Info, Warn, Error, Fatal, Panic
LogOutput = "stdout" #Options = file, stdout #file will print it to logs/server.log
SeedRatioStop = 1.50 #automatically stops the torrent after it reaches this seeding ratio
#Relative or absolute path accepted, the server will convert any relative path to an absolute path.
DefaultMoveFolder = 'Z:\downloads' #default path that a finished torrent is symlinked to after completion. Torrents added via RSS will default here
TorrentWatchFolder = 'torrentUpload' #folder path that is watched for .torrent files and adds them automatically every 5 minutes
#Limits your upload and download speed globally, all are averages and not burst protected (usually burst on start).
#Low = ~.05MB/s, Medium = ~.5MB/s, High = ~1.5MB/s
UploadRateLimit = "Unlimited" #Options are "Low", "Medium", "High", "Unlimited" #Unlimited is default
DownloadRateLimit = "Unlimited"
[goTorrentWebUI]
#Basic goTorrentWebUI authentication (not terribly secure, implemented in JS, password is hashed to SHA256, not salted, basically don't depend on this if you require very good security)
WebUIAuth = false # bool, if false no authentication is required for the webUI
WebUIUser = "admin"
WebUIPassword = "Password1"
[notifications]
PushBulletToken = "o.8sUHemPkTCaty3u7KnyvEBN19EkeT63g" #add your pushbullet api token here to notify of torrent completion to pushbullet
[reverseProxy]
#This is for setting up goTorrent behind a reverse Proxy (with SSL, reverse proxy with no SSL will require editing the WSS connection to a WS connection manually)
ProxyEnabled = false #bool, either false or true
#URL is CASE SENSITIVE
BaseURL = "derajnet.duckdns.org/gopher/" # MUST be in the format (if you have a subdomain, and must have trailing slash) "yoursubdomain.domain.org/subroute/"
[EncryptionPolicy]
DisableEncryption = false
ForceEncryption = false
PreferNoEncryption = false
[torrentClientConfig]
DownloadDir = 'downloading' #the full OR relative path where the torrent server stores in-progress torrents
Seed = false #boolean #seed after download
# Never send chunks to peers.
NoUpload = false #boolean
#User-provided Client peer ID. If not present, one is generated automatically.
PeerID = "" #string
#The address to listen for new uTP and TCP bittorrent protocol connections. DHT shares a UDP socket with uTP unless configured otherwise.
ListenAddr = "" #Leave Blank for default, syntax "HOST:PORT"
#Don't announce to trackers. This only leaves DHT to discover peers.
DisableTrackers = false #boolean
DisablePEX = false # boolean
# Don't create a DHT.
NoDHT = false #boolean
#For the bittorrent protocol.
DisableUTP = false #bool
#For the bittorrent protocol.
DisableTCP = false #bool
#Called to instantiate storage for each added torrent. Builtin backends
# are in the storage package. If not set, the "file" implementation is used.
DefaultStorage = "storage.ClientImpl"
#encryption policy
IPBlocklist = "" #of type iplist.Ranger
DisableIPv6 = false #boolean
Debug = false #boolean
#HTTP *http.Client
HTTPUserAgent = "" # HTTPUserAgent changes default UserAgent for HTTP requests
ExtendedHandshakeClientVersion = ""
Bep20 = ""
# Overrides the default DHT configuration, see dhtServerConfig #advanced.. so be careful
DHTConfig = "" # default is "dht.ServerConfig"
[dhtServerConfig]
# Set NodeId Manually. Caller must ensure that if NodeId does not conform to DHT Security Extensions, that NoSecurity is also set.
NodeId = "" #[20]byte
Conn = "" # https:#godoc.org/net#PacketConn #not implemented
# Don't respond to queries from other nodes.
Passive = false # boolean
# the default addresses are "router.utorrent.com:6881","router.bittorrent.com:6881","dht.transmissionbt.com:6881","dht.aelitis.com:6881",
#https:#github.com/anacrolix/dht/blob/master/dht.go
StartingNodes = "dht.GlobalBootstrapAddrs"
#Disable the DHT security extension: http:#www.libtorrent.org/dht_sec.html.
NoSecurity = false
#Initial IP blocklist to use. Applied before serving and bootstrapping begins.
IPBlocklist = "" #of type iplist.Ranger
#Used to secure the server's ID. Defaults to the Conn's LocalAddr(). Set to the IP that remote nodes will see,
#as that IP is what they'll use to validate our ID.
PublicIP = "" #net.IP
#Hook received queries. Return true if you don't want to propagate to the default handlers.
OnQuery = "func(query *krpc.Msg, source net.Addr) (propagate bool)"
#Called when a peer successfully announces to us.
OnAnnouncePeer = "func(infoHash metainfo.Hash, peer Peer)"
#How long to wait before re-sending queries that haven't received a response. Defaults to a random value between 4.5 and 5.5s.
QueryResendDelay = "func() time.Duration"

View File

@@ -22,7 +22,7 @@ func InitializeCronEngine() *cron.Cron {
}
//CheckTorrentWatchFolder adds torrents from a watch folder //TODO see if you can use filepath.Abs instead of changing directory
-func CheckTorrentWatchFolder(c *cron.Cron, db *storm.DB, tclient *torrent.Client, torrentLocalStorage Storage.TorrentLocal, config Settings.FullClientSettings) {
+func CheckTorrentWatchFolder(c *cron.Cron, db *storm.DB, tclient *torrent.Client, torrentLocalStorage Storage.TorrentLocal, config Settings.FullClientSettings, torrentQueues Storage.TorrentQueues) {
c.AddFunc("@every 5m", func() {
Logger.WithFields(logrus.Fields{"Watch Folder": config.TorrentWatchFolder}).Info("Running the watch folder cron job")
torrentFiles, err := ioutil.ReadDir(config.TorrentWatchFolder)
@@ -50,15 +50,59 @@ func CheckTorrentWatchFolder(c *cron.Cron, db *storm.DB, tclient *torrent.Client
os.Remove(fullFilePathAbs) //delete the torrent after adding it and copying it over
Logger.WithFields(logrus.Fields{"Source Folder": fullFilePathAbs, "Destination Folder": fullNewFilePathAbs, "Torrent": file.Name()}).Info("Added torrent from watch folder, and moved torrent file")
-StartTorrent(clientTorrent, torrentLocalStorage, db, "file", fullNewFilePathAbs, config.DefaultMoveFolder, "default", config)
+AddTorrent(clientTorrent, torrentLocalStorage, db, "file", fullNewFilePathAbs, config.DefaultMoveFolder, "default", config)
}
}
})
}
//CheckTorrentsCron runs a upload ratio check, a queue check (essentially anything that should not be frontend dependent)
func CheckTorrentsCron(c *cron.Cron, db *storm.DB, tclient *torrent.Client, config Settings.FullClientSettings) {
c.AddFunc("@every 30s", func() {
Logger.Debug("Running a torrent Ratio and Queue Check")
torrentLocalArray := Storage.FetchAllStoredTorrents(db)
torrentQueues := Storage.FetchQueues(db)
for _, singleTorrentFromStorage := range torrentLocalArray {
var singleTorrent *torrent.Torrent
for _, liveTorrent := range tclient.Torrents() { //matching the torrent from storage to the live torrent
if singleTorrentFromStorage.Hash == liveTorrent.InfoHash().String() {
singleTorrent = liveTorrent
}
}
calculatedCompletedSize := CalculateCompletedSize(singleTorrentFromStorage, singleTorrent)
bytesCompleted := CalculateCompletedSize(singleTorrentFromStorage, singleTorrent)
if float64(singleTorrentFromStorage.UploadedBytes)/float64(bytesCompleted) >= config.SeedRatioStop && singleTorrentFromStorage.TorrentUploadLimit == true { //If storage shows torrent stopped or if it is over the seeding ratio AND is under the global limit
Logger.WithFields(logrus.Fields{"Action: Stopping torrent due to seed Ratio": singleTorrentFromStorage.TorrentName}).Info()
StopTorrent(singleTorrent, singleTorrentFromStorage, db)
}
if len(torrentQueues.ActiveTorrents) < config.MaxActiveTorrents && singleTorrentFromStorage.TorrentStatus == "Queued" {
Logger.WithFields(logrus.Fields{"Action: Adding Torrent to Active Queue": singleTorrentFromStorage.TorrentName}).Info()
AddTorrentToActive(singleTorrentFromStorage, singleTorrent, db)
}
if (calculatedCompletedSize == singleTorrentFromStorage.TorrentSize) && (singleTorrentFromStorage.TorrentMoved == false) { //if we are done downloading and haven't moved torrent yet
Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrentFromStorage.TorrentName}).Info("Torrent Completed, moving...")
tStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) //Todo... find a better way to do this in the go-routine currently just to make sure it doesn't trigger multiple times
tStorage.TorrentMoved = true
Storage.UpdateStorageTick(db, tStorage)
go func() { //moving torrent in separate go-routine then verifying that the data is still there and correct
err := MoveAndLeaveSymlink(config, singleTorrent.InfoHash().String(), db, false, "") //can take some time to move file so running this in another thread TODO make this a goroutine and skip this block if the routine is still running
if err != nil { //If we fail, print the error and attempt a retry
Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrentFromStorage.TorrentName, "error": err}).Error("Failed to move Torrent!")
VerifyData(singleTorrent)
tStorage.TorrentMoved = false
Storage.UpdateStorageTick(db, tStorage)
}
}()
}
}
ValidateQueues(db, config, tclient) //Ensure we don't have too many in activeQueue
})
}
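
Editor's note: the 30-second cron above stops a torrent once UploadedBytes divided by the completed size reaches SeedRatioStop, provided the per-torrent upload limit flag is set. A minimal standalone restatement of that condition (hypothetical helper name; the zero-size guard is this sketch's own addition, not taken from the diff):

    package main

    import "fmt"

    // shouldStopSeeding mirrors the cron's ratio check: stop once
    // uploaded/completed reaches the configured SeedRatioStop, but only when
    // the per-torrent upload limit flag is enabled. The completedBytes == 0
    // guard avoids a divide-by-zero in this illustration.
    func shouldStopSeeding(uploadedBytes, completedBytes int64, seedRatioStop float64, uploadLimitEnabled bool) bool {
        if !uploadLimitEnabled || completedBytes == 0 {
            return false
        }
        return float64(uploadedBytes)/float64(completedBytes) >= seedRatioStop
    }

    func main() {
        fmt.Println(shouldStopSeeding(1600, 1000, 1.50, true))  // true: ratio 1.6 >= 1.50
        fmt.Println(shouldStopSeeding(1200, 1000, 1.50, true))  // false: ratio 1.2
        fmt.Println(shouldStopSeeding(1600, 1000, 1.50, false)) // false: limit disabled
    }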
//RefreshRSSCron refreshes all of the RSS feeds on an hourly basis
-func RefreshRSSCron(c *cron.Cron, db *storm.DB, tclient *torrent.Client, torrentLocalStorage Storage.TorrentLocal, config Settings.FullClientSettings) {
+func RefreshRSSCron(c *cron.Cron, db *storm.DB, tclient *torrent.Client, torrentLocalStorage Storage.TorrentLocal, config Settings.FullClientSettings, torrentQueues Storage.TorrentQueues) {
c.AddFunc("@hourly", func() {
torrentHashHistory := Storage.FetchHashHistory(db)
RSSFeedStore := Storage.FetchRSSFeeds(db)
@@ -86,7 +130,7 @@ func RefreshRSSCron(c *cron.Cron, db *storm.DB, tclient *torrent.Client, torrent
Logger.WithFields(logrus.Fields{"err": err, "Torrent": RSSTorrent.Title}).Warn("Unable to add torrent to torrent client!") Logger.WithFields(logrus.Fields{"err": err, "Torrent": RSSTorrent.Title}).Warn("Unable to add torrent to torrent client!")
break //break out of the loop entirely for this message since we hit an error break //break out of the loop entirely for this message since we hit an error
} }
StartTorrent(clientTorrent, torrentLocalStorage, db, "magnet", "", config.DefaultMoveFolder, "RSS", config) //TODO let user specify torrent default storage location and let change on fly AddTorrent(clientTorrent, torrentLocalStorage, db, "magnet", "", config.DefaultMoveFolder, "RSS", config) //TODO let user specify torrent default storage location and let change on fly
singleFeed.Torrents = append(singleFeed.Torrents, singleRSSTorrent) singleFeed.Torrents = append(singleFeed.Torrents, singleRSSTorrent)
} }

View File

@@ -3,7 +3,6 @@ package engine
import (
"os"
"path/filepath"
"runtime"
"github.com/asdine/storm"
Settings "github.com/deranjer/goTorrent/settings"
@@ -15,7 +14,7 @@ import (
)
//MoveAndLeaveSymlink takes the file from the default download dir and moves it to the user specified directory and then leaves a symlink behind.
-func MoveAndLeaveSymlink(config Settings.FullClientSettings, tHash string, db *storm.DB, moveDone bool, oldPath string) { //moveDone and oldPath are for moving a completed torrent
+func MoveAndLeaveSymlink(config Settings.FullClientSettings, tHash string, db *storm.DB, moveDone bool, oldPath string) error { //moveDone and oldPath are for moving a completed torrent
tStorage := Storage.FetchTorrentFromStorage(db, tHash)
Logger.WithFields(logrus.Fields{"Torrent Name": tStorage.TorrentName}).Info("Move and Create symlink started for torrent")
var oldFilePath string
@@ -25,6 +24,8 @@ func MoveAndLeaveSymlink(config Settings.FullClientSettings, tHash string, db *s
oldFilePath, err = filepath.Abs(oldFilePathTemp)
if err != nil {
Logger.WithFields(logrus.Fields{"Torrent Name": tStorage.TorrentName, "Filepath": oldFilePath}).Error("Cannot create absolute file path!")
moveDone = false
return err
}
} else {
oldFilePathTemp := filepath.Join(config.TorrentConfig.DataDir, tStorage.TorrentName)
@@ -32,41 +33,57 @@ func MoveAndLeaveSymlink(config Settings.FullClientSettings, tHash string, db *s
oldFilePath, err = filepath.Abs(oldFilePathTemp)
if err != nil {
Logger.WithFields(logrus.Fields{"Torrent Name": tStorage.TorrentName, "Filepath": oldFilePath}).Error("Cannot create absolute file path!")
moveDone = false
return err
}
}
newFilePathTemp := filepath.Join(tStorage.StoragePath, tStorage.TorrentName)
newFilePath, err := filepath.Abs(newFilePathTemp)
if err != nil {
Logger.WithFields(logrus.Fields{"Torrent Name": tStorage.TorrentName, "Filepath": newFilePath}).Error("Cannot create absolute file path for new file path!")
moveDone = false
return err
}
_, err = os.Stat(tStorage.StoragePath)
if os.IsNotExist(err) {
-err := os.MkdirAll(tStorage.StoragePath, 0755)
+err := os.MkdirAll(tStorage.StoragePath, 0777)
if err != nil {
Logger.WithFields(logrus.Fields{"New File Path": newFilePath, "error": err}).Error("Cannot create new directory")
moveDone = false
return err
}
}
oldFileInfo, err := os.Stat(oldFilePath)
if err != nil {
Logger.WithFields(logrus.Fields{"Old File info": oldFileInfo, "Old File Path": oldFilePath, "error": err}).Error("Cannot find the old file to copy/symlink!")
-return
+moveDone = false
return err
}
if oldFilePath != newFilePath {
newFilePathDir := filepath.Dir(newFilePath)
-os.Mkdir(newFilePathDir, 0755)
+os.Mkdir(newFilePathDir, 0777)
err := folderCopy.Copy(oldFilePath, newFilePath) //copy the folder to the new location
if err != nil {
Logger.WithFields(logrus.Fields{"Old File Path": oldFilePath, "New File Path": newFilePath, "error": err}).Error("Error Copying Folder!")
return err
}
-os.Chmod(newFilePath, 0777)
-if runtime.GOOS != "windows" { //TODO the windows symlink is broken on windows 10 creator edition, so on the other platforms create symlink (windows will copy) until Go1.11
+err = filepath.Walk(newFilePath, func(path string, info os.FileInfo, err error) error { //Walking the file path to change the permissions
+if err != nil {
Logger.WithFields(logrus.Fields{"file": path, "error": err}).Error("Potentially non-critical error, continuing..")
}
os.Chmod(path, 0777)
return nil
})
/* if runtime.GOOS != "windows" { //TODO the windows symlink is broken on windows 10 creator edition, so on the other platforms create symlink (windows will copy) until Go1.11
os.RemoveAll(oldFilePath)
err = os.Symlink(newFilePath, oldFilePath)
if err != nil {
Logger.WithFields(logrus.Fields{"Old File Path": oldFilePath, "New File Path": newFilePath, "error": err}).Error("Error creating symlink")
moveDone = false
return err
}
-}
+} */
if moveDone == false {
tStorage.TorrentMoved = true //TODO error handling instead of just saying torrent was moved when it was not
notifyUser(tStorage, config, db) //Only notify if we haven't moved yet, don't want to push notify user every time user uses change storage button
@@ -75,7 +92,7 @@ func MoveAndLeaveSymlink(config Settings.FullClientSettings, tHash string, db *s
tStorage.StoragePath = filepath.Dir(newFilePath)
Storage.UpdateStorageTick(db, tStorage)
}
return nil
}
func notifyUser(tStorage Storage.TorrentLocal, config Settings.FullClientSettings, db *storm.DB) {

View File

@@ -1,47 +0,0 @@
package engine
import (
"testing"
"github.com/asdine/storm"
Settings "github.com/deranjer/goTorrent/settings"
Storage "github.com/deranjer/goTorrent/storage"
)
func TestMoveAndLeaveSymlink(t *testing.T) {
type args struct {
config Settings.FullClientSettings
tStorage Storage.TorrentLocal
db *storm.DB
}
tests := []struct {
name string
args args
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
MoveAndLeaveSymlink(tt.args.config, tt.args.tStorage, tt.args.db)
})
}
}
func Test_notifyUser(t *testing.T) {
type args struct {
tStorage Storage.TorrentLocal
config Settings.FullClientSettings
db *storm.DB
}
tests := []struct {
name string
args args
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
notifyUser(tt.args.tStorage, tt.args.config, tt.args.db)
})
}
}

View File

@@ -129,18 +129,19 @@ func readTorrentFileFromDB(element *Storage.TorrentLocal, tclient *torrent.Clien
return singleTorrent, nil
}
-//StartTorrent creates the storage.db entry and starts A NEW TORRENT and adds to the running torrent array
+//AddTorrent creates the storage.db entry and starts A NEW TORRENT and adds to the running torrent array
-func StartTorrent(clientTorrent *torrent.Torrent, torrentLocalStorage Storage.TorrentLocal, torrentDbStorage *storm.DB, torrentType, torrentFilePathAbs, torrentStoragePath, labelValue string, config Settings.FullClientSettings) {
+func AddTorrent(clientTorrent *torrent.Torrent, torrentLocalStorage Storage.TorrentLocal, db *storm.DB, torrentType, torrentFilePathAbs, torrentStoragePath, labelValue string, config Settings.FullClientSettings) {
timedOut := timeOutInfo(clientTorrent, 45) //seeing if adding the torrent times out (giving 45 seconds)
if timedOut { //if we fail to add the torrent return
return
}
var TempHash metainfo.Hash
TempHash = clientTorrent.InfoHash()
-allStoredTorrents := Storage.FetchAllStoredTorrents(torrentDbStorage)
+fmt.Println("GOT INFOHASH", TempHash.String())
allStoredTorrents := Storage.FetchAllStoredTorrents(db)
for _, runningTorrentHashes := range allStoredTorrents {
if runningTorrentHashes.Hash == TempHash.String() {
-Logger.WithFields(logrus.Fields{"Hash": TempHash.String()}).Error("Torrent has duplicate hash to already running torrent... will not add to storage")
+Logger.WithFields(logrus.Fields{"Hash": TempHash.String()}).Info("Torrent has duplicate hash to already running torrent... will not add to storage")
return
}
}
@@ -164,7 +165,7 @@ func StartTorrent(clientTorrent *torrent.Torrent, torrentLocalStorage Storage.To
}
torrentLocalStorage.TorrentFile = torrentfile //storing the entire file in to database
}
-Logger.WithFields(logrus.Fields{"Storage Path": torrentStoragePath, "Torrent Name": clientTorrent.Name()}).Info("Adding Torrent with following storage path")
+Logger.WithFields(logrus.Fields{"Storage Path": torrentStoragePath, "Torrent Name": clientTorrent.Name()}).Info("Adding Torrent with following storage path, to active Queue")
torrentFiles := clientTorrent.Files() //storing all of the files in the database along with the priority
var TorrentFilePriorityArray = []Storage.TorrentFilePriority{}
for _, singleFile := range torrentFiles { //creating the database setup for the file array
@@ -175,22 +176,14 @@ func StartTorrent(clientTorrent *torrent.Torrent, torrentLocalStorage Storage.To
TorrentFilePriorityArray = append(TorrentFilePriorityArray, torrentFilePriority)
}
torrentLocalStorage.TorrentFilePriority = TorrentFilePriorityArray
-Storage.AddTorrentLocalStorage(torrentDbStorage, torrentLocalStorage) //writing all of the data to the database
-clientTorrent.DownloadAll() //set all pieces to download
-NumPieces := clientTorrent.NumPieces() //find the number of pieces
+//torrentQueues := Storage.FetchQueues(db)
+AddTorrentToActive(&torrentLocalStorage, clientTorrent, db)
+Storage.AddTorrentLocalStorage(db, torrentLocalStorage) //writing all of the data to the database
clientTorrent.CancelPieces(1, NumPieces) //cancel all of the pieces to use file priority
for _, singleFile := range clientTorrent.Files() { //setting all of the file priorities to normal
singleFile.SetPriority(torrent.PiecePriorityNormal)
}
fmt.Println("Downloading ALL") //starting the download
CreateServerPushMessage(ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "success", Payload: "Torrent added!"}, Conn)
}
//CreateInitialTorrentArray adds all the torrents on program start from the database
-func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Storage.TorrentLocal, db *storm.DB) {
+func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Storage.TorrentLocal, db *storm.DB, config Settings.FullClientSettings) {
for _, singleTorrentFromStorage := range TorrentLocalArray {
var singleTorrent *torrent.Torrent
var err error
@@ -205,7 +198,6 @@ func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
if err != nil {
continue
}
}
if len(singleTorrentFromStorage.InfoBytes) == 0 { //TODO.. kind of a fringe scenario.. not sure if needed since the db should always have the infobytes
timeOut := timeOutInfo(singleTorrent, 45)
@@ -220,66 +212,103 @@ func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
if err != nil {
Logger.WithFields(logrus.Fields{"torrentFile": singleTorrent.Name(), "error": err}).Error("Unable to add infobytes to the torrent!")
}
-if singleTorrentFromStorage.TorrentStatus != "Completed" && singleTorrentFromStorage.TorrentStatus != "Stopped" {
-fmt.Println("Starting torrent as download", singleTorrent.Name())
-singleTorrent.DownloadAll() //set all of the pieces to download (piece prio is NE to file prio)
-NumPieces := singleTorrent.NumPieces() //find the number of pieces
-singleTorrent.CancelPieces(1, NumPieces) //cancel all of the pieces to use file priority
-for _, singleFile := range singleTorrent.Files() { //setting all of the file priorities to normal
-singleFile.SetPriority(torrent.PiecePriorityNormal)
+torrentQueues := Storage.FetchQueues(db)
+if singleTorrentFromStorage.TorrentStatus == "Stopped" {
+singleTorrent.SetMaxEstablishedConns(0)
+continue
+}
+if singleTorrentFromStorage.TorrentStatus == "ForceStart" {
+AddTorrentToForceStart(singleTorrentFromStorage, singleTorrent, db)
}
if len(torrentQueues.ActiveTorrents) == 0 && len(torrentQueues.QueuedTorrents) == 0 { // If empty, run through all the torrents and assign them
if len(torrentQueues.ActiveTorrents) < Config.MaxActiveTorrents {
if singleTorrentFromStorage.TorrentStatus == "Completed" || singleTorrentFromStorage.TorrentStatus == "Seeding" {
Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Completed Torrents have lower priority, adding to Queued")
AddTorrentToQueue(singleTorrentFromStorage, singleTorrent, db)
} else {
Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Adding Torrent to Active Queue (Initial Torrent Load)")
AddTorrentToActive(singleTorrentFromStorage, singleTorrent, db)
}
} else {
-fmt.Println("Torrent status is....", singleTorrentFromStorage.TorrentStatus)
+Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Last resort for torrent, adding to Queued")
AddTorrentToQueue(singleTorrentFromStorage, singleTorrent, db)
}
} else { //If we already have a queue set up then assign torrents to queue
if singleTorrentFromStorage.TorrentStatus == "Queued" {
AddTorrentToQueue(singleTorrentFromStorage, singleTorrent, db)
} else {
if len(torrentQueues.ActiveTorrents) < Config.MaxActiveTorrents {
Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Adding Torrent to Active Queue (Initial Torrent Load Second)")
AddTorrentToActive(singleTorrentFromStorage, singleTorrent, db)
} else {
AddTorrentToQueue(singleTorrentFromStorage, singleTorrent, db)
}
}
RemoveDuplicatesFromQueues(db)
}
Storage.UpdateStorageTick(db, *singleTorrentFromStorage)
}
torrentQueues := Storage.FetchQueues(db)
if len(torrentQueues.ActiveTorrents) < config.MaxActiveTorrents && len(torrentQueues.QueuedTorrents) > 0 { //after all the torrents are added, see if out active torrent list isn't full, then add from the queue
Logger.WithFields(logrus.Fields{"Max Active: ": config.MaxActiveTorrents, "Current : ": torrentQueues.ActiveTorrents}).Info("Adding Torrents from queue to active to fill...")
maxCanSend := config.MaxActiveTorrents - len(torrentQueues.ActiveTorrents)
if maxCanSend > len(torrentQueues.QueuedTorrents) {
maxCanSend = len(torrentQueues.QueuedTorrents)
}
torrentsToStart := make([]string, maxCanSend)
copy(torrentsToStart, torrentQueues.QueuedTorrents[len(torrentsToStart)-1:])
for _, torrentStart := range torrentsToStart {
for _, singleTorrent := range tclient.Torrents() {
if singleTorrent.InfoHash().String() == torrentStart {
singleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, torrentStart)
AddTorrentToActive(&singleTorrentFromStorage, singleTorrent, db)
}
}
}
}
SetFilePriority(tclient, db) //Setting the desired file priority from storage
Logger.WithFields(logrus.Fields{"Max Active: ": config.MaxActiveTorrents, "Current : ": torrentQueues.ActiveTorrents}).Debug("Queue after all initial torrents have been added")
}
//CreateRunningTorrentArray creates the entire torrent list to pass to client
func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Storage.TorrentLocal, PreviousTorrentArray []ClientDB, config Settings.FullClientSettings, db *storm.DB) (RunningTorrentArray []ClientDB) {
torrentQueues := Storage.FetchQueues(db)
Logger.WithFields(logrus.Fields{"Max Active: ": config.MaxActiveTorrents, "TorrentQueues": torrentQueues}).Debug("Current TorrentQueues")
for _, singleTorrentFromStorage := range TorrentLocalArray {
torrentQueues := Storage.FetchQueues(db)
var singleTorrent *torrent.Torrent
var TempHash metainfo.Hash
for _, liveTorrent := range tclient.Torrents() { //matching the torrent from storage to the live torrent
if singleTorrentFromStorage.Hash == liveTorrent.InfoHash().String() {
singleTorrent = liveTorrent
}
}
tickUpdateStruct := Storage.TorrentLocal{} //we are shoving the tick updates into a torrentlocal struct to pass to storage happens at the end of the routine
fullClientDB := new(ClientDB)
-//singleTorrentStorageInfo := Storage.FetchTorrentFromStorage(db, TempHash.String()) //pulling the single torrent info from storage ()
+//Handling deleted torrents here
if singleTorrentFromStorage.TorrentStatus == "Dropped" {
Logger.WithFields(logrus.Fields{"selection": singleTorrentFromStorage.TorrentName}).Info("Deleting just the torrent")
DeleteTorrentFromQueues(singleTorrentFromStorage.Hash, db)
singleTorrent.Drop()
Storage.DelTorrentLocalStorage(db, singleTorrentFromStorage.Hash)
}
if singleTorrentFromStorage.TorrentStatus == "DroppedData" {
-Logger.WithFields(logrus.Fields{"selection": singleTorrentFromStorage.TorrentName}).Info("Deleting just the torrent")
+Logger.WithFields(logrus.Fields{"selection": singleTorrentFromStorage.TorrentName}).Info("Deleting torrent and data")
singleTorrent.Drop()
DeleteTorrentFromQueues(singleTorrentFromStorage.Hash, db)
Storage.DelTorrentLocalStorageAndFiles(db, singleTorrentFromStorage.Hash, Config.TorrentConfig.DataDir)
}
if singleTorrentFromStorage.TorrentType == "file" { //if it is a file pull it from the uploaded torrent folder
fullClientDB.SourceType = "Torrent File"
} else {
fullClientDB.SourceType = "Magnet Link"
}
var TempHash metainfo.Hash
TempHash = singleTorrent.InfoHash()
calculatedTotalSize := CalculateDownloadSize(singleTorrentFromStorage, singleTorrent)
calculatedCompletedSize := CalculateCompletedSize(singleTorrentFromStorage, singleTorrent)
TempHash = singleTorrent.InfoHash()
if (calculatedCompletedSize == singleTorrentFromStorage.TorrentSize) && (singleTorrentFromStorage.TorrentMoved == false) { //if we are done downloading and haven't moved torrent yet
Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrentFromStorage.TorrentName}).Info("Torrent Completed, moving...")
go MoveAndLeaveSymlink(config, singleTorrent.InfoHash().String(), db, false, "") //can take some time to move file so running this in another thread TODO make this a goroutine and skip this block if the routine is still running
}
fullStruct := singleTorrent.Stats()
activePeersString := strconv.Itoa(fullStruct.ActivePeers) //converting to strings
totalPeersString := fmt.Sprintf("%v", fullStruct.TotalPeers)
fullClientDB.StoragePath = singleTorrentFromStorage.StoragePath
@@ -292,8 +321,8 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
PercentDone := fmt.Sprintf("%.2f", float32(calculatedCompletedSize)/float32(calculatedTotalSize))
fullClientDB.TorrentHash = TempHash
fullClientDB.PercentDone = PercentDone
-fullClientDB.DataBytesRead = fullStruct.ConnStats.BytesReadData //used for calculations not passed to client calculating up/down speed
+fullClientDB.DataBytesRead = fullStruct.ConnStats.BytesReadData.Int64() //used for calculations not passed to client calculating up/down speed
-fullClientDB.DataBytesWritten = fullStruct.ConnStats.BytesWrittenData //used for calculations not passed to client calculating up/down speed
+fullClientDB.DataBytesWritten = fullStruct.ConnStats.BytesWrittenData.Int64() //used for calculations not passed to client calculating up/down speed
fullClientDB.ActivePeers = activePeersString + " / (" + totalPeersString + ")"
fullClientDB.TorrentHashString = TempHash.String()
fullClientDB.TorrentName = singleTorrentFromStorage.TorrentName
@@ -307,7 +336,7 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
TempHash := singleTorrent.InfoHash()
if previousElement.TorrentHashString == TempHash.String() { //matching previous to new
CalculateTorrentSpeed(singleTorrent, fullClientDB, previousElement, calculatedCompletedSize)
-fullClientDB.TotalUploadedBytes = singleTorrentFromStorage.UploadedBytes + (fullStruct.ConnStats.BytesWrittenData - previousElement.DataBytesWritten)
+fullClientDB.TotalUploadedBytes = singleTorrentFromStorage.UploadedBytes + (fullStruct.ConnStats.BytesWrittenData.Int64() - previousElement.DataBytesWritten)
}
}
}
@@ -316,7 +345,7 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
fullClientDB.TotalUploadedSize = HumanizeBytes(float32(fullClientDB.TotalUploadedBytes))
fullClientDB.UploadRatio = CalculateUploadRatio(singleTorrent, fullClientDB) //calculate the upload ratio
-CalculateTorrentStatus(singleTorrent, fullClientDB, config, singleTorrentFromStorage, calculatedCompletedSize, calculatedTotalSize)
+CalculateTorrentStatus(singleTorrent, fullClientDB, config, singleTorrentFromStorage, calculatedCompletedSize, calculatedTotalSize, torrentQueues, db) //add torrents to the queue, remove from queue, etc
tickUpdateStruct.UploadRatio = fullClientDB.UploadRatio
tickUpdateStruct.TorrentSize = calculatedTotalSize

View File

@@ -23,6 +23,11 @@ func secondsToMinutes(inSeconds int64) string {
return str
}
//VerifyData just verifies the data of a torrent by hash
func VerifyData(singleTorrent *torrent.Torrent) {
singleTorrent.VerifyData()
}
//MakeRange creates a range of pieces to set their priority based on a file
func MakeRange(min, max int) []int {
a := make([]int, max-min+1)
@@ -69,7 +74,7 @@ func CopyFile(srcFile string, destFile string) { //TODO move this to our importe
}
-//SetFilePriority sets the priorities for all of the files in a torrent
+//SetFilePriority sets the priorities for all of the files in all of the torrents
func SetFilePriority(t *torrent.Client, db *storm.DB) {
storedTorrents := Storage.FetchAllStoredTorrents(db)
for _, singleTorrent := range t.Torrents() {
@@ -104,7 +109,7 @@ func CalculateTorrentSpeed(t *torrent.Torrent, c *ClientDB, oc ClientDB, complet
dt := float32(now.Sub(oc.UpdatedAt)) // get the delta time length between now and last updated
db := float32(bytes - oc.BytesCompleted) //getting the delta bytes
rate := db * (float32(time.Second) / dt) // converting into seconds
-dbU := float32(bytesUpload - oc.DataBytesWritten)
+dbU := float32(bytesUpload.Int64() - oc.DataBytesWritten)
rateUpload := dbU * (float32(time.Second) / dt)
if rate >= 0 {
rateMB := rate / 1024 / 1024 //creating MB to calculate ETA
@@ -179,18 +184,238 @@ func CalculateUploadRatio(t *torrent.Torrent, c *ClientDB) string {
return uploadRatio
}
-//CalculateTorrentStatus is used to determine what the STATUS column of the frontend will display ll2
-func CalculateTorrentStatus(t *torrent.Torrent, c *ClientDB, config Settings.FullClientSettings, tFromStorage *storage.TorrentLocal, bytesCompleted int64, totalSize int64) {
-if (tFromStorage.TorrentStatus == "Stopped") || (float64(c.TotalUploadedBytes)/float64(bytesCompleted) >= config.SeedRatioStop && tFromStorage.TorrentUploadLimit == true) { //If storage shows torrent stopped or if it is over the seeding ratio AND is under the global limit
-c.Status = "Stopped"
-c.MaxConnections = 0
-t.SetMaxEstablishedConns(0)
+//StopTorrent stops the torrent, updates the database and sends a message. Since stoptorrent is called by each loop (individually) no need to call an array
+func StopTorrent(singleTorrent *torrent.Torrent, torrentLocalStorage *Storage.TorrentLocal, db *storm.DB) {
+if torrentLocalStorage.TorrentStatus == "Stopped" { //if we are already stopped
+Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Torrent Already Stopped, returning...")
+return
+}
torrentLocalStorage.TorrentStatus = "Stopped"
torrentLocalStorage.MaxConnections = 0
singleTorrent.SetMaxEstablishedConns(0)
DeleteTorrentFromQueues(singleTorrent.InfoHash().String(), db)
Storage.UpdateStorageTick(db, *torrentLocalStorage)
CreateServerPushMessage(ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "success", Payload: "Torrent Stopped!"}, Conn)
Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Torrent Stopped Success!")
}
-} else { //Only has 2 states in storage, stopped or running, so we know it should be running, and the websocket request handled updating the database with connections and status
+//AddTorrentToForceStart forces torrent to be high priority on start
func AddTorrentToForceStart(torrentLocalStorage *Storage.TorrentLocal, singleTorrent *torrent.Torrent, db *storm.DB) {
torrentQueues := Storage.FetchQueues(db)
for index, torrentHash := range torrentQueues.ActiveTorrents {
if torrentHash == singleTorrent.InfoHash().String() { //If torrent already in active remove from active
torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:index], torrentQueues.ActiveTorrents[index+1:]...)
}
}
for index, queuedTorrentHash := range torrentQueues.QueuedTorrents { //Removing from the queued torrents if in queued torrents
if queuedTorrentHash == singleTorrent.InfoHash().String() {
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:index], torrentQueues.QueuedTorrents[index+1:]...)
}
}
singleTorrent.NewReader()
singleTorrent.SetMaxEstablishedConns(80)
torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents, singleTorrent.InfoHash().String())
torrentLocalStorage.TorrentStatus = "ForceStart"
torrentLocalStorage.MaxConnections = 80
for _, file := range singleTorrent.Files() {
for _, sentFile := range torrentLocalStorage.TorrentFilePriority {
if file.DisplayPath() == sentFile.TorrentFilePath {
switch sentFile.TorrentFilePriority {
case "High":
file.SetPriority(torrent.PiecePriorityHigh)
case "Normal":
file.SetPriority(torrent.PiecePriorityNormal)
case "Cancel":
file.SetPriority(torrent.PiecePriorityNone)
default:
file.SetPriority(torrent.PiecePriorityNormal)
}
}
}
}
Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Adding Torrent to ForceStart Queue")
Storage.UpdateStorageTick(db, *torrentLocalStorage)
Storage.UpdateQueues(db, torrentQueues)
}
//AddTorrentToActive adds a torrent to the active slice
func AddTorrentToActive(torrentLocalStorage *Storage.TorrentLocal, singleTorrent *torrent.Torrent, db *storm.DB) {
torrentQueues := Storage.FetchQueues(db)
if torrentLocalStorage.TorrentStatus == "Stopped" {
Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Torrent set as stopped, skipping add")
return
}
for _, torrentHash := range torrentQueues.ActiveTorrents {
if torrentHash == singleTorrent.InfoHash().String() { //If torrent already in active skip
return
}
}
for index, queuedTorrentHash := range torrentQueues.QueuedTorrents { //Removing from the queued torrents if in queued torrents
if queuedTorrentHash == singleTorrent.InfoHash().String() {
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:index], torrentQueues.QueuedTorrents[index+1:]...)
}
}
singleTorrent.NewReader()
singleTorrent.SetMaxEstablishedConns(80)
torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents, singleTorrent.InfoHash().String())
torrentLocalStorage.TorrentStatus = "Running"
torrentLocalStorage.MaxConnections = 80
for _, file := range singleTorrent.Files() {
for _, sentFile := range torrentLocalStorage.TorrentFilePriority {
if file.DisplayPath() == sentFile.TorrentFilePath {
switch sentFile.TorrentFilePriority {
case "High":
file.SetPriority(torrent.PiecePriorityHigh)
case "Normal":
file.SetPriority(torrent.PiecePriorityNormal)
case "Cancel":
file.SetPriority(torrent.PiecePriorityNone)
default:
file.SetPriority(torrent.PiecePriorityNormal)
}
}
}
}
Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Adding Torrent to Active Queue (Manual Call)")
Storage.UpdateStorageTick(db, *torrentLocalStorage)
Storage.UpdateQueues(db, torrentQueues)
}
//RemoveTorrentFromActive forces a torrent to be removed from the active list if the max limit is already there and user forces a new torrent to be added
func RemoveTorrentFromActive(torrentLocalStorage *Storage.TorrentLocal, singleTorrent *torrent.Torrent, db *storm.DB) {
torrentQueues := Storage.FetchQueues(db)
for x, torrentHash := range torrentQueues.ActiveTorrents {
if torrentHash == singleTorrent.InfoHash().String() {
torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:x], torrentQueues.ActiveTorrents[x+1:]...)
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents, torrentHash)
torrentLocalStorage.TorrentStatus = "Queued"
torrentLocalStorage.MaxConnections = 0
singleTorrent.SetMaxEstablishedConns(0)
Storage.UpdateQueues(db, torrentQueues)
//AddTorrentToQueue(torrentLocalStorage, singleTorrent, db) //Adding the lasttorrent from active to queued
Storage.UpdateStorageTick(db, *torrentLocalStorage)
}
}
}
//DeleteTorrentFromQueues deletes the torrent from all queues (for a stop or delete action)
func DeleteTorrentFromQueues(torrentHash string, db *storm.DB) {
torrentQueues := Storage.FetchQueues(db)
for x, torrentHashActive := range torrentQueues.ActiveTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.
if torrentHash == torrentHashActive {
torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:x], torrentQueues.ActiveTorrents[x+1:]...)
Logger.Info("Removing Torrent from Active: ", torrentHash)
}
}
for x, torrentHashQueued := range torrentQueues.QueuedTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.
if torrentHash == torrentHashQueued {
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:x], torrentQueues.QueuedTorrents[x+1:]...)
Logger.Info("Removing Torrent from Queued", torrentHash)
}
}
for x, torrentHashActive := range torrentQueues.ForcedTorrents { //FOR EXTRA CAUTION deleting it from all queues in case a mistake occurred.
if torrentHash == torrentHashActive {
torrentQueues.ForcedTorrents = append(torrentQueues.ForcedTorrents[:x], torrentQueues.ForcedTorrents[x+1:]...)
Logger.Info("Removing Torrent from Forced: ", torrentHash)
}
}
Storage.UpdateQueues(db, torrentQueues)
Logger.WithFields(logrus.Fields{"Torrent Hash": torrentHash, "TorrentQueues": torrentQueues}).Info("Removing Torrent from all Queues")
}
//AddTorrentToQueue adds a torrent to the queue
func AddTorrentToQueue(torrentLocalStorage *Storage.TorrentLocal, singleTorrent *torrent.Torrent, db *storm.DB) {
torrentQueues := Storage.FetchQueues(db)
for _, torrentHash := range torrentQueues.QueuedTorrents {
if singleTorrent.InfoHash().String() == torrentHash { //don't add duplicate to que but do everything else (TODO, maybe find a better way?)
singleTorrent.SetMaxEstablishedConns(0)
torrentLocalStorage.MaxConnections = 0
torrentLocalStorage.TorrentStatus = "Queued"
Logger.WithFields(logrus.Fields{"TorrentName": torrentLocalStorage.TorrentName}).Info("Adding torrent to the queue, not active")
Storage.UpdateStorageTick(db, *torrentLocalStorage)
return
}
}
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents, singleTorrent.InfoHash().String())
singleTorrent.SetMaxEstablishedConns(0)
torrentLocalStorage.MaxConnections = 0
torrentLocalStorage.TorrentStatus = "Queued"
Logger.WithFields(logrus.Fields{"TorrentName": torrentLocalStorage.TorrentName}).Info("Adding torrent to the queue, not active")
Storage.UpdateQueues(db, torrentQueues)
Storage.UpdateStorageTick(db, *torrentLocalStorage)
}
//RemoveDuplicatesFromQueues removes any duplicates from torrentQueues.QueuedTorrents (which will happen if it is read in from DB)
func RemoveDuplicatesFromQueues(db *storm.DB) {
torrentQueues := Storage.FetchQueues(db)
for _, torrentHash := range torrentQueues.ActiveTorrents {
for i, queuedHash := range torrentQueues.QueuedTorrents {
if torrentHash == queuedHash {
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:i], torrentQueues.QueuedTorrents[i+1:]...)
}
}
}
Storage.UpdateQueues(db, torrentQueues)
}
//ValidateQueues is a sanity check that runs every tick to make sure the queues are in order... tried to avoid this but seems to be required
func ValidateQueues(db *storm.DB, config Settings.FullClientSettings, tclient *torrent.Client) {
torrentQueues := Storage.FetchQueues(db)
for len(torrentQueues.ActiveTorrents) > config.MaxActiveTorrents {
removeTorrent := torrentQueues.ActiveTorrents[:1]
for _, singleTorrent := range tclient.Torrents() {
if singleTorrent.InfoHash().String() == removeTorrent[0] {
singleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, removeTorrent[0])
RemoveTorrentFromActive(&singleTorrentFromStorage, singleTorrent, db)
}
}
}
torrentQueues = Storage.FetchQueues(db)
for _, singleTorrent := range tclient.Torrents() {
singleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
if singleTorrentFromStorage.TorrentStatus == "Stopped" {
continue
}
for _, queuedTorrent := range torrentQueues.QueuedTorrents { //If we have a queued torrent that is missing data, and an active torrent that is seeding, then prioritize the missing data one
if singleTorrent.InfoHash().String() == queuedTorrent {
if singleTorrent.BytesMissing() > 0 {
for _, activeTorrent := range torrentQueues.ActiveTorrents {
for _, singleActiveTorrent := range tclient.Torrents() {
if activeTorrent == singleActiveTorrent.InfoHash().String() {
if singleActiveTorrent.Seeding() == true {
singleActiveTFS := Storage.FetchTorrentFromStorage(db, activeTorrent)
Logger.WithFields(logrus.Fields{"TorrentName": singleActiveTFS.TorrentName}).Info("Seeding, Removing from active to add queued")
RemoveTorrentFromActive(&singleActiveTFS, singleActiveTorrent, db)
singleQueuedTFS := Storage.FetchTorrentFromStorage(db, queuedTorrent)
Logger.WithFields(logrus.Fields{"TorrentName": singleQueuedTFS.TorrentName}).Info("Adding torrent to the queue, not active")
AddTorrentToActive(&singleQueuedTFS, singleTorrent, db)
}
}
}
}
}
}
}
}
}
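//Wiring sketch (hypothetical; the actual registration happens in main.go's cron setup): ValidateQueues
//and RemoveDuplicatesFromQueues are meant to run on the periodic torrent check, shown here with the
//robfig/cron scheduler already required in go.mod. The function name and the 30s spec are illustrative.
func checkQueuesCronSketch(c *cron.Cron, db *storm.DB, config Settings.FullClientSettings, tclient *torrent.Client) {
    c.AddFunc("@every 30s", func() {
        RemoveDuplicatesFromQueues(db)      //clear duplicates that creep in when queues are re-read from the DB
        ValidateQueues(db, config, tclient) //then enforce MaxActiveTorrents and promote queued torrents
    })
}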
//CalculateTorrentStatus is used to determine what the STATUS column of the frontend will display
func CalculateTorrentStatus(t *torrent.Torrent, c *ClientDB, config Settings.FullClientSettings, tFromStorage *storage.TorrentLocal, bytesCompleted int64, totalSize int64, torrentQueues Storage.TorrentQueues, db *storm.DB) {
if tFromStorage.TorrentStatus == "Stopped" {
c.Status = "Stopped"
return
}
//Storage only holds two states, Stopped or Running, so at this point the torrent should be running; the websocket request handler updates the database with connections and status
for _, torrentHash := range torrentQueues.QueuedTorrents {
if tFromStorage.Hash == torrentHash {
c.Status = "Queued"
return
}
}
bytesMissing := totalSize - bytesCompleted
c.MaxConnections = 80
t.SetMaxEstablishedConns(80)
//t.DownloadAll() //ensure that we are setting the torrent to download
if t.Seeding() && t.Stats().ActivePeers > 0 && bytesMissing == 0 {
c.Status = "Seeding"
} else if t.Stats().ActivePeers > 0 && bytesMissing > 0 {
@@ -202,5 +427,4 @@ func CalculateTorrentStatus(t *torrent.Torrent, c *ClientDB, config Settings.Ful
} else {
c.Status = "Unknown"
}
}
}

30
go.mod Normal file
View File

@@ -0,0 +1,30 @@
module github.com/deranjer/goTorrent
go 1.12
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/DataDog/zstd v1.3.5 // indirect
github.com/PuerkitoBio/goquery v1.5.0 // indirect
github.com/Sereal/Sereal v0.0.0-20190226181601-237c2cca198f // indirect
github.com/anacrolix/dht v1.0.1
github.com/anacrolix/torrent v1.1.1
github.com/asdine/storm v2.1.2+incompatible
github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/golang/protobuf v1.3.1 // indirect
github.com/gorilla/handlers v1.4.0
github.com/gorilla/mux v1.7.0
github.com/gorilla/websocket v1.4.0
github.com/mitsuse/pushbullet-go v0.1.0
github.com/mmcdole/gofeed v1.0.0-beta2
github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf // indirect
github.com/otiai10/copy v1.0.1
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95 // indirect
github.com/robfig/cron v0.0.0-20180505203441-b41be1df6967
github.com/sirupsen/logrus v1.4.0
github.com/spf13/viper v1.3.2
github.com/vmihailenco/msgpack v4.0.3+incompatible // indirect
go.etcd.io/bbolt v1.3.2 // indirect
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
google.golang.org/appengine v1.5.0 // indirect
)

230
go.sum Normal file
View File

@@ -0,0 +1,230 @@
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
bou.ke/monkey v1.0.1 h1:zEMLInw9xvNakzUUPjfS4Ds6jYPqCFx3m7bRmG5NH2U=
bou.ke/monkey v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14=
github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/PuerkitoBio/goquery v1.5.0 h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=
github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
github.com/RoaringBitmap/roaring v0.4.17 h1:oCYFIFEMSQZrLHpywH7919esI1VSrQZ0pJXkZPGIJ78=
github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI=
github.com/Sereal/Sereal v0.0.0-20190226181601-237c2cca198f h1:99C4f5FJQChWyzMSpZPU4eUv3kjFmjxyWy8t2rlbUcs=
github.com/Sereal/Sereal v0.0.0-20190226181601-237c2cca198f/go.mod h1:D0JMgToj/WdxCgd30Kc1UcA9E+WdZoJqeVOuYW7iTBM=
github.com/anacrolix/dht v0.0.0-20180412060941-24cbf25b72a4/go.mod h1:hQfX2BrtuQsLQMYQwsypFAab/GvHg8qxwVi4OJdR1WI=
github.com/anacrolix/dht v0.0.0-20181129074040-b09db78595aa/go.mod h1:Ayu4t+5TsHQ07/P8XzRJqVofv7lU4R1ZTT7KW5+SPFA=
github.com/anacrolix/dht v1.0.1 h1:a7zVMiZWfPiToAUbjMZYeI3UvmsDP3j8vH5EDIAjM9c=
github.com/anacrolix/dht v1.0.1/go.mod h1:dtcIktBFD8YD/7ZcE5nQuuGGfLxcwa8+18mHl+GU+KA=
github.com/anacrolix/dht/v2 v2.0.1 h1:gOHJ+OKqJ4Eb48OYStZm4AlWr1/nSA2TWlzb/+t36SA=
github.com/anacrolix/dht/v2 v2.0.1/go.mod h1:GbTT8BaEtfqab/LPd5tY41f3GvYeii3mmDUK300Ycyo=
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa h1:xCaATLKmn39QqLs3tUZYr6eKvezJV+FYvVOLTklxK6U=
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
github.com/anacrolix/go-libutp v0.0.0-20180522111405-6baeb806518d/go.mod h1:beQSaSxwH2d9Eeu5ijrEnHei5Qhk+J6cDm1QkWFru4E=
github.com/anacrolix/go-libutp v0.0.0-20180808010927-aebbeb60ea05 h1:Zoniih3jyqtr3I0xFoMvw1USWpg+CbI/zOrcLudr0lc=
github.com/anacrolix/go-libutp v0.0.0-20180808010927-aebbeb60ea05/go.mod h1:POY/GPlrFKRxnOKH1sGAB+NBWMoP+sI+hHJxgcgWbWw=
github.com/anacrolix/log v0.0.0-20180412014343-2323884b361d/go.mod h1:sf/7c2aTldL6sRQj/4UKyjgVZBu2+M2z9wf7MmwPiew=
github.com/anacrolix/log v0.1.0/go.mod h1:sf/7c2aTldL6sRQj/4UKyjgVZBu2+M2z9wf7MmwPiew=
github.com/anacrolix/log v0.2.0 h1:LzaW6XTEk2zcmLZkcZPkJ2mDdnZkOdOTeBH7Kt81ouU=
github.com/anacrolix/log v0.2.0/go.mod h1:sf/7c2aTldL6sRQj/4UKyjgVZBu2+M2z9wf7MmwPiew=
github.com/anacrolix/missinggo v0.0.0-20180522035225-b4a5853e62ff/go.mod h1:b0p+7cn+rWMIphK1gDH2hrDuwGOcbB6V4VXeSsEfHVk=
github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s=
github.com/anacrolix/missinggo v0.0.0-20181129073415-3237bf955fed/go.mod h1:IN+9GUe7OxKMIs/XeXEbT/rMUolmJzmlZiXHS7FwD/Y=
github.com/anacrolix/missinggo v0.2.1-0.20190310234110-9fbdc9f242a8/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
github.com/anacrolix/missinggo v1.1.0 h1:0lZbaNa6zTR1bELAIzCNmRGAtkHuLDPJqTiTtXoAIx8=
github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw=
github.com/anacrolix/mmsg v0.0.0-20180808012353-5adb2c1127c0 h1:Fa1XqqLW62lQzEDlNA+QcdJbkfJcxQN0YC8983kj5tU=
github.com/anacrolix/mmsg v0.0.0-20180808012353-5adb2c1127c0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc=
github.com/anacrolix/sync v0.0.0-20171108081538-eee974e4f8c1/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w=
github.com/anacrolix/sync v0.0.0-20180611022320-3c4cb11f5a01/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w=
github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778 h1:XpCDEixzXOB8yaTW/4YBzKrJdMcFI0DzpPTYNv75wzk=
github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk=
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anacrolix/tagflag v0.0.0-20180605133421-f477c8c2f14c/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anacrolix/tagflag v0.0.0-20180803105420-3a8ff5428f76/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anacrolix/torrent v0.0.0-20180622074351-fefeef4ee9eb/go.mod h1:3vcFVxgOASslNXHdivT8spyMRBanMCenHRpe0u5vpBs=
github.com/anacrolix/torrent v1.0.1/go.mod h1:ZYV1Z2Wx3jXYSh26mDvneAbk8XIUxfvoVil2GW962zY=
github.com/anacrolix/torrent v1.1.1 h1:f54cvN3950x72hOB8UvzRwEbF5AY3VMj4vPyntgt24Q=
github.com/anacrolix/torrent v1.1.1/go.mod h1:XdYEuC3KuxFQZrQ6iUBXnwKr3IyxeyUlVH6RT8FhyaU=
github.com/anacrolix/utp v0.0.0-20180219060659-9e0e1d1d0572 h1:kpt6TQTVi6gognY+svubHfxxpq0DLU9AfTQyZVc3UOc=
github.com/anacrolix/utp v0.0.0-20180219060659-9e0e1d1d0572/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk=
github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asdine/storm v2.1.2+incompatible h1:dczuIkyqwY2LrtXPz8ixMrU/OFgZp71kbKTHGrXYt/Q=
github.com/asdine/storm v2.1.2+incompatible/go.mod h1:RarYDc9hq1UPLImuiXK3BIWPJLdIygvV3PsInK0FbVQ=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c h1:FUUopH4brHNO2kJoNN3pV+OBEYmgraLT/KHZrMM69r0=
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elgatito/upnp v0.0.0-20180711183757-2f244d205f9a h1:2Zw3pxDRTs4nX1WCLAEm27UN0hvjZSge7EaUUQexRZw=
github.com/elgatito/upnp v0.0.0-20180711183757-2f244d205f9a/go.mod h1:afkYpY8JAIL4341N7Zj9xJ5yTovsg6BkWfBFlCzIoF4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4=
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e h1:SiEs4J3BKVIeaWrH3tKaz3QLZhJ68iJ/A4xrzIoE5+Y=
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9 h1:Z0f701LpR4dqO92bP6TnIe3ZURClzJtBhds8R8u1HBE=
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v1.4.0 h1:XulKRWSQK5uChr4pEgSE4Tc/OcmnU9GJuSwdog/tZsA=
github.com/gorilla/handlers v1.4.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U=
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gosuri/uilive v0.0.0-20170323041506-ac356e6e42cd/go.mod h1:qkLSc0A5EXSP6B04TrN4oQoxqFI7A8XvoXSlJi8cwk8=
github.com/gosuri/uiprogress v0.0.0-20170224063937-d0567a9d84a1/go.mod h1:C1RTYn4Sc7iEyf6j8ft5dyoZ4212h8G1ol9QQluh5+0=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0=
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/ipfs/go-ipfs v0.4.18/go.mod h1:iXzbK+Wa6eePj3jQg/uY6Uoq5iOwY+GToD/bgaRadto=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-sqlite3 v1.7.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitsuse/pushbullet-go v0.1.0 h1:W9izHOpz8uilRBgbYSnqb+LZK/l8Ad4slRTCBFpItG0=
github.com/mitsuse/pushbullet-go v0.1.0/go.mod h1:sJ6Y3IROSfSQNLY/8gtYjq4Gs49DFnrxaqxQA6DVgnM=
github.com/mmcdole/gofeed v1.0.0-beta2 h1:CjQ0ADhAwNSb08zknAkGOEYqr8zfZKfrzgk9BxpWP2E=
github.com/mmcdole/gofeed v1.0.0-beta2/go.mod h1:/BF9JneEL2/flujm8XHoxUcghdTV6vvb3xx/vKyChFU=
github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf h1:sWGE2v+hO0Nd4yFU/S/mDBM5plIU8v/Qhfz41hkDIAI=
github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf/go.mod h1:pasqhqstspkosTneA62Nc+2p9SOBBYAPbnmRRWPQ0V8=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/otiai10/copy v1.0.1 h1:gtBjD8aq4nychvRZ2CyJvFWAw0aja+VHazDdruZKGZA=
github.com/otiai10/copy v1.0.1/go.mod h1:8bMCJrAqOtN/d9oyh5HR7HhLQMvcGMpGdwRDYsfOCHc=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95 h1:+OLn68pqasWca0z5ryit9KGfp3sUsW4Lqg32iRMJyzs=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/mint v1.2.3 h1:PsrRBmrxR68kyNu6YlqYHbNlItc5vOkuS6LBEsNttVA=
github.com/otiai10/mint v1.2.3/go.mod h1:YnfyPNhBvnY8bW4SGQHCs/aAFhkgySlMZbrF5U0bOVw=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron v0.0.0-20180505203441-b41be1df6967 h1:x7xEyJDP7Hv3LVgvWhzioQqbC/KtuUhTigKlH/8ehhE=
github.com/robfig/cron v0.0.0-20180505203441-b41be1df6967/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/sirupsen/logrus v1.4.0 h1:yKenngtzGh+cUSSh6GWbxW2abRqhYUSR/t/6+2QqNvE=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac h1:wbW+Bybf9pXxnCFAOWZTqkRjAc7rAIwo2e1ArUhiHxg=
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff h1:86HlEv0yBCry9syNuylzqznKXDK11p6D0DT596yNMys=
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/syncthing/syncthing v0.14.48-rc.4/go.mod h1:nw3siZwHPA6M8iSfjDCWQ402eqvEIasMQOE8nFOxy7M=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/vmihailenco/msgpack v4.0.3+incompatible h1:g+G529Dqo4BY2Gxn5GKENa/3NVK+mu/6hM7G3jEWszQ=
github.com/vmihailenco/msgpack v4.0.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bloom v0.0.0-20170505221640-54e3b963ee16/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA=
github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190318221613-d196dffd7c2b h1:ZWpVMTsK0ey5WJCu+vVdfMldWq7/ezaOcjnKWIHWVkE=
golang.org/x/net v0.0.0-20190318221613-d196dffd7c2b/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190318195719-6c81ef8f67ca h1:o2TLx1bGN3W+Ei0EMU5fShLupLmTOU95KvJJmfYhAzM=
golang.org/x/sys v0.0.0-20190318195719-6c81ef8f67ca/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@@ -21,6 +21,7 @@ let serverMessage = [];
let serverPushMessage = []; let serverPushMessage = [];
let webSocketState = false; let webSocketState = false;
let settingsFile = []; let settingsFile = [];
let tokenReturn = "";
var torrentListRequest = { var torrentListRequest = {
MessageType: "torrentListRequest" MessageType: "torrentListRequest"
@@ -29,7 +30,7 @@ var torrentListRequest = {
//websocket is started in kickwebsocket.js and is picked up here so "ws" is already defined
ws.onmessage = function (evt) { //When we receive a message from the websocket
var serverMessage = JSON.parse(evt.data) var serverMessage = JSON.parse(evt.data)
console.log("message", serverMessage.MessageType) console.log("message", serverMessage.MessageType)
@@ -134,6 +135,10 @@ ws.onmessage = function (evt) { //When we recieve a message from the websocket
settingsFile = []; settingsFile = [];
console.log("Settings File Returned", serverMessage) console.log("Settings File Returned", serverMessage)
settingsFile = serverMessage.Config settingsFile = serverMessage.Config
case "TokenReturn":
tokenReturn = serverMessage.TokenReturn
console.log("Token Returned", serverMessage)
} }
} }
@@ -222,10 +227,15 @@ class BackendSocket extends React.Component {
console.log("PROPSSERVER", this.props.serverPushMessage, "SERVERPUSH", serverPushMessage) console.log("PROPSSERVER", this.props.serverPushMessage, "SERVERPUSH", serverPushMessage)
this.props.newServerMessage(serverPushMessage) this.props.newServerMessage(serverPushMessage)
} }
if (this.props.settingsModalOpen) { //TODO don't really need to updaate every tick currently until we can edit config if (this.props.settingsModalOpen) { //TODO don't really need to update every tick currently until we can edit config
this.props.newSettingsFile(settingsFile) this.props.newSettingsFile(settingsFile)
} }
if (tokenReturn != ""){ //If we get a return token
console.log("Dispatching token return", tokenReturn)
this.props.newTokenReturn(tokenReturn)
}
ws.send(JSON.stringify(torrentListRequest))//talking to the server to get the torrent list ws.send(JSON.stringify(torrentListRequest))//talking to the server to get the torrent list
if (ws.readyState === ws.CLOSED){ //if our websocket gets closed inform the user if (ws.readyState === ws.CLOSED){ //if our websocket gets closed inform the user
webSocketState = false webSocketState = false
@@ -264,6 +274,9 @@ class BackendSocket extends React.Component {
if (nextProps.selectionHashes.length === 1){ //if we have a selection pass it on for the tabs to verify if (nextProps.selectionHashes.length === 1){ //if we have a selection pass it on for the tabs to verify
this.selectionHandler(nextProps.selectionHashes, nextProps.selectedTab) this.selectionHandler(nextProps.selectionHashes, nextProps.selectedTab)
} }
if (nextProps.tokenReturn != this.props.tokenReturn){ //clearing out the token if we switch from the API tab
tokenReturn = nextProps.tokenReturn
}
} }
@@ -287,6 +300,7 @@ const mapStateToProps = state => {
RSSTorrentList: state.RSSTorrentList, RSSTorrentList: state.RSSTorrentList,
serverPushMessage: state.serverPushMessage, serverPushMessage: state.serverPushMessage,
settingsModalOpen: state.settingsModalOpen, settingsModalOpen: state.settingsModalOpen,
tokenReturn: state.tokenReturn,
}; };
} }
@@ -301,9 +315,8 @@ const mapDispatchToProps = dispatch => {
RSSTorrentList: (RSSTorrentList) => dispatch({type: actionTypes.RSS_TORRENT_LIST, RSSTorrentList}), RSSTorrentList: (RSSTorrentList) => dispatch({type: actionTypes.RSS_TORRENT_LIST, RSSTorrentList}),
newServerMessage: (serverPushMessage) => dispatch({type: actionTypes.SERVER_MESSAGE, serverPushMessage}), newServerMessage: (serverPushMessage) => dispatch({type: actionTypes.SERVER_MESSAGE, serverPushMessage}),
webSocketStateUpdate: (webSocketState) => dispatch({type: actionTypes.WEBSOCKET_STATE, webSocketState}), webSocketStateUpdate: (webSocketState) => dispatch({type: actionTypes.WEBSOCKET_STATE, webSocketState}),
newSettingsFile: (settingsFile) => dispatch({type: actionTypes.NEW_SETTINGS_FILE, settingsFile}) newSettingsFile: (settingsFile) => dispatch({type: actionTypes.NEW_SETTINGS_FILE, settingsFile}),
//changeSelection: (selection) => dispatch({type: actionTypes.CHANGE_SELECTION, selection}),//forcing an update to the buttons newTokenReturn: (tokenReturn) => dispatch({type: actionTypes.TOKEN_RETURN, tokenReturn}),
} }
} }

View File

@@ -3,14 +3,18 @@ import ReactDOM from 'react-dom';
import { withStyles } from 'material-ui/styles'; import { withStyles } from 'material-ui/styles';
import Paper from 'material-ui/Paper'; import Paper from 'material-ui/Paper';
import Grid from 'material-ui/Grid'; import Grid from 'material-ui/Grid';
import Button from 'material-ui/Button';
import TextField from 'material-ui/TextField';
import {connect} from 'react-redux'; import {connect} from 'react-redux';
import * as actionTypes from '../../../../store/actions';
const styles = theme => ({ const styles = theme => ({
root: { root: {
flexGrow: 1, flexGrow: 1,
marginTop: 0, marginTop: 0,
padding: 10,
}, },
paper: { paper: {
padding: 16, padding: 16,
@@ -23,19 +27,48 @@ const styles = theme => ({
}); });
class APISettingsTab extends React.PureComponent { class APISettingsTab extends React.Component {
state = {
clientName: "",
};
requestNewKey = (keyName) => { generateKey = (event) => {
let newAuthTokenRequest = {
MessageType: "newAuthToken",
Payload: {"ClientName": this.state.clientName}
}
console.log("Sending New Auth Request: ", newAuthTokenRequest);
ws.send(JSON.stringify(newAuthTokenRequest));
this.setState({clientName: ""})
} }
setClientName = (event) => {
this.setState({clientName: event.target.value})
}
componentWillUnmount = () => {
this.props.newTokenReturn("")
}
render() { render() {
const { classes } = this.props; const { classes } = this.props;
return ( return (
<div className={classes.root}> <div className={classes.root}>
Not yet implemented! <TextField style ={{width: '50%', paddingRight: '10px'}} id="clientName" type="text" label="Client Name" placeholder="Client Name associated with the key" onChange={this.setClientName} />
<Button variant="raised" color="primary" onClick={this.generateKey}>
Generate Key
</Button>
<Paper style = {{padding: '10px'}}> <span className={classes.floatLeft}>{this.props.tokenReturn} </span></Paper>
<Grid container spacing={16}>
<Grid item xs={12} sm={4}>
</Grid>
</Grid>
</div> </div>
); );
} }
@@ -44,9 +77,15 @@ class APISettingsTab extends React.PureComponent {
const mapStateToProps = state => { const mapStateToProps = state => {
return { return {
settingsFile: state.settingsFile, tokenReturn: state.tokenReturn,
}; };
} }
export default withStyles(styles)(connect(mapStateToProps)(APISettingsTab)) const mapDispatchToProps = dispatch => {
return {
newTokenReturn: (tokenReturn) => dispatch({type: actionTypes.TOKEN_RETURN, tokenReturn}),
}
}
export default withStyles(styles)(connect(mapStateToProps, mapDispatchToProps)(APISettingsTab))

View File

@@ -35,6 +35,7 @@ class ClientSettingsTab extends React.PureComponent {
<Paper className={classes.paper}>HTTP Port: <span className={classes.floatRight}>{this.props.settingsFile["HTTPAddr"]} </span> </Paper> <Paper className={classes.paper}>HTTP Port: <span className={classes.floatRight}>{this.props.settingsFile["HTTPAddr"]} </span> </Paper>
<Paper className={classes.paper}>Use Proxy: <span className={classes.floatRight}>{String(this.props.settingsFile["UseProxy"])} </span> </Paper> <Paper className={classes.paper}>Use Proxy: <span className={classes.floatRight}>{String(this.props.settingsFile["UseProxy"])} </span> </Paper>
<Paper className={classes.paper}>Base URL: <span className={classes.floatRight}>{this.props.settingsFile["BaseURL"]} </span> </Paper> <Paper className={classes.paper}>Base URL: <span className={classes.floatRight}>{this.props.settingsFile["BaseURL"]} </span> </Paper>
<Paper className={classes.paper}>Max Active Torrents: <span className={classes.floatRight}>{this.props.settingsFile["MaxActiveTorrents"]} </span> </Paper>
</Grid> </Grid>

View File

@@ -53,7 +53,7 @@ class LoggingSettingsTab extends React.Component {
<Grid container spacing={8}> <Grid container spacing={8}>
<Grid item xs={12} sm={4}> <Grid item xs={12} sm={4}>
<Paper className={classes.paper}>Logging Output: <span className={classes.floatRight}>{this.props.settingsFile["LoggingOutput"]} </span></Paper> <Paper className={classes.paper}>Logging Output: <span className={classes.floatRight}>{this.props.settingsFile["LoggingOutput"]} </span></Paper>
<Paper className={classes.paper}>Logging Level: <span className={classes.floatRight}>{logLevel} </span> </Paper> <Paper className={classes.paper}>Logging Level: <span className={classes.floatRight}>{this.props.settingsFile["LoggingLevel"]} </span> </Paper>
</Grid> </Grid>

View File

@@ -15,3 +15,4 @@ export const NEW_SETTINGS_FILE = 'NEW_SETTINGS_FILE';
export const RSS_TORRENT_LIST = 'RSS_TORRENT_LIST'; export const RSS_TORRENT_LIST = 'RSS_TORRENT_LIST';
export const SERVER_MESSAGE = 'SERVER_MESSAGE'; export const SERVER_MESSAGE = 'SERVER_MESSAGE';
export const WEBSOCKET_STATE = 'WEBSOCKET_STATE'; export const WEBSOCKET_STATE = 'WEBSOCKET_STATE';
export const TOKEN_RETURN = 'TOKEN_RETURN';

View File

@@ -130,6 +130,13 @@ const reducer = (state = initialState, action) => {
serverPushMessage: action.serverPushMessage serverPushMessage: action.serverPushMessage
} }
case actionTypes.TOKEN_RETURN:
console.log("New token return", action.tokenReturn)
return {
... state,
tokenReturn: action.tokenReturn
}
case actionTypes.SET_BUTTON_STATE: case actionTypes.SET_BUTTON_STATE:
if (action.buttonState.length === 0) { //if selection is empty buttons will be default and selectionHashes will be blanked out and pushed to redux if (action.buttonState.length === 0) { //if selection is empty buttons will be default and selectionHashes will be blanked out and pushed to redux
let buttonStateFinal = state.buttonStateDefault //if no selection dispatch that to redux let buttonStateFinal = state.buttonStateDefault //if no selection dispatch that to redux
@@ -144,10 +151,9 @@ const reducer = (state = initialState, action) => {
selectedRows.push(state.torrentList[element]) //pushing the selected rows out of torrentlist selectedRows.push(state.torrentList[element]) //pushing the selected rows out of torrentlist
}); });
let buttonStateTest = selectedRows.filter(element => {
let buttonStateTest = selectedRows.filter(element => { //TODO fix this bad mess... we literally just need to filter for stopped and go from there
let result = [] let result = []
if (element.Status === "Downloading" || element.Status === "Awaiting Peers" || element.Status === "Seeding" || element.Status === "Completed"){ if (element.Status === "Downloading" || element.Status === "Awaiting Peers" || element.Status === "Seeding" || element.Status === "Completed" || element.Status === "Queued"){
result.push(element.Status) result.push(element.Status)
return result return result
} }
@@ -157,7 +163,6 @@ const reducer = (state = initialState, action) => {
if (buttonStateTest.length > 0 && buttonStateTest2.length === 0){ if (buttonStateTest.length > 0 && buttonStateTest2.length === 0){
let buttonStateFinal = [{startButton: "default", stopButton: "primary", deleteButton: "secondary", fSeedButton: "default", fRecheckButton: "primary"}] let buttonStateFinal = [{startButton: "default", stopButton: "primary", deleteButton: "secondary", fSeedButton: "default", fRecheckButton: "primary"}]
console.log("ButtonStateFil")
return { return {
...state, ...state,
buttonState: buttonStateFinal buttonState: buttonStateFinal

View File

@@ -136,7 +136,7 @@ class TorrentListTable extends React.Component {
<TableColumnReordering order={this.state.columnOrder} onOrderChange={this.changeColumnOrder} /> <TableColumnReordering order={this.state.columnOrder} onOrderChange={this.changeColumnOrder} />
<IntegratedSelection /> <IntegratedSelection />
<TableSelection selectByRowClick highlightSelected showSelectAll /> <TableSelection selectByRowClick highlightSelected showSelectAll />
<TableHeaderRow allowSorting allowResizing allowDragging /> <TableHeaderRow showSortingControls allowSorting allowResizing allowDragging />
</Grid> </Grid>
</Paper> </Paper>
); );

153
main.go
View File

@@ -33,6 +33,7 @@ var (
//Authenticated stores the value of the result of the client that connects to the server //Authenticated stores the value of the result of the client that connects to the server
Authenticated = false Authenticated = false
APP_ID = os.Getenv("APP_ID") APP_ID = os.Getenv("APP_ID")
sendJSON = make(chan interface{}) //channel for JSON messages
) )
var upgrader = websocket.Upgrader{ var upgrader = websocket.Upgrader{
@@ -48,10 +49,17 @@ func serveHome(w http.ResponseWriter, r *http.Request) {
s1.ExecuteTemplate(w, "base", map[string]string{"APP_ID": APP_ID}) s1.ExecuteTemplate(w, "base", map[string]string{"APP_ID": APP_ID})
} }
//handleMessages takes queued JSON messages from the sendJSON channel and writes them to the client websocket in order
func handleMessages(conn *websocket.Conn) {
for {
msgJSON := <-sendJSON
conn.WriteJSON(msgJSON)
}
}
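//Note: gorilla/websocket allows only one concurrent writer per connection, which is why the
//handlers below hand their responses to sendJSON instead of calling conn.WriteJSON themselves.
//Illustrative use from any goroutine (the message literal is only an example):
//  go func() {
//      sendJSON <- Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "example push"}
//  }()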
func handleAuthentication(conn *websocket.Conn, db *storm.DB) { func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
msg := Engine.Message{} msg := Engine.Message{}
err := conn.ReadJSON(&msg) err := conn.ReadJSON(&msg)
conn.WriteJSON(msg) //TODO just for testing, remove
payloadData, ok := msg.Payload.(map[string]interface{}) payloadData, ok := msg.Payload.(map[string]interface{})
clientAuthToken, tokenOk := payloadData["ClientAuthString"].(string) clientAuthToken, tokenOk := payloadData["ClientAuthString"].(string)
fmt.Println("ClientAuthToken:", clientAuthToken, "TokenOkay", tokenOk, "PayloadData", payloadData, "PayloadData Okay?", ok) fmt.Println("ClientAuthToken:", clientAuthToken, "TokenOkay", tokenOk, "PayloadData", payloadData, "PayloadData Okay?", ok)
@@ -65,6 +73,7 @@ func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": clientAuthToken}).Error("Unable to read authentication message") Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": clientAuthToken}).Error("Unable to read authentication message")
} }
fmt.Println("Authstring", clientAuthToken) fmt.Println("Authstring", clientAuthToken)
//clientAuthToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjbGllbnROYW1lIjoiZ29Ub3JyZW50V2ViVUkiLCJpc3MiOiJnb1RvcnJlbnRTZXJ2ZXIifQ.Lfqp9tm06CY4XfrqnNDeVLkq9c7rsbibDrUdPko8ffQ"
signingKeyStruct := Storage.FetchJWTTokens(db) signingKeyStruct := Storage.FetchJWTTokens(db)
singingKey := signingKeyStruct.SigningKey singingKey := signingKeyStruct.SigningKey
token, err := jwt.Parse(clientAuthToken, func(token *jwt.Token) (interface{}, error) { token, err := jwt.Parse(clientAuthToken, func(token *jwt.Token) (interface{}, error) {
@@ -77,6 +86,7 @@ func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
authFail := Engine.AuthResponse{MessageType: "authResponse", Payload: "Parsing of Token failed, ensure you have the correct token! Closing Connection"} authFail := Engine.AuthResponse{MessageType: "authResponse", Payload: "Parsing of Token failed, ensure you have the correct token! Closing Connection"}
conn.WriteJSON(authFail) conn.WriteJSON(authFail)
Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": token}).Error("Unable to parse token!") Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": token}).Error("Unable to parse token!")
fmt.Println("ENTIRE SUPPLIED TOKEN:", token, "CLIENTAUTHTOKEN", clientAuthToken)
conn.Close() conn.Close()
return return
} }
@@ -86,7 +96,7 @@ func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
fmt.Println("Claims", claims["ClientName"], claims["Issuer"]) fmt.Println("Claims", claims["ClientName"], claims["Issuer"])
Authenticated = true Authenticated = true
} else { } else {
Logger.WithFields(logrus.Fields{"error": err}).Error("Authentication Error occured, cannot complete!") Logger.WithFields(logrus.Fields{"error": err}).Error("Authentication Error occurred, cannot complete!")
} }
} }
@@ -94,6 +104,7 @@ func main() {
Engine.Logger = Logger //Injecting the logger into all the packages Engine.Logger = Logger //Injecting the logger into all the packages
Storage.Logger = Logger Storage.Logger = Logger
Settings.Logger = Logger Settings.Logger = Logger
var torrentQueues = Storage.TorrentQueues{}
Config := Settings.FullClientSettingsNew() //grabbing from settings.go Config := Settings.FullClientSettingsNew() //grabbing from settings.go
Engine.Config = Config Engine.Config = Config
if Config.LoggingOutput == "file" { if Config.LoggingOutput == "file" {
@@ -103,17 +114,18 @@ func main() {
if err != nil { if err != nil {
fmt.Println("Unable to create 'log' folder for logging.... please check permissions.. forcing output to stdout", err) fmt.Println("Unable to create 'log' folder for logging.... please check permissions.. forcing output to stdout", err)
Logger.Out = os.Stdout Logger.Out = os.Stdout
}
} else { } else {
os.Remove("logs/server.log") //cleanup the old log on every restart os.Remove("logs/server.log") //cleanup the old log on every restart
file, err := os.OpenFile("logs/server.log", os.O_CREATE|os.O_WRONLY, 0755) //creating the log file file, err := os.OpenFile("logs/server.log", os.O_CREATE|os.O_WRONLY, 0755) //creating the log file
defer file.Close() //TODO.. since we write to this constantly how does close work? //defer file.Close() //TODO.. since we write to this constantly how does close work?
if err != nil { if err != nil {
fmt.Println("Unable to create file for logging.... please check permissions.. forcing output to stdout") fmt.Println("Unable to create file for logging.... please check permissions.. forcing output to stdout")
Logger.Out = os.Stdout Logger.Out = os.Stdout
} }
fmt.Println("Logging to file logs/server.log")
Logger.Out = file Logger.Out = file
} }
}
} else { } else {
Logger.Out = os.Stdout Logger.Out = os.Stdout
} }
@@ -122,26 +134,33 @@ func main() {
httpAddr := Config.HTTPAddr httpAddr := Config.HTTPAddr
os.MkdirAll(Config.TFileUploadFolder, 0755) //creating a directory to store uploaded torrent files os.MkdirAll(Config.TFileUploadFolder, 0755) //creating a directory to store uploaded torrent files
os.MkdirAll(Config.TorrentWatchFolder, 0755) //creating a directory to watch for added .torrent files os.MkdirAll(Config.TorrentWatchFolder, 0755) //creating a directory to watch for added .torrent files
//Logger.WithFields(logrus.Fields{"Config": Config}).Info("Torrent Client Config has been generated...") Logger.WithFields(logrus.Fields{"Config": Config}).Info("Torrent Client Config has been generated...")
tclient, err := torrent.NewClient(&Config.TorrentConfig) //pulling out the torrent specific config to use tclient, err := torrent.NewClient(&Config.TorrentConfig) //pulling out the torrent specific config to use
if err != nil { if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Fatalf("Error creating torrent client: %s") Logger.WithFields(logrus.Fields{"error": err}).Fatalf("Error creating torrent client: %s")
} }
fmt.Printf("%+v\n", Config.TorrentConfig) //fmt.Printf("%+v\n", Config.TorrentConfig)
db, err := storm.Open("storage.db") //initializing the boltDB store that contains all the added torrents db, err := storm.Open("storage.db") //initializing the boltDB store that contains all the added torrents
if err != nil { if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Fatal("Error opening/creating storage.db") Logger.WithFields(logrus.Fields{"error": err}).Fatal("Error opening/creating storage.db")
} else {
Logger.WithFields(logrus.Fields{"error": err}).Info("Opening or creating storage.db...")
} }
defer db.Close() //deferring closing the database until the program closes
err = db.One("ID", 5, &torrentQueues)
if err != nil { //Create the torrent queue database
Logger.WithFields(logrus.Fields{"error": err}).Info("No Queue database found, assuming first run, creating database")
torrentQueues.ID = 5
db.Save(&torrentQueues)
}
tokens := Storage.IssuedTokensList{} //if first run setting up the authentication tokens tokens := Storage.IssuedTokensList{} //if first run setting up the authentication tokens
var signingKey []byte var signingKey []byte
err = db.One("ID", 3, &tokens) err = db.One("ID", 3, &tokens)
if err != nil { if err != nil {
Logger.WithFields(logrus.Fields{"RSSFeedStore": tokens, "error": err}).Info("No Tokens database found, assuming first run, generating token...") Logger.WithFields(logrus.Fields{"RSSFeedStore": tokens, "error": err}).Info("No Tokens database found, assuming first run, generating token...")
fmt.Println("Error", err)
fmt.Println("MAIN TOKEN: %+v\n", tokens)
tokens.ID = 3 //creating the initial store tokens.ID = 3 //creating the initial store
claims := Settings.GoTorrentClaims{ claims := Settings.GoTorrentClaims{
"goTorrentWebUI", "goTorrentWebUI",
@@ -188,12 +207,14 @@ func main() {
TorrentLocalArray := Storage.FetchAllStoredTorrents(db) //pulling in all the already added torrents - this is an array of ALL of the local storage torrents, they will be added back in via hash TorrentLocalArray := Storage.FetchAllStoredTorrents(db) //pulling in all the already added torrents - this is an array of ALL of the local storage torrents, they will be added back in via hash
if TorrentLocalArray != nil { //the first creation of the running torrent array //since we are adding all of them in we use a goroutine... just allows the web ui to load then it will load in the torrents
go Engine.CreateInitialTorrentArray(tclient, TorrentLocalArray, db) //adding all of the stored torrents into the torrent client Engine.CreateInitialTorrentArray(tclient, TorrentLocalArray, db, Config) //adding all of the stored torrents into the torrent client
//TODO add GO to this
} else { } else {
Logger.Info("Database is empty, no torrents loaded") Logger.Info("Database is empty, no torrents loaded")
} }
Engine.CheckTorrentWatchFolder(cronEngine, db, tclient, torrentLocalStorage, Config) //Every 5 minutes the engine will check the specified folder for new .torrent files Engine.CheckTorrentWatchFolder(cronEngine, db, tclient, torrentLocalStorage, Config, torrentQueues) //Every 5 minutes the engine will check the specified folder for new .torrent files
Engine.RefreshRSSCron(cronEngine, db, tclient, torrentLocalStorage, Config) // Refreshing the RSS feeds on an hourly basis to add torrents that show up in the RSS feed Engine.RefreshRSSCron(cronEngine, db, tclient, torrentLocalStorage, Config, torrentQueues) // Refreshing the RSS feeds on an hourly basis to add torrents that show up in the RSS feed
Engine.CheckTorrentsCron(cronEngine, db, tclient, Config) //Every 30 seconds all torrents are checked to see if queue changes need to be made or they need to be stopped due to ratio
router := mux.NewRouter() //setting up the handler for the web backend router := mux.NewRouter() //setting up the handler for the web backend
router.HandleFunc("/", serveHome) //Serving the main page for our SPA router.HandleFunc("/", serveHome) //Serving the main page for our SPA
@@ -231,6 +252,8 @@ func main() {
Engine.Conn = conn Engine.Conn = conn
Storage.Conn = conn Storage.Conn = conn
go handleMessages(conn) //Starting the message writer goroutine that sends queued JSON responses to the client
MessageLoop: //Tagging this so we can continue out of it with any errors we encounter that are failing MessageLoop: //Tagging this so we can continue out of it with any errors we encounter that are failing
for { for {
runningTorrents := tclient.Torrents() //getting running torrents here since multiple cases ask for the running torrents runningTorrents := tclient.Torrents() //getting running torrents here since multiple cases ask for the running torrents
@@ -264,35 +287,37 @@ func main() {
Logger.WithFields(logrus.Fields{"clientName": payloadData["ClientName"].(string)}).Info("New Auth Token creation request") Logger.WithFields(logrus.Fields{"clientName": payloadData["ClientName"].(string)}).Info("New Auth Token creation request")
fmt.Println("Signing Key", signingKey) fmt.Println("Signing Key", signingKey)
token := Settings.GenerateToken(claims, signingKey) token := Settings.GenerateToken(claims, signingKey)
tokenReturn := Settings.TokenReturn{TokenReturn: token} tokenReturn := Settings.TokenReturn{MessageType: "TokenReturn", TokenReturn: token}
tokensDB := Storage.FetchJWTTokens(db) tokensDB := Storage.FetchJWTTokens(db)
tokensDB.TokenNames = append(tokens.TokenNames, Storage.SingleToken{payloadData["ClientName"].(string)}) tokensDB.TokenNames = append(tokens.TokenNames, Storage.SingleToken{payloadData["ClientName"].(string)})
db.Update(&tokensDB) //adding the new token client name to the database db.Update(&tokensDB) //adding the new token client name to the database
conn.WriteJSON(tokenReturn) sendJSON <- tokenReturn
case "torrentListRequest": case "torrentListRequest": //This will run automatically if a webUI is open
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested TorrentList Update") Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested TorrentList Update")
TorrentLocalArray = Storage.FetchAllStoredTorrents(db) //Required to re-read th database since we write to the DB and this will pull the changes from it go func() { //running updates in separate thread so can still accept commands
TorrentLocalArray = Storage.FetchAllStoredTorrents(db) //Required to re-read the database since we write to the DB and this will pull the changes from it
RunningTorrentArray = Engine.CreateRunningTorrentArray(tclient, TorrentLocalArray, PreviousTorrentArray, Config, db) //Updates the RunningTorrentArray with the current client data as well RunningTorrentArray = Engine.CreateRunningTorrentArray(tclient, TorrentLocalArray, PreviousTorrentArray, Config, db) //Updates the RunningTorrentArray with the current client data as well
PreviousTorrentArray = RunningTorrentArray PreviousTorrentArray = RunningTorrentArray
torrentlistArray := Engine.TorrentList{MessageType: "torrentList", ClientDBstruct: RunningTorrentArray, Totaltorrents: len(RunningTorrentArray)} torrentlistArray := Engine.TorrentList{MessageType: "torrentList", ClientDBstruct: RunningTorrentArray, Totaltorrents: len(RunningTorrentArray)}
Logger.WithFields(logrus.Fields{"torrentList": torrentlistArray, "previousTorrentList": PreviousTorrentArray}).Debug("Previous and Current Torrent Lists for sending to client") Logger.WithFields(logrus.Fields{"torrentList": torrentlistArray, "previousTorrentList": PreviousTorrentArray}).Debug("Previous and Current Torrent Lists for sending to client")
conn.WriteJSON(torrentlistArray) sendJSON <- torrentlistArray
}()
case "torrentFileListRequest": //client requested a filelist update case "torrentFileListRequest": //client requested a filelist update
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested FileList Update") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested FileList Update")
fileListArrayRequest := payloadData["FileListHash"].(string) fileListArrayRequest := payloadData["FileListHash"].(string)
FileListArray := Engine.CreateFileListArray(tclient, fileListArrayRequest, db, Config) FileListArray := Engine.CreateFileListArray(tclient, fileListArrayRequest, db, Config)
conn.WriteJSON(FileListArray) //writing the JSON to the client sendJSON <- FileListArray
case "torrentPeerListRequest": case "torrentPeerListRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested PeerList Update") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested PeerList Update")
peerListArrayRequest := payloadData["PeerListHash"].(string) peerListArrayRequest := payloadData["PeerListHash"].(string)
torrentPeerList := Engine.CreatePeerListArray(tclient, peerListArrayRequest) torrentPeerList := Engine.CreatePeerListArray(tclient, peerListArrayRequest)
conn.WriteJSON(torrentPeerList) sendJSON <- torrentPeerList
case "fetchTorrentsByLabel": //TODO test this to make sure it works case "fetchTorrentsByLabel":
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested Torrents by Label") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Torrents by Label")
label := payloadData["Label"].(string) label := payloadData["Label"].(string)
torrentsByLabel := Storage.FetchTorrentsByLabel(db, label) torrentsByLabel := Storage.FetchTorrentsByLabel(db, label)
RunningTorrentArray = Engine.CreateRunningTorrentArray(tclient, TorrentLocalArray, PreviousTorrentArray, Config, db) RunningTorrentArray = Engine.CreateRunningTorrentArray(tclient, TorrentLocalArray, PreviousTorrentArray, Config, db)
@@ -304,10 +329,10 @@ func main() {
} }
} }
} }
conn.WriteJSON(labelRunningArray) sendJSON <- labelRunningArray
case "changeStorageValue": case "changeStorageValue":
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested Storage Location Update") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Storage Location Update")
newStorageLocation := payloadData["StorageValue"].(string) newStorageLocation := payloadData["StorageValue"].(string)
hashes := payloadData["ChangeStorageHashes"].([]interface{}) hashes := payloadData["ChangeStorageHashes"].([]interface{})
for _, singleHash := range hashes { for _, singleHash := range hashes {
@@ -328,12 +353,12 @@ func main() {
} }
case "settingsFileRequest": case "settingsFileRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested Settings File") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Settings File")
clientSettingsFile := Engine.SettingsFile{MessageType: "settingsFile", Config: Config} clientSettingsFile := Engine.SettingsFile{MessageType: "settingsFile", Config: Config}
conn.WriteJSON(clientSettingsFile) sendJSON <- clientSettingsFile
case "rssFeedRequest": case "rssFeedRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested RSS Update") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested RSS Update")
RSSList := Storage.FetchRSSFeeds(db) RSSList := Storage.FetchRSSFeeds(db)
RSSJSONFeed := Engine.RSSJSONList{MessageType: "rssList", TotalRSSFeeds: len(RSSList.RSSFeeds)} RSSJSONFeed := Engine.RSSJSONList{MessageType: "rssList", TotalRSSFeeds: len(RSSList.RSSFeeds)}
RSSsingleFeed := Engine.RSSFeedsNames{} RSSsingleFeed := Engine.RSSFeedsNames{}
@@ -342,13 +367,13 @@ func main() {
RSSsingleFeed.RSSFeedURL = singleFeed.URL RSSsingleFeed.RSSFeedURL = singleFeed.URL
RSSJSONFeed.RSSFeeds = append(RSSJSONFeed.RSSFeeds, RSSsingleFeed) RSSJSONFeed.RSSFeeds = append(RSSJSONFeed.RSSFeeds, RSSsingleFeed)
} }
conn.WriteJSON(RSSJSONFeed) sendJSON <- RSSJSONFeed
case "addRSSFeed": case "addRSSFeed":
newRSSFeed := payloadData["RSSURL"].(string) newRSSFeed := payloadData["RSSURL"].(string)
Logger.WithFields(logrus.Fields{"message": newRSSFeed}).Debug("Client Added RSS Feed") Logger.WithFields(logrus.Fields{"message": newRSSFeed}).Info("Client Added RSS Feed")
fullRSSFeeds := Storage.FetchRSSFeeds(db) fullRSSFeeds := Storage.FetchRSSFeeds(db)
Logger.WithFields(logrus.Fields{"RSSFeeds": fullRSSFeeds}).Debug("Pulled Full RSS Feeds") Logger.WithFields(logrus.Fields{"RSSFeeds": fullRSSFeeds}).Info("Pulled Full RSS Feeds")
for _, singleFeed := range fullRSSFeeds.RSSFeeds { for _, singleFeed := range fullRSSFeeds.RSSFeeds {
if newRSSFeed == singleFeed.URL || newRSSFeed == "" { if newRSSFeed == singleFeed.URL || newRSSFeed == "" {
Logger.WithFields(logrus.Fields{"RSSFeed": newRSSFeed}).Warn("Empty URL or Duplicate RSS URL to one already in database! Rejecting submission") Logger.WithFields(logrus.Fields{"RSSFeed": newRSSFeed}).Warn("Empty URL or Duplicate RSS URL to one already in database! Rejecting submission")
@@ -375,7 +400,7 @@ func main() {
case "deleteRSSFeed": case "deleteRSSFeed":
deleteRSSFeed := payloadData["RSSURL"].(string) deleteRSSFeed := payloadData["RSSURL"].(string)
Logger.WithFields(logrus.Fields{"message": deleteRSSFeed}).Debug("Deleting RSS Feed") Logger.WithFields(logrus.Fields{"message": deleteRSSFeed}).Info("Deleting RSS Feed")
Storage.DeleteRSSFeed(db, deleteRSSFeed) Storage.DeleteRSSFeed(db, deleteRSSFeed)
fullRSSFeeds := Storage.FetchRSSFeeds(db) fullRSSFeeds := Storage.FetchRSSFeeds(db)
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Deleting RSS feed..."}, conn) Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Deleting RSS feed..."}, conn)
@@ -386,8 +411,8 @@ func main() {
Logger.WithFields(logrus.Fields{"RSSFeed": RSSFeedURL}).Info("Requesting torrentList for feed..") Logger.WithFields(logrus.Fields{"RSSFeed": RSSFeedURL}).Info("Requesting torrentList for feed..")
UpdatedRSSFeed := Engine.RefreshSingleRSSFeed(db, Storage.FetchSpecificRSSFeed(db, RSSFeedURL)) UpdatedRSSFeed := Engine.RefreshSingleRSSFeed(db, Storage.FetchSpecificRSSFeed(db, RSSFeedURL))
TorrentRSSList := Engine.SingleRSSFeedMessage{MessageType: "rssTorrentList", URL: RSSFeedURL, Name: UpdatedRSSFeed.Name, TotalTorrents: len(UpdatedRSSFeed.Torrents), Torrents: UpdatedRSSFeed.Torrents} TorrentRSSList := Engine.SingleRSSFeedMessage{MessageType: "rssTorrentList", URL: RSSFeedURL, Name: UpdatedRSSFeed.Name, TotalTorrents: len(UpdatedRSSFeed.Torrents), Torrents: UpdatedRSSFeed.Torrents}
Logger.WithFields(logrus.Fields{"TorrentRSSList": TorrentRSSList}).Debug("Returning Torrent list from RSSFeed to client") Logger.WithFields(logrus.Fields{"TorrentRSSList": TorrentRSSList}).Info("Returning Torrent list from RSSFeed to client")
conn.WriteJSON(TorrentRSSList) sendJSON <- TorrentRSSList
case "magnetLinkSubmit": //if we detect a magnet link we will be adding a magnet torrent case "magnetLinkSubmit": //if we detect a magnet link we will be adding a magnet torrent
storageValue, ok := payloadData["StorageValue"].(string) storageValue, ok := payloadData["StorageValue"].(string)
@@ -418,8 +443,18 @@ func main() {
} }
Logger.WithFields(logrus.Fields{"clientTorrent": clientTorrent, "magnetLink": magnetLink}).Info("Adding torrent to client!") Logger.WithFields(logrus.Fields{"clientTorrent": clientTorrent, "magnetLink": magnetLink}).Info("Adding torrent to client!")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Received MagnetLink"}, conn) Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Received MagnetLink"}, conn)
Engine.StartTorrent(clientTorrent, torrentLocalStorage, db, "magnet", "", storageValue, labelValue, Config) //starting the torrent and creating local DB entry if len(torrentQueues.ActiveTorrents) > Config.MaxActiveTorrents {
Logger.WithFields(logrus.Fields{"Name: ": clientTorrent.Name()}).Info("Adding New torrent to active, pushing other torrent to queue")
removeTorrent := torrentQueues.ActiveTorrents[:1]
for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == removeTorrent[0] {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.RemoveTorrentFromActive(&oldTorrentInfo, singleTorrent, db)
Storage.UpdateStorageTick(db, oldTorrentInfo)
}
}
}
go Engine.AddTorrent(clientTorrent, torrentLocalStorage, db, "magnet", "", storageValue, labelValue, Config) //starting the torrent and creating local DB entry
} }
case "torrentFileSubmit": case "torrentFileSubmit":
@@ -465,7 +500,18 @@ func main() {
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "error", Payload: "Unable to add Torrent to torrent server"}, conn) Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "error", Payload: "Unable to add Torrent to torrent server"}, conn)
} }
Logger.WithFields(logrus.Fields{"clienttorrent": clientTorrent.Name(), "filename": filePathAbs}).Info("Added torrent") Logger.WithFields(logrus.Fields{"clienttorrent": clientTorrent.Name(), "filename": filePathAbs}).Info("Added torrent")
Engine.StartTorrent(clientTorrent, torrentLocalStorage, db, "file", filePathAbs, storageValue, labelValue, Config) if len(torrentQueues.ActiveTorrents) >= Config.MaxActiveTorrents {
Logger.WithFields(logrus.Fields{"Name: ": clientTorrent.Name()}).Info("Adding New torrent to active, pushing other torrent to queue")
removeTorrent := torrentQueues.ActiveTorrents[:1]
for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == removeTorrent[0] {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.RemoveTorrentFromActive(&oldTorrentInfo, singleTorrent, db)
Storage.UpdateStorageTick(db, oldTorrentInfo)
go Engine.AddTorrent(clientTorrent, torrentLocalStorage, db, "file", filePathAbs, storageValue, labelValue, Config)
}
}
}
case "stopTorrents": case "stopTorrents":
torrentHashes := payloadData["TorrentHashes"].([]interface{}) torrentHashes := payloadData["TorrentHashes"].([]interface{})
@@ -475,9 +521,7 @@ func main() {
if singleTorrent.InfoHash().String() == singleSelection { if singleTorrent.InfoHash().String() == singleSelection {
Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for stopping torrents") Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for stopping torrents")
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
oldTorrentInfo.TorrentStatus = "Stopped" Engine.StopTorrent(singleTorrent, &oldTorrentInfo, db)
oldTorrentInfo.MaxConnections = 0
Storage.UpdateStorageTick(db, oldTorrentInfo) //Updating the torrent status
} }
} }
} }
@@ -491,6 +535,8 @@ func main() {
for _, singleSelection := range torrentHashes { for _, singleSelection := range torrentHashes {
if singleTorrent.InfoHash().String() == singleSelection { if singleTorrent.InfoHash().String() == singleSelection {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
torrentQueues = Storage.FetchQueues(db)
Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for deleting torrents") Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for deleting torrents")
if withData { if withData {
oldTorrentInfo.TorrentStatus = "DroppedData" //Will be cleaned up the next engine loop since deleting a torrent mid loop can cause issues oldTorrentInfo.TorrentStatus = "DroppedData" //Will be cleaned up the next engine loop since deleting a torrent mid loop can cause issues
@@ -498,6 +544,7 @@ func main() {
oldTorrentInfo.TorrentStatus = "Dropped" oldTorrentInfo.TorrentStatus = "Dropped"
} }
Storage.UpdateStorageTick(db, oldTorrentInfo) Storage.UpdateStorageTick(db, oldTorrentInfo)
Storage.UpdateQueues(db, torrentQueues)
} }
} }
} }
@@ -509,17 +556,31 @@ func main() {
for _, singleTorrent := range runningTorrents { for _, singleTorrent := range runningTorrents {
for _, singleSelection := range torrentHashes { for _, singleSelection := range torrentHashes {
if singleTorrent.InfoHash().String() == singleSelection { if singleTorrent.InfoHash().String() == singleSelection {
Logger.WithFields(logrus.Fields{"infoHash": singleTorrent.InfoHash().String()}).Debug("Found matching torrent to start") Logger.WithFields(logrus.Fields{"infoHash": singleTorrent.InfoHash().String()}).Info("Found matching torrent to start")
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
oldTorrentInfo.TorrentStatus = "Running"
oldTorrentInfo.MaxConnections = 80
Logger.WithFields(logrus.Fields{"Torrent": oldTorrentInfo.TorrentName}).Info("Changing database to torrent running with 80 max connections") Logger.WithFields(logrus.Fields{"Torrent": oldTorrentInfo.TorrentName}).Info("Changing database to torrent running with 80 max connections")
oldTorrentInfo.TorrentStatus = "ForceStart"
oldTorrentInfo.MaxConnections = 80
Storage.UpdateStorageTick(db, oldTorrentInfo) //Updating the torrent status Storage.UpdateStorageTick(db, oldTorrentInfo) //Updating the torrent status
Engine.AddTorrentToForceStart(&oldTorrentInfo, singleTorrent, db)
}
torrentQueues = Storage.FetchQueues(db)
if len(torrentQueues.ActiveTorrents) > Config.MaxActiveTorrents { //Since we are starting a new torrent, stop the last torrent in the queue if the active list is full
//removeTorrent := torrentQueues.ActiveTorrents[len(torrentQueues.ActiveTorrents)-1]
removeTorrent := torrentQueues.ActiveTorrents[len(torrentQueues.ActiveTorrents)-1]
for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == removeTorrent {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.RemoveTorrentFromActive(&oldTorrentInfo, singleTorrent, db)
Storage.UpdateStorageTick(db, oldTorrentInfo)
}
}
} }
} }
} }
case "forceUploadTorrents": case "forceUploadTorrents": //TODO allow force to override total limit of queued torrents?
torrentHashes := payloadData["TorrentHashes"].([]interface{}) torrentHashes := payloadData["TorrentHashes"].([]interface{})
Logger.WithFields(logrus.Fields{"selection": msg.Payload}).Info("Matched for force Uploading Torrents") Logger.WithFields(logrus.Fields{"selection": msg.Payload}).Info("Matched for force Uploading Torrents")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Received Force Start Request"}, conn) Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Received Force Start Request"}, conn)
@@ -531,7 +592,6 @@ func main() {
oldTorrentInfo.TorrentUploadLimit = false // no upload limit for this torrent oldTorrentInfo.TorrentUploadLimit = false // no upload limit for this torrent
oldTorrentInfo.TorrentStatus = "Running" oldTorrentInfo.TorrentStatus = "Running"
oldTorrentInfo.MaxConnections = 80 oldTorrentInfo.MaxConnections = 80
fmt.Println("OldtorrentinfoName", oldTorrentInfo.TorrentName)
Logger.WithFields(logrus.Fields{"NewMax": oldTorrentInfo.MaxConnections, "Torrent": oldTorrentInfo.TorrentName}).Info("Setting max connection from zero to 80") Logger.WithFields(logrus.Fields{"NewMax": oldTorrentInfo.MaxConnections, "Torrent": oldTorrentInfo.TorrentName}).Info("Setting max connection from zero to 80")
Storage.UpdateStorageTick(db, oldTorrentInfo) //Updating the torrent status Storage.UpdateStorageTick(db, oldTorrentInfo) //Updating the torrent status
} }
@@ -581,14 +641,13 @@ func main() {
} }
default: default:
//conn.Close()
Logger.WithFields(logrus.Fields{"message": msg}).Info("Unrecognized Message from client... ignoring") Logger.WithFields(logrus.Fields{"message": msg}).Info("Unrecognized Message from client... ignoring")
return return
} }
} }
}) })
if Config.UseProxy { if Config.UseReverseProxy {
err := http.ListenAndServe(httpAddr, handlers.ProxyHeaders(router)) err := http.ListenAndServe(httpAddr, handlers.ProxyHeaders(router))
if err != nil { if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Fatal("Unable to listen on the http Server!") Logger.WithFields(logrus.Fields{"error": err}).Fatal("Unable to listen on the http Server!")
@@ -596,7 +655,7 @@ func main() {
} else { } else {
err := http.ListenAndServe(httpAddr, nil) //Can't send proxy headers if not used since that can be a security issue err := http.ListenAndServe(httpAddr, nil) //Can't send proxy headers if not used since that can be a security issue
if err != nil { if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Fatal("Unable to listen on the http Server with no proxy headers!") Logger.WithFields(logrus.Fields{"error": err}).Fatal("Unable to listen on the http Server! (Maybe wrong IP in config, port already in use?) (Config: Not using proxy, see error for more details)")
} }
} }
} }
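The magnetLinkSubmit, torrentFileSubmit and startTorrents cases above all repeat the same demotion step once the active list reaches Config.MaxActiveTorrents. A minimal sketch of that shared pattern follows, assuming the Engine and Storage helpers shown in this diff and a *torrent.Client handle; enforceActiveCap is a hypothetical name, the commit inlines this logic in each case:

// Sketch only: not part of the commit; assumes the storm DB handle and helpers used in main.go.
func enforceActiveCap(db *storm.DB, tclient *torrent.Client, maxActive int) {
	torrentQueues := Storage.FetchQueues(db)
	if len(torrentQueues.ActiveTorrents) < maxActive {
		return //still room in the active list, nothing to demote
	}
	oldestHash := torrentQueues.ActiveTorrents[0] //first hash is the oldest active torrent
	for _, singleTorrent := range tclient.Torrents() {
		if singleTorrent.InfoHash().String() == oldestHash {
			oldTorrentInfo := Storage.FetchTorrentFromStorage(db, oldestHash)
			Engine.RemoveTorrentFromActive(&oldTorrentInfo, singleTorrent, db) //demotes the torrent to the queued list
			Storage.UpdateStorageTick(db, oldTorrentInfo)
		}
	}
}

After the demotion the handler can then call Engine.AddTorrent (or Engine.AddTorrentToForceStart) for the incoming torrent, as the cases above do.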

File diff suppressed because it is too large

View File

@@ -8,12 +8,15 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
//AuthRequest is a struct sent by a client with an authstring (JWT Token) to validate they have access to the server
type AuthRequest struct { type AuthRequest struct {
MessageType string `json:"MessageType"` MessageType string `json:"MessageType"`
AuthString string `json:"AuthString"` AuthString string `json:"AuthString"`
} }
//TokenReturn is a struct sent by the server to a client with a newly generated authstring
type TokenReturn struct { type TokenReturn struct {
MessageType string `json:"MessageType"`
TokenReturn string `json:"TokenReturn"` TokenReturn string `json:"TokenReturn"`
} }
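For orientation, a hedged sketch of how a websocket client might exchange these two structs; the "authRequest" MessageType value, the conn connection and the storedJWT variable are illustrative assumptions, not defined by this commit:

// Hypothetical client-side exchange over a gorilla/websocket connection.
authReq := AuthRequest{MessageType: "authRequest", AuthString: storedJWT}
if err := conn.WriteJSON(authReq); err != nil {
	log.Fatal("failed to send auth request: ", err)
}
var tokenResp TokenReturn
if err := conn.ReadJSON(&tokenResp); err != nil {
	log.Fatal("failed to read token response: ", err)
}
fmt.Println("received", tokenResp.MessageType, "token:", tokenResp.TokenReturn)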

View File

@@ -42,7 +42,7 @@ func GenerateClientConfigFile(config FullClientSettings, authString string) {
` `
} }
if config.UseProxy { if config.UseReverseProxy {
clientFile = ` clientFile = `
ClientAuthString = "` + authString + `" ClientAuthString = "` + authString + `"
` + webUIAuth + ` ` + webUIAuth + `

View File

@@ -4,6 +4,7 @@ import (
"crypto/sha256" "crypto/sha256"
"fmt" "fmt"
"path/filepath" "path/filepath"
"strconv"
"strings" "strings"
"golang.org/x/time/rate" "golang.org/x/time/rate"
@@ -17,12 +18,15 @@ import (
//Logger is the injected variable for global logger //Logger is the injected variable for global logger
var Logger *logrus.Logger var Logger *logrus.Logger
//ClientConnectSettings contains all the settings for connecting and authenticating to the server
type ClientConnectSettings struct { type ClientConnectSettings struct {
HTTPAddr string HTTPAddr string
HTTPAddrIP string HTTPAddrIP string
UseProxy bool UseReverseProxy bool
UseSocksProxy bool
WebsocketClientPort string WebsocketClientPort string
BaseURL string BaseURL string
SocksProxyURL string
ClientUsername string ClientUsername string
ClientPassword string ClientPassword string
PushBulletToken string `json:"-"` PushBulletToken string `json:"-"`
@@ -34,12 +38,13 @@ type FullClientSettings struct {
LoggingLevel logrus.Level LoggingLevel logrus.Level
LoggingOutput string LoggingOutput string
Version int Version int
TorrentConfig torrent.Config `json:"-"` TorrentConfig torrent.ClientConfig `json:"-"`
TFileUploadFolder string TFileUploadFolder string
SeedRatioStop float64 SeedRatioStop float64
DefaultMoveFolder string DefaultMoveFolder string
TorrentWatchFolder string TorrentWatchFolder string
ClientConnectSettings ClientConnectSettings
MaxActiveTorrents int
} }
//default is called if there is a parsing error //default is called if there is a parsing error
@@ -47,16 +52,16 @@ func defaultConfig() FullClientSettings {
var Config FullClientSettings var Config FullClientSettings
Config.ID = 4 //Unique ID for StormDB Config.ID = 4 //Unique ID for StormDB
Config.Version = 1.0 Config.Version = 1.0
Config.LoggingLevel = 3 //Warn level Config.LoggingLevel = logrus.WarnLevel //Warn level
Config.TorrentConfig.DataDir = "downloads" //the absolute or relative path of the default download directory for torrents Config.TorrentConfig.DataDir = "downloads" //the absolute or relative path of the default download directory for torrents
Config.TFileUploadFolder = "uploadedTorrents" Config.TFileUploadFolder = "uploadedTorrents"
Config.TorrentConfig.Seed = true Config.TorrentConfig.Seed = true
Config.HTTPAddr = ":8000" Config.HTTPAddr = ":8000"
Config.SeedRatioStop = 1.50 Config.SeedRatioStop = 1.50
Config.TorrentConfig.DHTConfig = dht.ServerConfig{ //Config.TorrentConfig.DhtStartingNodes = dht.StartingNodesGetter{
StartingNodes: dht.GlobalBootstrapAddrs, // StartingNodes: dht.GlobalBootstrapAddrs,
} //}
return Config return Config
} }
@@ -70,6 +75,8 @@ func dhtServerSettings(dhtConfig dht.ServerConfig) dht.ServerConfig {
func calculateRateLimiters(uploadRate, downloadRate string) (*rate.Limiter, *rate.Limiter) { //TODO reorg func calculateRateLimiters(uploadRate, downloadRate string) (*rate.Limiter, *rate.Limiter) { //TODO reorg
var uploadRateLimiterSize int var uploadRateLimiterSize int
var downloadRateLimiterSize int var downloadRateLimiterSize int
var downloadRateLimiter *rate.Limiter
var uploadRateLimiter *rate.Limiter
switch uploadRate { switch uploadRate {
case "Low": case "Low":
@@ -79,8 +86,8 @@ func calculateRateLimiters(uploadRate, downloadRate string) (*rate.Limiter, *rat
case "High": case "High":
uploadRateLimiterSize = 1500000 uploadRateLimiterSize = 1500000
default: default:
downloadRateLimiter := rate.NewLimiter(rate.Inf, 0) downloadRateLimiter = rate.NewLimiter(rate.Inf, 0)
uploadRateLimiter := rate.NewLimiter(rate.Inf, 0) uploadRateLimiter = rate.NewLimiter(rate.Inf, 0)
return downloadRateLimiter, uploadRateLimiter return downloadRateLimiter, uploadRateLimiter
} }
@@ -89,17 +96,16 @@ func calculateRateLimiters(uploadRate, downloadRate string) (*rate.Limiter, *rat
downloadRateLimiterSize = 50000 downloadRateLimiterSize = 50000
case "Medium": case "Medium":
downloadRateLimiterSize = 500000 downloadRateLimiterSize = 500000
fmt.Println("Medium Rate Limit...")
case "High": case "High":
downloadRateLimiterSize = 1500000 downloadRateLimiterSize = 1500000
default: default:
downloadRateLimiter := rate.NewLimiter(rate.Inf, 0) downloadRateLimiter = rate.NewLimiter(rate.Inf, 0)
uploadRateLimiter := rate.NewLimiter(rate.Inf, 0) uploadRateLimiter = rate.NewLimiter(rate.Inf, 0)
return downloadRateLimiter, uploadRateLimiter return downloadRateLimiter, uploadRateLimiter
} }
var limitPerSecondUl = rate.Limit(uploadRateLimiterSize) uploadRateLimiter = rate.NewLimiter(rate.Limit(uploadRateLimiterSize), uploadRateLimiterSize)
uploadRateLimiter := rate.NewLimiter(limitPerSecondUl, uploadRateLimiterSize) downloadRateLimiter = rate.NewLimiter(rate.Limit(downloadRateLimiterSize), downloadRateLimiterSize)
var limitPerSecondDl = rate.Limit(uploadRateLimiterSize)
downloadRateLimiter := rate.NewLimiter(limitPerSecondDl, downloadRateLimiterSize)
return downloadRateLimiter, uploadRateLimiter return downloadRateLimiter, uploadRateLimiter
} }
@@ -116,6 +122,7 @@ func FullClientSettingsNew() FullClientSettings {
var httpAddr string var httpAddr string
var baseURL string var baseURL string
var socksProxyURLBase string
var websocketClientPort string var websocketClientPort string
var logLevel logrus.Level var logLevel logrus.Level
//logging //logging
@@ -123,24 +130,25 @@ func FullClientSettingsNew() FullClientSettings {
logOutput := viper.GetString("serverConfig.LogOutput") logOutput := viper.GetString("serverConfig.LogOutput")
switch logLevelString { //Options = Debug 5, Info 4, Warn 3, Error 2, Fatal 1, Panic 0 switch logLevelString { //Options = Debug 5, Info 4, Warn 3, Error 2, Fatal 1, Panic 0
case "Panic": case "Panic":
logLevel = 0 logLevel = logrus.PanicLevel
case "Fatal": case "Fatal":
logLevel = 1 logLevel = logrus.FatalLevel
case "Error": case "Error":
logLevel = 2 logLevel = logrus.ErrorLevel
case "Warn": case "Warn":
logLevel = 3 logLevel = logrus.WarnLevel
case "Info": case "Info":
logLevel = 4 logLevel = logrus.InfoLevel
case "Debug": case "Debug":
logLevel = 5 logLevel = logrus.DebugLevel
default: default:
logLevel = 3 logLevel = logrus.WarnLevel
} }
//HTTP, proxy //HTTP, proxy
httpAddrIP := viper.GetString("serverConfig.ServerAddr") httpAddrIP := viper.GetString("serverConfig.ServerAddr")
httpAddrPort := viper.GetString("serverConfig.ServerPort") httpAddrPortRaw := viper.GetString("serverConfig.ServerPort")
httpAddrPort := ":" + httpAddrPortRaw //adding the separator required for running the webui
httpAddr = httpAddrIP + httpAddrPort httpAddr = httpAddrIP + httpAddrPort
proxySet := viper.GetBool("reverseProxy.ProxyEnabled") proxySet := viper.GetBool("reverseProxy.ProxyEnabled")
websocketClientPort = strings.TrimLeft(viper.GetString("serverConfig.ServerPort"), ":") //Trimming off the colon in front of the port websocketClientPort = strings.TrimLeft(viper.GetString("serverConfig.ServerPort"), ":") //Trimming off the colon in front of the port
@@ -148,6 +156,10 @@ func FullClientSettingsNew() FullClientSettings {
baseURL = viper.GetString("reverseProxy.BaseURL") baseURL = viper.GetString("reverseProxy.BaseURL")
fmt.Println("WebsocketClientPort", viper.GetString("serverConfig.ServerPort")) fmt.Println("WebsocketClientPort", viper.GetString("serverConfig.ServerPort"))
} }
socksProxySet := viper.GetBool("socksProxy.ProxyEnabled")
if socksProxySet {
socksProxyURLBase = viper.GetString("reverseProxy.BaseURL")
}
//Client Authentication //Client Authentication
clientAuthEnabled := viper.GetBool("goTorrentWebUI.WebUIAuth") clientAuthEnabled := viper.GetBool("goTorrentWebUI.WebUIAuth")
var webUIUser string var webUIUser string
@@ -177,9 +189,9 @@ func FullClientSettingsNew() FullClientSettings {
//Rate Limiters //Rate Limiters
//var uploadRateLimiter *rate.Limiter //var uploadRateLimiter *rate.Limiter
//var downloadRateLimiter *rate.Limiter //var downloadRateLimiter *rate.Limiter
//uploadRate := viper.GetString("serverConfig.UploadRateLimit") uploadRate := viper.GetString("serverConfig.UploadRateLimit")
//downloadRate := viper.GetString("serverConfig.DownloadRateLimit") downloadRate := viper.GetString("serverConfig.DownloadRateLimit")
//downloadRateLimiter, uploadRateLimiter = calculateRateLimiters(uploadRate, downloadRate) downloadRateLimiter, uploadRateLimiter := calculateRateLimiters(uploadRate, downloadRate)
//Internals //Internals
dataDir := filepath.ToSlash(viper.GetString("torrentClientConfig.DownloadDir")) //Converting the string literal into a filepath dataDir := filepath.ToSlash(viper.GetString("torrentClientConfig.DownloadDir")) //Converting the string literal into a filepath
dataDirAbs, err := filepath.Abs(dataDir) //Converting to an absolute file path dataDirAbs, err := filepath.Abs(dataDir) //Converting to an absolute file path
@@ -191,6 +203,7 @@ func FullClientSettingsNew() FullClientSettings {
noDHT := viper.GetBool("torrentClientConfig.NoDHT") noDHT := viper.GetBool("torrentClientConfig.NoDHT")
noUpload := viper.GetBool("torrentClientConfig.NoUpload") noUpload := viper.GetBool("torrentClientConfig.NoUpload")
seed := viper.GetBool("torrentClientConfig.Seed") seed := viper.GetBool("torrentClientConfig.Seed")
maxActiveTorrents := viper.GetInt("serverConfig.MaxActiveTorrents")
peerID := viper.GetString("torrentClientConfig.PeerID") peerID := viper.GetString("torrentClientConfig.PeerID")
disableUTP := viper.GetBool("torrentClientConfig.DisableUTP") disableUTP := viper.GetBool("torrentClientConfig.DisableUTP")
@@ -198,13 +211,18 @@ func FullClientSettingsNew() FullClientSettings {
disableIPv6 := viper.GetBool("torrentClientConfig.DisableIPv6") disableIPv6 := viper.GetBool("torrentClientConfig.DisableIPv6")
debug := viper.GetBool("torrentClientConfig.Debug") debug := viper.GetBool("torrentClientConfig.Debug")
dhtServerConfig := dht.ServerConfig{ //dhtServerConfig := dht.StartingNodesGetter()
StartingNodes: dht.GlobalBootstrapAddrs,
} //if viper.IsSet("DHTConfig") {
if viper.IsSet("DHTConfig") { // fmt.Println("Reading in custom DHT config")
fmt.Println("Reading in custom DHT config") // dhtServerConfig = dhtServerSettings(dhtServerConfig)
dhtServerConfig = dhtServerSettings(dhtServerConfig) //}
strippedDHTPort := strings.TrimPrefix(listenAddr, ":")
DHTPortInt64, err := strconv.ParseInt(strippedDHTPort, 10, 0)
if err != nil {
fmt.Println("Failed creating 64-bit integer for goTorrent Port!", err)
} }
DHTPortInt := int(DHTPortInt64) //converting to integer
encryptionPolicy := torrent.EncryptionPolicy{ encryptionPolicy := torrent.EncryptionPolicy{
DisableEncryption: viper.GetBool("EncryptionPolicy.DisableEncryption"), DisableEncryption: viper.GetBool("EncryptionPolicy.DisableEncryption"),
@@ -212,23 +230,22 @@ func FullClientSettingsNew() FullClientSettings {
PreferNoEncryption: viper.GetBool("EncryptionPolicy.PreferNoEncryption"), PreferNoEncryption: viper.GetBool("EncryptionPolicy.PreferNoEncryption"),
} }
tConfig := torrent.Config{ tConfig := torrent.NewDefaultClientConfig()
DataDir: dataDirAbs,
ListenAddr: listenAddr, tConfig.DataDir = dataDirAbs
DisablePEX: disablePex, tConfig.ListenPort = DHTPortInt
NoDHT: noDHT, tConfig.DisablePEX = disablePex
DHTConfig: dhtServerConfig, tConfig.NoDHT = noDHT
NoUpload: noUpload, tConfig.NoUpload = noUpload
Seed: seed, tConfig.Seed = seed
//UploadRateLimiter: uploadRateLimiter, tConfig.UploadRateLimiter = uploadRateLimiter
//DownloadRateLimiter: downloadRateLimiter, tConfig.DownloadRateLimiter = downloadRateLimiter
PeerID: peerID, tConfig.PeerID = peerID
DisableUTP: disableUTP, tConfig.DisableUTP = disableUTP
DisableTCP: disableTCP, tConfig.DisableTCP = disableTCP
DisableIPv6: disableIPv6, tConfig.DisableIPv6 = disableIPv6
Debug: debug, tConfig.Debug = debug
EncryptionPolicy: encryptionPolicy, tConfig.EncryptionPolicy = encryptionPolicy
}
Config := FullClientSettings{ Config := FullClientSettings{
LoggingLevel: logLevel, LoggingLevel: logLevel,
@@ -237,17 +254,20 @@ func FullClientSettingsNew() FullClientSettings {
ClientConnectSettings: ClientConnectSettings{ ClientConnectSettings: ClientConnectSettings{
HTTPAddr: httpAddr, HTTPAddr: httpAddr,
HTTPAddrIP: httpAddrIP, HTTPAddrIP: httpAddrIP,
UseProxy: proxySet, UseReverseProxy: proxySet,
UseSocksProxy: socksProxySet,
WebsocketClientPort: websocketClientPort, WebsocketClientPort: websocketClientPort,
ClientUsername: webUIUser, ClientUsername: webUIUser,
ClientPassword: webUIPasswordHash, ClientPassword: webUIPasswordHash,
BaseURL: baseURL, BaseURL: baseURL,
SocksProxyURL: socksProxyURLBase,
PushBulletToken: pushBulletToken, PushBulletToken: pushBulletToken,
}, },
TFileUploadFolder: "uploadedTorrents", TFileUploadFolder: "uploadedTorrents",
TorrentConfig: tConfig, TorrentConfig: *tConfig,
DefaultMoveFolder: defaultMoveFolderAbs, DefaultMoveFolder: defaultMoveFolderAbs,
TorrentWatchFolder: torrentWatchFolderAbs, TorrentWatchFolder: torrentWatchFolderAbs,
MaxActiveTorrents: maxActiveTorrents,
} }
return Config return Config
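Since ServerPort is now read as a plain string without the leading colon, here is a hedged sketch of the port handling; the literal values and the use of strconv.Atoi are illustrative (the diff itself parses the torrent ListenAddr with strconv.ParseInt):

portRaw := viper.GetString("serverConfig.ServerPort") //e.g. "8000", no colon
httpAddr := viper.GetString("serverConfig.ServerAddr") + ":" + portRaw //colon re-added for the HTTP listener
listenPort, err := strconv.Atoi(portRaw)
if err != nil {
	fmt.Println("ServerPort is not a valid integer:", err)
}
tConfig := torrent.NewDefaultClientConfig()
tConfig.ListenPort = listenPort //the new torrent ClientConfig takes an int port instead of a listen address string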

View File

@@ -1,6 +1,7 @@
package storage package storage
import ( import (
"fmt"
"os" "os"
"path/filepath" "path/filepath"
@@ -16,6 +17,14 @@ var Logger *logrus.Logger
//Conn is the global websocket connection used to push server notification messages //Conn is the global websocket connection used to push server notification messages
var Conn *websocket.Conn var Conn *websocket.Conn
//TorrentQueues contains the active and queued torrent hashes in slices
type TorrentQueues struct {
ID int `storm:"id,unique"` //storm requires unique ID (will be 5)
ActiveTorrents []string
QueuedTorrents []string
ForcedTorrents []string
}
//IssuedTokensList contains a slice of all the tokens issued to applications //IssuedTokensList contains a slice of all the tokens issued to applications
type IssuedTokensList struct { type IssuedTokensList struct {
ID int `storm:"id,unique"` //storm requires unique ID (will be 3) to save although there will only be one of these ID int `storm:"id,unique"` //storm requires unique ID (will be 3) to save although there will only be one of these
@@ -69,20 +78,20 @@ type TorrentLocal struct {
DateAdded string DateAdded string
StoragePath string //The absolute value of the path where the torrent will be moved when completed StoragePath string //The absolute value of the path where the torrent will be moved when completed
TempStoragePath string //The absolute path of where the torrent is temporarily stored as it is downloaded TempStoragePath string //The absolute path of where the torrent is temporarily stored as it is downloaded
TorrentMoved bool TorrentMoved bool //Whether the completed torrent has been moved to its final storage location
TorrentName string TorrentName string
TorrentStatus string TorrentStatus string //"Stopped", "Running", "ForceStart"
TorrentUploadLimit bool //if true this torrent will bypass the upload storage limit (effectively unlimited) TorrentUploadLimit bool //if true this torrent will bypass the upload storage limit (effectively unlimited)
MaxConnections int MaxConnections int //Maximum number of connections the torrent may have open at one time
TorrentType string //magnet or .torrent file TorrentType string //magnet or .torrent file
TorrentFileName string //Should be just the name of the torrent TorrentFileName string //Should be just the name of the torrent
TorrentFile []byte TorrentFile []byte //If torrent was from .torrent file, store the entire file for re-adding on restart
Label string Label string //User-entered label to sort torrents by
UploadedBytes int64 UploadedBytes int64 //Total amount the client has uploaded on this torrent
DownloadedBytes int64 DownloadedBytes int64 //Total amount the client has downloaded on this torrent
TorrentSize int64 //If we cancel a file change the download size since we won't be downloading that file TorrentSize int64 //If we cancel a file change the download size since we won't be downloading that file
UploadRatio string UploadRatio string
TorrentFilePriority []TorrentFilePriority TorrentFilePriority []TorrentFilePriority //Slice of all the files the torrent contains and the priority of each file
} }
//SaveConfig saves the config to the database to compare for changes to settings.toml on restart //SaveConfig saves the config to the database to compare for changes to settings.toml on restart
@@ -94,6 +103,26 @@ func SaveConfig(torrentStorage *storm.DB, config Settings.FullClientSettings) {
} }
} }
//UpdateQueues saves the slices of hashes for the active and queued torrents
func UpdateQueues(db *storm.DB, torrentQueues TorrentQueues) {
torrentQueues.ID = 5
err := db.Save(&torrentQueues)
if err != nil {
Logger.WithFields(logrus.Fields{"database": db, "error": err}).Error("Unable to write Queues to database!")
}
}
//FetchQueues fetches the ActiveTorrents and QueuedTorrents slices from the database
func FetchQueues(db *storm.DB) TorrentQueues {
torrentQueues := TorrentQueues{}
err := db.One("ID", 5, &torrentQueues)
if err != nil {
Logger.WithFields(logrus.Fields{"database": db, "error": err}).Error("Unable to read Database into torrentQueues!")
return torrentQueues
}
return torrentQueues
}
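A brief usage sketch for the new queue persistence, written as it would be called from main.go elsewhere in this diff; promoting the first queued hash is purely illustrative:

// Read the queues bucket (fixed storm ID 5), move one hash from queued to active, write it back.
torrentQueues := Storage.FetchQueues(db)
if len(torrentQueues.QueuedTorrents) > 0 {
	promotedHash := torrentQueues.QueuedTorrents[0]
	torrentQueues.QueuedTorrents = torrentQueues.QueuedTorrents[1:]
	torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents, promotedHash)
	Storage.UpdateQueues(db, torrentQueues)
}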
//FetchConfig fetches the client config from the database //FetchConfig fetches the client config from the database
func FetchConfig(torrentStorage *storm.DB) (Settings.FullClientSettings, error) { func FetchConfig(torrentStorage *storm.DB) (Settings.FullClientSettings, error) {
config := Settings.FullClientSettings{} config := Settings.FullClientSettings{}
@@ -119,11 +148,11 @@ func FetchAllStoredTorrents(torrentStorage *storm.DB) (torrentLocalArray []*Torr
//AddTorrentLocalStorage is called when adding a new torrent via any method, requires the boltdb pointer and the torrentlocal struct //AddTorrentLocalStorage is called when adding a new torrent via any method, requires the boltdb pointer and the torrentlocal struct
func AddTorrentLocalStorage(torrentStorage *storm.DB, local TorrentLocal) { func AddTorrentLocalStorage(torrentStorage *storm.DB, local TorrentLocal) {
Logger.WithFields(logrus.Fields{"Storage Path": local.StoragePath, "Torrent": local.TorrentName, "File(if file)": local.TorrentFileName}).Info("Adding new Torrent to database") Logger.WithFields(logrus.Fields{"Storage Path": local.StoragePath, "Torrent": local.TorrentName, "File(if file)": local.TorrentFileName}).Info("Adding new Torrent to database")
fmt.Println("ENTIRE TORRENT", local)
err := torrentStorage.Save(&local) err := torrentStorage.Save(&local)
if err != nil { if err != nil {
Logger.WithFields(logrus.Fields{"database": torrentStorage, "error": err}).Error("Error adding new Torrent to database!") Logger.WithFields(logrus.Fields{"database": torrentStorage, "error": err}).Error("Error adding new Torrent to database!")
} }
} }
//DelTorrentLocalStorage is called to delete a torrent when we fail (for whatever reason to load the information for it). Deleted by HASH matching. //DelTorrentLocalStorage is called to delete a torrent when we fail (for whatever reason to load the information for it). Deleted by HASH matching.
@@ -166,6 +195,8 @@ func UpdateStorageTick(torrentStorage *storm.DB, torrentLocal TorrentLocal) {
err := torrentStorage.Update(&torrentLocal) err := torrentStorage.Update(&torrentLocal)
if err != nil { if err != nil {
Logger.WithFields(logrus.Fields{"UpdateContents": torrentLocal, "error": err}).Error("Error performing tick update to database!") Logger.WithFields(logrus.Fields{"UpdateContents": torrentLocal, "error": err}).Error("Error performing tick update to database!")
} else {
Logger.WithFields(logrus.Fields{"UpdateContents": torrentLocal, "error": err}).Debug("Performed Update to database!")
} }
} }