9 Commits

10 changed files with 547 additions and 190 deletions

View File

@@ -1,9 +1,9 @@
 [serverConfig]
-ServerPort = ":8000" #leave format as is it expects a string with colon
-ServerAddr = "192.168.1.8" #Put in the IP address you want to bind to
-LogLevel = "Info" # Options = Debug, Info, Warn, Error, Fatal, Panic
-LogOutput = "stdout" #Options = file, stdout #file will print it to logs/server.log
+ServerPort = "8000" #Required to input as string
+ServerAddr = "192.168.1.8" #Put in the IP address you want to bind to as string
+LogLevel = "Debug" # Options = Debug, Info, Warn, Error, Fatal, Panic
+LogOutput = "file" #Options = file, stdout #file will print it to logs/server.log
 SeedRatioStop = 1.50 #automatically stops the torrent after it reaches this seeding ratio
@@ -35,6 +35,13 @@
 #URL is CASE SENSITIVE
 BaseURL = "domain.com/subroute/" # MUST be in the format (if you have a subdomain, and must have trailing slash) "yoursubdomain.domain.org/subroute/"
+[socksProxy]
+SocksProxyEnabled = false #bool, either false or true
+# Sets usage of Socks5 Proxy. Authentication should be included in the url if needed.
+# Examples: socks5://demo:demo@192.168.99.100:1080
+# http://proxy.domain.com:3128
+SocksProxyURL = ""
 [EncryptionPolicy]
 DisableEncryption = false
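For context, the sketch below shows one way the new [socksProxy] keys could be read with viper (which this changeset pins in go.mod). The config file name, path, and the direct viper lookups are assumptions for illustration only; goTorrent's own Settings package may load and map these keys differently.

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Assumed file name and location for this sketch only; not taken from the goTorrent source.
	viper.SetConfigName("config")
	viper.AddConfigPath(".")
	if err := viper.ReadInConfig(); err != nil {
		panic(err)
	}
	// Keys correspond to the [socksProxy] table added above.
	if viper.GetBool("socksProxy.SocksProxyEnabled") {
		fmt.Println("proxying BitTorrent traffic through", viper.GetString("socksProxy.SocksProxyURL"))
	}
}
```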

View File

@@ -57,6 +57,50 @@ func CheckTorrentWatchFolder(c *cron.Cron, db *storm.DB, tclient *torrent.Client
 	})
 }
+//CheckTorrentsCron runs a upload ratio check, a queue check (essentially anything that should not be frontend dependent)
+func CheckTorrentsCron(c *cron.Cron, db *storm.DB, tclient *torrent.Client, config Settings.FullClientSettings) {
+	c.AddFunc("@every 30s", func() {
+		Logger.Debug("Running a torrent Ratio and Queue Check")
+		torrentLocalArray := Storage.FetchAllStoredTorrents(db)
+		torrentQueues := Storage.FetchQueues(db)
+		for _, singleTorrentFromStorage := range torrentLocalArray {
+			var singleTorrent *torrent.Torrent
+			for _, liveTorrent := range tclient.Torrents() { //matching the torrent from storage to the live torrent
+				if singleTorrentFromStorage.Hash == liveTorrent.InfoHash().String() {
+					singleTorrent = liveTorrent
+				}
+			}
+			calculatedCompletedSize := CalculateCompletedSize(singleTorrentFromStorage, singleTorrent)
+			bytesCompleted := CalculateCompletedSize(singleTorrentFromStorage, singleTorrent)
+			if float64(singleTorrentFromStorage.UploadedBytes)/float64(bytesCompleted) >= config.SeedRatioStop && singleTorrentFromStorage.TorrentUploadLimit == true { //If storage shows torrent stopped or if it is over the seeding ratio AND is under the global limit
+				Logger.WithFields(logrus.Fields{"Action: Stopping torrent due to seed Ratio": singleTorrentFromStorage.TorrentName}).Info()
+				StopTorrent(singleTorrent, singleTorrentFromStorage, db)
+			}
+			if len(torrentQueues.ActiveTorrents) < config.MaxActiveTorrents && singleTorrentFromStorage.TorrentStatus == "Queued" {
+				Logger.WithFields(logrus.Fields{"Action: Adding Torrent to Active Queue": singleTorrentFromStorage.TorrentName}).Info()
+				AddTorrentToActive(singleTorrentFromStorage, singleTorrent, db)
+			}
+			if (calculatedCompletedSize == singleTorrentFromStorage.TorrentSize) && (singleTorrentFromStorage.TorrentMoved == false) { //if we are done downloading and haven't moved torrent yet
+				Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrentFromStorage.TorrentName}).Info("Torrent Completed, moving...")
+				tStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) //Todo... find a better way to do this in the go-routine currently just to make sure it doesn't trigger multiple times
+				tStorage.TorrentMoved = true
+				Storage.UpdateStorageTick(db, tStorage)
+				go func() { //moving torrent in separate go-routine then verifying that the data is still there and correct
+					err := MoveAndLeaveSymlink(config, singleTorrent.InfoHash().String(), db, false, "") //can take some time to move file so running this in another thread TODO make this a goroutine and skip this block if the routine is still running
+					if err != nil { //If we fail, print the error and attempt a retry
+						Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrentFromStorage.TorrentName, "error": err}).Error("Failed to move Torrent!")
+						VerifyData(singleTorrent)
+						tStorage.TorrentMoved = false
+						Storage.UpdateStorageTick(db, tStorage)
+					}
+				}()
+			}
+		}
+		ValidateQueues(db, config, tclient) //Ensure we don't have too many in activeQueue
+	})
+}
 //RefreshRSSCron refreshes all of the RSS feeds on an hourly basis
 func RefreshRSSCron(c *cron.Cron, db *storm.DB, tclient *torrent.Client, torrentLocalStorage Storage.TorrentLocal, config Settings.FullClientSettings, torrentQueues Storage.TorrentQueues) {
 	c.AddFunc("@hourly", func() {
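The stop condition inside the new CheckTorrentsCron reduces to a single ratio test. Below is a standalone restatement of that check; the zero-completed guard and the function name are added here purely for illustration and are not part of the codebase (the hunk above divides directly).

```go
// shouldStopSeeding restates the check used in CheckTorrentsCron: stop once
// uploaded/completed reaches SeedRatioStop, but only for torrents whose
// per-torrent upload limit flag is enabled.
func shouldStopSeeding(uploadedBytes, bytesCompleted int64, seedRatioStop float64, uploadLimit bool) bool {
	if bytesCompleted == 0 {
		return false // guard added for this sketch; the cron job divides unconditionally
	}
	return float64(uploadedBytes)/float64(bytesCompleted) >= seedRatioStop && uploadLimit
}
```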

View File

@@ -32,10 +32,6 @@ func CreateServerPushMessage(message ServerPushMessage, conn *websocket.Conn) {
 	conn.WriteJSON(message)
 }
-func QueueJSONMessage(conn *websocket.Conn){
-}
 //RefreshSingleRSSFeed refreshing a single RSS feed to send to the client (so no updating database) mainly by updating the torrent list to display any changes
 func RefreshSingleRSSFeed(db *storm.DB, RSSFeed Storage.SingleRSSFeed) Storage.SingleRSSFeed { //Todo.. duplicate as cron job... any way to merge these to reduce duplication?
 	singleRSSFeed := Storage.SingleRSSFeed{URL: RSSFeed.URL, Name: RSSFeed.Name}
@@ -141,6 +137,7 @@ func AddTorrent(clientTorrent *torrent.Torrent, torrentLocalStorage Storage.Torr
 	}
 	var TempHash metainfo.Hash
 	TempHash = clientTorrent.InfoHash()
+	fmt.Println("GOT INFOHASH", TempHash.String())
 	allStoredTorrents := Storage.FetchAllStoredTorrents(db)
 	for _, runningTorrentHashes := range allStoredTorrents {
 		if runningTorrentHashes.Hash == TempHash.String() {
@@ -188,7 +185,6 @@ func AddTorrent(clientTorrent *torrent.Torrent, torrentLocalStorage Storage.Torr
 //CreateInitialTorrentArray adds all the torrents on program start from the database
 func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Storage.TorrentLocal, db *storm.DB, config Settings.FullClientSettings) {
 	for _, singleTorrentFromStorage := range TorrentLocalArray {
 		var singleTorrent *torrent.Torrent
 		var err error
 		if singleTorrentFromStorage.TorrentType == "file" { //if it is a file pull it from the uploaded torrent folder
@@ -202,7 +198,6 @@ func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
 			if err != nil {
 				continue
 			}
 		}
 		if len(singleTorrentFromStorage.InfoBytes) == 0 { //TODO.. kind of a fringe scenario.. not sure if needed since the db should always have the infobytes
 			timeOut := timeOutInfo(singleTorrent, 45)
@@ -218,13 +213,20 @@ func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
 			Logger.WithFields(logrus.Fields{"torrentFile": singleTorrent.Name(), "error": err}).Error("Unable to add infobytes to the torrent!")
 		}
 		torrentQueues := Storage.FetchQueues(db)
+		if singleTorrentFromStorage.TorrentStatus == "Stopped" {
+			singleTorrent.SetMaxEstablishedConns(0)
+			continue
+		}
+		if singleTorrentFromStorage.TorrentStatus == "ForceStart" {
+			AddTorrentToForceStart(singleTorrentFromStorage, singleTorrent, db)
+		}
 		if len(torrentQueues.ActiveTorrents) == 0 && len(torrentQueues.QueuedTorrents) == 0 { // If empty, run through all the torrents and assign them
-			if len(torrentQueues.ActiveTorrents) < Config.MaxActiveTorrents && singleTorrentFromStorage.TorrentStatus != "Stopped" {
+			if len(torrentQueues.ActiveTorrents) < Config.MaxActiveTorrents {
 				if singleTorrentFromStorage.TorrentStatus == "Completed" || singleTorrentFromStorage.TorrentStatus == "Seeding" {
 					Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Completed Torrents have lower priority, adding to Queued")
 					AddTorrentToQueue(singleTorrentFromStorage, singleTorrent, db)
 				} else {
-					Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Adding Torrent to Active Queue")
+					Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Adding Torrent to Active Queue (Initial Torrent Load)")
 					AddTorrentToActive(singleTorrentFromStorage, singleTorrent, db)
 				}
 			}
 		} else {
@@ -235,7 +237,8 @@ func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
 			if singleTorrentFromStorage.TorrentStatus == "Queued" {
 				AddTorrentToQueue(singleTorrentFromStorage, singleTorrent, db)
 			} else {
-				if len(torrentQueues.ActiveTorrents) < Config.MaxActiveTorrents && singleTorrentFromStorage.TorrentStatus != "Stopped" {
+				if len(torrentQueues.ActiveTorrents) < Config.MaxActiveTorrents {
+					Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Adding Torrent to Active Queue (Initial Torrent Load Second)")
 					AddTorrentToActive(singleTorrentFromStorage, singleTorrent, db)
 				} else {
 					AddTorrentToQueue(singleTorrentFromStorage, singleTorrent, db)
@@ -247,7 +250,7 @@ func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
 	}
 	torrentQueues := Storage.FetchQueues(db)
 	if len(torrentQueues.ActiveTorrents) < config.MaxActiveTorrents && len(torrentQueues.QueuedTorrents) > 0 { //after all the torrents are added, see if out active torrent list isn't full, then add from the queue
-		Logger.WithFields(logrus.Fields{"Max Active: ": config.MaxActiveTorrents, "Current : ": torrentQueues.ActiveTorrents}).Debug("Adding Torrents from queue to active to fill...")
+		Logger.WithFields(logrus.Fields{"Max Active: ": config.MaxActiveTorrents, "Current : ": torrentQueues.ActiveTorrents}).Info("Adding Torrents from queue to active to fill...")
 		maxCanSend := config.MaxActiveTorrents - len(torrentQueues.ActiveTorrents)
 		if maxCanSend > len(torrentQueues.QueuedTorrents) {
 			maxCanSend = len(torrentQueues.QueuedTorrents)
@@ -274,7 +277,7 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
 	for _, singleTorrentFromStorage := range TorrentLocalArray {
 		torrentQueues := Storage.FetchQueues(db)
 		var singleTorrent *torrent.Torrent
-		var TempHash metainfo.Hash
 		for _, liveTorrent := range tclient.Torrents() { //matching the torrent from storage to the live torrent
 			if singleTorrentFromStorage.Hash == liveTorrent.InfoHash().String() {
 				singleTorrent = liveTorrent
@@ -285,40 +288,26 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
 		//Handling deleted torrents here
 		if singleTorrentFromStorage.TorrentStatus == "Dropped" {
 			Logger.WithFields(logrus.Fields{"selection": singleTorrentFromStorage.TorrentName}).Info("Deleting just the torrent")
+			DeleteTorrentFromQueues(singleTorrentFromStorage.Hash, db)
 			singleTorrent.Drop()
 			Storage.DelTorrentLocalStorage(db, singleTorrentFromStorage.Hash)
 		}
 		if singleTorrentFromStorage.TorrentStatus == "DroppedData" {
 			Logger.WithFields(logrus.Fields{"selection": singleTorrentFromStorage.TorrentName}).Info("Deleting torrent and data")
 			singleTorrent.Drop()
+			DeleteTorrentFromQueues(singleTorrentFromStorage.Hash, db)
 			Storage.DelTorrentLocalStorageAndFiles(db, singleTorrentFromStorage.Hash, Config.TorrentConfig.DataDir)
 		}
 		if singleTorrentFromStorage.TorrentType == "file" { //if it is a file pull it from the uploaded torrent folder
 			fullClientDB.SourceType = "Torrent File"
 		} else {
 			fullClientDB.SourceType = "Magnet Link"
 		}
+		var TempHash metainfo.Hash
+		TempHash = singleTorrent.InfoHash()
 		calculatedTotalSize := CalculateDownloadSize(singleTorrentFromStorage, singleTorrent)
 		calculatedCompletedSize := CalculateCompletedSize(singleTorrentFromStorage, singleTorrent)
-		TempHash = singleTorrent.InfoHash()
-		if (calculatedCompletedSize == singleTorrentFromStorage.TorrentSize) && (singleTorrentFromStorage.TorrentMoved == false) { //if we are done downloading and haven't moved torrent yet
-			Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrentFromStorage.TorrentName}).Info("Torrent Completed, moving...")
-			tStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) //Todo... find a better way to do this in the go-routine currently just to make sure it doesn't trigger multiple times
-			tStorage.TorrentMoved = true
-			Storage.UpdateStorageTick(db, tStorage)
-			go func() { //moving torrent in separate go-routine then verifying that the data is still there and correct
-				err := MoveAndLeaveSymlink(config, singleTorrent.InfoHash().String(), db, false, "") //can take some time to move file so running this in another thread TODO make this a goroutine and skip this block if the routine is still running
-				if err != nil { //If we fail, print the error and attempt a retry
-					Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrentFromStorage.TorrentName, "error": err}).Error("Failed to move Torrent!")
-					VerifyData(singleTorrent)
-					tStorage.TorrentMoved = false
-					Storage.UpdateStorageTick(db, tStorage)
-				}
-			}()
-		}
 		fullStruct := singleTorrent.Stats()
 		activePeersString := strconv.Itoa(fullStruct.ActivePeers) //converting to strings
 		totalPeersString := fmt.Sprintf("%v", fullStruct.TotalPeers)
@@ -332,8 +321,8 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
 		PercentDone := fmt.Sprintf("%.2f", float32(calculatedCompletedSize)/float32(calculatedTotalSize))
 		fullClientDB.TorrentHash = TempHash
 		fullClientDB.PercentDone = PercentDone
-		fullClientDB.DataBytesRead = fullStruct.ConnStats.BytesReadData //used for calculations not passed to client calculating up/down speed
-		fullClientDB.DataBytesWritten = fullStruct.ConnStats.BytesWrittenData //used for calculations not passed to client calculating up/down speed
+		fullClientDB.DataBytesRead = fullStruct.ConnStats.BytesReadData.Int64() //used for calculations not passed to client calculating up/down speed
+		fullClientDB.DataBytesWritten = fullStruct.ConnStats.BytesWrittenData.Int64() //used for calculations not passed to client calculating up/down speed
 		fullClientDB.ActivePeers = activePeersString + " / (" + totalPeersString + ")"
 		fullClientDB.TorrentHashString = TempHash.String()
 		fullClientDB.TorrentName = singleTorrentFromStorage.TorrentName
@@ -347,25 +336,12 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
 			TempHash := singleTorrent.InfoHash()
 			if previousElement.TorrentHashString == TempHash.String() { //matching previous to new
 				CalculateTorrentSpeed(singleTorrent, fullClientDB, previousElement, calculatedCompletedSize)
-				fullClientDB.TotalUploadedBytes = singleTorrentFromStorage.UploadedBytes + (fullStruct.ConnStats.BytesWrittenData - previousElement.DataBytesWritten)
+				fullClientDB.TotalUploadedBytes = singleTorrentFromStorage.UploadedBytes + (fullStruct.ConnStats.BytesWrittenData.Int64() - previousElement.DataBytesWritten)
 			}
 		}
 		}
 		CalculateTorrentETA(singleTorrentFromStorage.TorrentSize, calculatedCompletedSize, fullClientDB) //needs to be here since we need the speed calculated before we can estimate the eta.
-		if (len(torrentQueues.ActiveTorrents) < config.MaxActiveTorrents) && (len(torrentQueues.QueuedTorrents) > 0) { //If there is room for another torrent in active torrents, add it.
-			var newTorrentHash string
-			for _, torrentHash := range torrentQueues.QueuedTorrents {
-				if singleTorrentFromStorage.TorrentStatus != "Stopped" {
-					newTorrentHash = torrentHash
-				}
-			}
-			for _, torrent := range tclient.Torrents() {
-				if newTorrentHash == torrent.InfoHash().String() {
-					AddTorrentToActive(singleTorrentFromStorage, singleTorrent, db)
-				}
-			}
-		}
 		fullClientDB.TotalUploadedSize = HumanizeBytes(float32(fullClientDB.TotalUploadedBytes))
 		fullClientDB.UploadRatio = CalculateUploadRatio(singleTorrent, fullClientDB) //calculate the upload ratio
@@ -380,7 +356,6 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
 		RunningTorrentArray = append(RunningTorrentArray, *fullClientDB)
 	}
-	ValidateQueues(db, config, tclient) //Ensure we don't have too many in activeQueue
 	return RunningTorrentArray
 }

View File

@@ -109,7 +109,7 @@ func CalculateTorrentSpeed(t *torrent.Torrent, c *ClientDB, oc ClientDB, complet
 	dt := float32(now.Sub(oc.UpdatedAt)) // get the delta time length between now and last updated
 	db := float32(bytes - oc.BytesCompleted) //getting the delta bytes
 	rate := db * (float32(time.Second) / dt) // converting into seconds
-	dbU := float32(bytesUpload - oc.DataBytesWritten)
+	dbU := float32(bytesUpload.Int64() - oc.DataBytesWritten)
 	rateUpload := dbU * (float32(time.Second) / dt)
 	if rate >= 0 {
 		rateMB := rate / 1024 / 1024 //creating MB to calculate ETA
@@ -186,7 +186,6 @@ func CalculateUploadRatio(t *torrent.Torrent, c *ClientDB) string {
 //StopTorrent stops the torrent, updates the database and sends a message. Since stoptorrent is called by each loop (individually) no need to call an array
 func StopTorrent(singleTorrent *torrent.Torrent, torrentLocalStorage *Storage.TorrentLocal, db *storm.DB) {
-	torrentQueues := Storage.FetchQueues(db)
 	if torrentLocalStorage.TorrentStatus == "Stopped" { //if we are already stopped
 		Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Torrent Already Stopped, returning...")
 		return
@@ -194,24 +193,58 @@ func StopTorrent(singleTorrent *torrent.Torrent, torrentLocalStorage *Storage.To
 	torrentLocalStorage.TorrentStatus = "Stopped"
 	torrentLocalStorage.MaxConnections = 0
 	singleTorrent.SetMaxEstablishedConns(0)
-	for _, torrentHash := range torrentQueues.ActiveTorrents { //pulling it out of activetorrents
-		if torrentHash == singleTorrent.InfoHash().String() {
 	DeleteTorrentFromQueues(singleTorrent.InfoHash().String(), db)
-		}
-	}
-	for _, torrentHash := range torrentQueues.QueuedTorrents { //pulling it out of queuedTorrent
-		if torrentHash == singleTorrent.InfoHash().String() {
-			DeleteTorrentFromQueues(singleTorrent.InfoHash().String(), db)
-		}
-	}
 	Storage.UpdateStorageTick(db, *torrentLocalStorage)
 	CreateServerPushMessage(ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "success", Payload: "Torrent Stopped!"}, Conn)
-	return
+	Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Torrent Stopped Success!")
+}
+//AddTorrentToForceStart forces torrent to be high priority on start
+func AddTorrentToForceStart(torrentLocalStorage *Storage.TorrentLocal, singleTorrent *torrent.Torrent, db *storm.DB) {
+	torrentQueues := Storage.FetchQueues(db)
+	for index, torrentHash := range torrentQueues.ActiveTorrents {
+		if torrentHash == singleTorrent.InfoHash().String() { //If torrent already in active remove from active
+			torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:index], torrentQueues.ActiveTorrents[index+1:]...)
+		}
+	}
+	for index, queuedTorrentHash := range torrentQueues.QueuedTorrents { //Removing from the queued torrents if in queued torrents
+		if queuedTorrentHash == singleTorrent.InfoHash().String() {
+			torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:index], torrentQueues.QueuedTorrents[index+1:]...)
+		}
+	}
+	singleTorrent.NewReader()
+	singleTorrent.SetMaxEstablishedConns(80)
+	torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents, singleTorrent.InfoHash().String())
+	torrentLocalStorage.TorrentStatus = "ForceStart"
+	torrentLocalStorage.MaxConnections = 80
+	for _, file := range singleTorrent.Files() {
+		for _, sentFile := range torrentLocalStorage.TorrentFilePriority {
+			if file.DisplayPath() == sentFile.TorrentFilePath {
+				switch sentFile.TorrentFilePriority {
+				case "High":
+					file.SetPriority(torrent.PiecePriorityHigh)
+				case "Normal":
+					file.SetPriority(torrent.PiecePriorityNormal)
+				case "Cancel":
+					file.SetPriority(torrent.PiecePriorityNone)
+				default:
+					file.SetPriority(torrent.PiecePriorityNormal)
+				}
+			}
+		}
+	}
+	Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Adding Torrent to ForceStart Queue")
+	Storage.UpdateStorageTick(db, *torrentLocalStorage)
+	Storage.UpdateQueues(db, torrentQueues)
 }
 //AddTorrentToActive adds a torrent to the active slice
 func AddTorrentToActive(torrentLocalStorage *Storage.TorrentLocal, singleTorrent *torrent.Torrent, db *storm.DB) {
 	torrentQueues := Storage.FetchQueues(db)
+	if torrentLocalStorage.TorrentStatus == "Stopped" {
+		Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Torrent set as stopped, skipping add")
+		return
+	}
 	for _, torrentHash := range torrentQueues.ActiveTorrents {
 		if torrentHash == singleTorrent.InfoHash().String() { //If torrent already in active skip
 			return
@@ -243,7 +276,8 @@ func AddTorrentToActive(torrentLocalStorage *Storage.TorrentLocal, singleTorrent
 			}
 		}
 	}
-	Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Adding Torrent to Active Queue")
+	Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Adding Torrent to Active Queue (Manual Call)")
+	Storage.UpdateStorageTick(db, *torrentLocalStorage)
 	Storage.UpdateQueues(db, torrentQueues)
 }
@@ -268,20 +302,26 @@ func RemoveTorrentFromActive(torrentLocalStorage *Storage.TorrentLocal, singleTo
 //DeleteTorrentFromQueues deletes the torrent from all queues (for a stop or delete action)
 func DeleteTorrentFromQueues(torrentHash string, db *storm.DB) {
 	torrentQueues := Storage.FetchQueues(db)
-	for x, torrentHashActive := range torrentQueues.ActiveTorrents {
+	for x, torrentHashActive := range torrentQueues.ActiveTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.
 		if torrentHash == torrentHashActive {
 			torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:x], torrentQueues.ActiveTorrents[x+1:]...)
-			Storage.UpdateQueues(db, torrentQueues)
-		} else {
-			for x, torrentHashQueued := range torrentQueues.QueuedTorrents {
+			Logger.Info("Removing Torrent from Active: ", torrentHash)
+		}
+	}
+	for x, torrentHashQueued := range torrentQueues.QueuedTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.
 		if torrentHash == torrentHashQueued {
 			torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:x], torrentQueues.QueuedTorrents[x+1:]...)
+			Logger.Info("Removing Torrent from Queued", torrentHash)
+		}
+	}
+	for x, torrentHashActive := range torrentQueues.ForcedTorrents { //FOR EXTRA CAUTION deleting it from all queues in case a mistake occurred.
+		if torrentHash == torrentHashActive {
+			torrentQueues.ForcedTorrents = append(torrentQueues.ForcedTorrents[:x], torrentQueues.ForcedTorrents[x+1:]...)
+			Logger.Info("Removing Torrent from Forced: ", torrentHash)
+		}
+	}
 	Storage.UpdateQueues(db, torrentQueues)
-				}
-			}
-		}
-	}
-	Logger.WithFields(logrus.Fields{"Torrent Hash": torrentHash}).Info("Removing Torrent from all Queues")
+	Logger.WithFields(logrus.Fields{"Torrent Hash": torrentHash, "TorrentQueues": torrentQueues}).Info("Removing Torrent from all Queues")
 }
 //AddTorrentToQueue adds a torrent to the queue
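All three loops in the reworked DeleteTorrentFromQueues use the same append-based slice-delete idiom. A generic helper along the lines below (hypothetical, not part of this changeset) captures the pattern:

```go
// removeHash drops the first occurrence of hash from a queue slice using the
// same append-based idiom as the Active/Queued/Forced loops above.
func removeHash(queue []string, hash string) []string {
	for i, h := range queue {
		if h == hash {
			return append(queue[:i], queue[i+1:]...)
		}
	}
	return queue
}
```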
@@ -331,6 +371,33 @@ func ValidateQueues(db *storm.DB, config Settings.FullClientSettings, tclient *t
 			}
 		}
 	}
+	torrentQueues = Storage.FetchQueues(db)
+	for _, singleTorrent := range tclient.Torrents() {
+		singleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
+		if singleTorrentFromStorage.TorrentStatus == "Stopped" {
+			continue
+		}
+		for _, queuedTorrent := range torrentQueues.QueuedTorrents { //If we have a queued torrent that is missing data, and an active torrent that is seeding, then prioritize the missing data one
+			if singleTorrent.InfoHash().String() == queuedTorrent {
+				if singleTorrent.BytesMissing() > 0 {
+					for _, activeTorrent := range torrentQueues.ActiveTorrents {
+						for _, singleActiveTorrent := range tclient.Torrents() {
+							if activeTorrent == singleActiveTorrent.InfoHash().String() {
+								if singleActiveTorrent.Seeding() == true {
+									singleActiveTFS := Storage.FetchTorrentFromStorage(db, activeTorrent)
+									Logger.WithFields(logrus.Fields{"TorrentName": singleActiveTFS.TorrentName}).Info("Seeding, Removing from active to add queued")
+									RemoveTorrentFromActive(&singleActiveTFS, singleActiveTorrent, db)
+									singleQueuedTFS := Storage.FetchTorrentFromStorage(db, queuedTorrent)
+									Logger.WithFields(logrus.Fields{"TorrentName": singleQueuedTFS.TorrentName}).Info("Adding torrent to the queue, not active")
+									AddTorrentToActive(&singleQueuedTFS, singleTorrent, db)
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+	}
 }
 //CalculateTorrentStatus is used to determine what the STATUS column of the frontend will display ll2
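The nested loops added to ValidateQueues encode one rule: a queued torrent that still has bytes missing should displace an active torrent that is only seeding. A simplified restatement (hypothetical helper, not in the diff):

```go
// shouldPromoteQueued restates the swap rule added to ValidateQueues: promote
// a queued torrent over an active one only when the queued torrent still has
// data missing and the active torrent is merely seeding.
func shouldPromoteQueued(queuedBytesMissing int64, activeIsSeeding bool) bool {
	return queuedBytesMissing > 0 && activeIsSeeding
}
```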
@@ -339,19 +406,13 @@ func CalculateTorrentStatus(t *torrent.Torrent, c *ClientDB, config Settings.Ful
 		c.Status = "Stopped"
 		return
 	}
-	if float64(c.TotalUploadedBytes)/float64(bytesCompleted) >= config.SeedRatioStop && tFromStorage.TorrentUploadLimit == true { //If storage shows torrent stopped or if it is over the seeding ratio AND is under the global limit
-		StopTorrent(t, tFromStorage, db)
-	} else { //Only has 2 states in storage, stopped or running, so we know it should be running, and the websocket request handled updating the database with connections and status
+	//Only has 2 states in storage, stopped or running, so we know it should be running, and the websocket request handled updating the database with connections and status
 	for _, torrentHash := range torrentQueues.QueuedTorrents {
 		if tFromStorage.Hash == torrentHash {
 			c.Status = "Queued"
 			return
 		}
 	}
-	if len(torrentQueues.ActiveTorrents) < config.MaxActiveTorrents && tFromStorage.TorrentStatus == "Queued" {
-		AddTorrentToActive(tFromStorage, t, db)
-	}
 	bytesMissing := totalSize - bytesCompleted
 	c.MaxConnections = 80
 	t.SetMaxEstablishedConns(80)
@@ -367,4 +428,3 @@ func CalculateTorrentStatus(t *torrent.Torrent, c *ClientDB, config Settings.Ful
 		c.Status = "Unknown"
 	}
 }
-}

go.mod (new file, 30 added lines)

@@ -0,0 +1,30 @@
module github.com/deranjer/goTorrent
go 1.12
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/DataDog/zstd v1.3.5 // indirect
github.com/PuerkitoBio/goquery v1.5.0 // indirect
github.com/Sereal/Sereal v0.0.0-20190226181601-237c2cca198f // indirect
github.com/anacrolix/dht v1.0.1
github.com/anacrolix/torrent v1.1.1
github.com/asdine/storm v2.1.2+incompatible
github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/golang/protobuf v1.3.1 // indirect
github.com/gorilla/handlers v1.4.0
github.com/gorilla/mux v1.7.0
github.com/gorilla/websocket v1.4.0
github.com/mitsuse/pushbullet-go v0.1.0
github.com/mmcdole/gofeed v1.0.0-beta2
github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf // indirect
github.com/otiai10/copy v1.0.1
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95 // indirect
github.com/robfig/cron v0.0.0-20180505203441-b41be1df6967
github.com/sirupsen/logrus v1.4.0
github.com/spf13/viper v1.3.2
github.com/vmihailenco/msgpack v4.0.3+incompatible // indirect
go.etcd.io/bbolt v1.3.2 // indirect
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
google.golang.org/appengine v1.5.0 // indirect
)

go.sum (new file, 230 added lines)

@@ -0,0 +1,230 @@
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
bou.ke/monkey v1.0.1 h1:zEMLInw9xvNakzUUPjfS4Ds6jYPqCFx3m7bRmG5NH2U=
bou.ke/monkey v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14=
github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/PuerkitoBio/goquery v1.5.0 h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=
github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
github.com/RoaringBitmap/roaring v0.4.17 h1:oCYFIFEMSQZrLHpywH7919esI1VSrQZ0pJXkZPGIJ78=
github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI=
github.com/Sereal/Sereal v0.0.0-20190226181601-237c2cca198f h1:99C4f5FJQChWyzMSpZPU4eUv3kjFmjxyWy8t2rlbUcs=
github.com/Sereal/Sereal v0.0.0-20190226181601-237c2cca198f/go.mod h1:D0JMgToj/WdxCgd30Kc1UcA9E+WdZoJqeVOuYW7iTBM=
github.com/anacrolix/dht v0.0.0-20180412060941-24cbf25b72a4/go.mod h1:hQfX2BrtuQsLQMYQwsypFAab/GvHg8qxwVi4OJdR1WI=
github.com/anacrolix/dht v0.0.0-20181129074040-b09db78595aa/go.mod h1:Ayu4t+5TsHQ07/P8XzRJqVofv7lU4R1ZTT7KW5+SPFA=
github.com/anacrolix/dht v1.0.1 h1:a7zVMiZWfPiToAUbjMZYeI3UvmsDP3j8vH5EDIAjM9c=
github.com/anacrolix/dht v1.0.1/go.mod h1:dtcIktBFD8YD/7ZcE5nQuuGGfLxcwa8+18mHl+GU+KA=
github.com/anacrolix/dht/v2 v2.0.1 h1:gOHJ+OKqJ4Eb48OYStZm4AlWr1/nSA2TWlzb/+t36SA=
github.com/anacrolix/dht/v2 v2.0.1/go.mod h1:GbTT8BaEtfqab/LPd5tY41f3GvYeii3mmDUK300Ycyo=
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa h1:xCaATLKmn39QqLs3tUZYr6eKvezJV+FYvVOLTklxK6U=
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
github.com/anacrolix/go-libutp v0.0.0-20180522111405-6baeb806518d/go.mod h1:beQSaSxwH2d9Eeu5ijrEnHei5Qhk+J6cDm1QkWFru4E=
github.com/anacrolix/go-libutp v0.0.0-20180808010927-aebbeb60ea05 h1:Zoniih3jyqtr3I0xFoMvw1USWpg+CbI/zOrcLudr0lc=
github.com/anacrolix/go-libutp v0.0.0-20180808010927-aebbeb60ea05/go.mod h1:POY/GPlrFKRxnOKH1sGAB+NBWMoP+sI+hHJxgcgWbWw=
github.com/anacrolix/log v0.0.0-20180412014343-2323884b361d/go.mod h1:sf/7c2aTldL6sRQj/4UKyjgVZBu2+M2z9wf7MmwPiew=
github.com/anacrolix/log v0.1.0/go.mod h1:sf/7c2aTldL6sRQj/4UKyjgVZBu2+M2z9wf7MmwPiew=
github.com/anacrolix/log v0.2.0 h1:LzaW6XTEk2zcmLZkcZPkJ2mDdnZkOdOTeBH7Kt81ouU=
github.com/anacrolix/log v0.2.0/go.mod h1:sf/7c2aTldL6sRQj/4UKyjgVZBu2+M2z9wf7MmwPiew=
github.com/anacrolix/missinggo v0.0.0-20180522035225-b4a5853e62ff/go.mod h1:b0p+7cn+rWMIphK1gDH2hrDuwGOcbB6V4VXeSsEfHVk=
github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s=
github.com/anacrolix/missinggo v0.0.0-20181129073415-3237bf955fed/go.mod h1:IN+9GUe7OxKMIs/XeXEbT/rMUolmJzmlZiXHS7FwD/Y=
github.com/anacrolix/missinggo v0.2.1-0.20190310234110-9fbdc9f242a8/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
github.com/anacrolix/missinggo v1.1.0 h1:0lZbaNa6zTR1bELAIzCNmRGAtkHuLDPJqTiTtXoAIx8=
github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw=
github.com/anacrolix/mmsg v0.0.0-20180808012353-5adb2c1127c0 h1:Fa1XqqLW62lQzEDlNA+QcdJbkfJcxQN0YC8983kj5tU=
github.com/anacrolix/mmsg v0.0.0-20180808012353-5adb2c1127c0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc=
github.com/anacrolix/sync v0.0.0-20171108081538-eee974e4f8c1/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w=
github.com/anacrolix/sync v0.0.0-20180611022320-3c4cb11f5a01/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w=
github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778 h1:XpCDEixzXOB8yaTW/4YBzKrJdMcFI0DzpPTYNv75wzk=
github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk=
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anacrolix/tagflag v0.0.0-20180605133421-f477c8c2f14c/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anacrolix/tagflag v0.0.0-20180803105420-3a8ff5428f76/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anacrolix/torrent v0.0.0-20180622074351-fefeef4ee9eb/go.mod h1:3vcFVxgOASslNXHdivT8spyMRBanMCenHRpe0u5vpBs=
github.com/anacrolix/torrent v1.0.1/go.mod h1:ZYV1Z2Wx3jXYSh26mDvneAbk8XIUxfvoVil2GW962zY=
github.com/anacrolix/torrent v1.1.1 h1:f54cvN3950x72hOB8UvzRwEbF5AY3VMj4vPyntgt24Q=
github.com/anacrolix/torrent v1.1.1/go.mod h1:XdYEuC3KuxFQZrQ6iUBXnwKr3IyxeyUlVH6RT8FhyaU=
github.com/anacrolix/utp v0.0.0-20180219060659-9e0e1d1d0572 h1:kpt6TQTVi6gognY+svubHfxxpq0DLU9AfTQyZVc3UOc=
github.com/anacrolix/utp v0.0.0-20180219060659-9e0e1d1d0572/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk=
github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asdine/storm v2.1.2+incompatible h1:dczuIkyqwY2LrtXPz8ixMrU/OFgZp71kbKTHGrXYt/Q=
github.com/asdine/storm v2.1.2+incompatible/go.mod h1:RarYDc9hq1UPLImuiXK3BIWPJLdIygvV3PsInK0FbVQ=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c h1:FUUopH4brHNO2kJoNN3pV+OBEYmgraLT/KHZrMM69r0=
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elgatito/upnp v0.0.0-20180711183757-2f244d205f9a h1:2Zw3pxDRTs4nX1WCLAEm27UN0hvjZSge7EaUUQexRZw=
github.com/elgatito/upnp v0.0.0-20180711183757-2f244d205f9a/go.mod h1:afkYpY8JAIL4341N7Zj9xJ5yTovsg6BkWfBFlCzIoF4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4=
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e h1:SiEs4J3BKVIeaWrH3tKaz3QLZhJ68iJ/A4xrzIoE5+Y=
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9 h1:Z0f701LpR4dqO92bP6TnIe3ZURClzJtBhds8R8u1HBE=
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v1.4.0 h1:XulKRWSQK5uChr4pEgSE4Tc/OcmnU9GJuSwdog/tZsA=
github.com/gorilla/handlers v1.4.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U=
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gosuri/uilive v0.0.0-20170323041506-ac356e6e42cd/go.mod h1:qkLSc0A5EXSP6B04TrN4oQoxqFI7A8XvoXSlJi8cwk8=
github.com/gosuri/uiprogress v0.0.0-20170224063937-d0567a9d84a1/go.mod h1:C1RTYn4Sc7iEyf6j8ft5dyoZ4212h8G1ol9QQluh5+0=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0=
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/ipfs/go-ipfs v0.4.18/go.mod h1:iXzbK+Wa6eePj3jQg/uY6Uoq5iOwY+GToD/bgaRadto=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-sqlite3 v1.7.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitsuse/pushbullet-go v0.1.0 h1:W9izHOpz8uilRBgbYSnqb+LZK/l8Ad4slRTCBFpItG0=
github.com/mitsuse/pushbullet-go v0.1.0/go.mod h1:sJ6Y3IROSfSQNLY/8gtYjq4Gs49DFnrxaqxQA6DVgnM=
github.com/mmcdole/gofeed v1.0.0-beta2 h1:CjQ0ADhAwNSb08zknAkGOEYqr8zfZKfrzgk9BxpWP2E=
github.com/mmcdole/gofeed v1.0.0-beta2/go.mod h1:/BF9JneEL2/flujm8XHoxUcghdTV6vvb3xx/vKyChFU=
github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf h1:sWGE2v+hO0Nd4yFU/S/mDBM5plIU8v/Qhfz41hkDIAI=
github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf/go.mod h1:pasqhqstspkosTneA62Nc+2p9SOBBYAPbnmRRWPQ0V8=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/otiai10/copy v1.0.1 h1:gtBjD8aq4nychvRZ2CyJvFWAw0aja+VHazDdruZKGZA=
github.com/otiai10/copy v1.0.1/go.mod h1:8bMCJrAqOtN/d9oyh5HR7HhLQMvcGMpGdwRDYsfOCHc=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95 h1:+OLn68pqasWca0z5ryit9KGfp3sUsW4Lqg32iRMJyzs=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/mint v1.2.3 h1:PsrRBmrxR68kyNu6YlqYHbNlItc5vOkuS6LBEsNttVA=
github.com/otiai10/mint v1.2.3/go.mod h1:YnfyPNhBvnY8bW4SGQHCs/aAFhkgySlMZbrF5U0bOVw=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron v0.0.0-20180505203441-b41be1df6967 h1:x7xEyJDP7Hv3LVgvWhzioQqbC/KtuUhTigKlH/8ehhE=
github.com/robfig/cron v0.0.0-20180505203441-b41be1df6967/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/sirupsen/logrus v1.4.0 h1:yKenngtzGh+cUSSh6GWbxW2abRqhYUSR/t/6+2QqNvE=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac h1:wbW+Bybf9pXxnCFAOWZTqkRjAc7rAIwo2e1ArUhiHxg=
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff h1:86HlEv0yBCry9syNuylzqznKXDK11p6D0DT596yNMys=
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/syncthing/syncthing v0.14.48-rc.4/go.mod h1:nw3siZwHPA6M8iSfjDCWQ402eqvEIasMQOE8nFOxy7M=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/vmihailenco/msgpack v4.0.3+incompatible h1:g+G529Dqo4BY2Gxn5GKENa/3NVK+mu/6hM7G3jEWszQ=
github.com/vmihailenco/msgpack v4.0.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bloom v0.0.0-20170505221640-54e3b963ee16/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA=
github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190318221613-d196dffd7c2b h1:ZWpVMTsK0ey5WJCu+vVdfMldWq7/ezaOcjnKWIHWVkE=
golang.org/x/net v0.0.0-20190318221613-d196dffd7c2b/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190318195719-6c81ef8f67ca h1:o2TLx1bGN3W+Ei0EMU5fShLupLmTOU95KvJJmfYhAzM=
golang.org/x/sys v0.0.0-20190318195719-6c81ef8f67ca/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

main.go
View File

@@ -33,6 +33,7 @@ var (
//Authenticated stores whether the connected client has successfully authenticated to the server //Authenticated stores whether the connected client has successfully authenticated to the server
Authenticated = false Authenticated = false
APP_ID = os.Getenv("APP_ID") APP_ID = os.Getenv("APP_ID")
sendJSON = make(chan interface{}) //channel for JSON messages
) )
var upgrader = websocket.Upgrader{ var upgrader = websocket.Upgrader{
@@ -48,10 +49,17 @@ func serveHome(w http.ResponseWriter, r *http.Request) {
s1.ExecuteTemplate(w, "base", map[string]string{"APP_ID": APP_ID}) s1.ExecuteTemplate(w, "base", map[string]string{"APP_ID": APP_ID})
} }
//handleMessages serializes all outgoing JSON messages onto a single websocket writer so responses to the client are sent one at a time, in order
func handleMessages(conn *websocket.Conn) {
for {
msgJSON := <-sendJSON
conn.WriteJSON(msgJSON)
}
}
func handleAuthentication(conn *websocket.Conn, db *storm.DB) { func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
msg := Engine.Message{} msg := Engine.Message{}
err := conn.ReadJSON(&msg) err := conn.ReadJSON(&msg)
//conn.WriteJSON(msg) //TODO just for testing, remove
payloadData, ok := msg.Payload.(map[string]interface{}) payloadData, ok := msg.Payload.(map[string]interface{})
clientAuthToken, tokenOk := payloadData["ClientAuthString"].(string) clientAuthToken, tokenOk := payloadData["ClientAuthString"].(string)
fmt.Println("ClientAuthToken:", clientAuthToken, "TokenOkay", tokenOk, "PayloadData", payloadData, "PayloadData Okay?", ok) fmt.Println("ClientAuthToken:", clientAuthToken, "TokenOkay", tokenOk, "PayloadData", payloadData, "PayloadData Okay?", ok)
@@ -65,6 +73,7 @@ func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": clientAuthToken}).Error("Unable to read authentication message") Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": clientAuthToken}).Error("Unable to read authentication message")
} }
fmt.Println("Authstring", clientAuthToken) fmt.Println("Authstring", clientAuthToken)
//clientAuthToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjbGllbnROYW1lIjoiZ29Ub3JyZW50V2ViVUkiLCJpc3MiOiJnb1RvcnJlbnRTZXJ2ZXIifQ.Lfqp9tm06CY4XfrqnNDeVLkq9c7rsbibDrUdPko8ffQ"
signingKeyStruct := Storage.FetchJWTTokens(db) signingKeyStruct := Storage.FetchJWTTokens(db)
singingKey := signingKeyStruct.SigningKey singingKey := signingKeyStruct.SigningKey
token, err := jwt.Parse(clientAuthToken, func(token *jwt.Token) (interface{}, error) { token, err := jwt.Parse(clientAuthToken, func(token *jwt.Token) (interface{}, error) {
@@ -77,6 +86,7 @@ func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
authFail := Engine.AuthResponse{MessageType: "authResponse", Payload: "Parsing of Token failed, ensure you have the correct token! Closing Connection"} authFail := Engine.AuthResponse{MessageType: "authResponse", Payload: "Parsing of Token failed, ensure you have the correct token! Closing Connection"}
conn.WriteJSON(authFail) conn.WriteJSON(authFail)
Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": token}).Error("Unable to parse token!") Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": token}).Error("Unable to parse token!")
fmt.Println("ENTIRE SUPPLIED TOKEN:", token, "CLIENTAUTHTOKEN", clientAuthToken)
conn.Close() conn.Close()
return return
} }
@@ -86,7 +96,7 @@ func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
fmt.Println("Claims", claims["ClientName"], claims["Issuer"]) fmt.Println("Claims", claims["ClientName"], claims["Issuer"])
Authenticated = true Authenticated = true
} else { } else {
Logger.WithFields(logrus.Fields{"error": err}).Error("Authentication Error occured, cannot complete!") Logger.WithFields(logrus.Fields{"error": err}).Error("Authentication Error occurred, cannot complete!")
} }
} }
@@ -204,6 +214,7 @@ func main() {
} }
Engine.CheckTorrentWatchFolder(cronEngine, db, tclient, torrentLocalStorage, Config, torrentQueues) //Every 5 minutes the engine will check the specified folder for new .torrent files Engine.CheckTorrentWatchFolder(cronEngine, db, tclient, torrentLocalStorage, Config, torrentQueues) //Every 5 minutes the engine will check the specified folder for new .torrent files
Engine.RefreshRSSCron(cronEngine, db, tclient, torrentLocalStorage, Config, torrentQueues) // Refreshing the RSS feeds on an hourly basis to add torrents that show up in the RSS feed Engine.RefreshRSSCron(cronEngine, db, tclient, torrentLocalStorage, Config, torrentQueues) // Refreshing the RSS feeds on an hourly basis to add torrents that show up in the RSS feed
Engine.CheckTorrentsCron(cronEngine, db, tclient, Config) //Every 30 seconds all torrents are checked to see if queue changes need to be made or they need to be stopped due to ratio
router := mux.NewRouter() //setting up the handler for the web backend router := mux.NewRouter() //setting up the handler for the web backend
router.HandleFunc("/", serveHome) //Serving the main page for our SPA router.HandleFunc("/", serveHome) //Serving the main page for our SPA
@@ -241,6 +252,8 @@ func main() {
Engine.Conn = conn Engine.Conn = conn
Storage.Conn = conn Storage.Conn = conn
go handleMessages(conn) //Starting the single writer goroutine that sends every queued JSON response back to the client
MessageLoop: //Labelled so we can continue out of the loop on any errors we encounter MessageLoop: //Labelled so we can continue out of the loop on any errors we encounter
for { for {
runningTorrents := tclient.Torrents() //getting running torrents here since multiple cases ask for the running torrents runningTorrents := tclient.Torrents() //getting running torrents here since multiple cases ask for the running torrents
@@ -278,31 +291,30 @@ func main() {
tokensDB := Storage.FetchJWTTokens(db) tokensDB := Storage.FetchJWTTokens(db)
tokensDB.TokenNames = append(tokens.TokenNames, Storage.SingleToken{payloadData["ClientName"].(string)}) tokensDB.TokenNames = append(tokens.TokenNames, Storage.SingleToken{payloadData["ClientName"].(string)})
db.Update(&tokensDB) //adding the new token client name to the database db.Update(&tokensDB) //adding the new token client name to the database
conn.WriteJSON(tokenReturn) sendJSON <- tokenReturn
case "torrentListRequest": case "torrentListRequest": //This will run automatically if a webUI is open
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested TorrentList Update") Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested TorrentList Update")
go func() { //running updates in separate thread so can still accept commands go func() { //running updates in separate thread so can still accept commands
TorrentLocalArray = Storage.FetchAllStoredTorrents(db) //Required to re-read the database since we write to the DB and this will pull the changes from it TorrentLocalArray = Storage.FetchAllStoredTorrents(db) //Required to re-read the database since we write to the DB and this will pull the changes from it
RunningTorrentArray = Engine.CreateRunningTorrentArray(tclient, TorrentLocalArray, PreviousTorrentArray, Config, db) //Updates the RunningTorrentArray with the current client data as well RunningTorrentArray = Engine.CreateRunningTorrentArray(tclient, TorrentLocalArray, PreviousTorrentArray, Config, db) //Updates the RunningTorrentArray with the current client data as well
PreviousTorrentArray = RunningTorrentArray PreviousTorrentArray = RunningTorrentArray
torrentlistArray := Engine.TorrentList{MessageType: "torrentList", ClientDBstruct: RunningTorrentArray, Totaltorrents: len(RunningTorrentArray)} torrentlistArray := Engine.TorrentList{MessageType: "torrentList", ClientDBstruct: RunningTorrentArray, Totaltorrents: len(RunningTorrentArray)}
Logger.WithFields(logrus.Fields{"torrentList": torrentlistArray, "previousTorrentList": PreviousTorrentArray}).Debug("Previous and Current Torrent Lists for sending to client") Logger.WithFields(logrus.Fields{"torrentList": torrentlistArray, "previousTorrentList": PreviousTorrentArray}).Debug("Previous and Current Torrent Lists for sending to client")
conn.WriteJSON(torrentlistArray) sendJSON <- torrentlistArray
}() }()
case "torrentFileListRequest": //client requested a filelist update case "torrentFileListRequest": //client requested a filelist update
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested FileList Update") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested FileList Update")
fileListArrayRequest := payloadData["FileListHash"].(string) fileListArrayRequest := payloadData["FileListHash"].(string)
FileListArray := Engine.CreateFileListArray(tclient, fileListArrayRequest, db, Config) FileListArray := Engine.CreateFileListArray(tclient, fileListArrayRequest, db, Config)
conn.WriteJSON(FileListArray) //writing the JSON to the client sendJSON <- FileListArray
case "torrentPeerListRequest": case "torrentPeerListRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested PeerList Update") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested PeerList Update")
peerListArrayRequest := payloadData["PeerListHash"].(string) peerListArrayRequest := payloadData["PeerListHash"].(string)
torrentPeerList := Engine.CreatePeerListArray(tclient, peerListArrayRequest) torrentPeerList := Engine.CreatePeerListArray(tclient, peerListArrayRequest)
conn.WriteJSON(torrentPeerList) sendJSON <- torrentPeerList
case "fetchTorrentsByLabel": case "fetchTorrentsByLabel":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Torrents by Label") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Torrents by Label")
@@ -317,7 +329,7 @@ func main() {
} }
} }
} }
conn.WriteJSON(labelRunningArray) sendJSON <- labelRunningArray
case "changeStorageValue": case "changeStorageValue":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Storage Location Update") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Storage Location Update")
@@ -343,7 +355,7 @@ func main() {
case "settingsFileRequest": case "settingsFileRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Settings File") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Settings File")
clientSettingsFile := Engine.SettingsFile{MessageType: "settingsFile", Config: Config} clientSettingsFile := Engine.SettingsFile{MessageType: "settingsFile", Config: Config}
conn.WriteJSON(clientSettingsFile) sendJSON <- clientSettingsFile
case "rssFeedRequest": case "rssFeedRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested RSS Update") Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested RSS Update")
@@ -355,7 +367,7 @@ func main() {
RSSsingleFeed.RSSFeedURL = singleFeed.URL RSSsingleFeed.RSSFeedURL = singleFeed.URL
RSSJSONFeed.RSSFeeds = append(RSSJSONFeed.RSSFeeds, RSSsingleFeed) RSSJSONFeed.RSSFeeds = append(RSSJSONFeed.RSSFeeds, RSSsingleFeed)
} }
conn.WriteJSON(RSSJSONFeed) sendJSON <- RSSJSONFeed
case "addRSSFeed": case "addRSSFeed":
newRSSFeed := payloadData["RSSURL"].(string) newRSSFeed := payloadData["RSSURL"].(string)
@@ -400,7 +412,7 @@ func main() {
UpdatedRSSFeed := Engine.RefreshSingleRSSFeed(db, Storage.FetchSpecificRSSFeed(db, RSSFeedURL)) UpdatedRSSFeed := Engine.RefreshSingleRSSFeed(db, Storage.FetchSpecificRSSFeed(db, RSSFeedURL))
TorrentRSSList := Engine.SingleRSSFeedMessage{MessageType: "rssTorrentList", URL: RSSFeedURL, Name: UpdatedRSSFeed.Name, TotalTorrents: len(UpdatedRSSFeed.Torrents), Torrents: UpdatedRSSFeed.Torrents} TorrentRSSList := Engine.SingleRSSFeedMessage{MessageType: "rssTorrentList", URL: RSSFeedURL, Name: UpdatedRSSFeed.Name, TotalTorrents: len(UpdatedRSSFeed.Torrents), Torrents: UpdatedRSSFeed.Torrents}
Logger.WithFields(logrus.Fields{"TorrentRSSList": TorrentRSSList}).Info("Returning Torrent list from RSSFeed to client") Logger.WithFields(logrus.Fields{"TorrentRSSList": TorrentRSSList}).Info("Returning Torrent list from RSSFeed to client")
conn.WriteJSON(TorrentRSSList) sendJSON <- TorrentRSSList
case "magnetLinkSubmit": //if we detect a magnet link we will be adding a magnet torrent case "magnetLinkSubmit": //if we detect a magnet link we will be adding a magnet torrent
storageValue, ok := payloadData["StorageValue"].(string) storageValue, ok := payloadData["StorageValue"].(string)
@@ -510,15 +522,6 @@ func main() {
Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for stopping torrents") Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for stopping torrents")
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.StopTorrent(singleTorrent, &oldTorrentInfo, db) Engine.StopTorrent(singleTorrent, &oldTorrentInfo, db)
if len(torrentQueues.QueuedTorrents) > 1 {
addTorrent := torrentQueues.QueuedTorrents[:1]
for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == addTorrent[0] {
Engine.AddTorrentToActive(&torrentLocalStorage, singleTorrent, db)
}
}
}
} }
} }
} }
@@ -534,18 +537,6 @@ func main() {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
torrentQueues = Storage.FetchQueues(db) torrentQueues = Storage.FetchQueues(db)
for index, activeTorrentHash := range torrentQueues.ActiveTorrents { //If torrent is in the active slice, pull it
if singleTorrent.InfoHash().String() == activeTorrentHash {
singleTorrent.SetMaxEstablishedConns(0)
torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:index], torrentQueues.ActiveTorrents[index+1:]...)
}
}
for index, queuedTorrentHash := range torrentQueues.QueuedTorrents { //If torrent is in the queued slice, pull it
if singleTorrent.InfoHash().String() == queuedTorrentHash {
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:index], torrentQueues.QueuedTorrents[index+1:]...)
}
}
Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for deleting torrents") Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for deleting torrents")
if withData { if withData {
oldTorrentInfo.TorrentStatus = "DroppedData" //Will be cleaned up the next engine loop since deleting a torrent mid loop can cause issues oldTorrentInfo.TorrentStatus = "DroppedData" //Will be cleaned up the next engine loop since deleting a torrent mid loop can cause issues
@@ -567,16 +558,19 @@ func main() {
if singleTorrent.InfoHash().String() == singleSelection { if singleTorrent.InfoHash().String() == singleSelection {
Logger.WithFields(logrus.Fields{"infoHash": singleTorrent.InfoHash().String()}).Info("Found matching torrent to start") Logger.WithFields(logrus.Fields{"infoHash": singleTorrent.InfoHash().String()}).Info("Found matching torrent to start")
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.AddTorrentToActive(&oldTorrentInfo, singleTorrent, db)
Logger.WithFields(logrus.Fields{"Torrent": oldTorrentInfo.TorrentName}).Info("Changing database to torrent running with 80 max connections") Logger.WithFields(logrus.Fields{"Torrent": oldTorrentInfo.TorrentName}).Info("Changing database to torrent running with 80 max connections")
oldTorrentInfo.TorrentStatus = "ForceStart"
oldTorrentInfo.MaxConnections = 80
Storage.UpdateStorageTick(db, oldTorrentInfo) //Updating the torrent status Storage.UpdateStorageTick(db, oldTorrentInfo) //Updating the torrent status
Engine.AddTorrentToForceStart(&oldTorrentInfo, singleTorrent, db)
} }
torrentQueues = Storage.FetchQueues(db) torrentQueues = Storage.FetchQueues(db)
if len(torrentQueues.ActiveTorrents) > Config.MaxActiveTorrents { //Since we are starting a new torrent, stop the first torrent in the queue if the active list is full if len(torrentQueues.ActiveTorrents) > Config.MaxActiveTorrents { //Since we are starting a new torrent, stop the last torrent in the queue if the active list is full
//removeTorrent := torrentQueues.ActiveTorrents[len(torrentQueues.ActiveTorrents)-1] //removeTorrent := torrentQueues.ActiveTorrents[len(torrentQueues.ActiveTorrents)-1]
removeTorrent := torrentQueues.ActiveTorrents[:1] removeTorrent := torrentQueues.ActiveTorrents[len(torrentQueues.ActiveTorrents)-1]
for _, singleTorrent := range runningTorrents { for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == removeTorrent[0] { if singleTorrent.InfoHash().String() == removeTorrent {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.RemoveTorrentFromActive(&oldTorrentInfo, singleTorrent, db) Engine.RemoveTorrentFromActive(&oldTorrentInfo, singleTorrent, db)
Storage.UpdateStorageTick(db, oldTorrentInfo) Storage.UpdateStorageTick(db, oldTorrentInfo)
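
The queue changes above are ordinary slice surgery on the stored hash lists; the field and helper names in the following sketch are illustrative, not the project's actual API:

package sketch

//removeHash drops one info-hash from a queue slice while preserving order
func removeHash(queue []string, hash string) []string {
    for i, h := range queue {
        if h == hash {
            return append(queue[:i], queue[i+1:]...)
        }
    }
    return queue
}

//demoteLast moves the most recently activated torrent back to the queued list
//once the active list exceeds the configured maximum
func demoteLast(active, queued []string, max int) ([]string, []string) {
    if len(active) > max {
        last := active[len(active)-1]
        active = active[:len(active)-1]
        queued = append(queued, last)
    }
    return active, queued
}
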
@@ -647,14 +641,13 @@ func main() {
} }
default: default:
//conn.Close()
Logger.WithFields(logrus.Fields{"message": msg}).Info("Unrecognized Message from client... ignoring") Logger.WithFields(logrus.Fields{"message": msg}).Info("Unrecognized Message from client... ignoring")
return return
} }
} }
}) })
if Config.UseProxy { if Config.UseReverseProxy {
err := http.ListenAndServe(httpAddr, handlers.ProxyHeaders(router)) err := http.ListenAndServe(httpAddr, handlers.ProxyHeaders(router))
if err != nil { if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Fatal("Unable to listen on the http Server!") Logger.WithFields(logrus.Fields{"error": err}).Fatal("Unable to listen on the http Server!")

View File

@@ -42,7 +42,7 @@ func GenerateClientConfigFile(config FullClientSettings, authString string) {
` `
} }
if config.UseProxy { if config.UseReverseProxy {
clientFile = ` clientFile = `
ClientAuthString = "` + authString + `" ClientAuthString = "` + authString + `"
` + webUIAuth + ` ` + webUIAuth + `

View File

@@ -4,6 +4,7 @@ import (
"crypto/sha256" "crypto/sha256"
"fmt" "fmt"
"path/filepath" "path/filepath"
"strconv"
"strings" "strings"
"golang.org/x/time/rate" "golang.org/x/time/rate"
@@ -17,12 +18,15 @@ import (
//Logger is the injected variable for global logger //Logger is the injected variable for global logger
var Logger *logrus.Logger var Logger *logrus.Logger
//ClientConnectSettings contains all the settings for connecting and authenticating to the server
type ClientConnectSettings struct { type ClientConnectSettings struct {
HTTPAddr string HTTPAddr string
HTTPAddrIP string HTTPAddrIP string
UseProxy bool UseReverseProxy bool
UseSocksProxy bool
WebsocketClientPort string WebsocketClientPort string
BaseURL string BaseURL string
SocksProxyURL string
ClientUsername string ClientUsername string
ClientPassword string ClientPassword string
PushBulletToken string `json:"-"` PushBulletToken string `json:"-"`
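
Note the json:"-" tag on PushBulletToken: it keeps the token out of anything marshalled for the frontend. A tiny illustration with a stand-in struct:

package sketch

import (
    "encoding/json"
    "fmt"
)

type connectSettings struct {
    BaseURL         string
    PushBulletToken string `json:"-"` //never serialized, mirroring the field above
}

func demo() {
    b, _ := json.Marshal(connectSettings{BaseURL: "example.org/sub/", PushBulletToken: "secret"})
    fmt.Println(string(b)) //prints {"BaseURL":"example.org/sub/"} with the token omitted
}
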
@@ -34,7 +38,7 @@ type FullClientSettings struct {
LoggingLevel logrus.Level LoggingLevel logrus.Level
LoggingOutput string LoggingOutput string
Version int Version int
TorrentConfig torrent.Config `json:"-"` TorrentConfig torrent.ClientConfig `json:"-"`
TFileUploadFolder string TFileUploadFolder string
SeedRatioStop float64 SeedRatioStop float64
DefaultMoveFolder string DefaultMoveFolder string
@@ -48,16 +52,16 @@ func defaultConfig() FullClientSettings {
var Config FullClientSettings var Config FullClientSettings
Config.ID = 4 //Unique ID for StormDB Config.ID = 4 //Unique ID for StormDB
Config.Version = 1.0 Config.Version = 1.0
Config.LoggingLevel = 3 //Warn level Config.LoggingLevel = logrus.WarnLevel //Warn level
Config.TorrentConfig.DataDir = "downloads" //the absolute or relative path of the default download directory for torrents Config.TorrentConfig.DataDir = "downloads" //the absolute or relative path of the default download directory for torrents
Config.TFileUploadFolder = "uploadedTorrents" Config.TFileUploadFolder = "uploadedTorrents"
Config.TorrentConfig.Seed = true Config.TorrentConfig.Seed = true
Config.HTTPAddr = ":8000" Config.HTTPAddr = ":8000"
Config.SeedRatioStop = 1.50 Config.SeedRatioStop = 1.50
Config.TorrentConfig.DHTConfig = dht.ServerConfig{ //Config.TorrentConfig.DhtStartingNodes = dht.StartingNodesGetter{
StartingNodes: dht.GlobalBootstrapAddrs, // StartingNodes: dht.GlobalBootstrapAddrs,
} //}
return Config return Config
} }
@@ -118,6 +122,7 @@ func FullClientSettingsNew() FullClientSettings {
var httpAddr string var httpAddr string
var baseURL string var baseURL string
var socksProxyURLBase string
var websocketClientPort string var websocketClientPort string
var logLevel logrus.Level var logLevel logrus.Level
//logging //logging
@@ -125,24 +130,25 @@ func FullClientSettingsNew() FullClientSettings {
logOutput := viper.GetString("serverConfig.LogOutput") logOutput := viper.GetString("serverConfig.LogOutput")
switch logLevelString { //Options = Debug 5, Info 4, Warn 3, Error 2, Fatal 1, Panic 0 switch logLevelString { //Options = Debug 5, Info 4, Warn 3, Error 2, Fatal 1, Panic 0
case "Panic": case "Panic":
logLevel = 0 logLevel = logrus.PanicLevel
case "Fatal": case "Fatal":
logLevel = 1 logLevel = logrus.FatalLevel
case "Error": case "Error":
logLevel = 2 logLevel = logrus.ErrorLevel
case "Warn": case "Warn":
logLevel = 3 logLevel = logrus.WarnLevel
case "Info": case "Info":
logLevel = 4 logLevel = logrus.InfoLevel
case "Debug": case "Debug":
logLevel = 5 logLevel = logrus.DebugLevel
default: default:
logLevel = 3 logLevel = logrus.WarnLevel
} }
//HTTP, proxy //HTTP, proxy
httpAddrIP := viper.GetString("serverConfig.ServerAddr") httpAddrIP := viper.GetString("serverConfig.ServerAddr")
httpAddrPort := viper.GetString("serverConfig.ServerPort") httpAddrPortRaw := viper.GetString("serverConfig.ServerPort")
httpAddrPort := ":" + httpAddrPortRaw //adding the separator required for running the webui
httpAddr = httpAddrIP + httpAddrPort httpAddr = httpAddrIP + httpAddrPort
proxySet := viper.GetBool("reverseProxy.ProxyEnabled") proxySet := viper.GetBool("reverseProxy.ProxyEnabled")
websocketClientPort = strings.TrimLeft(viper.GetString("serverConfig.ServerPort"), ":") //Trimming off the colon in front of the port websocketClientPort = strings.TrimLeft(viper.GetString("serverConfig.ServerPort"), ":") //Trimming off the colon in front of the port
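
The string-to-level switch above could also lean on logrus's own parser, which accepts the same names case-insensitively; a hedged alternative sketch with the same Warn fallback:

package sketch

import "github.com/sirupsen/logrus"

func parseLogLevel(s string) logrus.Level {
    lvl, err := logrus.ParseLevel(s) //"Debug", "Info", "Warn", "Error", "Fatal", "Panic" all parse
    if err != nil {
        return logrus.WarnLevel //same default the switch statement falls back to
    }
    return lvl
}
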
@@ -150,6 +156,10 @@ func FullClientSettingsNew() FullClientSettings {
baseURL = viper.GetString("reverseProxy.BaseURL") baseURL = viper.GetString("reverseProxy.BaseURL")
fmt.Println("WebsocketClientPort", viper.GetString("serverConfig.ServerPort")) fmt.Println("WebsocketClientPort", viper.GetString("serverConfig.ServerPort"))
} }
socksProxySet := viper.GetBool("socksProxy.SocksProxyEnabled")
if socksProxySet {
socksProxyURLBase = viper.GetString("socksProxy.SocksProxyURL")
}
//Client Authentication //Client Authentication
clientAuthEnabled := viper.GetBool("goTorrentWebUI.WebUIAuth") clientAuthEnabled := viper.GetBool("goTorrentWebUI.WebUIAuth")
var webUIUser string var webUIUser string
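
Since the socks proxy is configured as a single URL string (for example socks5://user:pass@host:1080), a cheap sanity check with net/url before handing it to the client can save debugging time later; the helper below is purely illustrative:

package sketch

import (
    "fmt"
    "net/url"
)

//validateProxyURL checks that the configured proxy string parses and has a scheme and host;
//it does not verify that the proxy is actually reachable
func validateProxyURL(raw string) error {
    u, err := url.Parse(raw)
    if err != nil {
        return fmt.Errorf("invalid proxy URL %q: %v", raw, err)
    }
    if u.Scheme == "" || u.Host == "" {
        return fmt.Errorf("proxy URL %q needs a scheme and host, e.g. socks5://user:pass@host:1080", raw)
    }
    return nil
}
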
@@ -201,13 +211,18 @@ func FullClientSettingsNew() FullClientSettings {
disableIPv6 := viper.GetBool("torrentClientConfig.DisableIPv6") disableIPv6 := viper.GetBool("torrentClientConfig.DisableIPv6")
debug := viper.GetBool("torrentClientConfig.Debug") debug := viper.GetBool("torrentClientConfig.Debug")
dhtServerConfig := dht.ServerConfig{ //dhtServerConfig := dht.StartingNodesGetter()
StartingNodes: dht.GlobalBootstrapAddrs,
} //if viper.IsSet("DHTConfig") {
if viper.IsSet("DHTConfig") { // fmt.Println("Reading in custom DHT config")
fmt.Println("Reading in custom DHT config") // dhtServerConfig = dhtServerSettings(dhtServerConfig)
dhtServerConfig = dhtServerSettings(dhtServerConfig) //}
strippedDHTPort := strings.TrimPrefix(listenAddr, ":")
DHTPortInt64, err := strconv.ParseInt(strippedDHTPort, 10, 0)
if err != nil {
fmt.Println("Failed creating 64-bit integer for goTorrent Port!", err)
} }
DHTPortInt := int(DHTPortInt64) //converting to integer
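
The port handling above trims the leading colon and converts the remainder for ListenPort; strconv.Atoi does the same conversion in one step, as in this small sketch:

package sketch

import (
    "strconv"
    "strings"
)

//portFromAddr turns an address such as ":8000" into the integer 8000,
//returning a fallback when the value does not parse
func portFromAddr(addr string, fallback int) int {
    p, err := strconv.Atoi(strings.TrimPrefix(addr, ":"))
    if err != nil {
        return fallback
    }
    return p
}
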
encryptionPolicy := torrent.EncryptionPolicy{ encryptionPolicy := torrent.EncryptionPolicy{
DisableEncryption: viper.GetBool("EncryptionPolicy.DisableEncryption"), DisableEncryption: viper.GetBool("EncryptionPolicy.DisableEncryption"),
@@ -215,23 +230,22 @@ func FullClientSettingsNew() FullClientSettings {
PreferNoEncryption: viper.GetBool("EncryptionPolicy.PreferNoEncryption"), PreferNoEncryption: viper.GetBool("EncryptionPolicy.PreferNoEncryption"),
} }
tConfig := torrent.Config{ tConfig := torrent.NewDefaultClientConfig()
DataDir: dataDirAbs,
ListenAddr: listenAddr, tConfig.DataDir = dataDirAbs
DisablePEX: disablePex, tConfig.ListenPort = DHTPortInt
NoDHT: noDHT, tConfig.DisablePEX = disablePex
DHTConfig: dhtServerConfig, tConfig.NoDHT = noDHT
NoUpload: noUpload, tConfig.NoUpload = noUpload
Seed: seed, tConfig.Seed = seed
UploadRateLimiter: uploadRateLimiter, tConfig.UploadRateLimiter = uploadRateLimiter
DownloadRateLimiter: downloadRateLimiter, tConfig.DownloadRateLimiter = downloadRateLimiter
PeerID: peerID, tConfig.PeerID = peerID
DisableUTP: disableUTP, tConfig.DisableUTP = disableUTP
DisableTCP: disableTCP, tConfig.DisableTCP = disableTCP
DisableIPv6: disableIPv6, tConfig.DisableIPv6 = disableIPv6
Debug: debug, tConfig.Debug = debug
EncryptionPolicy: encryptionPolicy, tConfig.EncryptionPolicy = encryptionPolicy
}
Config := FullClientSettings{ Config := FullClientSettings{
LoggingLevel: logLevel, LoggingLevel: logLevel,
@@ -240,15 +254,17 @@ func FullClientSettingsNew() FullClientSettings {
ClientConnectSettings: ClientConnectSettings{ ClientConnectSettings: ClientConnectSettings{
HTTPAddr: httpAddr, HTTPAddr: httpAddr,
HTTPAddrIP: httpAddrIP, HTTPAddrIP: httpAddrIP,
UseProxy: proxySet, UseReverseProxy: proxySet,
UseSocksProxy: socksProxySet,
WebsocketClientPort: websocketClientPort, WebsocketClientPort: websocketClientPort,
ClientUsername: webUIUser, ClientUsername: webUIUser,
ClientPassword: webUIPasswordHash, ClientPassword: webUIPasswordHash,
BaseURL: baseURL, BaseURL: baseURL,
SocksProxyURL: socksProxyURLBase,
PushBulletToken: pushBulletToken, PushBulletToken: pushBulletToken,
}, },
TFileUploadFolder: "uploadedTorrents", TFileUploadFolder: "uploadedTorrents",
TorrentConfig: tConfig, TorrentConfig: *tConfig,
DefaultMoveFolder: defaultMoveFolderAbs, DefaultMoveFolder: defaultMoveFolderAbs,
TorrentWatchFolder: torrentWatchFolderAbs, TorrentWatchFolder: torrentWatchFolderAbs,
MaxActiveTorrents: maxActiveTorrents, MaxActiveTorrents: maxActiveTorrents,
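
Once the ClientConfig has been populated this way, the anacrolix/torrent client is normally built straight from that pointer; a minimal sketch of the hand-off (values here are illustrative):

package sketch

import (
    "log"

    "github.com/anacrolix/torrent"
)

func newTorrentClient() *torrent.Client {
    cfg := torrent.NewDefaultClientConfig() //same constructor used by the settings code above
    cfg.DataDir = "downloads"               //illustrative overrides
    cfg.Seed = true

    client, err := torrent.NewClient(cfg) //NewClient takes the *ClientConfig
    if err != nil {
        log.Fatalf("could not start torrent client: %v", err)
    }
    return client
}
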

View File

@@ -1,6 +1,7 @@
package storage package storage
import ( import (
"fmt"
"os" "os"
"path/filepath" "path/filepath"
@@ -21,6 +22,7 @@ type TorrentQueues struct {
ID int `storm:"id,unique"` //storm requires unique ID (will be 5) ID int `storm:"id,unique"` //storm requires unique ID (will be 5)
ActiveTorrents []string ActiveTorrents []string
QueuedTorrents []string QueuedTorrents []string
ForcedTorrents []string
} }
//IssuedTokensList contains a slice of all the tokens issued to applications //IssuedTokensList contains a slice of all the tokens issued to applications
@@ -76,20 +78,20 @@ type TorrentLocal struct {
DateAdded string DateAdded string
StoragePath string //The absolute value of the path where the torrent will be moved when completed StoragePath string //The absolute value of the path where the torrent will be moved when completed
TempStoragePath string //The absolute path of where the torrent is temporarily stored as it is downloaded TempStoragePath string //The absolute path of where the torrent is temporarily stored as it is downloaded
TorrentMoved bool TorrentMoved bool //If completed has the torrent been moved to the end location
TorrentName string TorrentName string
TorrentStatus string TorrentStatus string //"Stopped", "Running", "ForceStart"
TorrentUploadLimit bool //if true this torrent will bypass the upload storage limit (effectively unlimited) TorrentUploadLimit bool //if true this torrent will bypass the upload storage limit (effectively unlimited)
MaxConnections int MaxConnections int //Max connections that the torrent can have to it at one time
TorrentType string //magnet or .torrent file TorrentType string //magnet or .torrent file
TorrentFileName string //Should be just the name of the torrent TorrentFileName string //Should be just the name of the torrent
TorrentFile []byte TorrentFile []byte //If torrent was from .torrent file, store the entire file for re-adding on restart
Label string Label string //User enterable label to sort torrents by
UploadedBytes int64 UploadedBytes int64 //Total amount the client has uploaded on this torrent
DownloadedBytes int64 DownloadedBytes int64 //Total amount the client has downloaded on this torrent
TorrentSize int64 //If we cancel a file change the download size since we won't be downloading that file TorrentSize int64 //If we cancel a file change the download size since we won't be downloading that file
UploadRatio string UploadRatio string
TorrentFilePriority []TorrentFilePriority TorrentFilePriority []TorrentFilePriority //Slice of all the files the torrent contains and the priority of each file
} }
//SaveConfig saves the config to the database to compare for changes to settings.toml on restart //SaveConfig saves the config to the database to compare for changes to settings.toml on restart
@@ -146,11 +148,11 @@ func FetchAllStoredTorrents(torrentStorage *storm.DB) (torrentLocalArray []*Torr
//AddTorrentLocalStorage is called when adding a new torrent via any method, requires the boltdb pointer and the torrentlocal struct //AddTorrentLocalStorage is called when adding a new torrent via any method, requires the boltdb pointer and the torrentlocal struct
func AddTorrentLocalStorage(torrentStorage *storm.DB, local TorrentLocal) { func AddTorrentLocalStorage(torrentStorage *storm.DB, local TorrentLocal) {
Logger.WithFields(logrus.Fields{"Storage Path": local.StoragePath, "Torrent": local.TorrentName, "File(if file)": local.TorrentFileName}).Info("Adding new Torrent to database") Logger.WithFields(logrus.Fields{"Storage Path": local.StoragePath, "Torrent": local.TorrentName, "File(if file)": local.TorrentFileName}).Info("Adding new Torrent to database")
fmt.Println("ENTIRE TORRENT", local)
err := torrentStorage.Save(&local) err := torrentStorage.Save(&local)
if err != nil { if err != nil {
Logger.WithFields(logrus.Fields{"database": torrentStorage, "error": err}).Error("Error adding new Torrent to database!") Logger.WithFields(logrus.Fields{"database": torrentStorage, "error": err}).Error("Error adding new Torrent to database!")
} }
} }
//DelTorrentLocalStorage is called to delete a torrent when we fail (for whatever reason) to load the information for it. Deleted by HASH matching. //DelTorrentLocalStorage is called to delete a torrent when we fail (for whatever reason) to load the information for it. Deleted by HASH matching.
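
The storage layer is asdine/storm on top of bbolt, so most calls in this file reduce to Save, One and DeleteStruct on tagged structs; a compact round-trip sketch with a hypothetical record type (not the project's TorrentLocal):

package sketch

import (
    "log"

    "github.com/asdine/storm"
)

type record struct {
    Hash string `storm:"id,unique"` //primary key, matching the hash-keyed lookups used here
    Name string
}

func roundTrip() {
    db, err := storm.Open("example.db")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    r := record{Hash: "abc123", Name: "example"}
    db.Save(&r) //insert or update by the id field

    var fetched record
    db.One("Hash", "abc123", &fetched) //look up by any field, here the id

    db.DeleteStruct(&fetched) //delete by the struct's id
}
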