5 Commits
0.6.0 ... 0.7.0

8 changed files with 275 additions and 174 deletions

View File

@@ -35,6 +35,13 @@
#URL is CASE SENSITIVE
BaseURL = "domain.com/subroute/" # MUST be in the format (if you have a subdomain, and must have trailing slash) "yoursubdomain.domain.org/subroute/"
[socksProxy]
SocksProxyEnabled = false #bool, either false or true
# Enables use of a SOCKS5 (or HTTP) proxy. Include authentication in the URL if needed.
# Examples: socks5://demo:demo@192.168.99.100:1080
# http://proxy.domain.com:3128
SocksProxyURL = ""
[EncryptionPolicy]
DisableEncryption = false
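For reference, a hypothetical sketch (not part of this diff) of how a configured SocksProxyURL in one of the documented formats could be validated before use; the helper name and the accepted schemes are assumptions based on the comments above.

```go
import (
	"fmt"
	"net/url"
)

// validateProxyURL checks that a SocksProxyURL matches the documented formats,
// e.g. socks5://demo:demo@192.168.99.100:1080 or http://proxy.domain.com:3128.
func validateProxyURL(raw string) error {
	u, err := url.Parse(raw)
	if err != nil {
		return fmt.Errorf("SocksProxyURL is not a valid URL: %v", err)
	}
	if u.Scheme != "socks5" && u.Scheme != "http" {
		return fmt.Errorf("unsupported proxy scheme %q", u.Scheme)
	}
	if u.Host == "" {
		return fmt.Errorf("SocksProxyURL is missing a host:port")
	}
	return nil // credentials, if any, ride along in u.User
}
```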

View File

@@ -57,6 +57,53 @@ func CheckTorrentWatchFolder(c *cron.Cron, db *storm.DB, tclient *torrent.Client
})
}
//CheckTorrentsCron runs an upload ratio check and a queue check (essentially anything that should not be frontend dependent)
func CheckTorrentsCron(c *cron.Cron, db *storm.DB, tclient *torrent.Client, config Settings.FullClientSettings) {
c.AddFunc("@every 30s", func() {
Logger.Debug("Running a torrent Ratio and Queue Check")
torrentLocalArray := Storage.FetchAllStoredTorrents(db)
torrentQueues := Storage.FetchQueues(db)
for _, singleTorrentFromStorage := range torrentLocalArray {
//torrentQueues := Storage.FetchQueues(db)
var singleTorrent *torrent.Torrent
for _, liveTorrent := range tclient.Torrents() { //matching the torrent from storage to the live torrent
if singleTorrentFromStorage.Hash == liveTorrent.InfoHash().String() {
singleTorrent = liveTorrent
}
}
//var TempHash metainfo.Hash
//calculatedTotalSize := CalculateDownloadSize(singleTorrentFromStorage, singleTorrent)
calculatedCompletedSize := CalculateCompletedSize(singleTorrentFromStorage, singleTorrent)
//TempHash = singleTorrent.InfoHash()
bytesCompleted := CalculateCompletedSize(singleTorrentFromStorage, singleTorrent)
if float64(singleTorrentFromStorage.UploadedBytes)/float64(bytesCompleted) >= config.SeedRatioStop && singleTorrentFromStorage.TorrentUploadLimit == true { //If the torrent is over the seeding ratio AND is subject to the global ratio limit, stop it
StopTorrent(singleTorrent, singleTorrentFromStorage, db)
}
if len(torrentQueues.ActiveTorrents) < config.MaxActiveTorrents && singleTorrentFromStorage.TorrentStatus == "Queued" {
Logger.WithFields(logrus.Fields{"Action: Adding Torrent to Active Queue": singleTorrentFromStorage.TorrentName}).Info()
AddTorrentToActive(singleTorrentFromStorage, singleTorrent, db)
}
if (calculatedCompletedSize == singleTorrentFromStorage.TorrentSize) && (singleTorrentFromStorage.TorrentMoved == false) { //if we are done downloading and haven't moved torrent yet
Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrentFromStorage.TorrentName}).Info("Torrent Completed, moving...")
tStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) //TODO: find a better way to do this; the flag is set here, before the goroutine, just to make sure the move doesn't trigger multiple times
tStorage.TorrentMoved = true
Storage.UpdateStorageTick(db, tStorage)
go func() { //moving torrent in separate go-routine then verifying that the data is still there and correct
err := MoveAndLeaveSymlink(config, singleTorrent.InfoHash().String(), db, false, "") //moving the file can take some time, so it runs in this goroutine; TODO: skip this block if a previous move is still running
if err != nil { //If we fail, print the error and attempt a retry
Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrentFromStorage.TorrentName, "error": err}).Error("Failed to move Torrent!")
VerifyData(singleTorrent)
tStorage.TorrentMoved = false
Storage.UpdateStorageTick(db, tStorage)
}
}()
}
}
ValidateQueues(db, config, tclient) //Ensure we don't have too many in activeQueue
})
}
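For clarity, the ratio check inside CheckTorrentsCron boils down to the comparison below. This is a standalone, hypothetical helper written only for illustration; it also guards the zero-completed case, which the inline check does not.

```go
// shouldStopSeeding mirrors the stop condition used in CheckTorrentsCron:
// stop once uploaded/completed reaches the configured SeedRatioStop, but only
// for torrents flagged with TorrentUploadLimit (i.e. subject to the ratio stop).
func shouldStopSeeding(uploadedBytes, completedBytes int64, seedRatioStop float64, uploadLimit bool) bool {
	if !uploadLimit || completedBytes == 0 {
		return false // limit not applied, or nothing downloaded yet
	}
	return float64(uploadedBytes)/float64(completedBytes) >= seedRatioStop
}
```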
//RefreshRSSCron refreshes all of the RSS feeds on an hourly basis
func RefreshRSSCron(c *cron.Cron, db *storm.DB, tclient *torrent.Client, torrentLocalStorage Storage.TorrentLocal, config Settings.FullClientSettings, torrentQueues Storage.TorrentQueues) {
c.AddFunc("@hourly", func() {

View File

@@ -32,10 +32,6 @@ func CreateServerPushMessage(message ServerPushMessage, conn *websocket.Conn) {
conn.WriteJSON(message)
}
func QueueJSONMessage(conn *websocket.Conn){
}
//RefreshSingleRSSFeed refreshes a single RSS feed to send to the client (without updating the database), mainly updating the torrent list to display any changes
func RefreshSingleRSSFeed(db *storm.DB, RSSFeed Storage.SingleRSSFeed) Storage.SingleRSSFeed { //TODO: duplicated in the cron job; any way to merge these to reduce duplication?
singleRSSFeed := Storage.SingleRSSFeed{URL: RSSFeed.URL, Name: RSSFeed.Name}
@@ -141,6 +137,7 @@ func AddTorrent(clientTorrent *torrent.Torrent, torrentLocalStorage Storage.Torr
}
var TempHash metainfo.Hash
TempHash = clientTorrent.InfoHash()
fmt.Println("GOT INFOHASH", TempHash.String())
allStoredTorrents := Storage.FetchAllStoredTorrents(db)
for _, runningTorrentHashes := range allStoredTorrents {
if runningTorrentHashes.Hash == TempHash.String() {
@@ -188,7 +185,6 @@ func AddTorrent(clientTorrent *torrent.Torrent, torrentLocalStorage Storage.Torr
//CreateInitialTorrentArray adds all the torrents from the database on program start
func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Storage.TorrentLocal, db *storm.DB, config Settings.FullClientSettings) {
for _, singleTorrentFromStorage := range TorrentLocalArray {
var singleTorrent *torrent.Torrent
var err error
if singleTorrentFromStorage.TorrentType == "file" { //if it is a file pull it from the uploaded torrent folder
@@ -202,7 +198,6 @@ func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
if err != nil {
continue
}
}
if len(singleTorrentFromStorage.InfoBytes) == 0 { //TODO.. kind of a fringe scenario.. not sure if needed since the db should always have the infobytes
timeOut := timeOutInfo(singleTorrent, 45)
@@ -218,13 +213,20 @@ func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
Logger.WithFields(logrus.Fields{"torrentFile": singleTorrent.Name(), "error": err}).Error("Unable to add infobytes to the torrent!")
}
torrentQueues := Storage.FetchQueues(db)
if singleTorrentFromStorage.TorrentStatus == "Stopped" {
singleTorrent.SetMaxEstablishedConns(0)
continue
}
if singleTorrentFromStorage.TorrentStatus == "ForceStart" {
AddTorrentToForceStart(singleTorrentFromStorage, singleTorrent, db)
}
if len(torrentQueues.ActiveTorrents) == 0 && len(torrentQueues.QueuedTorrents) == 0 { // If empty, run through all the torrents and assign them
if len(torrentQueues.ActiveTorrents) < Config.MaxActiveTorrents && singleTorrentFromStorage.TorrentStatus != "Stopped" {
if len(torrentQueues.ActiveTorrents) < Config.MaxActiveTorrents {
if singleTorrentFromStorage.TorrentStatus == "Completed" || singleTorrentFromStorage.TorrentStatus == "Seeding" {
Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Completed Torrents have lower priority, adding to Queued")
AddTorrentToQueue(singleTorrentFromStorage, singleTorrent, db)
} else {
Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Adding Torrent to Active Queue")
Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Adding Torrent to Active Queue (Initial Torrent Load)")
AddTorrentToActive(singleTorrentFromStorage, singleTorrent, db)
}
} else {
@@ -235,7 +237,8 @@ func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
if singleTorrentFromStorage.TorrentStatus == "Queued" {
AddTorrentToQueue(singleTorrentFromStorage, singleTorrent, db)
} else {
if len(torrentQueues.ActiveTorrents) < Config.MaxActiveTorrents && singleTorrentFromStorage.TorrentStatus != "Stopped" {
if len(torrentQueues.ActiveTorrents) < Config.MaxActiveTorrents {
Logger.WithFields(logrus.Fields{"Torrent Name": singleTorrentFromStorage.TorrentName}).Info("Adding Torrent to Active Queue (Initial Torrent Load Second)")
AddTorrentToActive(singleTorrentFromStorage, singleTorrent, db)
} else {
AddTorrentToQueue(singleTorrentFromStorage, singleTorrent, db)
@@ -247,7 +250,7 @@ func CreateInitialTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
}
torrentQueues := Storage.FetchQueues(db)
if len(torrentQueues.ActiveTorrents) < config.MaxActiveTorrents && len(torrentQueues.QueuedTorrents) > 0 { //after all the torrents are added, see if our active torrent list isn't full, then add from the queue
Logger.WithFields(logrus.Fields{"Max Active: ": config.MaxActiveTorrents, "Current : ": torrentQueues.ActiveTorrents}).Debug("Adding Torrents from queue to active to fill...")
Logger.WithFields(logrus.Fields{"Max Active: ": config.MaxActiveTorrents, "Current : ": torrentQueues.ActiveTorrents}).Info("Adding Torrents from queue to active to fill...")
maxCanSend := config.MaxActiveTorrents - len(torrentQueues.ActiveTorrents)
if maxCanSend > len(torrentQueues.QueuedTorrents) {
maxCanSend = len(torrentQueues.QueuedTorrents)
@@ -274,7 +277,7 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
for _, singleTorrentFromStorage := range TorrentLocalArray {
torrentQueues := Storage.FetchQueues(db)
var singleTorrent *torrent.Torrent
var TempHash metainfo.Hash
for _, liveTorrent := range tclient.Torrents() { //matching the torrent from storage to the live torrent
if singleTorrentFromStorage.Hash == liveTorrent.InfoHash().String() {
singleTorrent = liveTorrent
@@ -285,40 +288,26 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
//Handling deleted torrents here
if singleTorrentFromStorage.TorrentStatus == "Dropped" {
Logger.WithFields(logrus.Fields{"selection": singleTorrentFromStorage.TorrentName}).Info("Deleting just the torrent")
DeleteTorrentFromQueues(singleTorrentFromStorage.Hash, db)
singleTorrent.Drop()
Storage.DelTorrentLocalStorage(db, singleTorrentFromStorage.Hash)
}
if singleTorrentFromStorage.TorrentStatus == "DroppedData" {
Logger.WithFields(logrus.Fields{"selection": singleTorrentFromStorage.TorrentName}).Info("Deleting torrent and data")
singleTorrent.Drop()
DeleteTorrentFromQueues(singleTorrentFromStorage.Hash, db)
Storage.DelTorrentLocalStorageAndFiles(db, singleTorrentFromStorage.Hash, Config.TorrentConfig.DataDir)
}
if singleTorrentFromStorage.TorrentType == "file" { //if it is a file pull it from the uploaded torrent folder
fullClientDB.SourceType = "Torrent File"
} else {
fullClientDB.SourceType = "Magnet Link"
}
var TempHash metainfo.Hash
TempHash = singleTorrent.InfoHash()
calculatedTotalSize := CalculateDownloadSize(singleTorrentFromStorage, singleTorrent)
calculatedCompletedSize := CalculateCompletedSize(singleTorrentFromStorage, singleTorrent)
TempHash = singleTorrent.InfoHash()
if (calculatedCompletedSize == singleTorrentFromStorage.TorrentSize) && (singleTorrentFromStorage.TorrentMoved == false) { //if we are done downloading and haven't moved torrent yet
Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrentFromStorage.TorrentName}).Info("Torrent Completed, moving...")
tStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String()) //Todo... find a better way to do this in the go-routine currently just to make sure it doesn't trigger multiple times
tStorage.TorrentMoved = true
Storage.UpdateStorageTick(db, tStorage)
go func() { //moving torrent in separate go-routine then verifying that the data is still there and correct
err := MoveAndLeaveSymlink(config, singleTorrent.InfoHash().String(), db, false, "") //can take some time to move file so running this in another thread TODO make this a goroutine and skip this block if the routine is still running
if err != nil { //If we fail, print the error and attempt a retry
Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrentFromStorage.TorrentName, "error": err}).Error("Failed to move Torrent!")
VerifyData(singleTorrent)
tStorage.TorrentMoved = false
Storage.UpdateStorageTick(db, tStorage)
}
}()
}
fullStruct := singleTorrent.Stats()
activePeersString := strconv.Itoa(fullStruct.ActivePeers) //converting to strings
totalPeersString := fmt.Sprintf("%v", fullStruct.TotalPeers)
@@ -332,8 +321,8 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
PercentDone := fmt.Sprintf("%.2f", float32(calculatedCompletedSize)/float32(calculatedTotalSize))
fullClientDB.TorrentHash = TempHash
fullClientDB.PercentDone = PercentDone
fullClientDB.DataBytesRead = fullStruct.ConnStats.BytesReadData //used for calculations not passed to client calculating up/down speed
fullClientDB.DataBytesWritten = fullStruct.ConnStats.BytesWrittenData //used for calculations not passed to client calculating up/down speed
fullClientDB.DataBytesRead = fullStruct.ConnStats.BytesReadData.Int64() //used for the up/down speed calculations; not passed to the client
fullClientDB.DataBytesWritten = fullStruct.ConnStats.BytesWrittenData.Int64() //used for the up/down speed calculations; not passed to the client
fullClientDB.ActivePeers = activePeersString + " / (" + totalPeersString + ")"
fullClientDB.TorrentHashString = TempHash.String()
fullClientDB.TorrentName = singleTorrentFromStorage.TorrentName
@@ -347,25 +336,12 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
TempHash := singleTorrent.InfoHash()
if previousElement.TorrentHashString == TempHash.String() { //matching previous to new
CalculateTorrentSpeed(singleTorrent, fullClientDB, previousElement, calculatedCompletedSize)
fullClientDB.TotalUploadedBytes = singleTorrentFromStorage.UploadedBytes + (fullStruct.ConnStats.BytesWrittenData - previousElement.DataBytesWritten)
fullClientDB.TotalUploadedBytes = singleTorrentFromStorage.UploadedBytes + (fullStruct.ConnStats.BytesWrittenData.Int64() - previousElement.DataBytesWritten)
}
}
}
CalculateTorrentETA(singleTorrentFromStorage.TorrentSize, calculatedCompletedSize, fullClientDB) //needs to be here since we need the speed calculated before we can estimate the eta.
if (len(torrentQueues.ActiveTorrents) < config.MaxActiveTorrents) && (len(torrentQueues.QueuedTorrents) > 0) { //If there is room for another torrent in active torrents, add it.
var newTorrentHash string
for _, torrentHash := range torrentQueues.QueuedTorrents {
if singleTorrentFromStorage.TorrentStatus != "Stopped" {
newTorrentHash = torrentHash
}
}
for _, torrent := range tclient.Torrents() {
if newTorrentHash == torrent.InfoHash().String() {
AddTorrentToActive(singleTorrentFromStorage, singleTorrent, db)
}
}
}
fullClientDB.TotalUploadedSize = HumanizeBytes(float32(fullClientDB.TotalUploadedBytes))
fullClientDB.UploadRatio = CalculateUploadRatio(singleTorrent, fullClientDB) //calculate the upload ratio
@@ -380,7 +356,6 @@ func CreateRunningTorrentArray(tclient *torrent.Client, TorrentLocalArray []*Sto
RunningTorrentArray = append(RunningTorrentArray, *fullClientDB)
}
ValidateQueues(db, config, tclient) //Ensure we don't have too many in activeQueue
return RunningTorrentArray
}

View File

@@ -109,7 +109,7 @@ func CalculateTorrentSpeed(t *torrent.Torrent, c *ClientDB, oc ClientDB, complet
dt := float32(now.Sub(oc.UpdatedAt)) // get the delta time length between now and last updated
db := float32(bytes - oc.BytesCompleted) //getting the delta bytes
rate := db * (float32(time.Second) / dt) // converting into seconds
dbU := float32(bytesUpload - oc.DataBytesWritten)
dbU := float32(bytesUpload.Int64() - oc.DataBytesWritten)
rateUpload := dbU * (float32(time.Second) / dt)
if rate >= 0 {
rateMB := rate / 1024 / 1024 //creating MB to calculate ETA
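Only the .Int64() conversion changes in this hunk; the underlying rate math in CalculateTorrentSpeed is still a delta of bytes over a delta of time. A simplified, hypothetical sketch of that calculation:

```go
import "time"

// bytesPerSecond is a simplified version of the rate calculation above:
// the change in bytes divided by the elapsed time since the last update.
func bytesPerSecond(prevBytes, curBytes int64, lastUpdated, now time.Time) float64 {
	dt := now.Sub(lastUpdated).Seconds()
	if dt <= 0 {
		return 0 // no measurable time has passed; avoid dividing by zero
	}
	return float64(curBytes-prevBytes) / dt
}
```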
@@ -186,7 +186,6 @@ func CalculateUploadRatio(t *torrent.Torrent, c *ClientDB) string {
//StopTorrent stops the torrent, updates the database and sends a message. Since StopTorrent is called for each torrent individually, there is no need to operate on an array
func StopTorrent(singleTorrent *torrent.Torrent, torrentLocalStorage *Storage.TorrentLocal, db *storm.DB) {
torrentQueues := Storage.FetchQueues(db)
if torrentLocalStorage.TorrentStatus == "Stopped" { //if we are already stopped
Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Torrent Already Stopped, returning...")
return
@@ -194,24 +193,58 @@ func StopTorrent(singleTorrent *torrent.Torrent, torrentLocalStorage *Storage.To
torrentLocalStorage.TorrentStatus = "Stopped"
torrentLocalStorage.MaxConnections = 0
singleTorrent.SetMaxEstablishedConns(0)
for _, torrentHash := range torrentQueues.ActiveTorrents { //pulling it out of activetorrents
if torrentHash == singleTorrent.InfoHash().String() {
DeleteTorrentFromQueues(singleTorrent.InfoHash().String(), db)
}
}
for _, torrentHash := range torrentQueues.QueuedTorrents { //pulling it out of queuedTorrent
if torrentHash == singleTorrent.InfoHash().String() {
DeleteTorrentFromQueues(singleTorrent.InfoHash().String(), db)
}
}
DeleteTorrentFromQueues(singleTorrent.InfoHash().String(), db)
Storage.UpdateStorageTick(db, *torrentLocalStorage)
CreateServerPushMessage(ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "success", Payload: "Torrent Stopped!"}, Conn)
return
Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Torrent Stopped Success!")
}
//AddTorrentToForceStart forces the torrent to be high priority on start
func AddTorrentToForceStart(torrentLocalStorage *Storage.TorrentLocal, singleTorrent *torrent.Torrent, db *storm.DB) {
torrentQueues := Storage.FetchQueues(db)
for index, torrentHash := range torrentQueues.ActiveTorrents {
if torrentHash == singleTorrent.InfoHash().String() { //If torrent already in active remove from active
torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:index], torrentQueues.ActiveTorrents[index+1:]...)
}
}
for index, queuedTorrentHash := range torrentQueues.QueuedTorrents { //Removing from the queued torrents if in queued torrents
if queuedTorrentHash == singleTorrent.InfoHash().String() {
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:index], torrentQueues.QueuedTorrents[index+1:]...)
}
}
singleTorrent.NewReader()
singleTorrent.SetMaxEstablishedConns(80)
torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents, singleTorrent.InfoHash().String())
torrentLocalStorage.TorrentStatus = "ForceStart"
torrentLocalStorage.MaxConnections = 80
for _, file := range singleTorrent.Files() {
for _, sentFile := range torrentLocalStorage.TorrentFilePriority {
if file.DisplayPath() == sentFile.TorrentFilePath {
switch sentFile.TorrentFilePriority {
case "High":
file.SetPriority(torrent.PiecePriorityHigh)
case "Normal":
file.SetPriority(torrent.PiecePriorityNormal)
case "Cancel":
file.SetPriority(torrent.PiecePriorityNone)
default:
file.SetPriority(torrent.PiecePriorityNormal)
}
}
}
}
Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Adding Torrent to ForceStart Queue")
Storage.UpdateStorageTick(db, *torrentLocalStorage)
Storage.UpdateQueues(db, torrentQueues)
}
//AddTorrentToActive adds a torrent to the active slice
func AddTorrentToActive(torrentLocalStorage *Storage.TorrentLocal, singleTorrent *torrent.Torrent, db *storm.DB) {
torrentQueues := Storage.FetchQueues(db)
if torrentLocalStorage.TorrentStatus == "Stopped" {
Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Torrent set as stopped, skipping add")
return
}
for _, torrentHash := range torrentQueues.ActiveTorrents {
if torrentHash == singleTorrent.InfoHash().String() { //If torrent already in active skip
return
@@ -243,7 +276,8 @@ func AddTorrentToActive(torrentLocalStorage *Storage.TorrentLocal, singleTorrent
}
}
}
Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Adding Torrent to Active Queue")
Logger.WithFields(logrus.Fields{"Torrent Name": torrentLocalStorage.TorrentName}).Info("Adding Torrent to Active Queue (Manual Call)")
Storage.UpdateStorageTick(db, *torrentLocalStorage)
Storage.UpdateQueues(db, torrentQueues)
}
@@ -268,20 +302,26 @@ func RemoveTorrentFromActive(torrentLocalStorage *Storage.TorrentLocal, singleTo
//DeleteTorrentFromQueues deletes the torrent from all queues (for a stop or delete action)
func DeleteTorrentFromQueues(torrentHash string, db *storm.DB) {
torrentQueues := Storage.FetchQueues(db)
for x, torrentHashActive := range torrentQueues.ActiveTorrents {
for x, torrentHashActive := range torrentQueues.ActiveTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.
if torrentHash == torrentHashActive {
torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:x], torrentQueues.ActiveTorrents[x+1:]...)
Storage.UpdateQueues(db, torrentQueues)
} else {
for x, torrentHashQueued := range torrentQueues.QueuedTorrents {
if torrentHash == torrentHashQueued {
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:x], torrentQueues.QueuedTorrents[x+1:]...)
Storage.UpdateQueues(db, torrentQueues)
}
}
Logger.Info("Removing Torrent from Active: ", torrentHash)
}
}
Logger.WithFields(logrus.Fields{"Torrent Hash": torrentHash}).Info("Removing Torrent from all Queues")
for x, torrentHashQueued := range torrentQueues.QueuedTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.
if torrentHash == torrentHashQueued {
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:x], torrentQueues.QueuedTorrents[x+1:]...)
Logger.Info("Removing Torrent from Queued", torrentHash)
}
}
for x, torrentHashActive := range torrentQueues.ForcedTorrents { //FOR EXTRA CAUTION deleting it from all queues in case a mistake occurred.
if torrentHash == torrentHashActive {
torrentQueues.ForcedTorrents = append(torrentQueues.ForcedTorrents[:x], torrentQueues.ForcedTorrents[x+1:]...)
Logger.Info("Removing Torrent from Forced: ", torrentHash)
}
}
Storage.UpdateQueues(db, torrentQueues)
Logger.WithFields(logrus.Fields{"Torrent Hash": torrentHash, "TorrentQueues": torrentQueues}).Info("Removing Torrent from all Queues")
}
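All of the queue helpers above remove entries with the same append-based slice idiom. A minimal, hypothetical helper showing the pattern in isolation:

```go
// removeHash returns queue with the first occurrence of hash removed, using
// the append(s[:i], s[i+1:]...) idiom seen throughout the queue code.
func removeHash(queue []string, hash string) []string {
	for i, h := range queue {
		if h == hash {
			return append(queue[:i], queue[i+1:]...)
		}
	}
	return queue // hash not present; return the queue unchanged
}
```

Note that this mutates the backing array of the slice it is given, which is acceptable here because the updated queues are immediately written back with Storage.UpdateQueues.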
//AddTorrentToQueue adds a torrent to the queue
@@ -331,6 +371,33 @@ func ValidateQueues(db *storm.DB, config Settings.FullClientSettings, tclient *t
}
}
}
torrentQueues = Storage.FetchQueues(db)
for _, singleTorrent := range tclient.Torrents() {
singleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
if singleTorrentFromStorage.TorrentStatus == "Stopped" {
continue
}
for _, queuedTorrent := range torrentQueues.QueuedTorrents { //If we have a queued torrent that is missing data, and an active torrent that is seeding, then prioritize the missing data one
if singleTorrent.InfoHash().String() == queuedTorrent {
if singleTorrent.BytesMissing() > 0 {
for _, activeTorrent := range torrentQueues.ActiveTorrents {
for _, singleActiveTorrent := range tclient.Torrents() {
if activeTorrent == singleActiveTorrent.InfoHash().String() {
if singleActiveTorrent.Seeding() == true {
singleActiveTFS := Storage.FetchTorrentFromStorage(db, activeTorrent)
Logger.WithFields(logrus.Fields{"TorrentName": singleActiveTFS.TorrentName}).Info("Seeding, Removing from active to add queued")
RemoveTorrentFromActive(&singleActiveTFS, singleActiveTorrent, db)
singleQueuedTFS := Storage.FetchTorrentFromStorage(db, queuedTorrent)
Logger.WithFields(logrus.Fields{"TorrentName": singleQueuedTFS.TorrentName}).Info("Adding torrent to the queue, not active")
AddTorrentToActive(&singleQueuedTFS, singleTorrent, db)
}
}
}
}
}
}
}
}
}
//CalculateTorrentStatus is used to determine what the STATUS column of the frontend will display
@@ -339,32 +406,25 @@ func CalculateTorrentStatus(t *torrent.Torrent, c *ClientDB, config Settings.Ful
c.Status = "Stopped"
return
}
if float64(c.TotalUploadedBytes)/float64(bytesCompleted) >= config.SeedRatioStop && tFromStorage.TorrentUploadLimit == true { //If storage shows torrent stopped or if it is over the seeding ratio AND is under the global limit
StopTorrent(t, tFromStorage, db)
} else { //Only has 2 states in storage, stopped or running, so we know it should be running, and the websocket request handled updating the database with connections and status
for _, torrentHash := range torrentQueues.QueuedTorrents {
if tFromStorage.Hash == torrentHash {
c.Status = "Queued"
return
}
}
if len(torrentQueues.ActiveTorrents) < config.MaxActiveTorrents && tFromStorage.TorrentStatus == "Queued" {
AddTorrentToActive(tFromStorage, t, db)
}
bytesMissing := totalSize - bytesCompleted
c.MaxConnections = 80
t.SetMaxEstablishedConns(80)
if t.Seeding() && t.Stats().ActivePeers > 0 && bytesMissing == 0 {
c.Status = "Seeding"
} else if t.Stats().ActivePeers > 0 && bytesMissing > 0 {
c.Status = "Downloading"
} else if t.Stats().ActivePeers == 0 && bytesMissing == 0 {
c.Status = "Completed"
} else if t.Stats().ActivePeers == 0 && bytesMissing > 0 {
c.Status = "Awaiting Peers"
} else {
c.Status = "Unknown"
//Storage only has two states (stopped or running), so we know the torrent should be running; the websocket request handles updating the database with connections and status
for _, torrentHash := range torrentQueues.QueuedTorrents {
if tFromStorage.Hash == torrentHash {
c.Status = "Queued"
return
}
}
bytesMissing := totalSize - bytesCompleted
c.MaxConnections = 80
t.SetMaxEstablishedConns(80)
if t.Seeding() && t.Stats().ActivePeers > 0 && bytesMissing == 0 {
c.Status = "Seeding"
} else if t.Stats().ActivePeers > 0 && bytesMissing > 0 {
c.Status = "Downloading"
} else if t.Stats().ActivePeers == 0 && bytesMissing == 0 {
c.Status = "Completed"
} else if t.Stats().ActivePeers == 0 && bytesMissing > 0 {
c.Status = "Awaiting Peers"
} else {
c.Status = "Unknown"
}
}

main.go

View File

@@ -33,6 +33,7 @@ var (
//Authenticated stores whether the client that connected to the server has successfully authenticated
Authenticated = false
APP_ID = os.Getenv("APP_ID")
sendJSON = make(chan interface{})
)
var upgrader = websocket.Upgrader{
@@ -48,6 +49,14 @@ func serveHome(w http.ResponseWriter, r *http.Request) {
s1.ExecuteTemplate(w, "base", map[string]string{"APP_ID": APP_ID})
}
//handleMessages pulls outgoing JSON messages off the sendJSON channel and writes them to the client in order, so only one goroutine ever writes to the websocket connection
func handleMessages(conn *websocket.Conn) {
for {
msgJSON := <-sendJSON
conn.WriteJSON(msgJSON)
}
}
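handleMessages, together with the new sendJSON channel, funnels every outgoing message through a single writer goroutine. gorilla/websocket allows only one concurrent writer per connection, so serializing the WriteJSON calls this way keeps the torrent-list updates, RSS responses, and token replies from interleaving. A self-contained sketch of the pattern (the names are illustrative, not the project's):

```go
import "github.com/gorilla/websocket"

// writer is the single goroutine allowed to write to conn; everything else
// pushes messages onto outgoing instead of calling WriteJSON directly.
func writer(conn *websocket.Conn, outgoing <-chan interface{}) {
	for msg := range outgoing {
		if err := conn.WriteJSON(msg); err != nil {
			return // the client went away; stop writing
		}
	}
}
```

Started with something like `go writer(conn, sendJSON)`, which is what the `go handleMessages(conn)` call further down does with the package-level channel.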
func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
msg := Engine.Message{}
err := conn.ReadJSON(&msg)
@@ -65,6 +74,7 @@ func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": clientAuthToken}).Error("Unable to read authentication message")
}
fmt.Println("Authstring", clientAuthToken)
//clientAuthToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjbGllbnROYW1lIjoiZ29Ub3JyZW50V2ViVUkiLCJpc3MiOiJnb1RvcnJlbnRTZXJ2ZXIifQ.Lfqp9tm06CY4XfrqnNDeVLkq9c7rsbibDrUdPko8ffQ"
signingKeyStruct := Storage.FetchJWTTokens(db)
singingKey := signingKeyStruct.SigningKey
token, err := jwt.Parse(clientAuthToken, func(token *jwt.Token) (interface{}, error) {
@@ -77,6 +87,7 @@ func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
authFail := Engine.AuthResponse{MessageType: "authResponse", Payload: "Parsing of Token failed, ensure you have the correct token! Closing Connection"}
conn.WriteJSON(authFail)
Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": token}).Error("Unable to parse token!")
fmt.Println("ENTIRE SUPPLIED TOKEN:", token, "CLIENTAUTHTOKEN", clientAuthToken)
conn.Close()
return
}
@@ -204,6 +215,7 @@ func main() {
}
Engine.CheckTorrentWatchFolder(cronEngine, db, tclient, torrentLocalStorage, Config, torrentQueues) //Every 5 minutes the engine will check the specified folder for new .torrent files
Engine.RefreshRSSCron(cronEngine, db, tclient, torrentLocalStorage, Config, torrentQueues) // Refreshing the RSS feeds on an hourly basis to add torrents that show up in the RSS feed
Engine.CheckTorrentsCron(cronEngine, db, tclient, Config)
router := mux.NewRouter() //setting up the handler for the web backend
router.HandleFunc("/", serveHome) //Serving the main page for our SPA
@@ -241,6 +253,8 @@ func main() {
Engine.Conn = conn
Storage.Conn = conn
go handleMessages(conn) //Starting the message channel to handle all the JSON requests from the client
MessageLoop: //Labeling this loop so we can continue out of it on any errors we encounter along the way
for {
runningTorrents := tclient.Torrents() //getting running torrents here since multiple cases ask for the running torrents
@@ -278,31 +292,30 @@ func main() {
tokensDB := Storage.FetchJWTTokens(db)
tokensDB.TokenNames = append(tokens.TokenNames, Storage.SingleToken{payloadData["ClientName"].(string)})
db.Update(&tokensDB) //adding the new token client name to the database
conn.WriteJSON(tokenReturn)
sendJSON <- tokenReturn
case "torrentListRequest":
case "torrentListRequest": //This will run automatically if a webUI is open
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested TorrentList Update")
go func() { //running updates in a separate goroutine so we can still accept commands
TorrentLocalArray = Storage.FetchAllStoredTorrents(db) //Required to re-read the database since we write to the DB and this will pull the changes from it
RunningTorrentArray = Engine.CreateRunningTorrentArray(tclient, TorrentLocalArray, PreviousTorrentArray, Config, db) //Updates the RunningTorrentArray with the current client data as well
PreviousTorrentArray = RunningTorrentArray
torrentlistArray := Engine.TorrentList{MessageType: "torrentList", ClientDBstruct: RunningTorrentArray, Totaltorrents: len(RunningTorrentArray)}
Logger.WithFields(logrus.Fields{"torrentList": torrentlistArray, "previousTorrentList": PreviousTorrentArray}).Debug("Previous and Current Torrent Lists for sending to client")
conn.WriteJSON(torrentlistArray)
sendJSON <- torrentlistArray
}()
case "torrentFileListRequest": //client requested a filelist update
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested FileList Update")
fileListArrayRequest := payloadData["FileListHash"].(string)
FileListArray := Engine.CreateFileListArray(tclient, fileListArrayRequest, db, Config)
conn.WriteJSON(FileListArray) //writing the JSON to the client
sendJSON <- FileListArray
case "torrentPeerListRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested PeerList Update")
peerListArrayRequest := payloadData["PeerListHash"].(string)
torrentPeerList := Engine.CreatePeerListArray(tclient, peerListArrayRequest)
conn.WriteJSON(torrentPeerList)
sendJSON <- torrentPeerList
case "fetchTorrentsByLabel":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Torrents by Label")
@@ -317,7 +330,7 @@ func main() {
}
}
}
conn.WriteJSON(labelRunningArray)
sendJSON <- labelRunningArray
case "changeStorageValue":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Storage Location Update")
@@ -343,7 +356,7 @@ func main() {
case "settingsFileRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Settings File")
clientSettingsFile := Engine.SettingsFile{MessageType: "settingsFile", Config: Config}
conn.WriteJSON(clientSettingsFile)
sendJSON <- clientSettingsFile
case "rssFeedRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested RSS Update")
@@ -355,7 +368,7 @@ func main() {
RSSsingleFeed.RSSFeedURL = singleFeed.URL
RSSJSONFeed.RSSFeeds = append(RSSJSONFeed.RSSFeeds, RSSsingleFeed)
}
conn.WriteJSON(RSSJSONFeed)
sendJSON <- RSSJSONFeed
case "addRSSFeed":
newRSSFeed := payloadData["RSSURL"].(string)
@@ -400,7 +413,7 @@ func main() {
UpdatedRSSFeed := Engine.RefreshSingleRSSFeed(db, Storage.FetchSpecificRSSFeed(db, RSSFeedURL))
TorrentRSSList := Engine.SingleRSSFeedMessage{MessageType: "rssTorrentList", URL: RSSFeedURL, Name: UpdatedRSSFeed.Name, TotalTorrents: len(UpdatedRSSFeed.Torrents), Torrents: UpdatedRSSFeed.Torrents}
Logger.WithFields(logrus.Fields{"TorrentRSSList": TorrentRSSList}).Info("Returning Torrent list from RSSFeed to client")
conn.WriteJSON(TorrentRSSList)
sendJSON <- TorrentRSSList
case "magnetLinkSubmit": //if we detect a magnet link we will be adding a magnet torrent
storageValue, ok := payloadData["StorageValue"].(string)
@@ -510,15 +523,6 @@ func main() {
Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for stopping torrents")
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.StopTorrent(singleTorrent, &oldTorrentInfo, db)
if len(torrentQueues.QueuedTorrents) > 1 {
addTorrent := torrentQueues.QueuedTorrents[:1]
for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == addTorrent[0] {
Engine.AddTorrentToActive(&torrentLocalStorage, singleTorrent, db)
}
}
}
}
}
}
@@ -534,18 +538,6 @@ func main() {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
torrentQueues = Storage.FetchQueues(db)
for index, activeTorrentHash := range torrentQueues.ActiveTorrents { //If torrent is in the active slice, pull it
if singleTorrent.InfoHash().String() == activeTorrentHash {
singleTorrent.SetMaxEstablishedConns(0)
torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:index], torrentQueues.ActiveTorrents[index+1:]...)
}
}
for index, queuedTorrentHash := range torrentQueues.QueuedTorrents { //If torrent is in the queued slice, pull it
if singleTorrent.InfoHash().String() == queuedTorrentHash {
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:index], torrentQueues.QueuedTorrents[index+1:]...)
}
}
Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for deleting torrents")
if withData {
oldTorrentInfo.TorrentStatus = "DroppedData" //Will be cleaned up the next engine loop since deleting a torrent mid loop can cause issues
@@ -567,16 +559,19 @@ func main() {
if singleTorrent.InfoHash().String() == singleSelection {
Logger.WithFields(logrus.Fields{"infoHash": singleTorrent.InfoHash().String()}).Info("Found matching torrent to start")
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.AddTorrentToActive(&oldTorrentInfo, singleTorrent, db)
Logger.WithFields(logrus.Fields{"Torrent": oldTorrentInfo.TorrentName}).Info("Changing database to torrent running with 80 max connections")
oldTorrentInfo.TorrentStatus = "ForceStart"
oldTorrentInfo.MaxConnections = 80
Storage.UpdateStorageTick(db, oldTorrentInfo) //Updating the torrent status
Engine.AddTorrentToForceStart(&oldTorrentInfo, singleTorrent, db)
}
torrentQueues = Storage.FetchQueues(db)
if len(torrentQueues.ActiveTorrents) > Config.MaxActiveTorrents { //Since we are starting a new torrent stop the first torrent in the que if running is full
if len(torrentQueues.ActiveTorrents) > Config.MaxActiveTorrents { //Since we are starting a new torrent, stop the last torrent in the queue if the active list is full
//removeTorrent := torrentQueues.ActiveTorrents[len(torrentQueues.ActiveTorrents)-1]
removeTorrent := torrentQueues.ActiveTorrents[:1]
removeTorrent := torrentQueues.ActiveTorrents[len(torrentQueues.ActiveTorrents)-1]
for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == removeTorrent[0] {
if singleTorrent.InfoHash().String() == removeTorrent {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.RemoveTorrentFromActive(&oldTorrentInfo, singleTorrent, db)
Storage.UpdateStorageTick(db, oldTorrentInfo)
@@ -647,14 +642,13 @@ func main() {
}
default:
//conn.Close()
Logger.WithFields(logrus.Fields{"message": msg}).Info("Unrecognized Message from client... ignoring")
return
}
}
})
if Config.UseProxy {
if Config.UseReverseProxy {
err := http.ListenAndServe(httpAddr, handlers.ProxyHeaders(router))
if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Fatal("Unable to listen on the http Server!")

View File

@@ -42,7 +42,7 @@ func GenerateClientConfigFile(config FullClientSettings, authString string) {
`
}
if config.UseProxy {
if config.UseReverseProxy {
clientFile = `
ClientAuthString = "` + authString + `"
` + webUIAuth + `

View File

@@ -4,6 +4,7 @@ import (
"crypto/sha256"
"fmt"
"path/filepath"
"strconv"
"strings"
"golang.org/x/time/rate"
@@ -20,9 +21,11 @@ var Logger *logrus.Logger
type ClientConnectSettings struct {
HTTPAddr string
HTTPAddrIP string
UseProxy bool
UseReverseProxy bool
UseSocksProxy bool
WebsocketClientPort string
BaseURL string
SocksProxyURL string
ClientUsername string
ClientPassword string
PushBulletToken string `json:"-"`
@@ -34,7 +37,7 @@ type FullClientSettings struct {
LoggingLevel logrus.Level
LoggingOutput string
Version int
TorrentConfig torrent.Config `json:"-"`
TorrentConfig torrent.ClientConfig `json:"-"`
TFileUploadFolder string
SeedRatioStop float64
DefaultMoveFolder string
@@ -55,9 +58,9 @@ func defaultConfig() FullClientSettings {
Config.HTTPAddr = ":8000"
Config.SeedRatioStop = 1.50
Config.TorrentConfig.DHTConfig = dht.ServerConfig{
StartingNodes: dht.GlobalBootstrapAddrs,
}
//Config.TorrentConfig.DhtStartingNodes = dht.StartingNodesGetter{
// StartingNodes: dht.GlobalBootstrapAddrs,
//}
return Config
}
@@ -118,6 +121,7 @@ func FullClientSettingsNew() FullClientSettings {
var httpAddr string
var baseURL string
var socksProxyURLBase string
var websocketClientPort string
var logLevel logrus.Level
//logging
@@ -150,6 +154,10 @@ func FullClientSettingsNew() FullClientSettings {
baseURL = viper.GetString("reverseProxy.BaseURL")
fmt.Println("WebsocketClientPort", viper.GetString("serverConfig.ServerPort"))
}
socksProxySet := viper.GetBool("socksProxy.SocksProxyEnabled")
if socksProxySet {
socksProxyURLBase = viper.GetString("socksProxy.SocksProxyURL")
}
//Client Authentication
clientAuthEnabled := viper.GetBool("goTorrentWebUI.WebUIAuth")
var webUIUser string
@@ -201,13 +209,17 @@ func FullClientSettingsNew() FullClientSettings {
disableIPv6 := viper.GetBool("torrentClientConfig.DisableIPv6")
debug := viper.GetBool("torrentClientConfig.Debug")
dhtServerConfig := dht.ServerConfig{
StartingNodes: dht.GlobalBootstrapAddrs,
}
if viper.IsSet("DHTConfig") {
fmt.Println("Reading in custom DHT config")
dhtServerConfig = dhtServerSettings(dhtServerConfig)
//dhtServerConfig := dht.StartingNodesGetter()
//if viper.IsSet("DHTConfig") {
// fmt.Println("Reading in custom DHT config")
// dhtServerConfig = dhtServerSettings(dhtServerConfig)
//}
httpAddrPortInt64, err := strconv.ParseInt(httpAddrPort, 10, 0)
if err != nil {
fmt.Println("Failed creating 64-bit integer for goTorrent Port!", err)
}
httpAddrPortInt := int(httpAddrPortInt64) //converting to integer
encryptionPolicy := torrent.EncryptionPolicy{
DisableEncryption: viper.GetBool("EncryptionPolicy.DisableEncryption"),
@@ -215,22 +227,24 @@ func FullClientSettingsNew() FullClientSettings {
PreferNoEncryption: viper.GetBool("EncryptionPolicy.PreferNoEncryption"),
}
tConfig := torrent.Config{
DataDir: dataDirAbs,
ListenAddr: listenAddr,
DisablePEX: disablePex,
NoDHT: noDHT,
DHTConfig: dhtServerConfig,
NoUpload: noUpload,
Seed: seed,
UploadRateLimiter: uploadRateLimiter,
DownloadRateLimiter: downloadRateLimiter,
PeerID: peerID,
DisableUTP: disableUTP,
DisableTCP: disableTCP,
DisableIPv6: disableIPv6,
Debug: debug,
EncryptionPolicy: encryptionPolicy,
tConfig := torrent.NewDefaultClientConfig()
tConfig.DataDir = dataDirAbs
tConfig.ListenPort = httpAddrPortInt
tConfig.DisablePEX = disablePex
tConfig.NoDHT = noDHT
tConfig.NoUpload = noUpload
tConfig.Seed = seed
tConfig.UploadRateLimiter = uploadRateLimiter
tConfig.DownloadRateLimiter = downloadRateLimiter
tConfig.PeerID = peerID
tConfig.DisableUTP = disableUTP
tConfig.DisableTCP = disableTCP
tConfig.DisableIPv6 = disableIPv6
tConfig.Debug = debug
tConfig.EncryptionPolicy = encryptionPolicy
if listenAddr != "" {
tConfig.SetListenAddr(listenAddr) //Setting the IP address to listen on
}
Config := FullClientSettings{
@@ -240,15 +254,17 @@ func FullClientSettingsNew() FullClientSettings {
ClientConnectSettings: ClientConnectSettings{
HTTPAddr: httpAddr,
HTTPAddrIP: httpAddrIP,
UseProxy: proxySet,
UseReverseProxy: proxySet,
UseSocksProxy: socksProxySet,
WebsocketClientPort: websocketClientPort,
ClientUsername: webUIUser,
ClientPassword: webUIPasswordHash,
BaseURL: baseURL,
SocksProxyURL: socksProxyURLBase,
PushBulletToken: pushBulletToken,
},
TFileUploadFolder: "uploadedTorrents",
TorrentConfig: tConfig,
TorrentConfig: *tConfig,
DefaultMoveFolder: defaultMoveFolderAbs,
TorrentWatchFolder: torrentWatchFolderAbs,
MaxActiveTorrents: maxActiveTorrents,
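The Settings changes above track the upstream anacrolix/torrent move from a torrent.Config literal to a *torrent.ClientConfig built with NewDefaultClientConfig. A minimal sketch of the new construction pattern and how such a config is normally handed to the client; the field values are placeholders, not the project's defaults:

```go
import "github.com/anacrolix/torrent"

func newClient() (*torrent.Client, error) {
	cfg := torrent.NewDefaultClientConfig() // start from the library defaults
	cfg.DataDir = "downloads"               // placeholder path
	cfg.Seed = true
	cfg.NoDHT = false
	return torrent.NewClient(cfg) // NewClient now takes the *ClientConfig
}
```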

View File

@@ -1,6 +1,7 @@
package storage
import (
"fmt"
"os"
"path/filepath"
@@ -21,6 +22,7 @@ type TorrentQueues struct {
ID int `storm:"id,unique"` //storm requires unique ID (will be 5)
ActiveTorrents []string
QueuedTorrents []string
ForcedTorrents []string
}
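TorrentQueues (including the new ForcedTorrents slice) is stored as a single storm document keyed by its ID field, which the comment above fixes at 5. A hedged sketch of how such a record can be read and written back with storm; the helper names are illustrative:

```go
import "github.com/asdine/storm"

// loadQueues fetches the single TorrentQueues record (ID 5 per the comment above).
func loadQueues(db *storm.DB) (TorrentQueues, error) {
	var q TorrentQueues
	err := db.One("ID", 5, &q)
	return q, err
}

// saveQueues persists the (possibly modified) queues back to the database.
func saveQueues(db *storm.DB, q TorrentQueues) error {
	return db.Save(&q)
}
```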
//IssuedTokensList contains a slice of all the tokens issued to applications
@@ -76,20 +78,20 @@ type TorrentLocal struct {
DateAdded string
StoragePath string //The absolute value of the path where the torrent will be moved when completed
TempStoragePath string //The absolute path of where the torrent is temporarily stored as it is downloaded
TorrentMoved bool
TorrentMoved bool //If completed has the torrent been moved to the end location
TorrentName string
TorrentStatus string
TorrentUploadLimit bool //if true this torrent will bypass the upload storage limit (effectively unlimited)
MaxConnections int
TorrentStatus string //"Stopped", "Running", "ForceStart"
TorrentUploadLimit bool //if true this torrent will bypass the upload storage limit (effectively unlimited)
MaxConnections int //Max connections that the torrent can have to it at one time
TorrentType string //magnet or .torrent file
TorrentFileName string //Should be just the name of the torrent
TorrentFile []byte
Label string
UploadedBytes int64
DownloadedBytes int64
TorrentSize int64 //If we cancel a file change the download size since we won't be downloading that file
TorrentFile []byte //If torrent was from .torrent file, store the entire file for re-adding on restart
Label string //User enterable label to sort torrents by
UploadedBytes int64 //Total amount the client has uploaded on this torrent
DownloadedBytes int64 //Total amount the client has downloaded on this torrent
TorrentSize int64 //If we cancel a file, adjust the download size since we won't be downloading that file
UploadRatio string
TorrentFilePriority []TorrentFilePriority
TorrentFilePriority []TorrentFilePriority //Slice of all the files the torrent contains and the priority of each file
}
//SaveConfig saves the config to the database to compare for changes to settings.toml on restart
@@ -146,11 +148,11 @@ func FetchAllStoredTorrents(torrentStorage *storm.DB) (torrentLocalArray []*Torr
//AddTorrentLocalStorage is called when adding a new torrent via any method, requires the boltdb pointer and the torrentlocal struct
func AddTorrentLocalStorage(torrentStorage *storm.DB, local TorrentLocal) {
Logger.WithFields(logrus.Fields{"Storage Path": local.StoragePath, "Torrent": local.TorrentName, "File(if file)": local.TorrentFileName}).Info("Adding new Torrent to database")
fmt.Println("ENTIRE TORRENT", local)
err := torrentStorage.Save(&local)
if err != nil {
Logger.WithFields(logrus.Fields{"database": torrentStorage, "error": err}).Error("Error adding new Torrent to database!")
}
}
//DelTorrentLocalStorage is called to delete a torrent when we fail (for whatever reason) to load its information. Deleted by HASH matching.