I ran into an issue with the new implementation where a module is
used to start queue(s) but knows nothing about the current application
(as far as queue size, workers, etc. go).
Being able to get the given size allows us to use a formula to
dynamically generate the number of workers to use when starting each
queue.
---
queue.go | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/queue.go b/queue.go
index 681129e..7f16dd1 100644
--- a/queue.go
+++ b/queue.go
@@ -50,11 +50,12 @@ var (
)
type Queue struct {
-	name     string
-	queue    chan *Task
-	retries  chan *Task
-	shutdown chan struct{}
-	workers  atomic.Int32
+	name      string
+	queue     chan *Task
+	retries   chan *Task
+	shutdown  chan struct{}
+	workers   atomic.Int32
+	queueSize int
 	tasks     sync.WaitGroup
tasksLock sync.Mutex
@@ -73,10 +74,11 @@ type Queue struct {
// handles the task.
func NewQueue(name string, size int) *Queue {
return &Queue{
-		name:     name,
-		queue:    make(chan *Task, size),
-		retries:  make(chan *Task), // unbuffered on purpose
-		shutdown: make(chan struct{}),
+		name:      name,
+		queue:     make(chan *Task, size),
+		retries:   make(chan *Task), // unbuffered on purpose
+		shutdown:  make(chan struct{}),
+		queueSize: size,
 	}
}
@@ -85,6 +87,11 @@ func (q *Queue) Name() string {
return q.name
}
+// Size returns the size of the queue buffer.
+func (q *Queue) Size() int {
+	return q.queueSize
+}
+
 // Enqueues a task. Will block if the queue is full until workers pick up tasks.
// Can be safely called from multiple goroutines.
//
--
2.47.1