// License: GPLv3 Copyright: 2023, Kovid Goyal, <kovid at kovidgoyal.net>

package images

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"

	"github.com/kovidgoyal/go-parallel"
)
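
// Reference fmt so the import is retained for ad-hoc debug printing.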
var _ = fmt.Print
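
// Context holds the settings used for parallel image processing, currently
// just the number of worker goroutines to use.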
type Context struct {
	num_of_threads atomic.Int32
}
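
// SetNumberOfThreads sets the number of goroutines used for parallel
// processing. A value of zero or less means use the number of CPUs.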
func (self *Context) SetNumberOfThreads(n int) {
	self.num_of_threads.Store(int32(n))
}
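
// NumberOfThreads returns the configured thread count. It may be zero or
// negative if no explicit value has been set.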
func (self *Context) NumberOfThreads() int {
	return int(self.num_of_threads.Load())
}
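
// EffectiveNumberOfThreads returns the number of goroutines that will actually
// be used: the configured value if it is positive, otherwise the number of
// CPUs, and always at least one.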
func (self *Context) EffectiveNumberOfThreads() int {
	ans := int(self.num_of_threads.Load())
	if ans <= 0 {
		ans = max(1, runtime.NumCPU())
	}
	return ans
}

// SafeParallel processes the indices in [start, stop) in separate goroutines.
// If any goroutine panics, the panic is recovered and returned as an error.
// Note that if multiple goroutines panic, only one error is returned.
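//
// Usage sketch (illustrative only; items and process are hypothetical and not
// part of this package):
//
//	ctx := &Context{}
//	err := ctx.SafeParallel(0, len(items), func(indices <-chan int) {
//		for i := range indices {
//			process(items[i]) // a panic here is converted into err
//		}
//	})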
func (self *Context) SafeParallel(start, stop int, fn func(<-chan int)) (err error) {
	count := stop - start
	if count < 1 {
		return
	}
	procs := min(self.EffectiveNumberOfThreads(), count)
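	// Pre-fill a buffered channel with all the indices and close it, so that
	// workers can drain it with a simple range loop and exit when it is empty.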
	c := make(chan int, count)
	for i := start; i < stop; i++ {
		c <- i
	}
	close(c)
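	// Start the workers. Each one recovers from panics in fn so that a
	// panicking worker cannot crash the whole program.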
	var wg sync.WaitGroup
	var mu sync.Mutex // serializes writes to err when multiple workers panic
	for range procs {
		wg.Add(1)
		go func() {
			defer func() {
				if r := recover(); r != nil {
					mu.Lock()
					err = parallel.Format_stacktrace_on_panic(r, 1)
					mu.Unlock()
				}
				wg.Done()
			}()
			fn(c)
		}()
	}
	wg.Wait()
	return
}