Skip to content

Commit d3b5364

Browse files
committed
Upgrade to go 1.25
1 parent f0715fd commit d3b5364

9 files changed

Lines changed: 95 additions & 115 deletions

File tree

.github/workflows/build.yml

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ jobs:
1313
timeout-minutes: 10
1414
strategy:
1515
matrix:
16-
go-version: [1.23.x, 1.24.x]
16+
go-version: [1.25.x, 1.26.x]
1717
platform: [ubuntu-latest, macos-latest, windows-latest]
1818
runs-on: ${{ matrix.platform }}
1919

@@ -24,10 +24,10 @@ jobs:
2424
go-version: ${{ matrix.go-version }}
2525

2626
- name: Checkout code
27-
uses: actions/checkout@v5
27+
uses: actions/checkout@v6
2828

2929
- name: Load cached dependencies
30-
uses: actions/cache@v4
30+
uses: actions/cache@v5
3131
with:
3232
path: ~/go/pkg/mod
3333
key: ${{ runner.os }}-go-${{ matrix.go-version }}-${{ hashFiles('**/go.sum') }}
@@ -44,9 +44,9 @@ jobs:
4444
run: make clean cover
4545

4646
- name: Upload coverage to coveralls.io
47-
if: matrix.platform == 'ubuntu-latest' && matrix.go-version == '1.24.x'
47+
if: matrix.platform == 'ubuntu-latest' && matrix.go-version == '1.26.x'
4848
uses: coverallsapp/github-action@v2
4949
with:
50-
file: cover.out
50+
file: bin/cover.out
5151
flag-name: ${{ runner.os }}-go-${{ matrix.go-version }}
5252
fail-on-error: false

.gitignore

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,3 @@ bin/*
77
*.exe
88
*.exe~
99
*.so
10-
11-
cover.out
12-
cover.html
13-
*.prof

Makefile

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
LINTER_VERSION=v2.1.6
1+
LINTER_VERSION=v2.9.0
22
LINTER=./bin/golangci-lint
33
ifeq ($(OS),Windows_NT)
44
LINTER=./bin/golangci-lint.exe
@@ -19,6 +19,7 @@ lint: ## Run linter and detect go mod tidy changes.
1919

2020
.PHONY: setup
2121
setup: ## Download dependencies.
22+
@mkdir -p bin
2223
go mod download
2324
@if [ ! -f "$(LINTER)" ]; then \
2425
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s $(LINTER_VERSION); \
@@ -34,8 +35,8 @@ bench: ## Run benchmarks.
3435

3536
.PHONY: cover
3637
cover: ## Run tests with coverage. Generates "cover.out" profile and its html representation.
37-
go test -race -timeout=10m -coverprofile=cover.out -coverpkg=./... $(pkgs)
38-
go tool cover -html=cover.out -o cover.html
38+
go test -race -timeout=10m -coverprofile=./bin/cover.out -coverpkg=./... $(pkgs)
39+
go tool cover -html=./bin/cover.out -o ./bin/cover.html
3940

4041
.PHONY: tidy
4142
tidy: ## Simply runs 'go mod tidy'.
@@ -44,12 +45,8 @@ tidy: ## Simply runs 'go mod tidy'.
4445
.PHONY: clean
4546
clean: ## Clean up go tests cache and coverage generated files.
4647
go clean -testcache
47-
@for file in cover.html cover.out; do \
48-
if [ -f $$file ]; then \
49-
echo "rm -f $$file"; \
50-
rm -f $$file; \
51-
fi \
52-
done
48+
@rm -f ./bin/cover*
49+
@rm -f ./bin/*.prof
5350

5451
.PHONY: help
5552
# Absolutely awesome: https://marmelab.com/blog/2016/02/29/auto-documented-makefile.html

cmd/pprof/main.go

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ func main() {
3838
defer tearDownTmpCsvFile(fName)
3939

4040
// enable cpu profiling
41-
fCPU, err := os.Create("./cpu_" + *generateProfileFor + ".prof")
41+
fCPU, err := os.Create("./bin/cpu_" + *generateProfileFor + ".prof")
4242
if err != nil {
4343
log.Println("could not create CPU profile: ", err)
4444

@@ -62,7 +62,7 @@ func main() {
6262
}
6363

6464
// enable memory profiling
65-
fMem, err := os.Create("./mem_" + *generateProfileFor + ".prof")
65+
fMem, err := os.Create("./bin/mem_" + *generateProfileFor + ".prof")
6666
if err != nil {
6767
log.Println("could not create memory profile: ", err)
6868

@@ -206,25 +206,22 @@ func consumeBigCsvReaderResults(rowsChans []bigcsvreader.RowsChan, errsChan bigc
206206
)
207207

208208
for i := range rowsChans {
209-
wg.Add(1)
210-
go func(rowsChan bigcsvreader.RowsChan, waitGr *sync.WaitGroup) {
209+
wg.Go(func() {
210+
rowsChan := rowsChans[i]
211211
var localCount int64
212212
for record := range rowsChan {
213213
localCount++
214214
_ = record
215215
}
216216
atomic.AddInt64(&count, localCount)
217-
waitGr.Done()
218-
}(rowsChans[i], &wg)
217+
})
219218
}
220219

221-
wg.Add(1)
222-
go func(errsCh bigcsvreader.ErrsChan, waitGr *sync.WaitGroup) {
223-
for err := range errsCh {
220+
wg.Go(func() {
221+
for err := range errsChan {
224222
log.Println("Read error: ", err)
225223
}
226-
waitGr.Done()
227-
}(errsChan, &wg)
224+
})
228225

229226
wg.Wait()
230227

example_test.go

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -47,15 +47,14 @@ func ExampleCsvReader() {
4747
rowsChans, errsChan := bigCSV.Read(ctx)
4848

4949
// process rows and errors:
50-
5150
for i := range rowsChans {
52-
wg.Add(1)
53-
go rowWorker(rowsChans[i], &wg)
51+
wg.Go(func() {
52+
rowWorker(rowsChans[i])
53+
})
5454
}
55-
56-
wg.Add(1)
57-
go errWorker(errsChan, &wg)
58-
55+
wg.Go(func() {
56+
errWorker(errsChan)
57+
})
5958
wg.Wait()
6059

6160
// Unordered output:
@@ -66,18 +65,16 @@ func ExampleCsvReader() {
6665
// {ID:5 Name:Logitech Mouse G203 Desc:Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc eleifend felis quis magna auctor, ut lacinia eros efficitur. Maecenas mattis dolor a pharetra gravida. Aenean at eros sed metus posuere feugiat in vitae libero. Morbi a diam volutpat, tempor lacus sed, sagittis velit. Donec eget dignissim mauris, sed aliquam ex. Duis eros dolor, vestibulum ac aliquam eget, viverra in enim. Aenean ut turpis quis purus porta lobortis. Etiam sollicitudin lectus vitae velit tincidunt, ut volutpat justo aliquam. Aenean vitae vehicula arcu. Interdum et malesuada fames ac ante ipsum primis in faucibus. Nunc viverra enim nec risus mollis elementum nec dictum ex. Nunc lorem eros, vulputate a rutrum nec, scelerisque non augue. Sed in egestas eros. Quisque felis lorem, vehicula ac venenatis vel, tristique id sapien. Morbi vitae odio eget orci facilisis suscipit. Cras sodales, augue vitae tincidunt tempus, diam turpis volutpat est, vitae fringilla augue leo semper augue. Integer scelerisque tempor mauris, ac posuere sem aenean Price:30.5 Qty:35}
6766
}
6867

69-
func rowWorker(rowsChan bigcsvreader.RowsChan, waitGr *sync.WaitGroup) {
68+
func rowWorker(rowsChan bigcsvreader.RowsChan) {
7069
for row := range rowsChan {
7170
processRow(row)
7271
}
73-
waitGr.Done()
7472
}
7573

76-
func errWorker(errsChan bigcsvreader.ErrsChan, waitGr *sync.WaitGroup) {
74+
func errWorker(errsChan bigcsvreader.ErrsChan) {
7775
for err := range errsChan {
7876
handleError(err)
7977
}
80-
waitGr.Done()
8178
}
8279

8380
// processRow can be used to implement business logic

go.mod

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
module github.com/actforgood/bigcsvreader
22

3-
go 1.23
3+
go 1.25

reader.go

Lines changed: 10 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -153,18 +153,18 @@ func (cr *CsvReader) readAsync(
153153

154154
// create a wait group pool as we need to wait for all goroutines to terminate.
155155
var wg sync.WaitGroup
156-
wg.Add(totalThreads)
157156
worker := cr.readBetweenOffsetsAsync
158157
for thread := range totalThreads {
159-
go worker(
160-
ctx,
161-
thread+1,
162-
threadsInfo[thread][0], // start offset
163-
threadsInfo[thread][1], // end offset
164-
&wg,
165-
rowsChans[thread],
166-
errsChan,
167-
)
158+
wg.Go(func() {
159+
worker(
160+
ctx,
161+
thread+1,
162+
threadsInfo[thread][0], // start offset
163+
threadsInfo[thread][1], // end offset
164+
rowsChans[thread],
165+
errsChan,
166+
)
167+
})
168168
}
169169
wg.Wait()
170170

@@ -175,12 +175,9 @@ func (cr *CsvReader) readAsync(
175175
func (cr *CsvReader) readBetweenOffsetsAsync(
176176
ctx context.Context,
177177
currentThreadNo, offsetStart, offsetEnd int,
178-
wg *sync.WaitGroup,
179178
rowsChan chan<- []string,
180179
errsChan chan<- error,
181180
) {
182-
defer wg.Done()
183-
184181
f := cr.openFile(currentThreadNo, errsChan)
185182
if f == nil {
186183
return

reader_test.go

Lines changed: 55 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ import (
1818
"sync"
1919
"sync/atomic"
2020
"testing"
21+
"testing/synctest"
2122
"time"
2223

2324
"github.com/actforgood/bigcsvreader"
@@ -129,54 +130,53 @@ func testCsvReaderWithDifferentFileSizesAndMaxGoroutines(rowsCount int64) func(t
129130
return func(t *testing.T) {
130131
t.Parallel()
131132

132-
// arrange
133-
fName, err := setUpTmpCsvFile(t.TempDir(), rowsCount)
134-
if err != nil {
135-
t.Fatalf("prerequisite failed: could not generate CSV file: %v", err)
136-
}
137-
subject := bigcsvreader.New()
138-
subject.SetFilePath(fName)
139-
subject.ColumnsCount = 5
140-
ctx, cancelCtx := context.WithCancel(context.Background())
141-
defer cancelCtx()
142-
var sumIDs int64
143-
var wg sync.WaitGroup
144-
145-
for maxGoroutines := 1; maxGoroutines <= 16; maxGoroutines++ {
146-
subject.MaxGoroutinesNo = maxGoroutines
147-
sumIDs = 0
148-
expectedSumIDs := rowsCount * (rowsCount + 1) / 2
149-
150-
// act
151-
rowsChans, errsChan := subject.Read(ctx)
152-
153-
// assert
154-
for i := range rowsChans {
155-
wg.Add(1)
156-
go func(rowsChan bigcsvreader.RowsChan, waitGr *sync.WaitGroup) {
157-
var localSumIDs int64
158-
for record := range rowsChan {
159-
if !assertEqual(t, 5, len(record)) {
160-
continue
161-
}
162-
id, _ := strconv.ParseInt(record[colID], 10, 64)
163-
localSumIDs += id
164-
expectedColName := colValueNamePrefix + record[colID]
165-
assertEqual(t, expectedColName, record[colName])
166-
assertEqual(t, colValueDescription, record[colDescription])
167-
assertEqual(t, colValuePrice, record[colPrice])
168-
assertEqual(t, colValueStock, record[colStock])
169-
}
170-
atomic.AddInt64(&sumIDs, localSumIDs)
171-
waitGr.Done()
172-
}(rowsChans[i], &wg)
133+
synctest.Test(t, func(*testing.T) {
134+
// arrange
135+
fName, err := setUpTmpCsvFile(t.TempDir(), rowsCount)
136+
if err != nil {
137+
t.Fatalf("prerequisite failed: could not generate CSV file: %v", err)
173138
}
174-
for err := range errsChan {
175-
assertNil(t, err)
139+
subject := bigcsvreader.New()
140+
subject.SetFilePath(fName)
141+
subject.ColumnsCount = 5
142+
ctx, cancelCtx := context.WithCancel(context.Background())
143+
defer cancelCtx()
144+
var sumIDs int64
145+
146+
for maxGoroutines := range 16 {
147+
subject.MaxGoroutinesNo = maxGoroutines + 1
148+
sumIDs = 0
149+
expectedSumIDs := rowsCount * (rowsCount + 1) / 2
150+
151+
// act
152+
rowsChans, errsChan := subject.Read(ctx)
153+
154+
// assert
155+
for i := range rowsChans {
156+
go func(rowsChan bigcsvreader.RowsChan) {
157+
var localSumIDs int64
158+
for record := range rowsChan {
159+
if !assertEqual(t, 5, len(record)) {
160+
continue
161+
}
162+
id, _ := strconv.ParseInt(record[colID], 10, 64)
163+
localSumIDs += id
164+
expectedColName := colValueNamePrefix + record[colID]
165+
assertEqual(t, expectedColName, record[colName])
166+
assertEqual(t, colValueDescription, record[colDescription])
167+
assertEqual(t, colValuePrice, record[colPrice])
168+
assertEqual(t, colValueStock, record[colStock])
169+
}
170+
atomic.AddInt64(&sumIDs, localSumIDs)
171+
}(rowsChans[i])
172+
}
173+
for err := range errsChan {
174+
assertNil(t, err)
175+
}
176+
synctest.Wait()
177+
assertEqual(t, expectedSumIDs, sumIDs)
176178
}
177-
wg.Wait()
178-
assertEqual(t, expectedSumIDs, sumIDs)
179-
}
179+
})
180180
}
181181
}
182182

@@ -297,15 +297,14 @@ func gatherRecords(rowsChans []bigcsvreader.RowsChan, errsChan bigcsvreader.Errs
297297
records = make([][]string, 0)
298298
)
299299
for i := range rowsChans {
300-
wg.Add(1)
301-
go func(rowsChan bigcsvreader.RowsChan, mutex *sync.Mutex, waitGr *sync.WaitGroup) {
300+
wg.Go(func() {
301+
rowsChan := rowsChans[i]
302302
for record := range rowsChan {
303-
mutex.Lock()
303+
mu.Lock()
304304
records = append(records, record)
305305
mu.Unlock()
306306
}
307-
waitGr.Done()
308-
}(rowsChans[i], &mu, &wg)
307+
})
309308
}
310309

311310
for err := range errsChan {
@@ -416,16 +415,15 @@ func consumeBenchResults(rowsChans []bigcsvreader.RowsChan, _ bigcsvreader.ErrsC
416415
)
417416

418417
for i := range rowsChans {
419-
wg.Add(1)
420-
go func(rowsChan bigcsvreader.RowsChan, waitGr *sync.WaitGroup) {
418+
wg.Go(func() {
419+
rowsChan := rowsChans[i]
421420
var localCount int64
422421
for record := range rowsChan {
423422
localCount++
424423
fakeProcessRow(record)
425424
}
426425
atomic.AddInt64(&count, localCount)
427-
waitGr.Done()
428-
}(rowsChans[i], &wg)
426+
})
429427
}
430428
wg.Wait()
431429

@@ -531,16 +529,14 @@ func benchmarkStdGoCsvReaderReadOneByOneProcessParalell(rowsCount int64) func(b
531529
wg sync.WaitGroup
532530
)
533531
for range numWorkers {
534-
wg.Add(1)
535-
go func() {
532+
wg.Go(func() {
536533
var localCount int64
537534
for record := range rowsChan {
538535
localCount++
539536
fakeProcessRow(record)
540537
}
541538
atomic.AddInt64(&count, localCount)
542-
wg.Done()
543-
}()
539+
})
544540
}
545541

546542
// sequential reading

0 commit comments

Comments (0)