-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.go
More file actions
241 lines (206 loc) · 5.11 KB
/
main.go
File metadata and controls
241 lines (206 loc) · 5.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
// splitcsv
package main
import (
"encoding/csv"
"flag"
"fmt"
"log"
"os"
"path/filepath"
"regexp"
"strings"
)
func main() {
in := flag.String("in", "", "Input CSV file (required)")
parts := flag.Int("parts", 2, "Number of parts to split the file into")
comma := flag.String("comma", ",", "Column separator (\",\", \";\" etc.)")
header := flag.Bool("header", true, "Include header in all output files")
flag.Parse()
if *in == "" {
log.Fatal("specify input file path with -in")
}
if *parts < 1 {
log.Fatal("-parts must be at least 1")
}
if len(*comma) != 1 {
log.Fatal("-comma must be a single character")
}
sep := rune((*comma)[0])
f, err := os.Open(*in)
if err != nil {
log.Fatalf("failed to open file: %v", err)
}
defer f.Close()
r := csv.NewReader(f)
r.Comma = sep
headerRow, err := r.Read()
if err != nil {
log.Fatalf("failed to read header: %v", err)
}
// Count total data rows
total := 0
for {
_, err := r.Read()
if err != nil {
if isEOF(err) {
break
}
log.Fatalf("error reading data: %v", err)
}
total++
}
if total == 0 {
log.Fatal("no data rows found")
}
// Calculate rows per part
rowsPerPart := total / *parts
extraRows := total % *parts
// Go back to start for second pass
if _, err := f.Seek(0, 0); err != nil {
log.Fatalf("seek error: %v", err)
}
// Skip header line by seeking to start and re-reading
// (this is already done after the counting phase)
// Generate output filenames and create writers
inputBase := strings.TrimSuffix(filepath.Base(*in), filepath.Ext(*in))
inputExt := filepath.Ext(*in)
inputDir := filepath.Dir(*in)
writers := make([]*csv.Writer, *parts)
files := make([]*os.File, *parts)
filenames := make([]string, *parts)
// Create cleanup function
defer func() {
for _, file := range files {
if file != nil {
file.Close()
}
}
}()
for i := 0; i < *parts; i++ {
filename := fmt.Sprintf("%s_part%d%s", inputBase, i+1, inputExt)
fullPath := filepath.Join(inputDir, filename)
filenames[i] = fullPath
file, err := os.Create(fullPath)
if err != nil {
log.Fatalf("failed to create %s: %v", fullPath, err)
}
files[i] = file
writer := csv.NewWriter(file)
writer.Comma = sep
writers[i] = writer
// Write header if enabled
if *header {
if err := writer.Write(headerRow); err != nil {
log.Fatalf("failed to write header to %s: %v", fullPath, err)
}
}
}
// Distribute data rows
currentPart := 0
rowsInCurrentPart := 0
maxRowsForCurrentPart := rowsPerPart
if extraRows > 0 {
maxRowsForCurrentPart++
extraRows--
}
// Create new CSV reader for data processing
r2 := csv.NewReader(f)
r2.Comma = sep
// Skip header again
_, err = r2.Read()
if err != nil {
log.Fatalf("failed to re-read header: %v", err)
}
for {
rec, err := r2.Read()
if err != nil {
if isEOF(err) {
break
}
log.Fatalf("error reading CSV: %v", err)
}
// Clean newlines in all fields (simple approach for now)
cleanedRec := cleanNewlines(rec)
if err := writers[currentPart].Write(cleanedRec); err != nil {
log.Fatalf("failed to write to part %d: %v", currentPart+1, err)
}
rowsInCurrentPart++
if rowsInCurrentPart >= maxRowsForCurrentPart && currentPart < *parts-1 {
currentPart++
rowsInCurrentPart = 0
maxRowsForCurrentPart = rowsPerPart
if extraRows > 0 {
maxRowsForCurrentPart++
extraRows--
}
}
}
// Flush all writers
for i, writer := range writers {
writer.Flush()
if err := writer.Error(); err != nil {
log.Fatalf("flush error for part %d: %v", i+1, err)
}
}
fmt.Printf("Successfully split %s into %d parts:\n", *in, *parts)
for i, filename := range filenames {
fmt.Printf(" Part %d: %s\n", i+1, filename)
}
}
func isEOF(err error) bool {
return err.Error() == "EOF"
}
func detectQuotedFields(rawLine string, sep rune) []bool {
// Simple approach: check if each field starts with a quote
result := make([]bool, 0)
fieldStart := 0
inQuotes := false
runes := []rune(rawLine)
for i := 0; i <= len(runes); i++ {
var char rune
if i < len(runes) {
char = runes[i]
}
if i < len(runes) && char == '"' {
if !inQuotes {
inQuotes = true
} else {
// Check for escaped quote
if i+1 < len(runes) && runes[i+1] == '"' {
i++ // Skip the escaped quote
} else {
inQuotes = false
}
}
} else if i == len(runes) || (char == sep && !inQuotes) {
// End of field (or end of line)
if fieldStart < len(runes) && runes[fieldStart] == '"' {
result = append(result, true)
} else {
result = append(result, false)
}
fieldStart = i + 1
}
}
return result
}
func cleanNewlines(record []string) []string {
cleaned := make([]string, len(record))
re := regexp.MustCompile(`\r?\n`)
for i, field := range record {
cleaned[i] = re.ReplaceAllString(field, " ")
}
return cleaned
}
func cleanNewlinesInQuoted(record []string, quotedFields []bool) []string {
cleaned := make([]string, len(record))
re := regexp.MustCompile(`\r?\n`)
for i, field := range record {
if i < len(quotedFields) && quotedFields[i] {
cleaned[i] = re.ReplaceAllString(field, " ")
} else {
cleaned[i] = field
}
}
return cleaned
}