|
| 1 | +package splunk |
| 2 | + |
| 3 | +import ( |
| 4 | + "flag" |
| 5 | + "fmt" |
| 6 | + "io" |
| 7 | + "net/http" |
| 8 | + "strings" |
| 9 | + "time" |
| 10 | + |
| 11 | + "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" |
| 12 | + "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver" |
| 13 | + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" |
| 14 | + "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil" |
| 15 | + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" |
| 16 | + "github.com/VictoriaMetrics/metrics" |
| 17 | + |
| 18 | + "github.com/VictoriaMetrics/VictoriaLogs/app/vlinsert/insertutil" |
| 19 | + "github.com/VictoriaMetrics/VictoriaLogs/lib/logstorage" |
| 20 | +) |
| 21 | + |
| 22 | +var ( |
| 23 | + splunkStreamFields = flagutil.NewArrayString("splunk.streamFields", "Comma-separated list of fields to use as log stream fields for logs ingested over splunk protocol. "+ |
| 24 | + "See https://docs.victoriametrics.com/victorialogs/data-ingestion/splunk/#stream-fields") |
| 25 | + splunkIgnoreFields = flagutil.NewArrayString("splunk.ignoreFields", "Comma-separated list of fields to ignore for logs ingested over splunk protocol. "+ |
| 26 | + "See https://docs.victoriametrics.com/victorialogs/data-ingestion/splunk/#dropping-fields") |
| 27 | + splunkTimeField = flag.String("splunk.timeField", "time", "Field to use as a log timestamp for logs ingested via splunk protocol. "+ |
| 28 | + "See https://docs.victoriametrics.com/victorialogs/data-ingestion/splunk/#time-field") |
| 29 | + splunkMsgField = flagutil.NewArrayString("splunk.msgField", "Field to use as a log message for logs ingested via splunk protocol. "+ |
| 30 | + "See https://docs.victoriametrics.com/victorialogs/data-ingestion/splunk/#message-field") |
| 31 | + splunkTenantID = flag.String("splunk.tenantID", "0:0", "TenantID for logs ingested via the Journald endpoint. "+ |
| 32 | + "See https://docs.victoriametrics.com/victorialogs/data-ingestion/splunk/#multitenancy") |
| 33 | +) |
| 34 | + |
| 35 | +func getCommonParams(r *http.Request) (*insertutil.CommonParams, error) { |
| 36 | + cp, err := insertutil.GetCommonParams(r) |
| 37 | + if err != nil { |
| 38 | + return nil, err |
| 39 | + } |
| 40 | + if cp.TenantID.AccountID == 0 && cp.TenantID.ProjectID == 0 { |
| 41 | + tenantID, err := logstorage.ParseTenantID(*splunkTenantID) |
| 42 | + if err != nil { |
| 43 | + return nil, fmt.Errorf("cannot parse -splunk.tenantID=%q for splunk: %w", *splunkTenantID, err) |
| 44 | + } |
| 45 | + cp.TenantID = tenantID |
| 46 | + } |
| 47 | + |
| 48 | + if !cp.IsTimeFieldSet { |
| 49 | + cp.TimeFields = []string{*splunkTimeField} |
| 50 | + } |
| 51 | + if len(cp.StreamFields) == 0 { |
| 52 | + cp.StreamFields = getStreamFields() |
| 53 | + } |
| 54 | + if len(cp.IgnoreFields) == 0 { |
| 55 | + cp.IgnoreFields = *splunkIgnoreFields |
| 56 | + } |
| 57 | + if len(cp.MsgFields) == 0 { |
| 58 | + cp.MsgFields = getMsgFields() |
| 59 | + } |
| 60 | + return cp, nil |
| 61 | +} |
| 62 | + |
| 63 | +func getMsgFields() []string { |
| 64 | + if len(*splunkMsgField) > 0 { |
| 65 | + return *splunkMsgField |
| 66 | + } |
| 67 | + return []string{"event"} |
| 68 | +} |
| 69 | + |
| 70 | +func getStreamFields() []string { |
| 71 | + if len(*splunkStreamFields) > 0 { |
| 72 | + return *splunkStreamFields |
| 73 | + } |
| 74 | + return defaultStreamFields |
| 75 | +} |
| 76 | + |
// defaultStreamFields is the fallback set of log stream fields used when
// neither the request nor -splunk.streamFields provides stream fields.
var defaultStreamFields = []string{
	"sourcetype",
	"host",
	"source",
}
| 82 | + |
| 83 | +// RequestHandler processes splunk insert requests |
| 84 | +func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool { |
| 85 | + if !strings.HasPrefix(path, "/services/collector/event") { |
| 86 | + return false |
| 87 | + } |
| 88 | + switch r.Method { |
| 89 | + case http.MethodOptions: |
| 90 | + w.WriteHeader(http.StatusOK) |
| 91 | + return true |
| 92 | + case http.MethodPost: |
| 93 | + w.Header().Add("Content-Type", "application/json") |
| 94 | + default: |
| 95 | + w.WriteHeader(http.StatusMethodNotAllowed) |
| 96 | + return true |
| 97 | + } |
| 98 | + |
| 99 | + startTime := time.Now() |
| 100 | + requestsTotal.Inc() |
| 101 | + |
| 102 | + cp, err := getCommonParams(r) |
| 103 | + if err != nil { |
| 104 | + httpserver.Errorf(w, r, "%s", err) |
| 105 | + return true |
| 106 | + } |
| 107 | + if err := insertutil.CanWriteData(); err != nil { |
| 108 | + httpserver.Errorf(w, r, "%s", err) |
| 109 | + return true |
| 110 | + } |
| 111 | + |
| 112 | + encoding := r.Header.Get("Content-Encoding") |
| 113 | + reader, err := protoparserutil.GetUncompressedReader(r.Body, encoding) |
| 114 | + if err != nil { |
| 115 | + logger.Errorf("cannot decode splunk request: %s", err) |
| 116 | + return true |
| 117 | + } |
| 118 | + defer protoparserutil.PutUncompressedReader(reader) |
| 119 | + |
| 120 | + lmp := cp.NewLogMessageProcessor("splunk", true) |
| 121 | + streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI) |
| 122 | + err = processStreamInternal(streamName, reader, cp.TimeFields, cp.MsgFields, lmp) |
| 123 | + lmp.MustClose() |
| 124 | + if err != nil { |
| 125 | + httpserver.Errorf(w, r, "cannot process splunk request; error: %s", err) |
| 126 | + return true |
| 127 | + } |
| 128 | + |
| 129 | + requestDuration.UpdateDuration(startTime) |
| 130 | + return true |
| 131 | +} |
| 132 | + |
| 133 | +func processStreamInternal(streamName string, r io.Reader, timeFields, msgFields []string, lmp insertutil.LogMessageProcessor) error { |
| 134 | + wcr := writeconcurrencylimiter.GetReader(r) |
| 135 | + defer writeconcurrencylimiter.PutReader(wcr) |
| 136 | + |
| 137 | + lr := insertutil.NewLineReader(streamName, wcr) |
| 138 | + |
| 139 | + n := 0 |
| 140 | + errors := 0 |
| 141 | + var lastError error |
| 142 | + for { |
| 143 | + ok, err := readLine(lr, timeFields, msgFields, lmp) |
| 144 | + wcr.DecConcurrency() |
| 145 | + if err != nil { |
| 146 | + lastError = err |
| 147 | + errors++ |
| 148 | + logger.Warnf("splunk: cannot read line #%d in /splunk request: %s", n, err) |
| 149 | + } |
| 150 | + if !ok { |
| 151 | + break |
| 152 | + } |
| 153 | + n++ |
| 154 | + } |
| 155 | + if errors > 0 { |
| 156 | + errorsTotal.Add(errors) |
| 157 | + if n == errors { |
| 158 | + // Return an error if no logs were processed and there were errors |
| 159 | + return lastError |
| 160 | + } |
| 161 | + } |
| 162 | + |
| 163 | + return nil |
| 164 | +} |
| 165 | + |
| 166 | +func readLine(lr *insertutil.LineReader, timeFields, msgFields []string, lmp insertutil.LogMessageProcessor) (bool, error) { |
| 167 | + var line []byte |
| 168 | + for len(line) == 0 { |
| 169 | + if !lr.NextLine() { |
| 170 | + err := lr.Err() |
| 171 | + return false, err |
| 172 | + } |
| 173 | + line = lr.Line |
| 174 | + } |
| 175 | + |
| 176 | + p := logstorage.GetJSONParser() |
| 177 | + defer logstorage.PutJSONParser(p) |
| 178 | + |
| 179 | + p.Init(line) |
| 180 | + for p.NextMessage() { |
| 181 | + if err := p.Error(); err != nil { |
| 182 | + return true, err |
| 183 | + } |
| 184 | + ts, err := insertutil.ExtractTimestampFromFields(timeFields, p.Fields) |
| 185 | + if err != nil { |
| 186 | + return true, err |
| 187 | + } |
| 188 | + logstorage.RenameField(p.Fields, msgFields, "_msg") |
| 189 | + lmp.AddRow(ts, p.Fields, -1) |
| 190 | + } |
| 191 | + if err := p.Error(); err != nil { |
| 192 | + return true, err |
| 193 | + } |
| 194 | + return true, nil |
| 195 | +} |
| 196 | + |
// Request-level metrics for the splunk ingestion endpoint.
var (
	requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/splunk"}`)
	errorsTotal   = metrics.NewCounter(`vl_http_errors_total{path="/insert/splunk"}`)

	requestDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/insert/splunk"}`)
)