Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions collector/collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ func (e *Exporter) scrapeDatabase(ch chan<- prometheus.Metric, errChan chan<- er
metricsToScrape := 0
for _, metric := range e.metricsToScrape {
metric := metric //https://golang.org/doc/faq#closures_and_goroutines
isScrapeMetric := e.isScrapeMetric(tick, metric, d)
isScrapeMetric := isScrapeMetric(e.logger, tick, metric, d)
metricsToScrape++
go func() {
// If the metric doesn't need to be scraped, send the cached values
Expand Down Expand Up @@ -419,7 +419,7 @@ func (e *Exporter) scrapeGenericValues(d *Database, ch chan<- prometheus.Metric,
}
// Construct Prometheus values to send back
for metric, metricHelp := range m.MetricsDesc {
value, ok := e.parseFloat(metric, metricHelp, row)
value, ok := parseFloat(e.logger, metric, metricHelp, row)
if !ok {
// Skip invalid metric values
continue
Expand Down Expand Up @@ -468,7 +468,7 @@ func (e *Exporter) scrapeGenericValues(d *Database, ch chan<- prometheus.Metric,
return nil
}
e.logger.Debug("Calling function GeneratePrometheusMetrics()")
err := e.generatePrometheusMetrics(d, genericParser, m.Request, e.getQueryTimeout(m, d))
err := e.generatePrometheusMetrics(d, genericParser, m.Request, getQueryTimeout(e.logger, m, d))
e.logger.Debug("ScrapeGenericValues() - metricsCount: " + strconv.Itoa(metricsCount))
if err != nil {
return err
Expand Down
17 changes: 9 additions & 8 deletions collector/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
package collector

import (
"log/slog"
"slices"
"strconv"
"strings"
Expand All @@ -13,7 +14,7 @@ import (
// isScrapeMetric returns true if a metric should be scraped. Metrics may not be scraped if they have a custom scrape interval,
// and the time since the last scrape is less than the custom scrape interval.
// If there is no tick time or last known tick, the metric is always scraped.
func (e *Exporter) isScrapeMetric(tick *time.Time, metric *Metric, d *Database) bool {
func isScrapeMetric(logger *slog.Logger, tick *time.Time, metric *Metric, d *Database) bool {
// If the metric isn't enabled for the database, don't scrape it.
if !metric.IsEnabledForDatabase(d) {
return false
Expand All @@ -24,7 +25,7 @@ func (e *Exporter) isScrapeMetric(tick *time.Time, metric *Metric, d *Database)
return true
}
// If the metric doesn't have a custom scrape interval, scrape it.
interval, ok := e.getScrapeInterval(metric.Context, metric.ScrapeInterval)
interval, ok := getScrapeInterval(logger, metric.Context, metric.ScrapeInterval)
if !ok {
return true
}
Expand All @@ -39,39 +40,39 @@ func (e *Exporter) isScrapeMetric(tick *time.Time, metric *Metric, d *Database)
return shouldScrape
}

func (e *Exporter) getScrapeInterval(context, scrapeInterval string) (time.Duration, bool) {
func getScrapeInterval(logger *slog.Logger, context, scrapeInterval string) (time.Duration, bool) {
if len(scrapeInterval) > 0 {
si, err := time.ParseDuration(scrapeInterval)
if err != nil {
e.logger.Error("Unable to convert scrapeinterval to duration (metric=" + context + ")")
logger.Error("Unable to convert scrapeinterval to duration (metric=" + context + ")")
return 0, false
}
return si, true
}
return 0, false
}

func (e *Exporter) getQueryTimeout(metric *Metric, d *Database) time.Duration {
func getQueryTimeout(logger *slog.Logger, metric *Metric, d *Database) time.Duration {
if len(metric.QueryTimeout) > 0 {
qt, err := time.ParseDuration(metric.QueryTimeout)
if err != nil {
e.logger.Error("Unable to convert querytimeout to duration (metric=" + metric.Context + ")")
logger.Error("Unable to convert querytimeout to duration (metric=" + metric.Context + ")")
return time.Duration(d.Config.GetQueryTimeout()) * time.Second
}
return qt
}
return time.Duration(d.Config.GetQueryTimeout()) * time.Second
}

func (e *Exporter) parseFloat(metric, metricHelp string, row map[string]string) (float64, bool) {
func parseFloat(logger *slog.Logger, metric, metricHelp string, row map[string]string) (float64, bool) {
value, ok := row[metric]
if !ok || value == "<nil>" {
// treat nil value as 0
return 0.0, ok
}
valueFloat, err := strconv.ParseFloat(strings.TrimSpace(value), 64)
if err != nil {
e.logger.Error("Unable to convert current value to float (metric=" + metric +
logger.Error("Unable to convert current value to float (metric=" + metric +
",metricHelp=" + metricHelp + ",value=<" + row[metric] + ">)")
return -1, false
}
Expand Down
34 changes: 34 additions & 0 deletions site/docs/releases/builds.md
Original file line number Diff line number Diff line change
Expand Up @@ -65,3 +65,37 @@ Creates multi-arch container builds for linux/amd64 and linux/arm64:
```
make podman-build
```

### Build on Oracle Linux

To build on Oracle Linux, follow these steps.

#### 1. Install build tools

```bash
dnf install -y git golang make
```

#### 2. Clone the exporter git repository

```bash
git clone git@github.com:oracle/oracle-db-appdev-monitoring.git
```

#### 3. Build the binary

```bash
cd oracle-db-appdev-monitoring
make go-build
```

You will now have a tar.gz and binary file in the `dist/` directory, named according to your target platform.

For example, for the darwin-arm64 platform:

```
dist/
├── oracledb_exporter-2.1.0.darwin-arm64
│ └── oracledb_exporter
└── oracledb_exporter-2.1.0.darwin-arm64.tar.gz
```