    Backing up influxDB - error message

    • Beowolf @Thomas Braun last edited by

      @thomas-braun

      ### Welcome to the InfluxDB configuration file.
      
      # The values in this file override the default values used by the system if
      # a config option is not specified. The commented out lines are the configuration
      # field and the default value used. Uncommenting a line and changing the value
      # will change the value used at runtime when the process is restarted.
      
      # Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
      # The data includes a random ID, os, arch, version, the number of series and other
      # usage data. No data from user databases is ever transmitted.
      # Change this option to true to enable reporting.
      reporting-enabled = false
      
      # Bind address to use for the RPC service for backup and restore.
      bind-address = "192.168.49.31:8088"
      
      ###
      ### [meta]
      ###
      ### Controls the parameters for the Raft consensus group that stores metadata
      ### about the InfluxDB cluster.
      ###
      
      [meta]
        # Where the metadata/raft database is stored
        dir = "/var/lib/influxdb/meta"
      
        # Automatically create a default retention policy when creating a database.
        # retention-autocreate = true
      
        # If log messages are printed for the meta service
        # logging-enabled = true
      
      ###
      ### [data]
      ###
      ### Controls where the actual shard data for InfluxDB lives and how it is
      ### flushed from the WAL. "dir" may need to be changed to a suitable place
      ### for your system, but the WAL settings are an advanced configuration. The
      ### defaults should work for most systems.
      ###
      
      [data]
        # The directory where the TSM storage engine stores TSM files.
        dir = "/var/lib/influxdb/data"
      
        # The directory where the TSM storage engine stores WAL files.
        wal-dir = "/var/lib/influxdb/wal"
      
        # The amount of time that a write will wait before fsyncing.  A duration
        # greater than 0 can be used to batch up multiple fsync calls.  This is useful for slower
        # disks or when WAL write contention is seen.  A value of 0s fsyncs every write to the WAL.
        # Values in the range of 0-100ms are recommended for non-SSD disks.
        # wal-fsync-delay = "0s"
      
      
        # The type of shard index to use for new shards.  The default is an in-memory index that is
        # recreated at startup.  A value of "tsi1" will use a disk based index that supports higher
        # cardinality datasets.
        # index-version = "inmem"
      
        # Trace logging provides more verbose output around the tsm engine. Turning
        # this on can provide more useful output for debugging tsm engine issues.
        # trace-logging-enabled = false
      
        # Whether queries should be logged before execution. Very useful for troubleshooting, but will
        # log any sensitive data contained within a query.
        # query-log-enabled = true
      
        # Settings for the TSM engine
      
        # CacheMaxMemorySize is the maximum size a shard's cache can
        # reach before it starts rejecting writes.
        # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
        # Values without a size suffix are in bytes.
        # cache-max-memory-size = "1g"
      
        # CacheSnapshotMemorySize is the size at which the engine will
        # snapshot the cache and write it to a TSM file, freeing up memory
        # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
        # Values without a size suffix are in bytes.
        # cache-snapshot-memory-size = "25m"
      
        # CacheSnapshotWriteColdDuration is the length of time at
        # which the engine will snapshot the cache and write it to
        # a new TSM file if the shard hasn't received writes or deletes
        # cache-snapshot-write-cold-duration = "10m"
      
        # CompactFullWriteColdDuration is the duration at which the engine
        # will compact all TSM files in a shard if it hasn't received a
        # write or delete
        # compact-full-write-cold-duration = "4h"
      
        # The maximum number of concurrent full and level compactions that can run at one time.  A
        # value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime.  Any number greater
        # than 0 limits compactions to that value.  This setting does not apply
        # to cache snapshotting.
        # max-concurrent-compactions = 0
      
        # The threshold, in bytes, when an index write-ahead log file will compact
        # into an index file. Lower sizes will cause log files to be compacted more
        # quickly and result in lower heap usage at the expense of write throughput.
        # Higher sizes will be compacted less frequently, store more series in-memory,
        # and provide higher write throughput.
        # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
        # Values without a size suffix are in bytes.
        # max-index-log-file-size = "1m"
      
        # The maximum series allowed per database before writes are dropped.  This limit can prevent
        # high cardinality issues at the database level.  This limit can be disabled by setting it to
        # 0.
        # max-series-per-database = 1000000
      
        # The maximum number of tag values per tag that are allowed before writes are dropped.  This limit
        # can prevent high cardinality tag values from being written to a measurement.  This limit can be
        # disabled by setting it to 0.
        # max-values-per-tag = 100000
      
        # If true, then the mmap advise value MADV_WILLNEED will be provided to the kernel with respect to
        # TSM files. This setting has been found to be problematic on some kernels, and defaults to off.
        # It might help users who have slow disks in some cases.
        # tsm-use-madv-willneed = false
      
      ###
      ### [coordinator]
      ###
      ### Controls the clustering service configuration.
      ###
      
      [coordinator]
        # The default time a write request will wait until a "timeout" error is returned to the caller.
        # write-timeout = "10s"
      
        # The maximum number of concurrent queries allowed to be executing at one time.  If a query is
        # executed and exceeds this limit, an error is returned to the caller.  This limit can be disabled
        # by setting it to 0.
        # max-concurrent-queries = 0
      
        # The maximum time a query is allowed to execute before being killed by the system.  This limit
        # can help prevent runaway queries.  Setting the value to 0 disables the limit.
        # query-timeout = "0s"
      
        # The time threshold when a query will be logged as a slow query.  This limit can be set to help
        # discover slow or resource intensive queries.  Setting the value to 0 disables the slow query logging.
        # log-queries-after = "0s"
      
        # The maximum number of points a SELECT can process.  A value of 0 will make
        # the maximum point count unlimited.  This will only be checked every second so queries will not
        # be aborted immediately when hitting the limit.
        # max-select-point = 0
      
        # The maximum number of series a SELECT can run.  A value of 0 will make the maximum series
        # count unlimited.
        # max-select-series = 0
      
        # The maximum number of group-by time buckets a SELECT can create.  A value of zero will make the maximum
        # number of buckets unlimited.
        # max-select-buckets = 0
      
      ###
      ### [retention]
      ###
      ### Controls the enforcement of retention policies for evicting old data.
      ###
      
      [retention]
        # Determines whether retention policy enforcement is enabled.
        # enabled = true
      
        # The interval of time when retention policy enforcement checks run.
        # check-interval = "30m"
      
      ###
      ### [shard-precreation]
      ###
      ### Controls the precreation of shards, so they are available before data arrives.
      ### Only shards that, after creation, will have both a start- and end-time in the
      ### future, will ever be created. Shards are never precreated that would be wholly
      ### or partially in the past.
      
      [shard-precreation]
        # Determines whether shard pre-creation service is enabled.
        # enabled = true
      
        # The interval of time when the check to pre-create new shards runs.
        # check-interval = "10m"
      
        # The default period ahead of the endtime of a shard group that its successor
        # group is created.
        # advance-period = "30m"
      
      ###
      ### [monitor]
      ###
      ### Controls the system self-monitoring, statistics and diagnostics.
      ###
      ### The internal database for monitoring data is created automatically if
      ### it does not already exist. The target retention within this database
      ### is called 'monitor' and is also created with a retention period of 7 days
      ### and a replication factor of 1, if it does not exist. In all cases
      ### this retention policy is configured as the default for the database.
      
      [monitor]
        # Whether to record statistics internally.
        # store-enabled = true
      
        # The destination database for recorded statistics
        # store-database = "_internal"
      
        # The interval at which to record statistics
        # store-interval = "10s"
      
      ###
      ### [http]
      ###
      ### Controls how the HTTP endpoints are configured. These are the primary
      ### mechanism for getting data into and out of InfluxDB.
      ###
      
      [http]
        # Determines whether HTTP endpoint is enabled.
      enabled = true
      
        # The bind address used by the HTTP service.
      bind-address = ":8086"
      
        # Determines whether user authentication is enabled over HTTP/HTTPS.
      auth-enabled = true
      
        # The default realm sent back when issuing a basic auth challenge.
        # realm = "InfluxDB"
      
        # Determines whether HTTP request logging is enabled.
      log-enabled = true
      
        # Determines whether the HTTP write request logs should be suppressed when the log is enabled.
        # suppress-write-log = false
      
        # When HTTP request logging is enabled, this option specifies the path where
        # log entries should be written. If unspecified, the default is to write to stderr, which
        # intermingles HTTP logs with internal InfluxDB logging.
        #
        # If influxd is unable to access the specified path, it will log an error and fall back to writing
        # the request log to stderr.
        # access-log-path = ""
      
        # Determines whether detailed write logging is enabled.
      write-tracing = false
      
        # Determines whether the pprof endpoint is enabled.  This endpoint is used for
        # troubleshooting and monitoring.
      pprof-enabled = false
      
        # Enables a pprof endpoint that binds to localhost:6060 immediately on startup.
        # This is only needed to debug startup issues.
        # debug-pprof-enabled = false
      
        # Determines whether HTTPS is enabled.
      https-enabled = false
      
        # The SSL certificate to use when HTTPS is enabled.
      https-certificate = "/etc/ssl/influxdb.pem"
      
        # Use a separate private key location.
        # https-private-key = ""
      
        # The JWT auth shared secret to validate requests using JSON web tokens.
        # shared-secret = ""
      
        # The default chunk size for result sets that should be chunked.
        # max-row-limit = 0
      
        # The maximum number of HTTP connections that may be open at once.  New connections that
        # would exceed this limit are dropped.  Setting this value to 0 disables the limit.
        # max-connection-limit = 0
      
        # Enable http service over unix domain socket
        # unix-socket-enabled = false
      
        # The path of the unix domain socket.
        # bind-socket = "/var/run/influxdb.sock"
      
        # The maximum size of a client request body, in bytes. Setting this value to 0 disables the limit.
        # max-body-size = 25000000
      
        # The maximum number of writes processed concurrently.
        # Setting this to 0 disables the limit.
        # max-concurrent-write-limit = 0
      
        # The maximum number of writes queued for processing.
        # Setting this to 0 disables the limit.
        # max-enqueued-write-limit = 0
      
        # The maximum duration for a write to wait in the queue to be processed.
        # Setting this to 0 or setting max-concurrent-write-limit to 0 disables the limit.
        # enqueued-write-timeout = 0
      
      
      ###
      ### [ifql]
      ###
      ### Configures the ifql RPC API.
      ###
      
      [ifql]
        # Determines whether the RPC service is enabled.
        # enabled = true
      
        # Determines whether additional logging is enabled.
        # log-enabled = true
      
        # The bind address used by the ifql RPC service.
        # bind-address = ":8082"
      
      
      ###
      ### [logging]
      ###
      ### Controls how the logger emits logs to the output.
      ###
      
      [logging]
        # Determines which log encoder to use for logs. Available options
        # are auto, logfmt, and json. auto will use a more user-friendly
        # output format if the output terminal is a TTY, but the format is not as
        # easily machine-readable. When the output is a non-TTY, auto will use
        # logfmt.
        # format = "auto"
      
        # Determines which level of logs will be emitted. The available levels
        # are error, warn, info, and debug. Logs that are equal to or above the
        # specified level will be emitted.
        # level = "info"
      
        # Suppresses the logo output that is printed when the program is started.
        # The logo is always suppressed if STDOUT is not a TTY.
        # suppress-logo = false
      
      ###
      ### [subscriber]
      ###
      ### Controls the subscriptions, which can be used to fork a copy of all data
      ### received by the InfluxDB host.
      ###
      
      [subscriber]
        # Determines whether the subscriber service is enabled.
        # enabled = true
      
        # The default timeout for HTTP writes to subscribers.
        # http-timeout = "30s"
      
        # Allows insecure HTTPS connections to subscribers.  This is useful when testing with self-
        # signed certificates.
        # insecure-skip-verify = false
      
        # The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used
        # ca-certs = ""
      
        # The number of writer goroutines processing the write channel.
        # write-concurrency = 40
      
        # The number of in-flight writes buffered in the write channel.
        # write-buffer-size = 1000
      
      
      ###
      ### [[graphite]]
      ###
      ### Controls one or many listeners for Graphite data.
      ###
      
      [[graphite]]
        # Determines whether the graphite endpoint is enabled.
        # enabled = false
        # database = "graphite"
        # retention-policy = ""
        # bind-address = ":2003"
        # protocol = "tcp"
        # consistency-level = "one"
      
        # These next lines control how batching works. You should have this enabled
        # otherwise you could get dropped metrics or poor performance. Batching
        # will buffer points in memory if you have many coming in.
      
        # Flush if this many points get buffered
        # batch-size = 5000
      
        # number of batches that may be pending in memory
        # batch-pending = 10
      
        # Flush at least this often even if we haven't hit buffer limit
        # batch-timeout = "1s"
      
        # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
        # udp-read-buffer = 0
      
        ### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
        # separator = "."
      
        ### Default tags that will be added to all metrics.  These can be overridden at the template level
        ### or by tags extracted from metric
        # tags = ["region=us-east", "zone=1c"]
      
        ### Each template line requires a template pattern.  It can have an optional
        ### filter before the template and separated by spaces.  It can also have optional extra
        ### tags following the template.  Multiple tags should be separated by commas and no spaces
        ### similar to the line protocol format.  There can be only one default template.
        # templates = [
        #   "*.app env.service.resource.measurement",
        #   # Default template
        #   "server.*",
        # ]
      
      ###
      ### [collectd]
      ###
      ### Controls one or many listeners for collectd data.
      ###
      
      [[collectd]]
        # enabled = false
        # bind-address = ":25826"
        # database = "collectd"
        # retention-policy = ""
        #
        # The collectd service supports either scanning a directory for multiple types
        # db files, or specifying a single db file.
        # typesdb = "/usr/local/share/collectd"
        #
        # security-level = "none"
        # auth-file = "/etc/collectd/auth_file"
      
        # These next lines control how batching works. You should have this enabled
        # otherwise you could get dropped metrics or poor performance. Batching
        # will buffer points in memory if you have many coming in.
      
        # Flush if this many points get buffered
        # batch-size = 5000
      
        # Number of batches that may be pending in memory
        # batch-pending = 10
      
        # Flush at least this often even if we haven't hit buffer limit
        # batch-timeout = "10s"
      
        # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
        # read-buffer = 0
      
        # Multi-value plugins can be handled two ways.
        # "split" will parse and store the multi-value plugin data into separate measurements
        # "join" will parse and store the multi-value plugin as a single multi-value measurement.
        # "split" is the default behavior for backward compatability with previous versions of influxdb.
        # parse-multivalue-plugin = "split"
      ###
      ### [opentsdb]
      ###
      ### Controls one or many listeners for OpenTSDB data.
      ###
      
      [[opentsdb]]
        # enabled = false
        # bind-address = ":4242"
        # database = "opentsdb"
        # retention-policy = ""
        # consistency-level = "one"
        # tls-enabled = false
        # certificate= "/etc/ssl/influxdb.pem"
      
        # Log an error for every malformed point.
        # log-point-errors = true
      
        # These next lines control how batching works. You should have this enabled
        # otherwise you could get dropped metrics or poor performance. Only point
        # metrics received over the telnet protocol undergo batching.
      
        # Flush if this many points get buffered
        # batch-size = 1000
      
        # Number of batches that may be pending in memory
        # batch-pending = 5
      
        # Flush at least this often even if we haven't hit buffer limit
        # batch-timeout = "1s"
      
      ###
      ### [[udp]]
      ###
      ### Controls the listeners for InfluxDB line protocol data via UDP.
      ###
      
      [[udp]]
        # enabled = false
        # bind-address = ":8089"
        # database = "udp"
        # retention-policy = ""
      
        # InfluxDB precision for timestamps on received points ("" or "n", "u", "ms", "s", "m", "h")
        # precision = ""
      
        # These next lines control how batching works. You should have this enabled
        # otherwise you could get dropped metrics or poor performance. Batching
        # will buffer points in memory if you have many coming in.
      
        # Flush if this many points get buffered
        # batch-size = 5000
      
        # Number of batches that may be pending in memory
        # batch-pending = 10
      
        # Will flush at least this often even if we haven't hit buffer limit
        # batch-timeout = "1s"
      
        # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
        # read-buffer = 0
      
      ###
      ### [continuous_queries]
      ###
      ### Controls how continuous queries are run within InfluxDB.
      ###
      
      [continuous_queries]
        # Determines whether the continuous query service is enabled.
        # enabled = true
      
        # Controls whether queries are logged when executed by the CQ service.
        # log-enabled = true
      
        # Controls whether queries are logged to the self-monitoring data store.
        # query-stats-enabled = false
      
        # interval for how often continuous queries will be checked if they need to run
        # run-interval = "1s"
      
      ###
      ### [tls]
      ###
      ### Global configuration settings for TLS in InfluxDB.
      ###
      
      [tls]
        # Determines the available set of cipher suites. See https://golang.org/pkg/crypto/tls/#pkg-constants
        # for a list of available ciphers, which depends on the version of Go (use the query
        # SHOW DIAGNOSTICS to see the version of Go used to build InfluxDB). If not specified, uses
        # the default settings from Go's crypto/tls package.
        # ciphers = [
        #   "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
        #   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
        # ]
      
        # Minimum version of the tls protocol that will be negotiated. If not specified, uses the
        # default settings from Go's crypto/tls package.
        # min-version = "tls1.2"
      
        # Maximum version of the tls protocol that will be negotiated. If not specified, uses the
        # default settings from Go's crypto/tls package.
        # max-version = "tls1.2"
      
      
      • Thomas Braun Most Active @Beowolf last edited by

        @beowolf

        If the InfluxDB is to be backed up from a remote server, the remote access for the RPC service has to be adjusted in the influxdb.conf on that remote server:

        bind-address = "<InfluxDB-IP>:8088"

        oder

        bind-address = "0.0.0.0:8088"

        In your config this is not set correctly.
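
        For illustration, an annotated sketch of that global bind-address line near the top of the influxdb.conf on the InfluxDB host (the value shown is only an example):

          # Bind address for the RPC service used by "influxd backup" / "influxd restore".
          #   "127.0.0.1:8088"     -> backups only work locally on the InfluxDB host
          #   "<InfluxDB-IP>:8088" -> reachable over the LAN via that interface
          #   "0.0.0.0:8088"       -> listens on all interfaces
          bind-address = "0.0.0.0:8088"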

        • Beowolf @Beowolf last edited by

          Does this line

          bind-address = "192.168.49.31:8088"
          

          need to contain the IP address of the InfluxDB machine?

          • Thomas Braun Most Active @Beowolf last edited by

            @beowolf
            192.168.49.43
            or
            0.0.0.0

            • crunchip Forum Testing Most Active last edited by

              And, of course, restart the influxdb service after the change.
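
              On a Debian/Raspbian install with systemd (an assumption here), that restart would typically be:

                sudo systemctl restart influxdb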

              • Beowolf @crunchip last edited by

                @crunchip
                I'm still getting this message.

                Started iobroker ...
                [DEBUG] [mount] - Wake on LAN wait 25 Seconds for NAS!
                [DEBUG] [mount] - Wake on LAN MAC-Address: 38:EA:A7:A0:22:2B
                [DEBUG] [mount] - mount activ... umount is started before mount!!
                [DEBUG] [mount] - umount successfully completed
                [DEBUG] [mount] - noserverino option: false
                [DEBUG] [mount] - cifs-mount command: "sudo mount -t cifs -o username=Administrator,password=****,rw,file_mode=0777,dir_mode=0777,vers=3.1.1 //192.168.49.31/Software/Backups/ioBroker /opt/iobroker/backups"
                [DEBUG] [mount] - first mount attempt with smb option failed. try next mount attempt without smb option ...
                [DEBUG] [mount] - cifs-mount command: "sudo mount -t cifs -o username=Administrator,password=****,rw,file_mode=0777,dir_mode=0777 //192.168.49.31/Software/Backups/ioBroker /opt/iobroker/backups"
                [DEBUG] [mount] - mount successfully completed
                [DEBUG] [mount] - done
                [DEBUG] [iobroker] - host.raspberrypi 6776 states saved
                
                [DEBUG] [iobroker] - host.raspberrypi 6907 objects saved
                
                [DEBUG] [iobroker] - Backup created: /opt/iobroker/backups/iobroker_2022_06_01-10_18_56_backupiobroker.tar.gz
                
                [DEBUG] [iobroker] - done
                [DEBUG] [influxDB] - InfluxDB-Backup started ...
                [DEBUG] [influxDB] - Start InfluxDB Backup ...
                [DEBUG] [influxDB] - InfluxDB Backup tmp directory created 
                [DEBUG] [influxDB] - Try deleting the InfluxDB tmp directory
                [DEBUG] [influxDB] - InfluxDB tmp directory was successfully deleted
                [DEBUG] [influxDB] - 2022/06/01 10:19:09 backing up metastore to /opt/iobroker/backups/influxDB_2022_06_01-10_19_09_backupiobroker/meta.00
                
                [ERROR] [influxDB] - Error: Command failed: influxd backup -portable -database InfluxDB -host 192.168.49.43:8088 "/opt/iobroker/backups/influxDB_2022_06_01-10_19_09_backupiobroker"
                2022/06/01 10:19:09 Download shard 0 failed dial tcp 192.168.49.43:8088: connect: connection refused.  Waiting 2s and retrying (0)...
                2022/06/01 10:19:11 Download shard 0 failed dial tcp 192.168.49.43:8088: connect: connection refused.  Waiting 2s and retrying (1)...
                2022/06/01 10:19:13 Download shard 0 failed dial tcp 192.168.49.43:8088: connect: connection refused.  Waiting 2s and retrying (2)...
                2022/06/01 10:19:15 Download shard 0 failed dial tcp 192.168.49.43:8088: connect: connection refused.  Waiting 2s and retrying (3)...
                2022/06/01 10:19:17 Download shard 0 failed dial tcp 192.168.49.43:8088: connect: connection refused.  Waiting 2s and retrying (4)...
                2022/06/01 10:19:19 Download shard 0 failed dial tcp 192.168.49.43:8088: connect: connection refused.  Waiting 2s and retrying (5)...
                2022/06/01 10:19:21 Download shard 0 failed dial tcp 192.168.49.43:8088: connect: connection refused.  Waiting 3.01s and retrying (6)...
                2022/06/01 10:19:24 Download shard 0 failed dial tcp 192.168.49.43:8088: connect: connection refused.  Waiting 11.441s and retrying (7)...
                2022/06/01 10:19:36 Download shard 0 failed dial tcp 192.168.49.43:8088: connect: connection refused.  Waiting 43.477s and retrying (8)...
                2022/06/01 10:20:19 Download shard 0 failed dial tcp 192.168.49.43:8088: connect: connection refused.  Waiting 2m45.216s and retrying (9)...
                backup: dial tcp 192.168.49.43:8088: connect: connection refused
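
                The repeated "connection refused" on 192.168.49.43:8088 means nothing is listening on that address yet. A quick check from the ioBroker Pi, as a sketch assuming netcat (nc) is installed:

                  # Is the backup/restore port on the InfluxDB host reachable at all?
                  nc -vz 192.168.49.43 8088
                  # "Connection refused" here means influxd is not listening on that address,
                  # e.g. bind-address was changed in the wrong file or the service was not restarted.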
                
                
                • crunchip Forum Testing Most Active last edited by

                  @beowolf But you do have the influxdb service installed on your ioBroker setup, and the service is running?
                  And did you restart the influxdb service on your other Pi (influxDB together with Grafana is on "Pi2-2", 192.168.49.43) after the change?
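
                  A sketch of how that can be verified on the InfluxDB/Grafana Pi (assuming systemd and the ss tool from iproute2):

                    systemctl is-active influxdb     # should print "active"
                    sudo ss -tlnp | grep ':8088'     # should show influxd bound to 192.168.49.43:8088 or 0.0.0.0:8088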

                  • Beowolf @crunchip last edited by

                    @crunchip

                    Yep. Just did it again.

                    • DJMarc75 @Beowolf last edited by DJMarc75

                      @beowolf

                      In my influxdb.conf I additionally changed the following entry:

                      # Bind address to use for the RPC service for backup and restore.
                      bind-address = "192.168.178.21:8088"
                      

                      adjust the IP accordingly, of course

                      edit: line 15

                      Settings in Backitup:
                      [Screenshot: Screenshot 2022-06-01 112016.png]

                      • Beowolf @DJMarc75 last edited by

                        @djmarc75
                        I have it set up the same way.

                        • DJMarc75 @Beowolf last edited by

                          @beowolf
                          The bind-address in line 15 of the influxdb.conf as well?

                          • Beowolf @DJMarc75 last edited by

                            @djmarc75
                            Yep

                            • DJMarc75 @Beowolf last edited by DJMarc75

                              @beowolf
                              I have basically the same setup as you, but in my influxdb.conf I have nothing enabled except the bind-address in line 15 and, under [http], the bind-address, which I enabled and adjusted (see the sketch after this post).
                              Everything else that you have enabled is not enabled on mine (# in front), and it works flawlessly.

                              EDIT: Sorry for asking: on which machine did you edit the influxdb.conf?
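
                              A minimal sketch of the two non-default settings described above (IP only as an example; everything else stays at its commented-out default):

                                # Top of influxdb.conf (around line 15): RPC bind address for backup/restore
                                bind-address = "192.168.178.21:8088"

                                [http]
                                  # HTTP API used by ioBroker and Grafana
                                  bind-address = ":8086"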

                              • Beowolf @DJMarc75 last edited by

                                @djmarc75
                                ioBroker pi

                                • DJMarc75 @Beowolf last edited by

                                  @beowolf Well, that can't work. You have to edit the influxdb.conf on the machine where influx is running.
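
                                    A sketch of what that looks like on the InfluxDB host itself (assuming the standard config path and systemd):

                                      # On the InfluxDB/Grafana Pi (192.168.49.43), not on the ioBroker Pi:
                                      sudo nano /etc/influxdb/influxdb.conf   # set bind-address = "192.168.49.43:8088" or "0.0.0.0:8088" near the top
                                      sudo systemctl restart influxdb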

                                  • Beowolf @DJMarc75 last edited by

                                    @djmarc75

                                    As soon as you do it right, it works. 👍 👍 👍 👍 👍 👍 👍 👍 👍 👍

                                    Started iobroker ...
                                    [DEBUG] [mount] - Wake on LAN wait 25 Seconds for NAS!
                                    [DEBUG] [mount] - Wake on LAN MAC-Address: 38:EA:A7:A0:22:2B
                                    [DEBUG] [mount] - noserverino option: false
                                    [DEBUG] [mount] - cifs-mount command: "sudo mount -t cifs -o username=Administrator,password=****,rw,file_mode=0777,dir_mode=0777,vers=3.1.1 //192.168.49.31/Software/Backups/ioBroker /opt/iobroker/backups"
                                    [DEBUG] [mount] - first mount attempt with smb option failed. try next mount attempt without smb option ...
                                    [DEBUG] [mount] - cifs-mount command: "sudo mount -t cifs -o username=Administrator,password=****,rw,file_mode=0777,dir_mode=0777 //192.168.49.31/Software/Backups/ioBroker /opt/iobroker/backups"
                                    [DEBUG] [mount] - mount successfully completed
                                    [DEBUG] [mount] - done
                                    [DEBUG] [iobroker] - host.raspberrypi 6776 states saved
                                    
                                    [DEBUG] [iobroker] - host.raspberrypi 6907 objects saved
                                    
                                    [DEBUG] [iobroker] - Backup created: /opt/iobroker/backups/iobroker_2022_06_01-12_19_31_backupiobroker.tar.gz
                                    
                                    [DEBUG] [iobroker] - done
                                    [DEBUG] [influxDB] - InfluxDB-Backup started ...
                                    [DEBUG] [influxDB] - Start InfluxDB Backup ...
                                    [DEBUG] [influxDB] - InfluxDB Backup tmp directory created 
                                    [DEBUG] [influxDB] - Backup created: /opt/iobroker/backups/influxDB_2022_06_01-12_19_43_backupiobroker.tar.gz
                                    [DEBUG] [influxDB] - Try deleting the InfluxDB tmp directory
                                    [DEBUG] [influxDB] - InfluxDB tmp directory was successfully deleted
                                    [DEBUG] [influxDB] - InfluxDB-Backup for is finish
                                    [DEBUG] [influxDB] - done
                                    [DEBUG] [grafana] - Start Grafana Backup ...
                                    [DEBUG] [grafana] - Created grafana_tmp directory: "/opt/iobroker/backups/grafana_tmp"
                                    [DEBUG] [grafana] - Created dashboard directory
                                    [DEBUG] [grafana] - Created dashboards_manually_restore directory
                                    [DEBUG] [grafana] - Created datasource directory
                                    [DEBUG] [grafana] - start Grafana request ...
                                    [DEBUG] [grafana] - Grafana is available ... Status: 200
                                    [DEBUG] [grafana] - found Dashboard: olheizung
                                    [DEBUG] [grafana] - found Dashboard: pflanzstreifen-garage
                                    [DEBUG] [grafana] - found Dashboard: brutkasten
                                    [DEBUG] [grafana] - found Dashboard: rosenpflanzkasten
                                    [DEBUG] [grafana] - found Dashboard: pflanzstreifen-eingang
                                    [DEBUG] [grafana] - found Dashboard: umweltdaten-stall
                                    [DEBUG] [grafana] - found Dashboard: obergeschoss
                                    [DEBUG] [grafana] - found Dashboard: dachgeschoss
                                    [DEBUG] [grafana] - found Dashboard: wohnhaus
                                    [DEBUG] [grafana] - found Dashboard: wetterdaten-stall
                                    [DEBUG] [grafana] - found Dashboard: umweltdaten
                                    [DEBUG] [grafana] - found Dashboard: pflanzbeet-pflox
                                    [DEBUG] [grafana] - found Dashboard: wasserspiel
                                    [DEBUG] [grafana] - found Dashboard: phase-1
                                    [DEBUG] [grafana] - found Dashboard: erdgeschoss
                                    [DEBUG] [grafana] - found Dashboard: pflanzkasten-pflox
                                    [DEBUG] [grafana] - start Grafana backup compress ...
                                    [DEBUG] [grafana] - Backup created: /opt/iobroker/backups/grafana_2022_06_01-12_19_49_backupiobroker.tar.gz
                                    [DEBUG] [grafana] - Try deleting the Grafana tmp directory: "/opt/iobroker/backups/grafana_tmp"
                                    [DEBUG] [grafana] - Grafana tmp directory "/opt/iobroker/backups/grafana_tmp" successfully deleted
                                    [DEBUG] [grafana] - done
                                    [DEBUG] [cifs] - done
                                    [DEBUG] [clean] - done
                                    [DEBUG] [pushover] - done
                                    [DEBUG] [historyHTML] - new history html values created
                                    [DEBUG] [historyHTML] - done
                                    [DEBUG] [historyJSON] - new history json values created
                                    [DEBUG] [historyJSON] - done
                                    [DEBUG] [umount] - mount activ, umount is started ...
                                    [DEBUG] [umount] - umount successfully completed
                                    [DEBUG] [umount] - done
                                    [EXIT] 0
                                    
                                      • DJMarc75 @Beowolf last edited by

                                      @beowolf said in Backing up influxDB - error message:

                                      As soon as you do it right

                                      Happens to me a lot too 🙄

                                        • Beowolf @DJMarc75 last edited by

                                        @djmarc75
                                        Many thanks for the help.

                                          • DJMarc75 @Beowolf last edited by

                                          @beowolf said in Backing up influxDB - error message:

                                          Many thanks for the help.

                                          👍
