Tags: centos, openshift, activemq-classic, redhat, openshift-origin

How to persist console data in an ActiveMQ cluster [OpenShift]


I have an ActiveMQ cluster deployed in master/slave mode on OpenShift, but I have a problem. I can persist the data of topics and queues without any problem: when a pod goes down, I do not lose the messages. But I do lose the data shown in the ActiveMQ web console. I have attached my activemq.xml below. Has anyone had the same problem?

Thanks in advance.

```xml
<beans
    xmlns="http://www.springframework.org/schema/beans"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
    http://activemq.apache.org/schema/core http://activemq.apache.org/schema/core/activemq-core.xsd">

    <!-- Allows us to use system properties as variables in this configuration file -->
    <bean class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
        <property name="locations">
            <value>file:${activemq.conf}/credentials.properties</value>
        </property>
    </bean>

<!--
    The <broker> element is used to configure the ActiveMQ broker.
-->
<broker xmlns="http://activemq.apache.org/schema/core" brokerName="localhost" useJmx="true" dataDirectory="${activemq.data}" persistent="true" deleteAllMessagesOnStartup="false" useShutdownHook="false" schedulerSupport="true" >

    <!--
        For better performance, use the VM cursor and a small memory limit.
        For more information, see:

        http://activemq.apache.org/message-cursors.html

        Also, if your producer is "hanging", it's probably due to producer flow control.
        For more information, see:
        http://activemq.apache.org/producer-flow-control.html
    -->
    <destinationInterceptors>
       <virtualDestinationInterceptor>
           <virtualDestinations>
               <virtualTopic name="VirtualTopic.>" prefix="Consumer.*." selectorAware="false"/>
           </virtualDestinations>
       </virtualDestinationInterceptor>
    </destinationInterceptors>

    <destinationPolicy>
        <policyMap>
          <policyEntries>
            <policyEntry topic=">" producerFlowControl="true">
                <!-- The constantPendingMessageLimitStrategy is used to prevent
                     slow topic consumers from blocking producers and affecting
                     other consumers by limiting the number of messages that are
                     retained.
                     For more information, see:

                     http://activemq.apache.org/slow-consumer-handling.html

                -->
              <pendingMessageLimitStrategy>
                <constantPendingMessageLimitStrategy limit="1000"/>
              </pendingMessageLimitStrategy>
            </policyEntry>
            <policyEntry queue=">" producerFlowControl="true" memoryLimit="1mb">
              <!-- Use VM cursor for better latency
                   For more information, see:

                   http://activemq.apache.org/message-cursors.html

              <pendingQueuePolicy>
                <vmQueueCursor/>
              </pendingQueuePolicy>
              -->
            </policyEntry>
          </policyEntries>
        </policyMap>
    </destinationPolicy>


    <!--
        The managementContext is used to configure how ActiveMQ is exposed in
        JMX. By default, ActiveMQ uses the MBean server that is started by
        the JVM. For more information, see:

        http://activemq.apache.org/jmx.html
    -->
    <managementContext>
        <managementContext createConnector="false"/>
    </managementContext>

    <!--
        Configure message persistence for the broker. The default persistence
        mechanism is the KahaDB store (identified by the kahaDB tag).

        For more information, see:

        http://activemq.apache.org/persistence.html
    -->
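    <!--
        In a shared-file-system master/slave pair this directory must live on
        storage that both broker pods can reach (for example a shared
        persistent volume in OpenShift): the slave blocks on the KahaDB lock
        file and only takes over once the master releases it.
    -->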
    <persistenceAdapter>
        <kahaDB directory="${activemq.data}/kahadb"/>
    </persistenceAdapter>


    <!--
        The systemUsage controls the maximum amount of space the broker will
        use before slowing down producers. For more information, see:
        http://activemq.apache.org/producer-flow-control.html
        If using ActiveMQ embedded - the following limits could safely be used:

    <systemUsage>
        <systemUsage>
            <memoryUsage>
                <memoryUsage limit="20 mb"/>
            </memoryUsage>
            <storeUsage>
                <storeUsage limit="1 gb"/>
            </storeUsage>
            <tempUsage>
                <tempUsage limit="100 mb"/>
            </tempUsage>
        </systemUsage>
    </systemUsage>
    -->
    <systemUsage>
        <systemUsage>
            <memoryUsage>
                <memoryUsage limit="64 mb"/>
            </memoryUsage>
            <storeUsage>
                <storeUsage limit="100 gb"/>
            </storeUsage>
            <tempUsage>
                <tempUsage limit="50 gb"/>
            </tempUsage>
        </systemUsage>
    </systemUsage>
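
    <!--
        Note: the storeUsage and tempUsage limits above only take effect if
        the volume backing ${activemq.data} actually has that much space; if
        the persistent volume is smaller, the broker logs a warning and recent
        broker versions reset the limit to the usable disk space.
    -->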

    <!--
        The transport connectors expose ActiveMQ over a given protocol to
        clients and other brokers. For more information, see:

        http://activemq.apache.org/configuring-transports.html

   -->
    <transportConnectors>
        <!-- DOS protection, limit concurrent connections to 1000 and frame size to 100MB -->
        <transportConnector name="openwire" uri="tcp://0.0.0.0:61616?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
        <transportConnector name="amqp" uri="amqp://0.0.0.0:5672?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
    </transportConnectors>

    <!-- destroy the spring context on shutdown to stop jetty -->
    <shutdownHooks>
        <bean xmlns="http://www.springframework.org/schema/beans" class="org.apache.activemq.hooks.SpringContextHook" />
    </shutdownHooks>

</broker>

<!--
    Enable web consoles, REST and Ajax APIs and demos

    Take a look at ${ACTIVEMQ_HOME}/conf/jetty.xml for more details
-->
<import resource="jetty.xml"/>

</beans>
```


Solution

  • The console does not offer any sort of data persistence; its statistics are almost all reset to zero on restart. The exceptions are metrics derived from the persistent store, such as queue depth and durable topic subscription counts (see the sketch below).
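
One way to verify this is to read the destination MBean over JMX and compare an attribute that is recomputed from the KahaDB store with one that is only kept in memory. The sketch below is a minimal example, not taken from the question: the JMX URL, port 1099, and the queue name MY.QUEUE are all assumptions, and since the attached config sets createConnector="false", you would first have to enable a connector (or run the check inside the broker's own JVM against the platform MBean server).

```java
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class QueueDepthCheck {
    public static void main(String[] args) throws Exception {
        // Assumed connector address -- adjust host/port for your pod.
        JMXServiceURL url = new JMXServiceURL(
                "service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi");
        try (JMXConnector jmx = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection conn = jmx.getMBeanServerConnection();
            // brokerName matches the <broker> element above;
            // MY.QUEUE is a placeholder destination name.
            ObjectName queue = new ObjectName(
                    "org.apache.activemq:type=Broker,brokerName=localhost,"
                            + "destinationType=Queue,destinationName=MY.QUEUE");
            // QueueSize is recomputed from the persistent store on startup,
            // so it survives a restart; EnqueueCount is in-memory only and
            // resets to zero.
            Long depth = (Long) conn.getAttribute(queue, "QueueSize");
            Long enqueued = (Long) conn.getAttribute(queue, "EnqueueCount");
            System.out.println("QueueSize (survives restart):  " + depth);
            System.out.println("EnqueueCount (resets to zero): " + enqueued);
        }
    }
}
```

If the enqueue/dequeue counters need to survive pod restarts, they have to be collected periodically and stored outside the broker, for example by a monitoring agent scraping JMX.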