springboot+redis+kafka集羣配置 分佈式

redis 配置 —— 需要注意的是,集羣(多服務器)部署時 nodes 的配置格式與單機的 host/port 格式不一樣


/**
 * Redis cluster configuration.
 *
 * <p>Builds a {@link JedisCluster} from the comma-separated
 * {@code spring.redis.cluster.nodes} property and a {@link RedisTemplate}
 * whose keys are plain strings and whose values are JSON, so entries are
 * readable in redis-cli instead of JDK-serialized byte garbage.
 */
@Configuration
@ConditionalOnClass({JedisCluster.class})
public class RedisConfig {

        /** Comma-separated cluster nodes, e.g. "host1:6001,host2:6002". */
        @Value("${spring.redis.cluster.nodes}")
        private String clusterNodes;

        /** Connection timeout in milliseconds (0 falls back to a 1s default). */
        @Value("${spring.redis.timeout}")
        private int timeout;

        @Value("${spring.redis.pool.max-idle}")
        private int maxIdle;

        @Value("${spring.redis.pool.max-wait}")
        private long maxWaitMillis;

        /** Per-command socket read timeout in milliseconds. */
        @Value("${spring.redis.commandTimeout}")
        private int commandTimeout;

        @Value("${spring.redis.password}")
        private String password;

        /** Connect timeout used when {@code spring.redis.timeout} is not positive. */
        private static final int DEFAULT_CONNECT_TIMEOUT_MS = 1000;

        /** Max redirections/retries inside the cluster client; the old hard-coded 1000 could hang for minutes. */
        private static final int MAX_ATTEMPTS = 5;

        /**
         * Creates the shared {@link JedisCluster} client.
         *
         * <p>Fixes two defects of the previous version: {@code commandTimeout}
         * was injected but never used (the connect timeout was passed as the
         * socket timeout instead), and {@code maxAttempts} was hard-coded to 1000.
         */
        @Bean
        public JedisCluster getJedisCluster() {
            Set<HostAndPort> nodes = parseClusterNodes(clusterNodes);
            JedisPoolConfig jedisPoolConfig = new JedisPoolConfig();
            jedisPoolConfig.setMaxIdle(maxIdle);
            jedisPoolConfig.setMaxWaitMillis(maxWaitMillis);
            int connectTimeout = timeout > 0 ? timeout : DEFAULT_CONNECT_TIMEOUT_MS;
            return new JedisCluster(nodes, connectTimeout, commandTimeout, MAX_ATTEMPTS,
                    password, jedisPoolConfig);
        }

        /**
         * Parses a "host:port,host:port" list into a set of {@link HostAndPort},
         * trimming whitespace and rejecting malformed entries instead of failing
         * later with an ArrayIndexOutOfBoundsException.
         *
         * @param nodesProperty raw comma-separated node list
         * @return parsed node set
         * @throws IllegalArgumentException if an entry is not "host:port"
         */
        private static Set<HostAndPort> parseClusterNodes(String nodesProperty) {
            Set<HostAndPort> nodes = new HashSet<>();
            for (String node : nodesProperty.split(",")) {
                String trimmed = node.trim();
                if (trimmed.isEmpty()) {
                    continue; // tolerate trailing commas / stray whitespace
                }
                String[] hostPort = trimmed.split(":");
                if (hostPort.length != 2) {
                    throw new IllegalArgumentException("Invalid redis cluster node: " + node);
                }
                nodes.add(new HostAndPort(hostPort[0], Integer.parseInt(hostPort[1])));
            }
            return nodes;
        }

        /**
         * Configures the serialization used when storing data in Redis.
         *
         * <p>RedisTemplate defaults to JDK serialization, which stores binary
         * byte codes and makes keys unreadable, so a custom serializer is used:
         * string keys plus Jackson JSON values (for both plain and hash operations).
         *
         * @param redisConnectionFactory the connection factory supplied by Spring
         * @return the configured template
         */
        @Bean
        public RedisTemplate<Object, Object> redisTemplate(RedisConnectionFactory redisConnectionFactory) throws UnknownHostException {
            RedisTemplate<Object, Object> redisTemplate = new RedisTemplate<>();
            redisTemplate.setConnectionFactory(redisConnectionFactory);

            Jackson2JsonRedisSerializer<Object> jsonSerializer = new Jackson2JsonRedisSerializer<>(Object.class);
            ObjectMapper objectMapper = new ObjectMapper();
            objectMapper.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);
            // WARNING: default typing deserializes arbitrary types from the stored
            // JSON; only use with trusted data (known gadget-chain risk).
            objectMapper.enableDefaultTyping(ObjectMapper.DefaultTyping.NON_FINAL);
            jsonSerializer.setObjectMapper(objectMapper);

            StringRedisSerializer stringSerializer = new StringRedisSerializer();
            redisTemplate.setKeySerializer(stringSerializer);
            redisTemplate.setValueSerializer(jsonSerializer);
            // Previously unset: hash entries fell back to JDK serialization.
            redisTemplate.setHashKeySerializer(stringSerializer);
            redisTemplate.setHashValueSerializer(jsonSerializer);

            redisTemplate.afterPropertiesSet();
            return redisTemplate;
        }
}

kafka 配置,只加了依賴包

因爲是 Spring Boot 1.5.6 版本,所以要用 spring-kafka 1.3.8.RELEASE 版本(spring-kafka 的版本必須與 Spring Boot 版本匹配),這個要注意下

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.3.8.RELEASE</version>
</dependency>

下面是完整配置文件 , IP替換掉就OK

# Application configuration: embedded Tomcat, logging, Redis cluster, Kafka.
server:
  port: 5088
  tomcat:
    uri-encoding: utf-8
logging:
  config: classpath:logback.xml
#  file: /app/log/zenlayerda.log
#  level:
#    root: error
#    com.zenlayer.cloud: debug
spring:
  redis:
    # Connection timeout in ms (read by RedisConfig.timeout); 0 here.
    timeout: 0
    # Per-command timeout in ms (read by RedisConfig.commandTimeout).
    commandTimeout: 5000
#    host: 10.64.3.36
#    port: 6379
#    password: 123456
    cluster:
      # Cluster mode: comma-separated host:port pairs — note the format
      # differs from the single-node host/port keys commented out above.
      nodes: 196.168.30.150:6001,196.168.30.150:6002,196.168.30.154:6003,196.168.30.154:6004,196.168.30.158:6005,196.168.30.158:6006 
    password: 123456
    pool:
      max-active: 600
      max-wait: 1000
      max-idle: 300
      min-idle: 0
  kafka:
#    bootstrap-servers: 196.168.30.203:9092
    bootstrap-servers: 196.168.30.98:9092,196.168.30.166:9092,196.168.30.170:9092
    # Number of threads in the listener container, to raise consumer concurrency.
    listener:
      concurrency: 3
    consumer:
      auto-offset-reset: earliest
      group-id: 0
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    producer:
      retries: 1
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      batch-size: 65536  # max bytes batched per send
      buffer-memory: 524288
      # NOTE(review): duplicates the kafka-level bootstrap-servers above — confirm intentional.
      bootstrap-servers: 196.168.30.98:9092,196.168.30.166:9092,196.168.30.170:9092

 

下面貼一下日誌格式配置文件  logback.xml

<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
	<!-- Base directory for log files. Avoid relative paths in Logback config. -->
	<property name="LOG_HOME" value="logs" />
	<!-- Console output -->
	<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
		<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
			<!-- %d = date, %thread = thread name, %-5level = level padded to 5 chars,
			     %msg = log message, %n = newline -->
			<pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{50} -
				%msg%n</pattern>
		</encoder>
	</appender>
	
	<!-- Default file appender: rolls over to a new log file each day.
	     NOTE(review): <Prudent> relies on Logback's case-insensitive property
	     matching for "prudent", and MaxHistory=1500 keeps ~4 years of daily
	     files — confirm both are intended. Same applies to the appenders below. -->
	<appender name="FILE"
		class="ch.qos.logback.core.rolling.RollingFileAppender">
		<Prudent>true</Prudent>
		<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
			<!-- Rolled log file name pattern -->
			<FileNamePattern>${LOG_HOME}/bandwidth.log.%d{yyyy-MM-dd}.log
			</FileNamePattern>
			<!-- Number of rollover periods (days) to retain -->
			<MaxHistory>1500</MaxHistory>
		</rollingPolicy>
		<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
			<!-- Minimal pattern: timestamp and message only -->
			<!-- <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} 
				- %msg%n</pattern> -->
			<pattern>%d{yyyy-MM-dd HH:mm:ss} - %msg%n</pattern>
		</encoder>
	</appender>
	
	<!-- Daily rolling file for the "dev_one" data stream -->
	<appender name="dev_one" class="ch.qos.logback.core.rolling.RollingFileAppender">
		<Prudent>true</Prudent>
		<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
			<!-- Rolled log file name pattern -->
			<FileNamePattern>${LOG_HOME}/dev_one.log.%d{yyyy-MM-dd}.log</FileNamePattern>
			<!-- Number of rollover periods (days) to retain -->
			<MaxHistory>1500</MaxHistory>
		</rollingPolicy>
		
		
		<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
			<!-- Minimal pattern: timestamp and message only -->
			<!-- <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} 
				- %msg%n</pattern> -->
			<pattern>%d{yyyy-MM-dd HH:mm:ss} - %msg%n</pattern>
		</encoder>
    </appender>
    <!-- Daily rolling file for the "dev_five" data stream (message only, no timestamp) -->
    <appender name="dev_five" class="ch.qos.logback.core.rolling.RollingFileAppender">
		<Prudent>true</Prudent>
		<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
			<!-- Rolled log file name pattern -->
			<FileNamePattern>${LOG_HOME}/dev_five.log.%d{yyyy-MM-dd}.log
			</FileNamePattern>
			<!-- Number of rollover periods (days) to retain -->
			<MaxHistory>1500</MaxHistory>
		</rollingPolicy>
		<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
			<!-- Raw message only — presumably this file is machine-parsed; confirm. -->
			<!-- <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern> -->
			<pattern>%msg%n</pattern>
		</encoder>
    </appender>
    <!-- Daily rolling file for the "flow_one" data stream -->
    <appender name="flow_one" class="ch.qos.logback.core.rolling.RollingFileAppender">
		<Prudent>true</Prudent>
		<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
			<!-- Rolled log file name pattern -->
			<FileNamePattern>${LOG_HOME}/flow_one.log.%d{yyyy-MM-dd}.log
			</FileNamePattern>
			<!-- Number of rollover periods (days) to retain -->
			<MaxHistory>1500</MaxHistory>
		</rollingPolicy>
		<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
			<!-- Minimal pattern: timestamp and message only -->
			<!-- <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} 
				- %msg%n</pattern> -->
			<pattern>%d{yyyy-MM-dd HH:mm:ss} - %msg%n</pattern>
		</encoder>
    </appender>
    <!-- Daily rolling file for the "flow_five" data stream -->
    <appender name="flow_five" class="ch.qos.logback.core.rolling.RollingFileAppender">
		<Prudent>true</Prudent>
		<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
			<!-- Rolled log file name pattern -->
			<FileNamePattern>${LOG_HOME}/flow_five.log.%d{yyyy-MM-dd}.log
			</FileNamePattern>
			<!-- Number of rollover periods (days) to retain -->
			<MaxHistory>1500</MaxHistory>
		</rollingPolicy>
		<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
			<!-- Minimal pattern: timestamp and message only -->
			<!-- <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} 
				- %msg%n</pattern> -->
			<pattern>%d{yyyy-MM-dd HH:mm:ss} - %msg%n</pattern>
		</encoder>
    </appender>
    
    <!-- Daily rolling file for connection-error records -->
    <appender name="error_log" class="ch.qos.logback.core.rolling.RollingFileAppender">
		<Prudent>true</Prudent>
		<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
			<!-- Rolled log file name pattern -->
			<FileNamePattern>${LOG_HOME}/error_log.log.%d{yyyy-MM-dd}.log
			</FileNamePattern>
			<!-- Number of rollover periods (days) to retain -->
			<MaxHistory>1500</MaxHistory>
		</rollingPolicy>
		<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
			<!-- Minimal pattern: timestamp and message only -->
			<!-- <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} 
				- %msg%n</pattern> -->
			<pattern>%d{yyyy-MM-dd HH:mm:ss} - %msg%n</pattern>
		</encoder>
    </appender>
    
    
    
    <!-- additivity=false keeps each data stream in its own file only:
         raw data -> snmp_data_five_with_flow_original, rrd-filtered data
         -> snmp_data_five_with_flow_rrd -->
   <!-- <logger name="snmp_data_one_no_flow_queue" level="INFO" additivity="false">
        <appender-ref ref="dev_one"/>
    </logger>-->
    <logger name="snmp_data_five_with_flow_original" level="INFO" additivity="false">
        <appender-ref ref="dev_five"/>
    </logger>
<!--    <logger name="snmp_data_one_with_flow_queue" level="INFO" additivity="false">
        <appender-ref ref="flow_one"/>
    </logger>-->
    <logger name="snmp_data_five_with_flow_rrd" level="INFO" additivity="false">
        <appender-ref ref="flow_five"/>
    </logger>
    <logger name="no_connect_error_log" level="INFO" additivity="false">
        <appender-ref ref="error_log"/>
    </logger>
    
    
    

	<!-- Root log level (INFO; switch to DEBUG for troubleshooting) -->
	<root level="INFO">
		<appender-ref ref="FILE" />
	</root>
</configuration>

關鍵點都貼出來了,結合實際修改

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章