Summary of Common Flink Runtime Errors

1. Kafka partition count is smaller than the number of deployed application nodes

Fix: add partitions to the Kafka topic so that the partition count is at least the number of deployed application nodes (i.e., the source parallelism); otherwise some consumer subtasks are left with no partition to read. A Flink-side sketch follows the log excerpt below.

Command: bin/kafka-topics.sh --alter --zookeeper 10.112.179.12:2181 --partitions 10 --topic gome

2020-03-10 17:16:39,706 INFO  org.apache.kafka.common.utils.AppInfoParser                   - Kafka version : 2.1.1
2020-03-10 17:16:39,706 INFO  org.apache.kafka.common.utils.AppInfoParser                   - Kafka commitId : 21234bee31165527
2020-03-10 17:16:39,804 INFO  org.apache.kafka.clients.Metadata                             - Cluster ID: 6549BuJTQemr4P3ZcUGy0Q
2020-03-10 17:16:39,807 INFO  org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase  - Consumer subtask 0 will start reading 1 partitions with offsets in restored state: {KafkaTopicPartition{topic='employee', partition=0}=-915623761773}
2020-03-10 17:16:39,808 INFO  org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase  - Consumer subtask 0 creating fetcher with offsets {KafkaTopicPartition{topic='employee', partition=0}=-915623761773}.
2020-03-10 17:16:39,814 INFO  org.apache.kafka.clients.consumer.ConsumerConfig              - ConsumerConfig values: 
	auto.commit.interval.ms = 5000
	auto.offset.reset = latest
	bootstrap.servers = [10.0.3.74:9092]
	check.crcs = true
	client.dns.lookup = default
	client.id = 
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = flink
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism = GSSAPI
	security.protocol = PLAINTEXT
	send.buffer.bytes = 131072
	session.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer

2020-03-10 17:16:39,824 INFO  org.apache.kafka.common.utils.AppInfoParser                   - Kafka version : 2.1.1
2020-03-10 17:16:39,824 INFO  org.apache.kafka.common.utils.AppInfoParser                   - Kafka commitId : 21234bee31165527
2020-03-10 17:16:39,824 WARN  org.apache.kafka.common.utils.AppInfoParser                   - Error registering AppInfo mbean
javax.management.InstanceAlreadyExistsException: kafka.consumer:type=app-info,id=consumer-2
	at com.sun.jmx.mbeanserver.Repository.addMBean(Repository.java:437)
	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.registerWithRepository(DefaultMBeanServerInterceptor.java:1898)
	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.registerDynamicMBean(DefaultMBeanServerInterceptor.java:966)
	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.registerObject(DefaultMBeanServerInterceptor.java:900)
	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.registerMBean(DefaultMBeanServerInterceptor.java:324)
	at com.sun.jmx.mbeanserver.JmxMBeanServer.registerMBean(JmxMBeanServer.java:522)
	at org.apache.kafka.common.utils.AppInfoParser.registerAppInfo(AppInfoParser.java:62)
	at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:797)
	at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:652)
	at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:632)
	at org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread.getConsumer(KafkaConsumerThread.java:477)
	at org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread.run(KafkaConsumerThread.java:167)
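
The InstanceAlreadyExistsException WARN at the end of this excerpt is benign: it typically appears when more than one Kafka consumer in the same JVM registers under the same auto-generated client id, and it affects only JMX metric registration, not consumption.

On the Flink side, a minimal sketch of keeping the source parallelism aligned with the partition count (broker, topic, and group id reuse the values above; FlinkKafkaConsumer stands in for whichever connector class the job actually uses):

import java.util.Properties;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

public class KafkaSourceJob {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "10.0.3.74:9092");
        props.setProperty("group.id", "flink");

        // The topic now has 10 partitions (see the --alter command above);
        // keep the source parallelism at or below that so no subtask idles.
        env.addSource(new FlinkKafkaConsumer<>("gome", new SimpleStringSchema(), props))
           .setParallelism(10)
           .print();

        env.execute("kafka source sketch");
    }
}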

2. org.apache.flink.streaming.connectors.kafka.internal.Handover$ClosedException

Reference: https://issues.apache.org/jira/browse/FLINK-10721

As the stack trace shows, the exception is raised from the cancellation path (Handover.close is called by KafkaFetcher.cancel), so it is usually a secondary symptom of the job being cancelled or failing for another reason; look for the root-cause exception earlier in the log.

java.lang.Exception: org.apache.flink.streaming.connectors.kafka.internal.Handover$ClosedException
	at org.apache.flink.streaming.runtime.tasks.SourceStreamTask$LegacySourceFunctionThread.checkThrowSourceExecutionException(SourceStreamTask.java:232)
	at org.apache.flink.streaming.runtime.tasks.SourceStreamTask.processInput(SourceStreamTask.java:133)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.run(StreamTask.java:321)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.runAndHandleCancel(StreamTask.java:286)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.invoke(StreamTask.java:426)
	at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:705)
	at org.apache.flink.runtime.taskmanager.Task.run(Task.java:530)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.flink.streaming.connectors.kafka.internal.Handover$ClosedException
	at org.apache.flink.streaming.connectors.kafka.internal.Handover.close(Handover.java:182)
	at org.apache.flink.streaming.connectors.kafka.internal.KafkaFetcher.cancel(KafkaFetcher.java:175)
	at org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase.cancel(FlinkKafkaConsumerBase.java:818)
	at org.apache.flink.streaming.api.operators.StreamSource.cancel(StreamSource.java:134)
	at org.apache.flink.streaming.runtime.tasks.SourceStreamTask.cancelTask(SourceStreamTask.java:158)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.cancel(StreamTask.java:528)
	at org.apache.flink.runtime.taskmanager.Task$TaskCanceler.run(Task.java:1434)
	... 1 more

3. Noisy org.apache.flink.runtime.checkpoint.CheckpointCoordinator logs

For log entries like the ones below, edit the log4j configuration and set the org.apache.flink.runtime.checkpoint.CheckpointCoordinator logger to WARN, suppressing the INFO lines Flink prints for every checkpoint (see the snippet after the log excerpt).

2020-03-20 10:13:12,758 INFO  org.apache.flink.runtime.checkpoint.CheckpointCoordinator     - Triggering checkpoint 13 @ 1584670392758 for job 5caeffc827f28381023b4e8479f7b5f7.
2020-03-20 10:13:12,767 INFO  org.apache.flink.runtime.checkpoint.CheckpointCoordinator     - Completed checkpoint 13 for job 5caeffc827f28381023b4e8479f7b5f7 (1228 bytes in 9 ms).
2020-03-20 10:13:12,914 INFO  org.apache.flink.runtime.checkpoint.CheckpointCoordinator     - Triggering checkpoint 11 @ 1584670392914 for job 28a31585514afdd39baf1d48eef87065.
2020-03-20 10:13:12,923 INFO  org.apache.flink.runtime.checkpoint.CheckpointCoordinator     - Completed checkpoint 11 for job 28a31585514afdd39baf1d48eef87065 (1232 bytes in 9 ms).
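
Assuming a log4j 1.x setup (Flink's default in the versions these logs come from), a single line in conf/log4j.properties raises the threshold for that logger:

log4j.logger.org.apache.flink.runtime.checkpoint.CheckpointCoordinator=WARN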

4. Elasticsearch index creation fails when the index name starts with "-"

Elasticsearch rejects index names that start with '_', '-', or '+'. The failing name in the log below, -2020-04-14, suggests that the prefix used to build the daily index name came out empty, leaving only the date and its leading separator. Validate the prefix before constructing the name; a sketch follows the stack trace.

2020-04-14 00:41:08,771 ERROR org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase  - Failed Elasticsearch item request: [-2020-04-14] ElasticsearchException[Elasticsearch exception [type=invalid_index_name_exception, reason=Invalid index name [-2020-04-14], must not start with '_', '-', or '+']]
[-2020-04-14] ElasticsearchException[Elasticsearch exception [type=invalid_index_name_exception, reason=Invalid index name [-2020-04-14], must not start with '_', '-', or '+']]
	at org.elasticsearch.ElasticsearchException.innerFromXContent(ElasticsearchException.java:510)
	at org.elasticsearch.ElasticsearchException.fromXContent(ElasticsearchException.java:421)
	at org.elasticsearch.action.bulk.BulkItemResponse.fromXContent(BulkItemResponse.java:135)
	at org.elasticsearch.action.bulk.BulkResponse.fromXContent(BulkResponse.java:198)
	at org.elasticsearch.client.RestHighLevelClient.parseEntity(RestHighLevelClient.java:653)
	at org.elasticsearch.client.RestHighLevelClient.lambda$performRequestAsyncAndParseEntity$3(RestHighLevelClient.java:549)
	at org.elasticsearch.client.RestHighLevelClient$1.onSuccess(RestHighLevelClient.java:580)
	at org.elasticsearch.client.RestClient$FailureTrackingResponseListener.onSuccess(RestClient.java:621)
	at org.elasticsearch.client.RestClient$1.completed(RestClient.java:375)
	at org.elasticsearch.client.RestClient$1.completed(RestClient.java:366)
	at org.apache.http.concurrent.BasicFuture.completed(BasicFuture.java:119)
	at org.apache.http.impl.nio.client.DefaultClientExchangeHandlerImpl.responseCompleted(DefaultClientExchangeHandlerImpl.java:177)
	at org.apache.http.nio.protocol.HttpAsyncRequestExecutor.processResponse(HttpAsyncRequestExecutor.java:436)
	at org.apache.http.nio.protocol.HttpAsyncRequestExecutor.inputReady(HttpAsyncRequestExecutor.java:326)
	at org.apache.http.impl.nio.DefaultNHttpClientConnection.consumeInput(DefaultNHttpClientConnection.java:265)
	at org.apache.http.impl.nio.client.InternalIODispatch.onInputReady(InternalIODispatch.java:81)
	at org.apache.http.impl.nio.client.InternalIODispatch.onInputReady(InternalIODispatch.java:39)
	at org.apache.http.impl.nio.reactor.AbstractIODispatch.inputReady(AbstractIODispatch.java:114)
	at org.apache.http.impl.nio.reactor.BaseIOReactor.readable(BaseIOReactor.java:162)
	at org.apache.http.impl.nio.reactor.AbstractIOReactor.processEvent(AbstractIOReactor.java:337)
	at org.apache.http.impl.nio.reactor.AbstractIOReactor.processEvents(AbstractIOReactor.java:315)
	at org.apache.http.impl.nio.reactor.AbstractIOReactor.execute(AbstractIOReactor.java:276)
	at org.apache.http.impl.nio.reactor.BaseIOReactor.execute(BaseIOReactor.java:104)
	at org.apache.http.impl.nio.reactor.AbstractMultiworkerIOReactor$Worker.run(AbstractMultiworkerIOReactor.java:588)
	at java.lang.Thread.run(Thread.java:748)
2020-04-14 00:41:08,787 ERROR org.apache.flink.streaming.runtime.tasks.StreamTask           - Error during disposal of stream operator.
java.lang.RuntimeException: An error occurred in ElasticsearchSink.
	at org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase.checkErrorAndRethrow(ElasticsearchSinkBase.java:381)
	at org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase.close(ElasticsearchSinkBase.java:343)
	at org.apache.flink.api.common.functions.util.FunctionUtils.closeFunction(FunctionUtils.java:43)
	at org.apache.flink.streaming.api.operators.AbstractUdfStreamOperator.dispose(AbstractUdfStreamOperator.java:117)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.disposeAllOperators(StreamTask.java:605)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.invoke(StreamTask.java:504)
	at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:705)
	at org.apache.flink.runtime.taskmanager.Task.run(Task.java:530)
	at java.lang.Thread.run(Thread.java:748)
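
A minimal sketch of the guard (class and helper names are illustrative, not from the original job):

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

public class IndexNames {
    // Build a daily index name such as "applog-2020-04-14", refusing an
    // empty prefix that would otherwise produce the invalid "-2020-04-14".
    static String dailyIndex(String prefix, LocalDate date) {
        if (prefix == null || prefix.trim().isEmpty()) {
            throw new IllegalArgumentException("index prefix must not be empty");
        }
        return prefix + "-" + date.format(DateTimeFormatter.ISO_LOCAL_DATE);
    }
}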

5. flink-root-taskexecutor-1-localhost.localdomain.out grows too large

This is most likely because stream.print() is writing every Kafka record to stdout, which the TaskManager redirects into its .out file. Remove the print in production, or route debug output through the logging framework so it is level-controlled and rolled.
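
A minimal sketch of the logger-based alternative (class and method names are illustrative):

import org.apache.flink.streaming.api.datastream.DataStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DebugOutput {
    private static final Logger LOG = LoggerFactory.getLogger(DebugOutput.class);

    // Pass records through unchanged while logging them at DEBUG, so the
    // output lands in the rolling log file and can be silenced via log4j
    // instead of filling the .out file.
    static DataStream<String> logged(DataStream<String> stream) {
        return stream.map(record -> {
            LOG.debug("kafka record: {}", record);
            return record;
        });
    }
}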

6. From Elasticsearch 6.x, an index can hold only one mapping type

The job below wrote documents with two types (cron and ssh) into the same daily index, which ES 6.x rejects. Either write every document with a single fixed type and carry the old logical type as a field, or split the data into one index per type (see the sketch after the stack trace).

11:40:47.225 [flink-akka.actor.default-dispatcher-12] INFO  o.a.f.r.e.ExecutionGraph - Could not restart the job ssh handle 2 es and mysql (97fed07fb8491ea1a3995ed91479efeb) because the restart strategy prevented it.
java.lang.RuntimeException: An error occurred in ElasticsearchSink.
	at org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase.checkErrorAndRethrow(ElasticsearchSinkBase.java:381)
	at org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase.checkAsyncErrorsAndRequests(ElasticsearchSinkBase.java:386)
	at org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase.invoke(ElasticsearchSinkBase.java:307)
	at org.apache.flink.streaming.api.operators.StreamSink.processElement(StreamSink.java:56)
	at org.apache.flink.streaming.runtime.io.StreamOneInputProcessor.processElement(StreamOneInputProcessor.java:164)
	at org.apache.flink.streaming.runtime.io.StreamOneInputProcessor.processInput(StreamOneInputProcessor.java:143)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.processInput(StreamTask.java:279)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.run(StreamTask.java:321)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.runAndHandleCancel(StreamTask.java:286)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.invoke(StreamTask.java:426)
	at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:705)
	at org.apache.flink.runtime.taskmanager.Task.run(Task.java:530)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.elasticsearch.ElasticsearchException: Elasticsearch exception [type=illegal_argument_exception, reason=Rejecting mapping update to [aqgk_jzglxt-2020-04-15] as the final mapping would have more than 1 type: [cron, ssh]]
	at org.elasticsearch.ElasticsearchException.innerFromXContent(ElasticsearchException.java:510)
	at org.elasticsearch.ElasticsearchException.fromXContent(ElasticsearchException.java:421)
	at org.elasticsearch.action.bulk.BulkItemResponse.fromXContent(BulkItemResponse.java:135)
	at org.elasticsearch.action.bulk.BulkResponse.fromXContent(BulkResponse.java:198)
	at org.elasticsearch.client.RestHighLevelClient.parseEntity(RestHighLevelClient.java:653)
	at org.elasticsearch.client.RestHighLevelClient.lambda$performRequestAsyncAndParseEntity$3(RestHighLevelClient.java:549)
	at org.elasticsearch.client.RestHighLevelClient$1.onSuccess(RestHighLevelClient.java:580)
	at org.elasticsearch.client.RestClient$FailureTrackingResponseListener.onSuccess(RestClient.java:621)
	at org.elasticsearch.client.RestClient$1.completed(RestClient.java:375)
	at org.elasticsearch.client.RestClient$1.completed(RestClient.java:366)
	at org.apache.http.concurrent.BasicFuture.completed(BasicFuture.java:119)
	at org.apache.http.impl.nio.client.DefaultClientExchangeHandlerImpl.responseCompleted(DefaultClientExchangeHandlerImpl.java:177)
	at org.apache.http.nio.protocol.HttpAsyncRequestExecutor.processResponse(HttpAsyncRequestExecutor.java:436)
	at org.apache.http.nio.protocol.HttpAsyncRequestExecutor.inputReady(HttpAsyncRequestExecutor.java:326)
	at org.apache.http.impl.nio.client.InternalRequestExecutor.inputReady(InternalRequestExecutor.java:83)
	at org.apache.http.impl.nio.DefaultNHttpClientConnection.consumeInput(DefaultNHttpClientConnection.java:265)
	at org.apache.http.impl.nio.client.InternalIODispatch.onInputReady(InternalIODispatch.java:81)
	at org.apache.http.impl.nio.client.InternalIODispatch.onInputReady(InternalIODispatch.java:39)
	at org.apache.http.impl.nio.reactor.AbstractIODispatch.inputReady(AbstractIODispatch.java:114)
	at org.apache.http.impl.nio.reactor.BaseIOReactor.readable(BaseIOReactor.java:162)
	at org.apache.http.impl.nio.reactor.AbstractIOReactor.processEvent(AbstractIOReactor.java:337)
	at org.apache.http.impl.nio.reactor.AbstractIOReactor.processEvents(AbstractIOReactor.java:315)
	at org.apache.http.impl.nio.reactor.AbstractIOReactor.execute(AbstractIOReactor.java:276)
	at org.apache.http.impl.nio.reactor.BaseIOReactor.execute(BaseIOReactor.java:104)
	at org.apache.http.impl.nio.reactor.AbstractMultiworkerIOReactor$Worker.run(AbstractMultiworkerIOReactor.java:588)
	... 1 common frames omitted
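
A minimal sketch of the single-type request (helper and field names are illustrative; it follows the ES 6.x convention of one fixed _doc type per index):

import java.util.Map;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

public class EsRequests {
    // Build the daily index request with one fixed mapping type; the old
    // logical type ("cron", "ssh", ...) travels inside the document instead.
    static IndexRequest toRequest(String indexPrefix, String day, String logicalType,
                                  Map<String, Object> doc) {
        doc.put("log_type", logicalType);
        return Requests.indexRequest()
                .index(indexPrefix + "-" + day)   // e.g. aqgk_jzglxt-2020-04-15
                .type("_doc")                     // single mapping type per index
                .source(doc);
    }
}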

7. Database connection failure

The NullPointerException below was thrown inside the sink's open() method; per the section title, the underlying cause was a failed MySQL connection, so the connection object (or a config value needed to create it) was null by the time it was used. Establish the connection in open(), validate the inputs up front, and let connection errors surface with their real message instead of an NPE (see the sketch after the stack trace).

java.lang.NullPointerException
	at com.develop.flink.kafka2mysql.TomcatLog2MysqlSink.open(TomcatLog2MysqlSink.java:145)
	at org.apache.flink.api.common.functions.util.FunctionUtils.openFunction(FunctionUtils.java:36)
	at org.apache.flink.streaming.api.operators.AbstractUdfStreamOperator.open(AbstractUdfStreamOperator.java:102)
	at org.apache.flink.streaming.api.operators.StreamSink.open(StreamSink.java:48)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.openAllOperators(StreamTask.java:552)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.invoke(StreamTask.java:416)
	at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:705)
	at org.apache.flink.runtime.taskmanager.Task.run(Task.java:530)
	at java.lang.Thread.run(Thread.java:748)
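
A minimal sketch of a defensive JDBC sink (class name, URL, and credentials are illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Objects;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;

public class MysqlSink extends RichSinkFunction<String> {
    private final String url;
    private final String user;
    private final String password;
    private transient Connection connection;

    public MysqlSink(String url, String user, String password) {
        // Fail at construction time if the configuration is incomplete,
        // rather than with an NPE inside open() on the TaskManager.
        this.url = Objects.requireNonNull(url, "jdbc url");
        this.user = Objects.requireNonNull(user, "user");
        this.password = Objects.requireNonNull(password, "password");
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        // DriverManager throws a descriptive SQLException if the database
        // is unreachable, which beats a bare NullPointerException later.
        connection = DriverManager.getConnection(url, user, password);
    }

    @Override
    public void invoke(String value, Context context) throws Exception {
        // INSERT logic would go here.
    }

    @Override
    public void close() throws Exception {
        if (connection != null) {
            connection.close();
        }
    }
}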