Airflow worker is not listening to the default RabbitMQ queue

2024-02-24

I have configured Airflow with a RabbitMQ broker. The services

airflow worker
airflow scheduler
airflow webserver

are running without any errors, and the scheduler is pushing tasks for execution onto the default RabbitMQ queue.

Even when I try airflow worker -q=default, the worker still does not receive any tasks to run. My airflow.cfg settings file:

[core]
# The home folder for airflow, default is ~/airflow
airflow_home = /home/my_projects/ksaprice_project/airflow

# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
# This path must be absolute
dags_folder = /home/my_projects/ksaprice_project/airflow/dags

# The folder where airflow should store its log files
# This path must be absolute
base_log_folder = /home/my_projects/ksaprice_project/airflow/logs

remote_base_log_folder = 
remote_log_conn_id =
# Use server-side encryption for logs stored in S3
encrypt_s3_logs = False
# DEPRECATED option for remote log storage, use remote_base_log_folder instead!
s3_log_folder =

executor = CeleryExecutor

# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engine, more information
# their website
sql_alchemy_conn = postgresql+psycopg2://name:password@ksaprice_postgres:5432/airflow

sql_alchemy_pool_size = 5

# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite.
sql_alchemy_pool_recycle = 3600

# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32

# The number of task instances allowed to run concurrently by the scheduler
dag_concurrency = 16

# Are DAGs paused by default at creation
dags_are_paused_at_creation = True

# When not using pools, tasks are run in the "default pool",
# whose size is guided by this config element
non_pooled_task_slot_count = 128

# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16

# Whether to load the examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = True

# Where your Airflow plugins are stored
plugins_folder = /home/my_projects/ksaprice_project/airflow/plugins

# Secret key to save connection passwords in the db
fernet_key = SomeKey

# Whether to disable pickling dags
donot_pickle = False

# How long before timing out a python file import while filling the DagBag
dagbag_import_timeout = 30

# The class to use for running task instances in a subprocess
task_runner = BashTaskRunner

# If set, tasks without a `run_as_user` argument will be run with this user
# Can be used to de-elevate a sudo user running Airflow when executing tasks
default_impersonation =

# What security module to use (for example kerberos):
security =

# Turn unit test mode on (overwrites many configuration options with test
# values at runtime)
unit_test_mode = False

[cli]
# In what way should the cli access the API. The LocalClient will use the
# database directly, while the json_client will use the api running on the
# webserver
api_client = airflow.api.client.local_client
endpoint_url = http://localhost:8080

[api]
# How to authenticate users of the API
auth_backend = airflow.api.auth.backend.default

[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via `default_args`
default_owner = Airflow
default_cpus = 1
default_ram = 512
default_disk = 512
default_gpus = 0


[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080

# The ip specified when starting the web server
web_server_host = 0.0.0.0

# The port on which to run the web server
web_server_port = 8080

# Paths to the SSL certificate and key for the web server. When both are
# provided SSL will be enabled. This does not change the web server port.
web_server_ssl_cert =
web_server_ssl_key =

# Number of seconds the gunicorn webserver waits before timing out on a worker
web_server_worker_timeout = 120

# Number of workers to refresh at a time. When set to 0, worker refresh is
# disabled. When nonzero, airflow periodically refreshes webserver workers by
# bringing up new ones and killing old ones.
worker_refresh_batch_size = 1

# Number of seconds to wait before refreshing a batch of workers.
worker_refresh_interval = 30

# Secret key used to run your flask app
secret_key = temporary_key

# Number of workers to run the Gunicorn web server
workers = 4

# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent
worker_class = sync

# Log files for the gunicorn webserver. '-' means log to stderr.
access_logfile = -
error_logfile = -

# Expose the configuration file in the web server
expose_config = False

# Set to true to turn on authentication:
# http://pythonhosted.org/airflow/security.html#web-authentication
authenticate = False

# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False

# Filtering mode. Choices include user (default) and ldapgroup.
# Ldap group filtering requires using the ldap backend
#
# Note that the ldap server needs the "memberOf" overlay to be set up
# in order to user the ldapgroup mode.
owner_mode = user

# Default DAG orientation. Valid values are:
# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
dag_orientation = LR

# Puts the webserver in demonstration mode; blurs the names of Operators for
# privacy.
demo_mode = False

# The amount of time (in secs) webserver will wait for initial handshake
# while fetching logs from other worker machine
log_fetch_timeout_sec = 5

# By default, the webserver shows paused DAGs. Flip this to hide paused
# DAGs by default
hide_paused_dags_by_default = False    

[celery]
# This section only applies if you are using the CeleryExecutor in
# [core] section above

# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor

# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
celeryd_concurrency = 16

# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the workers local log files to the airflow main
# web server, who then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and open
# visible from the main web server to connect into the workers.
worker_log_server_port = 8793

# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
#broker_url = pyamqp://user:pw@ksaprice_rabbitmq/ksaprice_rabbitmq_vh
broker_url = amqp://user:pw@ksaprice_rabbitmq/ksaprice_rabbitmq_vh

# Another key Celery setting
celery_result_backend = db+postgresql://name:pw@ksaprice_postgres:5432/airflow

# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it `airflow flower`. This defines the IP that Celery Flower runs on
flower_host = 0.0.0.0

# This defines the port that Celery Flower runs on
flower_port = 5555

# Default queue that tasks get assigned to and that worker listen on.
default_queue = default


[scheduler]
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5

# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5

# after how much time should the scheduler terminate in seconds
# -1 indicates to run continuously (see also num_runs)
run_duration = -1

# after how much time a new DAGs should be picked up from the filesystem
min_file_process_interval = 0

dag_dir_list_interval = 300

# How often should stats be printed to the logs
print_stats_interval = 30

child_process_log_directory = /home/my_projects/ksaprice_project/airflow/logs/scheduler

# Local task jobs periodically heartbeat to the DB. If the job has
# not heartbeat in this many seconds, the scheduler will mark the
# associated task instance as failed and will re-schedule the task.
scheduler_zombie_task_threshold = 300

# Turn off scheduler catchup by setting this to False.
# Default behavior is unchanged and
# Command Line Backfills still work, but the scheduler
# will not do scheduler catchup if this is False,
# however it can be set on a per DAG basis in the
# DAG definition (catchup)
catchup_by_default = True

# Statsd (https://github.com/etsy/statsd) integration settings
statsd_on = False
statsd_host = localhost
statsd_port = 8125
statsd_prefix = airflow

# The scheduler can run multiple threads in parallel to schedule dags.
# This defines how many threads will run. However airflow will never
# use more threads than the amount of cpu cores available.
max_threads = 2

authenticate = False

rabbitmqctl report:

Reporting server status on {{2017,8,3},{13,15,38}}


Status of node ksaprice_rabbitmq@4eed789778c0
[{pid,115},
 {running_applications,
     [{rabbitmq_management,"RabbitMQ Management Console","3.6.10"},
      {rabbitmq_management_agent,"RabbitMQ Management Agent","3.6.10"},
      {rabbitmq_web_dispatch,"RabbitMQ Web Dispatcher","3.6.10"},
      {rabbit,"RabbitMQ","3.6.10"},
      {mnesia,"MNESIA  CXC 138 12","4.14.2"},
      {amqp_client,"RabbitMQ AMQP Client","3.6.10"},
      {rabbit_common,
          "Modules shared by rabbitmq-server and rabbitmq-erlang-client",
          "3.6.10"},
      {inets,"INETS  CXC 138 49","6.3.4"},
      {os_mon,"CPO  CXC 138 46","2.4.1"},
      {syntax_tools,"Syntax tools","2.1.1"},
      {cowboy,"Small, fast, modular HTTP server.","1.0.4"},
      {cowlib,"Support library for manipulating Web protocols.","1.0.2"},
      {ranch,"Socket acceptor pool for TCP protocols.","1.3.0"},
      {ssl,"Erlang/OTP SSL application","8.1"},
      {public_key,"Public key infrastructure","1.3"},
      {crypto,"CRYPTO","3.7.2"},
      {compiler,"ERTS  CXC 138 10","7.0.3"},
      {xmerl,"XML parser","1.3.12"},
      {asn1,"The Erlang ASN1 compiler version 4.0.4","4.0.4"},
      {sasl,"SASL  CXC 138 11","3.0.2"},
      {stdlib,"ERTS  CXC 138 10","3.2"},
      {kernel,"ERTS  CXC 138 10","5.1.1"}]},
 {os,{unix,linux}},
 {erlang_version,
     "Erlang/OTP 19 [erts-8.2.1] [source] [64-bit] [smp:2:2] [async-threads:64] [hipe] [kernel-poll:true]\n"},
 {memory,
     [{total,70578840},
      {connection_readers,0},
      {connection_writers,0},
      {connection_channels,0},
      {connection_other,2832},
      {queue_procs,192136},
      {queue_slave_procs,0},
      {plugins,2117704},
      {other_proc,17561640},
      {mnesia,88872},
      {metrics,207264},
      {mgmt_db,771920},
      {msg_index,48056},
      {other_ets,2535184},
      {binary,910704},
      {code,24680786},
      {atom,1033401},
      {other_system,20632773}]},
 {alarms,[]},
 {listeners,[{clustering,25672,"::"},{amqp,5672,"::"},{http,15672,"::"}]},
 {vm_memory_high_watermark,0.4},
 {vm_memory_limit,830581964},
 {disk_free_limit,50000000},
 {disk_free,55911219200},
 {file_descriptors,
     [{total_limit,1048476},
      {total_used,8},
      {sockets_limit,943626},
      {sockets_used,0}]},
 {processes,[{limit,1048576},{used,338}]},
 {run_queue,0},
 {uptime,3204},
 {kernel,{net_ticktime,60}}]

Cluster status of node ksaprice_rabbitmq@4eed789778c0
[{nodes,[{disc,[ksaprice_rabbitmq@4eed789778c0]}]},
 {running_nodes,[ksaprice_rabbitmq@4eed789778c0]},
 {cluster_name,<<"ksaprice_rabbitmq@4eed789778c0">>},
 {partitions,[]},
 {alarms,[{ksaprice_rabbitmq@4eed789778c0,[]}]}]

Application environment of node ksaprice_rabbitmq@4eed789778c0
[{amqp_client,[{prefer_ipv6,false},{ssl_options,[]}]},
 {asn1,[]},
 {compiler,[]},
 {cowboy,[]},
 {cowlib,[]},
 {crypto,[]},
 {inets,[]},
 {kernel,
     [{error_logger,tty},
      {inet_default_connect_options,[{nodelay,true}]},
      {inet_dist_listen_max,25672},
      {inet_dist_listen_min,25672}]},
 {mnesia,[{dir,"/var/lib/rabbitmq/mnesia/ksaprice_rabbitmq"}]},
 {os_mon,
     [{start_cpu_sup,false},
      {start_disksup,false},
      {start_memsup,false},
      {start_os_sup,false}]},
 {public_key,[]},
 {rabbit,
     [{auth_backends,[rabbit_auth_backend_internal]},
      {auth_mechanisms,['PLAIN','AMQPLAIN']},
      {background_gc_enabled,false},
      {background_gc_target_interval,60000},
      {backing_queue_module,rabbit_priority_queue},
      {channel_max,0},
      {channel_operation_timeout,15000},
      {cluster_keepalive_interval,10000},
      {cluster_nodes,{[],disc}},
      {cluster_partition_handling,ignore},
      {collect_statistics,fine},
      {collect_statistics_interval,5000},
      {config_entry_decoder,
          [{cipher,aes_cbc256},
           {hash,sha512},
           {iterations,1000},
           {passphrase,undefined}]},
      {credit_flow_default_credit,{400,200}},
      {default_permissions,[<<".*">>,<<".*">>,<<".*">>]},
      {default_user,<<"guest">>},
      {default_user_tags,[administrator]},
      {default_vhost,<<"/">>},
      {delegate_count,16},
      {disk_free_limit,50000000},
      {disk_monitor_failure_retries,10},
      {disk_monitor_failure_retry_interval,120000},
      {enabled_plugins_file,"/etc/rabbitmq/enabled_plugins"},
      {error_logger,tty},
      {fhc_read_buffering,false},
      {fhc_write_buffering,true},
      {frame_max,131072},
      {halt_on_upgrade_failure,true},
      {handshake_timeout,10000},
      {heartbeat,60},
      {hipe_compile,false},
      {hipe_modules,
          [rabbit_reader,rabbit_channel,gen_server2,rabbit_exchange,
           rabbit_command_assembler,rabbit_framing_amqp_0_9_1,rabbit_basic,
           rabbit_event,lists,queue,priority_queue,rabbit_router,rabbit_trace,
           rabbit_misc,rabbit_binary_parser,rabbit_exchange_type_direct,
           rabbit_guid,rabbit_net,rabbit_amqqueue_process,
           rabbit_variable_queue,rabbit_binary_generator,rabbit_writer,
           delegate,gb_sets,lqueue,sets,orddict,rabbit_amqqueue,
           rabbit_limiter,gb_trees,rabbit_queue_index,
           rabbit_exchange_decorator,gen,dict,ordsets,file_handle_cache,
           rabbit_msg_store,array,rabbit_msg_store_ets_index,rabbit_msg_file,
           rabbit_exchange_type_fanout,rabbit_exchange_type_topic,mnesia,
           mnesia_lib,rpc,mnesia_tm,qlc,sofs,proplists,credit_flow,pmon,
           ssl_connection,tls_connection,ssl_record,tls_record,gen_fsm,ssl]},
      {lazy_queue_explicit_gc_run_operation_threshold,1000},
      {log_levels,[{connection,info}]},
      {loopback_users,[]},
      {memory_monitor_interval,2500},
      {mirroring_flow_control,true},
      {mirroring_sync_batch_size,4096},
      {mnesia_table_loading_retry_limit,10},
      {mnesia_table_loading_retry_timeout,30000},
      {msg_store_credit_disc_bound,{4000,800}},
      {msg_store_file_size_limit,16777216},
      {msg_store_index_module,rabbit_msg_store_ets_index},
      {msg_store_io_batch_size,4096},
      {num_ssl_acceptors,1},
      {num_tcp_acceptors,10},
      {password_hashing_module,rabbit_password_hashing_sha256},
      {plugins_dir,
          "/usr/lib/rabbitmq/plugins:/usr/lib/rabbitmq/lib/rabbitmq_server-3.6.10/plugins"},
      {plugins_expand_dir,
          "/var/lib/rabbitmq/mnesia/ksaprice_rabbitmq-plugins-expand"},
      {queue_explicit_gc_run_operation_threshold,1000},
      {queue_index_embed_msgs_below,4096},
      {queue_index_max_journal_entries,32768},
      {reverse_dns_lookups,false},
      {sasl_error_logger,tty},
      {server_properties,[]},
      {ssl_allow_poodle_attack,false},
      {ssl_apps,[asn1,crypto,public_key,ssl]},
      {ssl_cert_login_from,distinguished_name},
      {ssl_handshake_timeout,5000},
      {ssl_listeners,[]},
      {ssl_options,[]},
      {tcp_listen_options,
          [{backlog,128},
           {nodelay,true},
           {linger,{true,0}},
           {exit_on_close,false}]},
      {tcp_listeners,[5672]},
      {trace_vhosts,[]},
      {vm_memory_high_watermark,0.4},
      {vm_memory_high_watermark_paging_ratio,0.5}]},
 {rabbit_common,[]},
 {rabbitmq_management,
     [{cors_allow_origins,[]},
      {cors_max_age,1800},
      {http_log_dir,none},
      {listener,[{port,15672}]},
      {load_definitions,none},
      {management_db_cache_multiplier,5},
      {process_stats_gc_timeout,300000},
      {stats_event_max_backlog,250}]},
 {rabbitmq_management_agent,
     [{rates_mode,basic},
      {sample_retention_policies,
          [{global,[{605,5},{3660,60},{29400,600},{86400,1800}]},
           {basic,[{605,5},{3600,60}]},
           {detailed,[{605,5}]}]}]},
 {rabbitmq_web_dispatch,[]},
 {ranch,[]},
 {sasl,[{errlog_type,error},{sasl_error_logger,tty}]},
 {ssl,[]},
 {stdlib,[]},
 {syntax_tools,[]},
 {xmerl,[]}]

Connections:

Channels:

Queues on ksaprice_rabbitmq_vh:
pid     name    durable auto_delete     arguments       owner_pid       exclusive       messages_ready  messages_unacknowledged messages        reductions      policy  exclusive_consumer_pid  exclusive_consumer_tag  consumers       consumer_utilisation    memory  slave_pids      synchronised_slave_pids recoverable_slaves      state   garbage_collection      messages_ram    messages_ready_ram      messages_unacknowledged_ram     messages_persistent     message_bytes   message_bytes_ready     message_bytes_unacknowledged    message_bytes_ram       message_bytes_persistent        head_message_timestamp  disk_reads      disk_writes     backing_queue_status    messages_paged_out      message_bytes_paged_out
<pid>	test2	true	false	[]		false	12	0	12	60224			0		143384			running	[{max_heap_size,0}, {min_bin_vheap_size,46422}, {min_heap_size,233}, {fullsweep_after,65535}, {minor_gcs,2}]	12	12	0	12	2550	2550	0	2550	2550		4	8	[{mode,default}, {q1,8}, {q2,0}, {delta,{delta,undefined,0,0,undefined}}, {q3,3}, {q4,1}, {len,12}, {target_ram_count,infinity}, {next_seq_id,16392}, {avg_ingress_rate,0.018154326288234535}, {avg_egress_rate,0.0}, {avg_ack_ingress_rate,0.0}, {avg_ack_egress_rate,0.0}]	0	0
<pid>	default	true	false	[]		false	12	0	12	96191			0		143384			running	[{max_heap_size,0}, {min_bin_vheap_size,46422}, {min_heap_size,233}, {fullsweep_after,65535}, {minor_gcs,2}]	12	12	0	12	2550	2550	0	2550	2550		0	12	[{mode,default}, {q1,0}, {q2,0}, {delta,{delta,undefined,0,0,undefined}}, {q3,0}, {q4,12}, {len,12}, {target_ram_count,infinity}, {next_seq_id,12}, {avg_ingress_rate,0.029199425682653112}, {avg_egress_rate,0.0}, {avg_ack_ingress_rate,0.0}, {avg_ack_egress_rate,0.0}]	0	0

Queues on /:
pid     name    durable auto_delete     arguments       owner_pid       exclusive       messages_ready  messages_unacknowledged messages        reductions      policy  exclusive_consumer_pid  exclusive_consumer_tag  consumers       consumer_utilisation    memory  slave_pids      synchronised_slave_pids recoverable_slaves      state   garbage_collection      messages_ram    messages_ready_ram      messages_unacknowledged_ram     messages_persistent     message_bytes   message_bytes_ready     message_bytes_unacknowledged    message_bytes_ram       message_bytes_persistent        head_message_timestamp  disk_reads      disk_writes     backing_queue_status    messages_paged_out      message_bytes_paged_out
<pid>	test1	true	false	[]		false	4	0	4	6152			0		55712			running	[{max_heap_size,0}, {min_bin_vheap_size,46422}, {min_heap_size,233}, {fullsweep_after,65535}, {minor_gcs,9}]	4	4	0	4	850	850	0	850	850		4	0	[{mode,default}, {q1,0}, {q2,0}, {delta,{delta,undefined,0,0,undefined}}, {q3,3}, {q4,1}, {len,4}, {target_ram_count,infinity}, {next_seq_id,16384}, {avg_ingress_rate,0.0}, {avg_egress_rate,0.0}, {avg_ack_ingress_rate,0.0}, {avg_ack_egress_rate,0.0}]	0	0
<pid>	celeryev.2708e0df-7957-4e63-add9-b11beaabe6eb	true	false	[]		false	4	0	4	6222			0		55712			running	[{max_heap_size,0}, {min_bin_vheap_size,46422}, {min_heap_size,233}, {fullsweep_after,65535}, {minor_gcs,10}]	4	4	0	4	850	850	0	850	850		4	0	[{mode,default}, {q1,0}, {q2,0}, {delta,{delta,undefined,0,0,undefined}}, {q3,3}, {q4,1}, {len,4}, {target_ram_count,infinity}, {next_seq_id,16384}, {avg_ingress_rate,0.0}, {avg_egress_rate,0.0}, {avg_ack_ingress_rate,0.0}, {avg_ack_egress_rate,0.0}]	0	0
<pid>	test	true	false	[]		false	4	0	4	6152			0		55712			running	[{max_heap_size,0}, {min_bin_vheap_size,46422}, {min_heap_size,233}, {fullsweep_after,65535}, {minor_gcs,9}]	4	4	0	4	850	850	0	850	850		4	0	[{mode,default}, {q1,0}, {q2,0}, {delta,{delta,undefined,0,0,undefined}}, {q3,3}, {q4,1}, {len,4}, {target_ram_count,infinity}, {next_seq_id,16384}, {avg_ingress_rate,0.0}, {avg_egress_rate,0.0}, {avg_ack_ingress_rate,0.0}, {avg_ack_egress_rate,0.0}]	0	0
<pid>	test2	true	false	[]		false	4	0	4	6162			0		55712			running	[{max_heap_size,0}, {min_bin_vheap_size,46422}, {min_heap_size,233}, {fullsweep_after,65535}, {minor_gcs,9}]	4	4	0	4	850	850	0	850	850		4	0	[{mode,default}, {q1,0}, {q2,0}, {delta,{delta,undefined,0,0,undefined}}, {q3,3}, {q4,1}, {len,4}, {target_ram_count,infinity}, {next_seq_id,16384}, {avg_ingress_rate,0.0}, {avg_egress_rate,0.0}, {avg_ack_ingress_rate,0.0}, {avg_ack_egress_rate,0.0}]	0	0

Exchanges on ksaprice_rabbitmq_vh:
name    type    durable auto_delete     internal        arguments       policy
        direct  true    false   false   []
amq.direct      direct  true    false   false   []
amq.fanout      fanout  true    false   false   []
amq.headers     headers true    false   false   []
amq.match       headers true    false   false   []
amq.rabbitmq.trace      topic   true    false   true    []
amq.topic       topic   true    false   false   []
celery.pidbox   fanout  false   false   false   []
celeryev        topic   true    false   false   []
default direct  true    false   false   []
reply.celery.pidbox     direct  false   false   false   []
test2   direct  true    false   false   []

Exchanges on /:
name    type    durable auto_delete     internal        arguments       policy
        direct  true    false   false   []
amq.direct      direct  true    false   false   []
amq.fanout      fanout  true    false   false   []
amq.headers     headers true    false   false   []
amq.match       headers true    false   false   []
amq.rabbitmq.log        topic   true    false   true    []
amq.rabbitmq.trace      topic   true    false   true    []
amq.topic       topic   true    false   false   []
celeryev        topic   true    false   false   []
celeryev.2708e0df-7957-4e63-add9-b11beaabe6eb   direct  true    false   false   []
test    direct  true    false   false   []
test1   direct  true    false   false   []
test2   direct  true    false   false   []

Bindings on ksaprice_rabbitmq_vh:
source_name     source_kind     destination_name        destination_kind        routing_key     arguments       vhost
        exchange        default queue   default []      ksaprice_rabbitmq_vh
        exchange        test2   queue   test2   []      ksaprice_rabbitmq_vh
default exchange        default queue   default []      ksaprice_rabbitmq_vh
test2   exchange        test2   queue   test2   []      ksaprice_rabbitmq_vh

Bindings on /:
source_name     source_kind     destination_name        destination_kind        routing_key     arguments       vhost
        exchange        celeryev.2708e0df-7957-4e63-add9-b11beaabe6eb   queue   celeryev.2708e0df-7957-4e63-add9-b11beaabe6eb   []      /
        exchange        test    queue   test    []      /
        exchange        test1   queue   test1   []      /
        exchange        test2   queue   test2   []      /
celeryev.2708e0df-7957-4e63-add9-b11beaabe6eb   exchange        celeryev.2708e0df-7957-4e63-add9-b11beaabe6eb   queue   celeryev.2708e0df-7957-4e63-add9-b11beaabe6eb   []      /
test    exchange        test    queue   test    []      /
test1   exchange        test1   queue   test1   []      /
test2   exchange        test2   queue   test2   []      /

Consumers on ksaprice_rabbitmq_vh:

Consumers on /:

Permissions on ksaprice_rabbitmq_vh:
user    configure       write   read
admin   .*      .*      .*

Permissions on /:
user    configure       write   read
guest   .*      .*      .*

Policies on ksaprice_rabbitmq_vh:

Policies on /:

Parameters on ksaprice_rabbitmq_vh:

Parameters on /:
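
Note that the "Consumers on ksaprice_rabbitmq_vh:" section above is empty, which matches the symptom: messages are piling up in the default queue, but no Celery worker is consuming from it. One way to cross-check this (a sketch; it assumes the Celery app used by Airflow 1.8's CeleryExecutor is importable as airflow.executors.celery_executor) is to ask the workers and the broker directly:

# Ask any running Celery workers which queues they are consuming from
celery -A airflow.executors.celery_executor inspect active_queues

# Ask RabbitMQ which queues on the vhost actually have consumers attached
rabbitmqctl list_consumers -p ksaprice_rabbitmq_vh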

Update: versions I have tried: Airflow 1.8 with Celery 3.x, and Airflow 1.8.1 with Celery 4.1 and with Celery 3.1.25. None of these combinations solved the problem.


I was investigating why I had a similar problem: the worker kept listening on queues prefixed with celeryev.{hashvalue} instead of the default queue, even though I set -q=default. The answer in my case was to set the environment variable C_FORCE_ROOT=true in the worker's environment, because the worker was running as root (which I know is not recommended, and is a huge security risk if network access is not locked down):

C_FORCE_ROOT=true

I set this, restarted the worker, and it is working fine.
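
Concretely, the change is just to export the variable in the same shell (or service environment) that starts the worker and then restart it; a minimal sketch, using the default_queue name from the configuration above:

export C_FORCE_ROOT=true
airflow worker -q default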

You can see in the code that the worker is not allowed to start as root unless this variable is set: http://docs.celeryproject.org/en/latest/_modules/celery/platforms.html
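
Since running the worker as root is what triggers this check in the first place, a safer alternative (a sketch; it assumes a dedicated, hypothetical airflow system user exists and can read the project directory) is to start the worker under that account instead of forcing root:

# Run the worker as a non-root user so C_FORCE_ROOT is not needed
sudo -u airflow -H bash -c 'export AIRFLOW_HOME=/home/my_projects/ksaprice_project/airflow; airflow worker -q default'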
