news 2026/4/18 11:18:31

GBase 8a Load使用hadoop+Kerberos环境操作全流程介绍

作者头像

张小明

前端开发工程师

1.2k 24
文章封面图
GBase 8a Load使用hadoop+Kerberos环境操作全流程介绍

1.环境准备

主机信息

IP:192.168.195.110
主机名:mjmhadoop

新建hadoop运行用户(本文使用mjmos)

[root@mjmhadoop ~]$ useradd mjmos
[root@mjmhadoop ~]$ passwd mjmos

目录准备

[root@mjmhadoop ~]$ chown -R mjmos:mjmos /opt/
[root@mjmhadoop ~]$ su - mjmos
[mjmos@mjmhadoop ~]$ cd /opt/
[mjmos@mjmhadoop /opt]$ mkdir /opt/software
[mjmos@mjmhadoop /opt]$ mkdir /opt/module

安装包准备

[mjmos@mjmhadoop /opt]$ wget https://repo.huaweicloud.com/java/jdk/8u202-b08/jdk-8u202-linux-x64.tar.gz
[mjmos@mjmhadoop /opt]$ wget https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-2.10.2/hadoop-2.10.2.tar.gz
[mjmos@mjmhadoop /opt]$ tar -zxvf jdk-8u202-linux-x64.tar.gz -C /opt/module/
[mjmos@mjmhadoop /opt]$ tar -zxvf hadoop-2.10.2.tar.gz -C /opt/module/

2.环境配置

hosts解析与环境变量

[mjmos@mjmhadoop /opt]$ exit
[root@mjmhadoop ~]$ echo "192.168.195.110 mjmhadoop" >> /etc/hosts
[root@mjmhadoop ~]$ cat >> /etc/profile << 'eof'
#配置java环境变量
export JAVA_HOME=/opt/module/jdk1.8.0_202
export PATH=$PATH:$JAVA_HOME/bin
#配置Hadoop环境变量
export HADOOP_HOME=/opt/module/hadoop-2.10.2
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
#继续添加其他hadoop需要的环境变量
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"
#HADOOP VARIABLES END
eof
[root@mjmhadoop ~]$ source /etc/profile

hadoop-env配置

[root@mjmhadoop ~]$ su - mjmos
[mjmos@mjmhadoop ~]$ sed -i 's#^export JAVA_HOME.*#export JAVA_HOME=/opt/module/jdk1.8.0_202#g' /opt/module/hadoop-2.10.2/etc/hadoop/hadoop-env.sh

core-site.xml文件配置

[mjmos@mjmhadoop ~]$ sed -i '/configuration/d' /opt/module/hadoop-2.10.2/etc/hadoop/core-site.xml
[mjmos@mjmhadoop ~]$ cat >>/opt/module/hadoop-2.10.2/etc/hadoop/core-site.xml<<'eof'
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/module/hadoop-2.10.2/data/tmp</value>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://192.168.195.110:9000</value>
    </property>
</configuration>
eof

hdfs-site.xml文件配置

[mjmos@mjmhadoop ~]$ sed -i '/configuration/d' /opt/module/hadoop-2.10.2/etc/hadoop/hdfs-site.xml
[mjmos@mjmhadoop ~]$ cat >>/opt/module/hadoop-2.10.2/etc/hadoop/hdfs-site.xml<<'eof'
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
eof

创建初始元数据

[mjmos@mjmhadoop ~]$ hdfs namenode -format

免密认证

[mjmos@mjmhadoop ~]$ ssh-keygen
[mjmos@mjmhadoop ~]$ ssh-copy-id mjmos@192.168.195.110
[mjmos@mjmhadoop ~]$ ssh-copy-id mjmhadoop

3.环境验证

启动

# 在启动过程中首次连接会提示确认主机指纹,输入yes即可
[mjmos@mjmhadoop ~]$ start-all.sh

检查

[mjmos@mjmhadoop ~]$ jps
6898 ResourceManager
6770 SecondaryNameNode
6444 NameNode
7309 Jps
7039 NodeManager
6591 DataNode

访问

http://192.168.195.110:50070/

4.测试

hdfs新建目录

hdfs dfs -mkdir -p /test/tb1

本地新建文件
echo "miaojiaming1" > /home/mjmos/t1_1
echo "miaojiaming2" > /home/mjmos/t1_2

上传文件
hdfs dfs -put /home/mjmos/t1_1 /test/tb1
hdfs dfs -put /home/mjmos/t1_2 /test/tb1

查看文件
hdfs dfs -cat /test/tb1/t1_1
hdfs dfs -cat /test/tb1/t1_2

本地拉取测试
hdfs dfs -get hdfs://mjmos@192.168.195.110:9000/test/tb1/t1_1 /tmp
hdfs dfs -get hdfs://mjmos@192.168.195.110:9000/test/tb1/t1_2 /tmp
[mjmos@mjmhadoop ~]$ cat /tmp/t1_1
miaojiaming1
[mjmos@mjmhadoop ~]$ cat /tmp/t1_2
miaojiaming2

加载
load data infile 'hdfs://mjmos@192.168.195.110/test/tb1/t1_*' into table t1;

5.kerberos安装及配置

# 安装kerberos服务端
#yum install -y krb5-server krb5-libs krb5-workstation krb5-auth-dialog
[root@mjmhadoop ~]$ yum install -y krb5-server krb5-libs krb5-workstation

# KDC的相关信息配置
# MJM.COM自定义的 realm 名称可修改
[root@mjmhadoop ~]$ cat >/var/kerberos/krb5kdc/kdc.conf<<'eof'
[kdcdefaults]
kdc_ports = 88
kdc_tcp_ports = 88

[realms]
MJM.COM = {
#master_key_type = aes256-cts
acl_file = /var/kerberos/krb5kdc/kadm5.acl
dict_file = /usr/share/dict/words
admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
permitted_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal camellia256-cts:normal camellia128-cts:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
}
eof


# 配置Kerberos
[root@mjmhadoop /opt/module/hadoop-2.10.2/etc/hadoop]$ cat > /etc/krb5.conf <<'eof'
# Configuration snippets may be placed in this directory as well
includedir /etc/krb5.conf.d/

[logging]
default = FILE:/var/log/krb5libs.log
kdc = FILE:/var/log/krb5kdc.log
admin_server = FILE:/var/log/kadmind.log

[libdefaults]
dns_lookup_realm = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
rdns = false
default_realm = MJM.COM

[realms]
MJM.COM = {
kdc = mjmhadoop
admin_server = mjmhadoop
}

[domain_realm]
.mjm.com = MJM.COM
mjm.com = MJM.COM
eof

# 创建/初始化Kerberos database
#这里会让输入密码
[root@mjmhadoop ~]# kdb5_util create -s -r MJM.COM

# 启动服务
[root@mjmhadoop ~]# service krb5kdc start
[root@mjmhadoop ~]# service kadmin start

# 为database administrator设置ACL权限
[root@mjmhadoop ~]# echo '*/admin@MJM.COM *' >/var/kerberos/krb5kdc/kadm5.acl

# 创建认证凭证
[root@mjmhadoop ~]$ kadmin.local -q "addprinc -randkey root/mjmhadoop@MJM.COM"
[root@mjmhadoop ~]$ kadmin.local -q "addprinc -randkey HTTP/mjmhadoop@MJM.COM"
[root@mjmhadoop ~]$ kadmin.local -q "addprinc -randkey mjmos/mjmhadoop@MJM.COM"

# 创建认证凭证文件
[root@mjmhadoop ~]$ mkdir /home/kerberos
[root@mjmhadoop ~]$ cd /home/kerberos
[root@mjmhadoop /home/kerberos]$ kadmin.local -q "xst -k root-mjmhadoop.keytab root/mjmhadoop@MJM.COM"
[root@mjmhadoop /home/kerberos]$ kadmin.local -q "xst -k mjmos-mjmhadoop.keytab mjmos/mjmhadoop@MJM.COM"
[root@mjmhadoop /home/kerberos]$ kadmin.local -q "xst -k http-mjmhadoop.keytab HTTP/mjmhadoop@MJM.COM"

# 合并keytab文件
[root@mjmhadoop /home/kerberos]$ ktutil
ktutil: rkt http-mjmhadoop.keytab
ktutil: rkt mjmos-mjmhadoop.keytab
ktutil: rkt root-mjmhadoop.keytab
ktutil: wkt merged.keytab
ktutil: q

# 验证下keytab文件内容
[root@mjmhadoop /home/kerberos]$ klist -k -t merged.keytab
Keytab name: FILE:merged.keytab
KVNO Timestamp Principal
---- ------------------- ------------------------------------------------------
2 06/20/2025 16:43:17 HTTP/mjmhadoop@MJM.COM
2 06/20/2025 16:43:17 HTTP/mjmhadoop@MJM.COM
2 06/20/2025 16:43:17 mjmos/mjmhadoop@MJM.COM
2 06/20/2025 16:43:17 mjmos/mjmhadoop@MJM.COM
2 06/20/2025 16:43:17 root/mjmhadoop@MJM.COM
2 06/20/2025 16:43:17 root/mjmhadoop@MJM.COM

# 查看当前认证用户
[root@mjmhadoop /home/kerberos]$ klist
klist: No credentials cache found (filename: /tmp/krb5cc_0)

# 使用合并的keytab文件认证
[root@mjmhadoop /home/kerberos]$ kinit -V -k -t /home/kerberos/merged.keytab root/mjmhadoop@MJM.COM
Using default cache: /tmp/krb5cc_0
Using principal: root/mjmhadoop@MJM.COM
Using keytab: /home/kerberos/merged.keytab
Authenticated to Kerberos v5

# 再次查看当前认证用户
[root@mjmhadoop /home/kerberos]$ klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: root/mjmhadoop@MJM.COM

Valid starting Expires Service principal
06/20/2025 17:17:26 06/21/2025 17:17:26 krbtgt/MJM.COM@MJM.COM
renew until 06/20/2025 17:17:26
## 删除当前认证的缓存:kdestroy

# 移动认证文件改权限
[root@mjmhadoop /home/kerberos]$ cp merged.keytab /opt/module/
[root@mjmhadoop /home/kerberos]$ chown mjmos:mjmos /opt/module/merged.keytab


# core-site.xml文件配置
[root@mjmhadoop /home/kerberos]$ cat > /opt/module/hadoop-2.10.2/etc/hadoop/core-site.xml <<'eof'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/module/hadoop-2.10.2/data/tmp</value>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://192.168.195.110:9000</value>
    </property>
    <!-- 启用Kerberos认证与授权 -->
    <property>
        <name>hadoop.security.authentication</name>
        <value>kerberos</value>
    </property>
    <property>
        <name>hadoop.security.authorization</name>
        <value>true</value>
    </property>
    <property>
        <name>hadoop.rpc.protection</name>
        <value>authentication</value>
    </property>
</configuration>
eof


# hdfs-site.xml文件配置
[root@mjmhadoop /home/kerberos]$ cat > /opt/module/hadoop-2.10.2/etc/hadoop/hdfs-site.xml <<'eof'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <!-- Kerberos相关配置 -->
    <property>
        <name>dfs.block.access.token.enable</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir.perm</name>
        <value>700</value>
    </property>
    <property>
        <name>dfs.namenode.keytab.file</name>
        <value>/opt/module/merged.keytab</value>
    </property>
    <property>
        <name>dfs.namenode.kerberos.principal</name>
        <value>root/_HOST@MJM.COM</value>
    </property>
    <property>
        <name>dfs.namenode.kerberos.https.principal</name>
        <value>HTTP/_HOST@MJM.COM</value>
    </property>
    <property>
        <name>dfs.datanode.keytab.file</name>
        <value>/opt/module/merged.keytab</value>
    </property>
    <property>
        <name>dfs.datanode.kerberos.principal</name>
        <value>root/_HOST@MJM.COM</value>
    </property>
    <property>
        <name>dfs.datanode.kerberos.https.principal</name>
        <value>HTTP/_HOST@MJM.COM</value>
    </property>
    <property>
        <name>dfs.datanode.address</name>
        <value>0.0.0.0:61004</value>
    </property>
    <property>
        <name>dfs.datanode.http.address</name>
        <value>0.0.0.0:61006</value>
    </property>
    <!-- WebHDFS安全配置 -->
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.web.authentication.kerberos.keytab</name>
        <value>/opt/module/merged.keytab</value>
    </property>
    <property>
        <name>dfs.web.authentication.kerberos.principal</name>
        <value>HTTP/_HOST@MJM.COM</value>
    </property>
    <!-- HTTPS与数据传输保护 -->
    <property>
        <name>dfs.http.policy</name>
        <value>HTTPS_ONLY</value>
    </property>
    <property>
        <name>dfs.data.transfer.protection</name>
        <value>integrity</value>
    </property>
    <property>
        <name>dfs.permissions.supergroup</name>
        <value>supergroup</value>
        <description>The name of the group of super-users.</description>
    </property>
    <property>
        <name>dfs.secondary.namenode.keytab.file</name>
        <value>/opt/module/merged.keytab</value>
    </property>
    <property>
        <name>dfs.secondary.namenode.kerberos.principal</name>
        <value>root/_HOST@MJM.COM</value>
    </property>
</configuration>
eof

# 证书配置
# ca_key文件生成,这里输入的密码为test999
openssl req -new -x509 -keyout ca_key -out ca_cert -days 9999 -subj '/C=CN/ST=hunan/L=changsha/O=dtdream/OU=security/CN=mjm.com'
# 生成keystore文件
keytool -keystore keystore -alias localhost -validity 9999 -genkey -keyalg RSA -keysize 2048 -dname "CN=mjm.com, OU=test, O=test, L=changsha, ST=hunan, C=cn"
# 生成truststore文件同时导入CA证书
keytool -keystore truststore -alias CARoot -import -file ca_cert
# 从 keystore 中导出 cert
keytool -certreq -alias localhost -keystore keystore -file cert
# 用 CA 对 cert 签名,生成自签证书,注意最后的test999
openssl x509 -req -CA ca_cert -CAkey ca_key -in cert -out cert_signed -days 9999 -CAcreateserial -passin pass:test999
# 将 CA 的 cert 和导入 keystore文件
keytool -keystore keystore -alias CARoot -import -file ca_cert
# 用 CA 自签名之后的 cert 也导入 keystore文件
keytool -keystore keystore -alias localhost -import -file cert_signed


# keystore和truststore准备
[root@mjmhadoop /home/kerberos]$ cp keystore /opt/module/
[root@mjmhadoop /home/kerberos]$ cp truststore /opt/module/
[root@mjmhadoop /home/kerberos]$ chown mjmos:mjmos /opt/module/keystore
[root@mjmhadoop /home/kerberos]$ chown mjmos:mjmos /opt/module/truststore


# 解决DataNode报错,编译安装JSVC
启动DataNode时发生如下异常:
2018-03-02 17:20:15,261 FATAL org.apache.hadoop.hdfs.server.datanode.DataNode: Exception in secureMain
java.lang.RuntimeException: Cannot start secure DataNode without configuring either privileged resources or SASL RPC data transfer protection and SSL for HTTP. Using privileged resources in combination with SASL RPC data transfer protection is not supported.
----------------------------------------------------------
[mjmos@mjmhadoop /opt/module]$ cd /opt/module
[mjmos@mjmhadoop /opt/module]$ wget https://archive.apache.org/dist/commons/daemon/source/commons-daemon-1.0.15-src.tar.gz
[mjmos@mjmhadoop /opt/module]$ wget https://archive.apache.org/dist/commons/daemon/binaries/commons-daemon-1.0.15-bin.tar.gz
[mjmos@mjmhadoop /opt/module]$ tar xf commons-daemon-1.0.15-src.tar.gz
[mjmos@mjmhadoop /opt/module]$ tar xf commons-daemon-1.0.15-bin.tar.gz
[mjmos@mjmhadoop /opt/module]$ cd commons-daemon-1.0.15-src/src/native/unix/
[mjmos@mjmhadoop /opt/module/commons-daemon-1.0.15-src/src/native/unix]$ ./configure
[mjmos@mjmhadoop /opt/module/commons-daemon-1.0.15-src/src/native/unix]$ make
[mjmos@mjmhadoop /opt/module/commons-daemon-1.0.15-src/src/native/unix]$ cp jsvc /opt/module/hadoop-2.10.2/libexec/
[mjmos@mjmhadoop /opt/module/commons-daemon-1.0.15-src/src/native/unix]$ rm /opt/module/hadoop-2.10.2/share/hadoop/hdfs/lib/commons-daemon-*.jar
[mjmos@mjmhadoop /opt/module/commons-daemon-1.0.15-src/src/native/unix]$ cd /opt/module/
[mjmos@mjmhadoop /opt/module]$ cp commons-daemon-1.0.15/commons-daemon-1.0.15.jar /opt/module/hadoop-2.10.2/share/hadoop/hdfs/lib/
[mjmos@mjmhadoop /opt/module]$ sed -i 's#export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}#export HADOOP_SECURE_DN_USER=mjmos#g' /opt/module/hadoop-2.10.2/etc/hadoop/hadoop-env.sh
[mjmos@mjmhadoop /opt/module]$ sed -i 's,#export JSVC_HOME=${JSVC_HOME},export JSVC_HOME=/opt/module/hadoop-2.10.2/libexec/,g' /opt/module/hadoop-2.10.2/etc/hadoop/hadoop-env.sh


# ssl-client.xml文件配置
[root@mjmhadoop /opt/module/hadoop-2.10.2/etc/hadoop]$ cat > /opt/module/hadoop-2.10.2/etc/hadoop/ssl-client.xml <<'eof'
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- 注意:ssl-client.xml中的属性前缀应为ssl.client.*(原文误用了ssl.server.*,此处已更正) -->
<configuration>
    <property>
        <name>ssl.client.truststore.location</name>
        <value>/opt/module/truststore</value>
        <description>Truststore to be used by clients. Must be specified.</description>
    </property>
    <property>
        <name>ssl.client.truststore.password</name>
        <value>test999</value>
        <description>Optional. Default value is "".</description>
    </property>
    <property>
        <name>ssl.client.truststore.type</name>
        <value>jks</value>
        <description>Optional. The keystore file format, default value is "jks".</description>
    </property>
    <property>
        <name>ssl.client.truststore.reload.interval</name>
        <value>10000</value>
        <description>Truststore reload check interval, in milliseconds.
        Default value is 10000 (10 seconds).</description>
    </property>
    <property>
        <name>ssl.client.keystore.location</name>
        <value>/opt/module/keystore</value>
        <description>Keystore to be used by clients. Must be specified.</description>
    </property>
    <property>
        <name>ssl.client.keystore.password</name>
        <value>test999</value>
        <description>Must be specified.</description>
    </property>
    <property>
        <name>ssl.client.keystore.keypassword</name>
        <value>test999</value>
        <description>Must be specified.</description>
    </property>
    <property>
        <name>ssl.client.keystore.type</name>
        <value>jks</value>
        <description>Optional. The keystore file format, default value is "jks".</description>
    </property>
    <property>
        <name>ssl.client.exclude.cipher.list</name>
        <value>TLS_ECDHE_RSA_WITH_RC4_128_SHA,SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
        SSL_RSA_WITH_DES_CBC_SHA,SSL_DHE_RSA_WITH_DES_CBC_SHA,
        SSL_RSA_EXPORT_WITH_RC4_40_MD5,SSL_RSA_EXPORT_WITH_DES40_CBC_SHA,
        SSL_RSA_WITH_RC4_128_MD5</value>
        <description>Optional. The weak security cipher suites that you want excluded
        from SSL communication.</description>
    </property>
</configuration>
eof


# ssl-server.xml文件配置
[root@mjmhadoop /opt/module/hadoop-2.10.2/etc/hadoop]$ cat > /opt/module/hadoop-2.10.2/etc/hadoop/ssl-server.xml <<'eof'
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>ssl.server.truststore.location</name>
        <value>/opt/module/truststore</value>
        <description>Truststore to be used by NN and DN. Must be specified.</description>
    </property>
    <property>
        <name>ssl.server.truststore.password</name>
        <value>test999</value>
        <description>Optional. Default value is "".</description>
    </property>
    <property>
        <name>ssl.server.truststore.type</name>
        <value>jks</value>
        <description>Optional. The keystore file format, default value is "jks".</description>
    </property>
    <property>
        <name>ssl.server.truststore.reload.interval</name>
        <value>10000</value>
        <description>Truststore reload check interval, in milliseconds.
        Default value is 10000 (10 seconds).</description>
    </property>
    <property>
        <name>ssl.server.keystore.location</name>
        <value>/opt/module/keystore</value>
        <description>Keystore to be used by NN and DN. Must be specified.</description>
    </property>
    <property>
        <name>ssl.server.keystore.password</name>
        <value>test999</value>
        <description>Must be specified.</description>
    </property>
    <property>
        <name>ssl.server.keystore.keypassword</name>
        <value>test999</value>
        <description>Must be specified.</description>
    </property>
    <property>
        <name>ssl.server.keystore.type</name>
        <value>jks</value>
        <description>Optional. The keystore file format, default value is "jks".</description>
    </property>
    <property>
        <name>ssl.server.exclude.cipher.list</name>
        <value>TLS_ECDHE_RSA_WITH_RC4_128_SHA,SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
        SSL_RSA_WITH_DES_CBC_SHA,SSL_DHE_RSA_WITH_DES_CBC_SHA,
        SSL_RSA_EXPORT_WITH_RC4_40_MD5,SSL_RSA_EXPORT_WITH_DES40_CBC_SHA,
        SSL_RSA_WITH_RC4_128_MD5</value>
        <description>Optional. The weak security cipher suites that you want excluded
        from SSL communication.</description>
    </property>
</configuration>
eof

# 启动hadoop服务
[root@mjmhadoop /opt/module]$ su - mjmos
Last login: Fri Jun 20 21:58:18 CST 2025 on pts/0
[mjmos@mjmhadoop ~]$ jps
14505 Jps
[mjmos@mjmhadoop ~]$ start
start-all.cmd start-balancer.sh start-dfs.sh start-secure-dns.sh start-yarn.cmd
start-all.sh start-dfs.cmd start-pulseaudio-x11 startx start-yarn.sh
[mjmos@mjmhadoop ~]$ start-all.sh
[mjmos@mjmhadoop ~]$ hadoop-daemon.sh start datanode
[mjmos@mjmhadoop ~]$ jps
14659 NameNode
14840 SecondaryNameNode
15165 DataNode
15246 Jps
[mjmos@mjmhadoop ~]$ exit
logout

# 确认现在是未认证的状态
[root@mjmhadoop /opt/module]$ klist
klist: No credentials cache found (filename: /tmp/krb5cc_0)

# 查看文件
[root@mjmhadoop /opt/module]$ hdfs dfs -cat /test/tb1/t1_1
[root@mjmhadoop /opt/module]$ hdfs dfs -cat /test/tb1/t1_2
#都会报错如下:
cat: Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt)]; Host Details : local host is: "mjmhadoop/192.168.195.110"; destination host is: "mjmhadoop":9000;


# 使用合并的keytab文件认证
[root@mjmhadoop /opt/module]$ kinit -V -k -t /home/kerberos/merged.keytab root/mjmhadoop@MJM.COM
Using default cache: /tmp/krb5cc_0
Using principal: root/mjmhadoop@MJM.COM
Using keytab: /home/kerberos/merged.keytab
Authenticated to Kerberos v5
[root@mjmhadoop /opt/module]$ klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: root/mjmhadoop@MJM.COM

Valid starting Expires Service principal
06/20/2025 22:14:30 06/21/2025 22:14:30 krbtgt/MJM.COM@MJM.COM
renew until 06/20/2025 22:14:30


# 能成功的查看文件
[root@mjmhadoop /opt/module]$ hdfs dfs -cat /test/tb1/t1_1
25/06/20 22:14:58 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
miaojiaming1
[root@mjmhadoop /opt/module]$ hdfs dfs -cat /test/tb1/t1_2
25/06/20 22:15:03 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
miaojiaming2


# 本地拉取测试
[root@mjmhadoop /opt/module]$ hdfs dfs -get hdfs://mjmos@192.168.195.110:9000/test/tb1/t1_1 /tmp
[root@mjmhadoop /opt/module]$ hdfs dfs -get hdfs://mjmos@192.168.195.110:9000/test/tb1/t1_2 /tmp


load data infile 'hdfs://mjmos@192.168.195.110/test/tb1/t1_*' into table t1;

版权声明: 本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若内容造成侵权/违法违规/事实不符,请联系邮箱:809451989@qq.com进行投诉反馈,一经查实,立即删除!
网站建设 2026/4/17 5:40:09

基于深度学习的无人机视角检测系统演示与介绍(YOLOv12/v11/v8/v5模型+Pyqt5界面+训练代码+数据集)

视频演示 基于深度学习的无人机视角检测系统1. 前言​ 无人机凭借其灵活性强、成本低、视角独特等优势&#xff0c;已成为环境监测、交通管理、农业勘测等领域的重要工具。然而&#xff0c;无人机航拍图像中的目标往往尺寸较小、分布密集&#xff0c;且常受到光照变化、复杂背…

作者头像 李华
网站建设 2026/4/17 12:56:10

声纹识别终极指南:如何用pipecat构建零密码认证系统

声纹识别终极指南&#xff1a;如何用pipecat构建零密码认证系统 【免费下载链接】pipecat Open Source framework for voice and multimodal conversational AI 项目地址: https://gitcode.com/GitHub_Trending/pi/pipecat 你是否厌倦了记忆复杂的密码&#xff1f;是否担…

作者头像 李华
网站建设 2026/4/18 6:23:39

Android高斯模糊终极指南:Blurry库完全解析

Android高斯模糊终极指南&#xff1a;Blurry库完全解析 【免费下载链接】Blurry Blurry is an easy blur library for Android 项目地址: https://gitcode.com/gh_mirrors/bl/Blurry 还在为Android应用中实现精美的模糊效果而头疼吗&#xff1f;复杂的算法、性能瓶颈、兼…

作者头像 李华
网站建设 2026/4/18 8:02:51

性价比高的车联网时序数据库哪个靠谱

性价比高的车联网时序数据库&#xff1a;TDengine的优势展现行业痛点分析车联网时序数据库领域面临着诸多技术挑战。随着车联网的快速发展&#xff0c;车辆产生的数据量呈爆炸式增长&#xff0c;数据的实时性、准确性要求极高。同时&#xff0c;车联网数据具有多源异构的特点&a…

作者头像 李华
网站建设 2026/4/17 8:39:25

永磁电机RBP神经自适应PID控制探索:理论、MATLAB仿真与资料分享

永磁电机RBP神经自适应PID控制&#xff08;送配套资料 MATLAB仿真模型 永磁电机转速3000转&#xff0c;2s时&#xff0c;负载转矩由10到15 电流环采用PID&#xff0c;转速环采用RBP神经元自适应PID控制 送配套资料 包括原理讲解和代码讲解在电机控制领域&#xff0c;永磁电机凭…

作者头像 李华