Tags: spring, SequoiaDB learning notes, java, database
su sdbadmin
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
CREATE USER 'metauser'@'%' IDENTIFIED BY 'metauser';
GRANT ALL ON *.* TO 'metauser'@'%';
CREATE DATABASE metastore CHARACTER SET 'latin1' COLLATE 'latin1_bin';
FLUSH PRIVILEGES;
quit;
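Before pointing Hive at this metastore, it can be worth confirming the new account over JDBC. A minimal sketch in Java (assuming the mysql-connector-java jar copied in a later step is on the classpath; it uses the same URL and credentials that hive-site.xml will use below):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class MetastoreCheck {
    public static void main(String[] args) throws Exception {
        // Same URL and credentials that hive-site.xml uses below
        String url = "jdbc:mysql://localhost:3306/metastore";
        try (Connection conn = DriverManager.getConnection(url, "metauser", "metauser");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT 1")) {
            rs.next();
            System.out.println("metastore reachable: " + rs.getInt(1));
        }
    }
}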
cat > /opt/apache-hive-1.2.2-bin/conf/hive-site.xml << EOF
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost/metastore?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>metauser</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>metauser</value>
  </property>
  <property>
    <name>hive.test.authz.sstd.hs2.mode</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.server2.enable.doAs</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.users.in.admin.role</name>
    <value>root</value>
  </property>
  <property>
    <name>hive.server2.thrift.port</name>
    <value>9073</value>
  </property>
  <property>
    <name>hive.server2.authentication</name>
    <value>CUSTOM</value>
  </property>
  <property>
    <name>hive.server2.custom.authentication.class</name>
    <value>com.sequoiadb.spark.sql.hive.SequoiadbAuth</value>
  </property>
  <property>
    <name>hive.security.authorization.manager</name>
    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
  </property>
</configuration>
EOF
cp spark-authorizer-2.1.1.jar /opt/apache-hive-1.2.2-bin/auxlib
cp mysql-connector-java-5.1.7-bin.jar /opt/apache-hive-1.2.2-bin/auxlib
export HADOOP_HOME=/opt/hadoop-2.9.2
/opt/apache-hive-1.2.2-bin/bin/schematool -dbType mysql -initSchema
su sdbadmin
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
use metastore;
create table DBUSER (dbuser varchar(100), passwd char(50), primary key (dbuser));
insert into DBUSER(dbuser, passwd) values ('root', md5('admin'));
This pre-creates a root user for the thrift server, with password 'admin'. To add more users later, run similar INSERT statements.
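The hive.server2.custom.authentication.class configured above (com.sequoiadb.spark.sql.hive.SequoiadbAuth) is what consults this table; its source isn't reproduced here, but a HiveServer2 custom authenticator of this kind implements Hive's PasswdAuthenticationProvider interface and compares the MD5 of the supplied password against DBUSER. A hypothetical sketch, not SequoiaDB's actual implementation:

import java.math.BigInteger;
import java.security.MessageDigest;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import javax.security.sasl.AuthenticationException;
import org.apache.hive.service.auth.PasswdAuthenticationProvider;

public class DbUserAuth implements PasswdAuthenticationProvider {
    @Override
    public void Authenticate(String user, String password) throws AuthenticationException {
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:mysql://localhost:3306/metastore", "metauser", "metauser");
             PreparedStatement ps = conn.prepareStatement(
                 "SELECT passwd FROM DBUSER WHERE dbuser = ?")) {
            ps.setString(1, user);
            try (ResultSet rs = ps.executeQuery()) {
                // MySQL's md5() stores lowercase hex, so compare against the same digest
                if (!rs.next() || !rs.getString(1).equals(md5Hex(password))) {
                    throw new AuthenticationException("Invalid user or password: " + user);
                }
            }
        } catch (AuthenticationException e) {
            throw e;
        } catch (Exception e) {
            throw new AuthenticationException("Authentication lookup failed", e);
        }
    }

    private static String md5Hex(String s) throws Exception {
        byte[] d = MessageDigest.getInstance("MD5").digest(s.getBytes("UTF-8"));
        return String.format("%032x", new BigInteger(1, d));
    }
}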
The trigger below forces every database subsequently registered in the metastore to be owned by the public role, so that access is governed by SQL-standard grants rather than by individual ownership:
delimiter ||
create trigger dbs_trigger
before insert on DBS
for each row
begin
set new.OWNER_NAME="public";
set new.OWNER_TYPE="ROLE";
end ||
delimiter ;
cp spark-authorizer-2.1.1.jar /opt/spark/jars
cp mysql-connector-java-5.1.7-bin.jar /opt/spark/jars
cat > /opt/spark/conf/hive-site.xml << EOF
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost/metastore?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>metauser</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>metauser</value>
  </property>
  <property>
    <name>hive.security.authorization.createtable.owner.grants</name>
    <value>INSERT,SELECT</value>
  </property>
  <property>
    <name>hive.security.authorization.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.security.authorization.manager</name>
    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
  </property>
  <property>
    <name>hive.test.authz.sstd.hs2.mode</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.server2.authentication</name>
    <value>CUSTOM</value>
  </property>
  <property>
    <name>hive.server2.custom.authentication.class</name>
    <value>com.sequoiadb.spark.sql.hive.SequoiadbAuth</value>
  </property>
</configuration>
EOF
Then register the authorizer extension in Spark's configuration (for example in /opt/spark/conf/spark-defaults.conf):
spark.sql.extensions=org.apache.ranger.authorization.spark.authorizer.SequoiadbSparkSQLExtension
/opt/spark/sbin/start-all.sh
/opt/spark/sbin/start-thriftserver.sh
netstat -anp|grep 10000
/opt/spark/bin/beeline -u jdbc:hive2://localhost:10000 -n root -p admin
When a table is created in Spark SQL, the USER who created it gets INSERT and SELECT privileges on it. For any other USER to access the table, connect to Hive's thrift server and run GRANT statements giving that USER the corresponding privileges:
${HIVE_HOME}/bin/hiveserver2 >${HIVE_HOME}/hive_thriftserver.log 2>&1 &
${HIVE_HOME}/bin/beeline -u jdbc:hive2://localhost:9073 -n root -p admin
set role admin;
grant SELECT on table test to user USERNAME;
grant INSERT on table test to user USERNAME;
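The same grants can then be exercised from application code through the Hive JDBC driver (the hive-jdbc dependency listed in the pom below). A minimal sketch; USERNAME and its password are placeholders for an account created in DBUSER as above:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ThriftServerQuery {
    public static void main(String[] args) throws Exception {
        Class.forName("org.apache.hive.jdbc.HiveDriver");
        // Spark thrift server from the steps above; use port 9073 for Hive's own thrift server
        String url = "jdbc:hive2://localhost:10000/default";
        try (Connection conn = DriverManager.getConnection(url, "USERNAME", "PASSWORD");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT * FROM test")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}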
var db = new Sdb("localhost", 11810);
db.createDomain("scottdomain", ["datagroup1", "datagroup2", "datagroup3"], { AutoSplit: true });
db.createCS("scott", { Domain: "scottdomain" });
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
create database scott;
use scott;
create table emp(
empno int unsigned auto_increment primary key COMMENT 'employee number',
ename varchar(15) COMMENT 'employee name',
job varchar(10) COMMENT 'job title',
mgr int unsigned COMMENT 'employee number of the manager',
hiredate date COMMENT 'hire date',
sal decimal(7,2) COMMENT 'base salary',
comm decimal(7,2) COMMENT 'commission',
deptno int unsigned COMMENT 'department number'
) ENGINE = sequoiadb COMMENT = "employee table, sequoiadb: { table_options: { ShardingKey: { 'empno': 1 }, ShardingType: 'hash', 'Compressed': true, 'CompressionType': 'lzw', 'AutoSplit': true, 'EnsureShardingIndex': false } }";
INSERT INTO emp VALUES (7369,'SMITH','CLERK',7902,'1980-12-17',800,NULL,20);
INSERT INTO emp VALUES (7499,'ALLEN','SALESMAN',7698,'1981-2-20',1600,300,30);
INSERT INTO emp VALUES (7521,'WARD','SALESMAN',7698,'1981-2-22',1250,500,30);
INSERT INTO emp VALUES (7566,'JONES','MANAGER',7839,'1981-4-2',2975,NULL,20);
INSERT INTO emp VALUES (7654,'MARTIN','SALESMAN',7698,'1981-9-28',1250,1400,30);
INSERT INTO emp VALUES (7698,'BLAKE','MANAGER',7839,'1981-5-1',2850,NULL,30);
INSERT INTO emp VALUES (7782,'CLARK','MANAGER',7839,'1981-6-9',2450,NULL,10);
INSERT INTO emp VALUES (7788,'SCOTT','ANALYST',7566,'1987-7-13',3000,NULL,20);
INSERT INTO emp VALUES (7839,'KING','PRESIDENT',NULL,'1981-11-17',5000,NULL,10);
INSERT INTO emp VALUES (7844,'TURNER','SALESMAN',7698,'1981-9-8',1500,100,30);
INSERT INTO emp VALUES (7876,'ADAMS','CLERK',7788,'1987-7-13',1100,NULL,20);
INSERT INTO emp VALUES (7900,'JAMES','CLERK',7698,'1981-12-3',950,NULL,30);
INSERT INTO emp VALUES (7902,'FORD','ANALYST',7566,'1981-12-3',3000,NULL,20);
INSERT INTO emp VALUES (7934,'MILLER','CLERK',7782,'1982-1-23',1300,NULL,10);
<!-- Druid connection pool and fastjson -->
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>fastjson</artifactId>
    <version>1.2.58</version>
</dependency>
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid-spring-boot-starter</artifactId>
    <version>1.1.18</version>
</dependency>
<!-- MyBatis-Plus -->
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-core</artifactId>
    <version>${mybatis-plus.version}</version>
</dependency>
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-extension</artifactId>
    <version>${mybatis-plus.version}</version>
</dependency>
<!-- Dependencies for connecting to Spark over JDBC -->
<dependency>
    <groupId>commons-logging</groupId>
    <artifactId>commons-logging</artifactId>
    <version>1.1.3</version>
</dependency>
<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-exec</artifactId>
    <version>1.2.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-metastore</artifactId>
    <version>1.2.1</version>
</dependency>
<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpclient</artifactId>
    <version>4.5.2</version>
</dependency>
<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpcore</artifactId>
    <version>4.4.4</version>
</dependency>
<dependency>
    <groupId>org.apache.thrift</groupId>
    <artifactId>libthrift</artifactId>
    <version>0.9.2</version>
</dependency>
<dependency>
    <groupId>log4j</groupId>
    <artifactId>log4j</artifactId>
    <version>1.2.17</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-api -->
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-api</artifactId>
    <version>1.7.10</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-log4j12 -->
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-log4j12</artifactId>
    <version>1.7.10</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-hive-thriftserver -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-hive-thriftserver_2.11</artifactId>
    <version>2.0.1</version>
    <scope>provided</scope>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-network-common -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-network-common_2.11</artifactId>
    <version>2.0.1</version>
</dependency>
<!-- SequoiaDB driver -->
<dependency>
    <groupId>com.sequoiadb</groupId>
    <artifactId>sequoiadb-driver</artifactId>
    <version>3.2.1</version>
</dependency>
<!-- SequoiaDB-Spark connector -->
<dependency>
    <groupId>com.sequoiadb</groupId>
    <artifactId>spark-sequoiadb_2.11</artifactId>
    <version>2.8.0</version>
</dependency>
<dependency>
    <groupId>com.sequoiadb</groupId>
    <artifactId>spark-sequoiadb-scala_2.11.2</artifactId>
    <version>1.12</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-sql -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
    <version>2.2.2</version>
</dependency>
<!-- Hive JDBC client -->
<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-jdbc</artifactId>
    <version>1.2.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>3.2.0</version>
</dependency>
server.port=8090
# datasource config
# connection pool type
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
# JDBC driver
spring.datasource.driver-class-name=org.apache.hive.jdbc.HiveDriver
# connection URL, username and password
spring.datasource.url=jdbc:hive2://192.168.80.132:10000/default
spring.datasource.username=root
spring.datasource.password=admin
# number of connections created at startup
spring.datasource.druid.initialSize=1
# minimum number of idle connections
spring.datasource.druid.minIdle=5
# maximum number of active connections
spring.datasource.druid.maxActive=20
# maximum time to wait for a connection, in milliseconds
spring.datasource.druid.maxWait=60000
# interval between eviction runs that close idle connections, in milliseconds
spring.datasource.druid.timeBetweenEvictionRunsMillis=60000
# minimum time a connection stays idle in the pool before it may be evicted, in milliseconds
spring.datasource.druid.minEvictableIdleTimeMillis=300000
# SQL used to validate connections; must be a query
spring.datasource.druid.validation-query=SELECT 1
# mybatis
#mybatis-plus.mapper-locations=classpath:mapper/*.xml
#mybatis-plus.configuration.cache-enabled=false
# location of the mapper XML files
mybatis.mapper-locations=classpath:mapper/*.xml
# package to scan for entity type aliases
mybatis.type-aliases-package=com.sdb.spark.demo.entity
# spring mvc static resources
spring.mvc.static-path-pattern=/static/**
# hot reload (devtools)
spring.devtools.restart.enabled=true
spring.devtools.restart.additional-paths=src/main/java
spring.devtools.restart.exclude=WEB-INF/**
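With these properties, Spring Boot builds a Druid pool over the Hive JDBC driver at startup. An optional way to verify the wiring is a CommandLineRunner (a sketch, not part of the original demo) that borrows one pooled connection and runs the validation query:

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
import javax.sql.DataSource;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

@Component
public class DataSourceSmokeTest implements CommandLineRunner {
    @Autowired
    private DataSource dataSource;

    @Override
    public void run(String... args) throws Exception {
        // Borrow a pooled connection and run the same query configured as validation-query
        try (Connection conn = dataSource.getConnection();
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT 1")) {
            rs.next();
            System.out.println("Hive JDBC pool OK: " + rs.getInt(1));
        }
    }
}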
/**
 * Employee entity.
 *
 * @author yousongxian
 * @date 2020-07-29
 */
public class Emp {
    private Integer empno;   // employee number
    private String ename;    // employee name
    private String job;      // job title
    private Integer mgr;     // manager's employee number
    private String hiredate; // hire date
    private Double sal;      // base salary
    private Double comm;     // commission
    private Integer deptno;  // department number

    // getters and setters omitted
    @Override
    public String toString() {
        return "Emp{" +
                "empno=" + empno +
                ", ename='" + ename + '\'' +
                ", job='" + job + '\'' +
                ", mgr=" + mgr +
                ", hiredate='" + hiredate + '\'' +
                ", sal=" + sal +
                ", comm=" + comm +
                ", deptno=" + deptno +
                '}';
    }
}
<!-- Base result map -->
<resultMap id="BaseResultMap" type="com.sdb.spark.demo.entity.Emp">
    <id column="empno" property="empno"/>
    <result column="ename" property="ename"/>
    <result column="job" property="job"/>
    <result column="mgr" property="mgr"/>
    <result column="hiredate" property="hiredate"/>
    <result column="sal" property="sal"/>
    <result column="comm" property="comm"/>
    <result column="deptno" property="deptno"/>
</resultMap>
<!-- Query all rows of a given table -->
<select id="selectAll" resultType="map" parameterType="string">
    select * from ${tablename}
</select>
Note: ${tablename} is spliced into the statement as raw text (a table name cannot be bound with #{}), so its value must never come from untrusted input.
<update id="createTableEmp">
CREATE TABLE emp
(
empno INT,
ename STRING,
job STRING,
mgr INT,
hiredate date,
sal decimal(7,2),
comm decimal(7,2),
deptno INT
)
USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
collectionspace 'scott',
collection 'emp'
)</update>
<update id="createTableEmpSchema">
CREATE TABLE emp_schema USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
collectionspace 'scott',
collection 'emp'
)
</update>
<update id="createTableAsSelect" parameterType="map">
CREATE TABLE ${tablename} USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
domain 'scottdomain',
collectionspace 'scott',
collection #{tablename},
shardingkey '{"_id":1}',
shadingtype 'hash',
autosplit true
)AS ${condition}
</update>
The mapper interface. The namespace in the mapper XML must be the interface's fully qualified name (com.sdb.spark.demo.mapper.EmpMapper), and each method name must match a statement id defined in that XML (the insertEmp statement is not shown in the original):

public interface EmpMapper {
    List<Map<String, Object>> selectAll(String tablename); // query all rows
    int createTableEmp();                                  // create the emp table
    int createTableEmpSchema();                            // create emp_schema, inferring the schema automatically
    int createTableAsSelect(Map<String, String> map);      // create emp_as_select from a query result
    int insertEmp(Emp emp);                                // insert a record into emp
}
@Service
public class EmpServiceImpl implements EmpService {

    @Autowired
    private EmpMapper empMapper;

    @Override
    public List<Map<String, Object>> selectAll(String tablename) {
        return empMapper.selectAll(tablename);
    }

    @Override
    public int createTableEmp() {
        return empMapper.createTableEmp();
    }

    @Override
    public int createTableEmpSchema() {
        return empMapper.createTableEmpSchema();
    }

    @Override
    public int createTableAsSelect(Map<String, String> map) {
        return empMapper.createTableAsSelect(map);
    }

    @Override
    public int insertEmp(Emp emp) {
        return empMapper.insertEmp(emp);
    }
}
@Autowired
private EmpService empService;

@Test
public void selectAll() {
    // a JUnit test method must return void, so print the rows instead of returning them
    String tablename = "emp";
    List<Map<String, Object>> resultlist = empService.selectAll(tablename);
    for (Map<String, Object> map : resultlist) {
        for (Map.Entry<String, Object> m : map.entrySet()) {
            System.out.print(m.getKey() + "=" + m.getValue() + "\t");
        }
        System.out.println();
    }
}
@Test
public void createTable(){
empService.createTableEmp();
}
@Test
public void createTableEmpSchema(){
empService.createTableEmpSchema();
}
@Test
public void createTableAsSelect() {
    Map<String, String> map = new HashMap<String, String>();
    String tablename = "emp_as_select";
    String condition = "select empno,ename from emp"; // the query whose result populates the new table
    map.put("tablename", tablename);
    map.put("condition", condition);
    // check for null before calling equals() to avoid a NullPointerException
    if (map.get("tablename") == null || map.get("tablename").equals("")
            || map.get("condition") == null || map.get("condition").equals("")) {
        System.out.println("Please supply a valid table name and query condition");
    } else {
        empService.createTableAsSelect(map);
    }
}
@SpringBootApplication(scanBasePackages = {"com.sdb.spark.demo.service.Impl"})
@MapperScan(basePackages = {"com.sdb.spark.demo.mapper"})
public class DemoApplication {
    public static void main(String[] args) {
        SpringApplication.run(DemoApplication.class, args);
    }
}
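The original demo drives everything from unit tests; to expose the data over HTTP instead, a thin controller over EmpService could look like the sketch below (illustrative; its package would also need to be covered by scanBasePackages):

import java.util.List;
import java.util.Map;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class EmpController {
    @Autowired
    private EmpService empService;

    // GET /emp/emp returns every row of the emp table as JSON
    @GetMapping("/emp/{tablename}")
    public List<Map<String, Object>> selectAll(@PathVariable String tablename) {
        return empService.selectAll(tablename);
    }
}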