Merge pull request #50 from burhanxz/feature/support_create_filestorage
Feature/support create filestorage
Oldbread3 authored Apr 29, 2022
2 parents 457c415 + 93094e5 commit 6c4489d
Showing 36 changed files with 750 additions and 75 deletions.
58 changes: 15 additions & 43 deletions docs/zh_CN/quickstart-oss.md
@@ -3,76 +3,46 @@
- First, compile and install PolarDB-X from source; see [Build and Deploy from Source](quickstart-development.md)
- Alternatively, build a PolarDB-X cluster with K8S; see [K8S Deployment](http://www.github.com/ApsaraDB/galaxykube/blob/main/docs/zh/deploy/quick-start.md)

After installation, initialize OSS storage with the following command:
After installation, connect to the PolarDB-X cluster and initialize OSS storage with the following SQL statement:

- -e: storage engine type to initialize
- -P: the dnPasswordKey prepared earlier
- --uri: OSS storage URI
- --ep: OSS storage endpoint
- --ak: Access Key
- --sk: Secret Key

Note (on the --ep parameter): in the Alibaba Cloud OSS console, a set of endpoint (region) addresses can be found under "Object Storage - Bucket List - bucket - Overview". Use the classic-network or VPC endpoint only when the deployment runs on an ECS instance in the same region; otherwise use the public (Internet) endpoint.

Example:
```
bin/startup.sh \
-e OSS \
--uri oss://oss-bucket/ \
--ep oss-cn-huhehaote.aliyuncs.com \
--ak akakakakakakakakakakakakakak \
--sk sksksksksksksksksksksksksksk \
-P asdf1234ghjk5678
create filestorage oss with ('file_uri' = 'oss://oss-bucket-name/', 'endpoint'='oss-endpoint', 'access_key_id'='your_ak', 'access_key_secret'='your_sk');
```

Note 1: after starting the instance, run the show file storage statement to verify that the configuration succeeded.

Note 2: the startup.sh script is located in the bin directory of the CN installation.
- file_uri: OSS storage URI
- endpoint: OSS storage endpoint
- access_key_id: Access Key
- access_key_secret: Secret Key

If you built the cluster with K8S, log in to a CN node as follows to obtain the dnPasswordKey and run the initialization:
```
// list the CN node pods
kubectl get pods | grep cn
Note 1 (on the endpoint parameter): in the Alibaba Cloud OSS console, a set of endpoint (region) addresses can be found under "Object Storage - Bucket List - bucket - Overview". Use the classic-network or VPC endpoint only when the deployment runs on an ECS instance in the same region; otherwise use the public (Internet) endpoint.

// log in to any CN node
kubectl exec -it [your-cn-pod-name] bash

// obtain the dnPasswordKey
env | grep dnPasswordKey
// startup.sh is located in /home/admin/drds-server/bin
cd /home/admin/drds-server/bin
```
Note 2: run the show file storage statement to verify that the configuration succeeded.
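A minimal verification right after initialization could look like the sketch below; the statement is the one named in the note above, and the exact result columns depend on your PolarDB-X version:
```
show file storage;
```
If the initialization succeeded, the newly created OSS file storage should appear in the result.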


# Local Disk Storage Configuration
Users who do not have OSS but still want to try the feature can simulate OSS with a local disk.

Note: the local disk is the CN node's local disk; if you deploy with K8S, limit the number of CN nodes to 1.

After installation, initialize local disk storage with the following command:
Note: file_uri is a data directory path that the CN nodes can access; if you cannot guarantee that multiple CN nodes all share access to this path, limit the number of CN nodes to 1.

- -e: storage engine type to initialize
- -P: the dnPasswordKey prepared earlier
- --uri: local disk URI, i.e. the local storage directory.
Initialize local disk storage with the following command:

Example:
```
bin/startup.sh \
-e LOCAL_DISK \
--uri file:///tmp/local-dir \
-P asdf1234ghjk5678
create filestorage local_disk with ('file_uri' = 'file://tmp/orc/');
```

Change the storage engine to `engine = 'local_disk'` to use local disk storage, for example:
When creating a table, change the storage engine to `engine = 'local_disk'` to use local disk storage, for example:
```
create table sbtest1 like sysbench.sbtest1 engine = 'local_disk' archive_mode = 'loading';
```
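To double-check which engine a table ended up on, a MySQL-style inspection such as the sketch below can be used; the exact DDL text rendered in the output varies by version:
```
show create table sbtest1;
```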


# Getting Started

The examples all show the storage engine as `engine = 'oss'`; if you are using local storage, change it to `engine = 'local_disk'`.
Note: the examples all show the storage engine as `engine = 'oss'`; if you are using local storage, change it to `engine = 'local_disk'`.

## Hello World
```
@@ -103,6 +73,8 @@ select * from oss_t1;
## TTL
An example of InnoDB data automatically expiring and being archived to OSS storage.

Note: the examples all show the storage engine as `engine = 'oss'`; if you are using local storage, change it to `engine = 'local_disk'`.

```
create database ttl_test partition_mode = 'auto';
use ttl_test;
@@ -0,0 +1,49 @@
package org.apache.calcite.rel.ddl;

import org.apache.calcite.plan.Convention;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.DDL;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.sql.SqlDdl;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.parser.SqlParserPos;

import java.util.List;
import java.util.Map;

public class CreateFileStorage extends DDL {
private final String engineName;
private final Map<String, String> with;

protected CreateFileStorage(RelOptCluster cluster, RelTraitSet traits, SqlDdl ddl,
RelDataType rowType, String engineName, Map<String, String> with) {
super(cluster, traits, ddl, rowType);
this.engineName = engineName;
this.with = with;
this.sqlNode = ddl;
this.setTableName(new SqlIdentifier(engineName, SqlParserPos.ZERO));
}

public static CreateFileStorage create(RelOptCluster cluster, RelTraitSet traits, SqlDdl ddl,
RelDataType rowType, String engineName, Map<String, String> with) {

return new CreateFileStorage(cluster, traits, ddl, rowType, engineName, with);
}

@Override
public CreateFileStorage copy(
RelTraitSet traitSet, List<RelNode> inputs) {
assert traitSet.containsIfApplicable(Convention.NONE);
return new CreateFileStorage(this.getCluster(), traitSet, this.ddl, rowType, engineName, with);
}

public String getEngineName() {
return engineName;
}

public Map<String, String> getWith() {
return with;
}
}
@@ -0,0 +1,98 @@
package org.apache.calcite.sql;

import com.google.common.collect.ImmutableList;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.calcite.sql.validate.SqlValidator;
import org.apache.calcite.sql.validate.SqlValidatorScope;
import org.apache.calcite.util.Pair;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SqlCreateFileStorage extends SqlDdl {
private static final SqlOperator OPERATOR = new SqlCreateFileStorageOperator();

private SqlIdentifier engineName;
private List<Pair<SqlIdentifier, SqlIdentifier>> with;

public SqlCreateFileStorage(SqlParserPos pos,
SqlIdentifier engineName,
List<Pair<SqlIdentifier, SqlIdentifier>> with) {
super(OPERATOR, pos);
this.engineName = engineName;
this.with = with;
}

public SqlIdentifier getEngineName() {
return engineName;
}

public SqlCreateFileStorage setEngineName(SqlIdentifier engineName) {
this.engineName = engineName;
return this;
}

public List<Pair<SqlIdentifier, SqlIdentifier>> getWith() {
return with;
}

public Map<String, String> getWithValue() {
Map<String, String> map = new HashMap<>();
// Option keys are normalized to upper case; values keep their original text.
for (Pair<SqlIdentifier, SqlIdentifier> pair : with) {
map.put(pair.left.toString().toUpperCase(), pair.right.toString());
}
return map;
}

public SqlCreateFileStorage setWith(
List<Pair<SqlIdentifier, SqlIdentifier>> with) {
this.with = with;
return this;
}

@Override
public List<SqlNode> getOperandList() {
return ImmutableList.of();
}

@Override
public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
final SqlWriter.Frame frame = writer.startList(SqlWriter.FrameTypeEnum.SELECT, "CREATE FILESTORAGE", "");

engineName.unparse(writer, leftPrec, rightPrec);

final SqlWriter.Frame withFrame = writer.startList(SqlWriter.FrameTypeEnum.SELECT, " WITH (", ")");

for (Pair<SqlIdentifier, SqlIdentifier> pair : with) {
pair.left.unparse(writer, leftPrec, rightPrec);
writer.sep("=");
pair.right.unparse(writer, leftPrec, rightPrec);
}

writer.endList(withFrame);

writer.endList(frame);
}

public static class SqlCreateFileStorageOperator extends SqlSpecialOperator {

public SqlCreateFileStorageOperator(){
super("CREATE FILESTORAGE", SqlKind.CREATE_FILESTORAGE);
}

@Override
public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
final RelDataTypeFactory typeFactory = validator.getTypeFactory();
final RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);

return typeFactory.createStructType(ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("CREATE FILESTORAGE RESULT",
0, columnType)));
}
}
}
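For orientation, this class is the AST form of the statement documented in the quickstart change earlier in this commit; unparse() serializes a node back into roughly the shape below (a sketch mirroring the documented example; exact identifier quoting and separators depend on the SqlWriter configuration):
```
CREATE FILESTORAGE oss WITH ('file_uri' = 'oss://oss-bucket-name/', 'endpoint' = 'oss-endpoint', 'access_key_id' = 'your_ak', 'access_key_secret' = 'your_sk')
```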
@@ -1665,6 +1665,11 @@ public enum SqlKind {
*/
ALTER_TABLEGROUP,

/**
* file storage management: create fileStorage
*/
CREATE_FILESTORAGE,

/**
* file storage management: alter fileStorage
*/
@@ -1911,13 +1916,13 @@ public enum SqlKind {
DROP_DATABASE,
MOVE_DATABASE, CHECK_GLOBAL_INDEX, ALTER_TABLEGROUP, CREATE_TABLEGROUP,
CHANGE_CONSENSUS_ROLE, ALTER_SYSTEM_SET_CONFIG, ALTER_TABLE_SET_TABLEGROUP,
REFRESH_TOPOLOGY, DROP_TABLEGROUP, ALTER_FILESTORAGE, DROP_FILESTORAGE
REFRESH_TOPOLOGY, DROP_TABLEGROUP, ALTER_FILESTORAGE, DROP_FILESTORAGE, CREATE_FILESTORAGE
));

public static final EnumSet<SqlKind> DDL_SUPPORTED_BY_NEW_ENGINE =
EnumSet.of(RENAME_TABLE, TRUNCATE_TABLE, DROP_TABLE, CREATE_INDEX, DROP_INDEX, ALTER_TABLE, CREATE_TABLE,
ALTER_TABLEGROUP, ALTER_TABLE_SET_TABLEGROUP, REFRESH_TOPOLOGY, CHECK_GLOBAL_INDEX, ALTER_RULE,
MOVE_DATABASE, ALTER_FILESTORAGE, DROP_FILESTORAGE);
MOVE_DATABASE, ALTER_FILESTORAGE, DROP_FILESTORAGE, CREATE_FILESTORAGE);

public static final EnumSet<SqlKind> SUPPORT_DDL =
EnumSet.of(CREATE_TABLE, ALTER_TABLE, DROP_TABLE,
@@ -987,7 +987,8 @@ private SqlNode validateScopedExpression(
registerQuery(scope, null, outermostNode, outermostNode, null, false);
}
if (topNode.getKind() == SqlKind.CREATE_TABLE || topNode.getKind() == SqlKind.DROP_TABLE
|| topNode.getKind() == SqlKind.DROP_VIEW || topNode.getKind() == SqlKind.DROP_FILESTORAGE) {
|| topNode.getKind() == SqlKind.DROP_VIEW || topNode.getKind() == SqlKind.DROP_FILESTORAGE
|| topNode.getKind() == SqlKind.CREATE_FILESTORAGE) {
if (topNode.getKind() == SqlKind.CREATE_TABLE) {
outermostNode.validate(this, scope);
}
@@ -3929,6 +3930,7 @@ private void registerQuery(
case IDENTIFIER:
case ALTER_FILESTORAGE:
case DROP_FILESTORAGE:
case CREATE_FILESTORAGE:
setValidatedNodeType(node, RelOptUtil.createDmlRowType(
SqlKind.INSERT, typeFactory));
break;
@@ -88,6 +88,7 @@
import org.apache.calcite.rel.ddl.AlterTableSetTableGroup;
import org.apache.calcite.rel.ddl.ChangeConsensusRole;
import org.apache.calcite.rel.ddl.CreateDatabase;
import org.apache.calcite.rel.ddl.CreateFileStorage;
import org.apache.calcite.rel.ddl.CreateIndex;
import org.apache.calcite.rel.ddl.CreateTable;
import org.apache.calcite.rel.ddl.CreateTableGroup;
@@ -183,6 +184,7 @@
import org.apache.calcite.sql.SqlChangeConsensusRole;
import org.apache.calcite.sql.SqlCharStringLiteral;
import org.apache.calcite.sql.SqlCreateDatabase;
import org.apache.calcite.sql.SqlCreateFileStorage;
import org.apache.calcite.sql.SqlCreateIndex;
import org.apache.calcite.sql.SqlCreateTable;
import org.apache.calcite.sql.SqlCreateTableGroup;
@@ -3367,6 +3369,8 @@ protected RelRoot convertQueryRecursive(SqlNode query, boolean top, RelDataType
return RelRoot.of(convertUnArchive((SqlUnArchive) query), kind);
case DROP_FILESTORAGE:
return RelRoot.of(convertDropFileStorage((SqlDropFileStorage) query), kind);
case CREATE_FILESTORAGE:
return RelRoot.of(convertCreateFileStorage((SqlCreateFileStorage) query), kind);
default:
if (kind.belongsTo(SqlKind.DAL)) {
return RelRoot.of(convertDal((SqlDal) query), kind);
@@ -3480,6 +3484,13 @@ private RelNode convertDropFileStorage(SqlDropFileStorage query) {
targetRowType, query.getName().toString());
}

private RelNode convertCreateFileStorage(SqlCreateFileStorage query) {
final RelDataType targetRowType = validator.getValidatedNodeType(query);
assert targetRowType != null;
return CreateFileStorage.create(getCluster(), getCluster().traitSetOf(Convention.NONE), query,
targetRowType, query.getEngineName().toString(), query.getWithValue());
}

private RelNode convertAlterTableGroup(SqlAlterTableGroup query) {;
Map<SqlNode, RexNode> partRexInfoCtx = getRexInfoFromSqlAlterSpec(query.getAlters());
query.setPartRexInfoCtx(partRexInfoCtx);
@@ -34,13 +34,11 @@ public abstract class AbstractCharsetHandler implements CharsetHandler {
private static final long SIGNED_MAX_LONG = 0x7fffffffffffffffL;

protected Charset charset;
protected CharsetEncoder encoder;
protected CollationHandler collationHandler;
protected CollationName collationName;

AbstractCharsetHandler(Charset charset, CollationName collationName) {
this.charset = charset;
this.encoder = charset.newEncoder();
this.collationName = collationName;
this.collationHandler = null;
}
@@ -80,10 +78,12 @@ public Slice encode(String unicodeChars) throws CharacterCodingException {
return Slices.EMPTY_SLICE;
}

encoder.reset();
CharBuffer charBuffer = CharBuffer.wrap(unicodeChars);

// For un-mappable characters, throw error.
// CharsetEncoder is not thread-safe; create a fresh encoder for each call.
CharsetEncoder encoder = charset.newEncoder();
ByteBuffer buffer = encoder.encode(charBuffer);

if (!buffer.hasRemaining()) {
return Slices.EMPTY_SLICE;
}
@@ -321,7 +321,7 @@ public enum CollationName {

public static ImmutableList<CollationName> POLAR_DB_X_IMPLEMENTED_COLLATION_NAMES = ImmutableList.of(
// for utf8
UTF8_GENERAL_CI, UTF8_BIN, UTF8_UNICODE_CI, UTF8_GENERAL_MYSQL500_CI,
UTF8_GENERAL_CI, UTF8_BIN, UTF8_UNICODE_CI,

// for utf8mb
UTF8MB4_GENERAL_CI, UTF8MB4_BIN, UTF8MB4_UNICODE_CI,
@@ -642,7 +642,8 @@ public enum ErrorCode {
ERR_UNEXPECTED_REL_TREE(ErrorType.OSS, 11010),
ERR_EXECUTE_ON_OSS(ErrorType.Executor, 11011),
ERR_FILE_STORAGE_READ_ONLY(ErrorType.OSS, 11012),
ERR_OSS_CONNECT(ErrorType.OSS, 11013);
ERR_OSS_CONNECT(ErrorType.OSS, 11013),
ERR_FILE_STORAGE_EXISTS(ErrorType.OSS, 11014);


private int code;
3 changes: 2 additions & 1 deletion polardbx-common/src/main/resources/res/ErrorCode.properties
@@ -279,4 +279,5 @@ ERR_BACK_FILL_CHECK={0}
ERR_UNARCHIVE_FIRST=There is some table archived to oss table, please execute "{0}" first.
ERR_EXECUTE_ON_OSS={0}
ERR_OSS_FORMAT={0}
ERR_OSS_CONNECT={0}
ERR_OSS_CONNECT={0}
ERR_FILE_STORAGE_EXISTS={0}