
COPY INTO <table>

Introduced or updated: v1.2.704

COPY INTO lets you load data from files in any of the following locations:

  • User / internal / external stage: see What is a Stage? to learn about stages in Databend.
  • Buckets or containers created in a storage service.
  • Remote servers from which files are accessible via a URL (starting with "https://...").
  • IPFS and Hugging Face repositories.

See also: COPY INTO <location>

Syntax

/* Standard data load */
COPY INTO [<database_name>.]<table_name> [ ( <col_name> [ , <col_name> ... ] ) ]
FROM { userStage | internalStage | externalStage | externalLocation }
[ FILES = ( '<file_name>' [ , '<file_name>' ] [ , ... ] ) ]
[ PATTERN = '<regex_pattern>' ]
[ FILE_FORMAT = (
    FORMAT_NAME = '<your-custom-format>'
    | TYPE = { CSV | TSV | NDJSON | PARQUET | ORC | AVRO } [ formatTypeOptions ]
) ]
[ copyOptions ]

/* Data load with transformation */
COPY INTO [<database_name>.]<table_name> [ ( <col_name> [ , <col_name> ... ] ) ]
FROM ( SELECT [<alias>.]$<file_col_num>[.<element>] [ , [<alias>.]$<file_col_num>[.<element>] ... ]
       FROM { userStage | internalStage | externalStage } )
[ FILES = ( '<file_name>' [ , '<file_name>' ] [ , ... ] ) ]
[ PATTERN = '<regex_pattern>' ]
[ FILE_FORMAT = (
    FORMAT_NAME = '<your-custom-format>'
    | TYPE = { CSV | TSV | NDJSON | PARQUET | ORC | AVRO } [ formatTypeOptions ]
) ]
[ copyOptions ]

Where:

userStage ::= @~[/<path>]

internalStage ::= @<internal_stage_name>[/<path>]

externalStage ::= @<external_stage_name>[/<path>]

externalLocation ::=
  /* Amazon S3-like storage */
  's3://<bucket>[/<path>]'
  CONNECTION = (
    [ ENDPOINT_URL = '<endpoint-url>' ]
    [ ACCESS_KEY_ID = '<your-access-key-ID>' ]
    [ SECRET_ACCESS_KEY = '<your-secret-access-key>' ]
    [ ENABLE_VIRTUAL_HOST_STYLE = TRUE | FALSE ]
    [ MASTER_KEY = '<your-master-key>' ]
    [ REGION = '<region>' ]
    [ SECURITY_TOKEN = '<security-token>' ]
    [ ROLE_ARN = '<role-arn>' ]
    [ EXTERNAL_ID = '<external-id>' ]
  )

  /* Azure Blob Storage */
| 'azblob://<container>[/<path>]'
  CONNECTION = (
    ENDPOINT_URL = '<endpoint-url>'
    ACCOUNT_NAME = '<account-name>'
    ACCOUNT_KEY = '<account-key>'
  )

  /* Google Cloud Storage */
| 'gcs://<bucket>[/<path>]'
  CONNECTION = (
    CREDENTIAL = '<your-base64-encoded-credential>'
  )

  /* Alibaba Cloud OSS */
| 'oss://<bucket>[/<path>]'
  CONNECTION = (
    ACCESS_KEY_ID = '<your-ak>'
    ACCESS_KEY_SECRET = '<your-sk>'
    ENDPOINT_URL = '<endpoint-url>'
    [ PRESIGN_ENDPOINT_URL = '<presign-endpoint-url>' ]
  )

  /* Tencent Cloud Object Storage */
| 'cos://<bucket>[/<path>]'
  CONNECTION = (
    SECRET_ID = '<your-secret-id>'
    SECRET_KEY = '<your-secret-key>'
    ENDPOINT_URL = '<endpoint-url>'
  )

  /* Remote files */
| 'https://<url>'

  /* IPFS */
| 'ipfs://<your-ipfs-hash>'
  CONNECTION = ( ENDPOINT_URL = 'https://<your-ipfs-gateway>' )

  /* Hugging Face */
| 'hf://<repo-id>[/<path>]'
  CONNECTION = (
    [ REPO_TYPE = 'dataset' | 'model' ]
    [ REVISION = '<revision>' ]
    [ TOKEN = '<your-api-token>' ]
  )

formatTypeOptions ::=
  /* Common options for all formats */
  [ COMPRESSION = AUTO | GZIP | BZ2 | BROTLI | ZSTD | DEFLATE | RAW_DEFLATE | XZ | NONE ]

  /* CSV-specific options */
  [ RECORD_DELIMITER = '<character>' ]
  [ FIELD_DELIMITER = '<character>' ]
  [ SKIP_HEADER = <integer> ]
  [ QUOTE = '<character>' ]
  [ ESCAPE = '<character>' ]
  [ NAN_DISPLAY = '<string>' ]
  [ NULL_DISPLAY = '<string>' ]
  [ ERROR_ON_COLUMN_COUNT_MISMATCH = TRUE | FALSE ]
  [ EMPTY_FIELD_AS = null | string | field_default ]
  [ BINARY_FORMAT = HEX | BASE64 ]

  /* TSV-specific options */
  [ RECORD_DELIMITER = '<character>' ]
  [ FIELD_DELIMITER = '<character>' ]

  /* NDJSON-specific options */
  [ NULL_FIELD_AS = NULL | FIELD_DEFAULT ]
  [ MISSING_FIELD_AS = ERROR | NULL | FIELD_DEFAULT ]
  [ ALLOW_DUPLICATE_KEYS = TRUE | FALSE ]

  /* PARQUET-specific options */
  [ MISSING_FIELD_AS = ERROR | FIELD_DEFAULT ]

  /* ORC-specific options */
  [ MISSING_FIELD_AS = ERROR | FIELD_DEFAULT ]

  /* AVRO-specific options */
  [ MISSING_FIELD_AS = ERROR | FIELD_DEFAULT ]

copyOptions ::=
  [ SIZE_LIMIT = <num> ]
  [ PURGE = <bool> ]
  [ FORCE = <bool> ]
  [ DISABLE_VARIANT_CHECK = <bool> ]
  [ ON_ERROR = { continue | abort | abort_N } ]
  [ MAX_FILES = <num> ]
  [ RETURN_FAILED_ONLY = <bool> ]
  [ COLUMN_MATCH_MODE = { case-sensitive | case-insensitive } ]
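
The transformation form above is not covered by the examples later on this page. The following is a minimal sketch of it; the table user_names and the internal stage @name_stage holding CSV files are assumptions for illustration:

COPY INTO user_names (name, year)
FROM (
    -- $1 and $3 are the first and third fields of each CSV record;
    -- the second field in the files is skipped
    SELECT t.$1, t.$3
    FROM @name_stage t
)
FILE_FORMAT = (TYPE = CSV);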

Notes

For remote files, you can use glob patterns to specify multiple files. For example:

  • ontime_200{6,7,8}.csv represents ontime_2006.csv, ontime_2007.csv, and ontime_2008.csv
  • ontime_200[6-8].csv represents the same set of files
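
A minimal usage sketch of such a glob pattern (the table ontime and the host serving the files are assumptions for illustration):

COPY INTO ontime
FROM 'https://example.com/data/ontime_200{6,7,8}.csv'
FILE_FORMAT = (TYPE = CSV, SKIP_HEADER = 1);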

Key Parameters

File Format Options

The FILE_FORMAT parameter supports different file types, each with its own format options. The options available for each file format are listed below:

Common Options for All Formats

| Option | Description | Available Values | Default |
|--------|-------------|------------------|---------|
| COMPRESSION | Compression algorithm of the data files | AUTO, GZIP, BZ2, BROTLI, ZSTD, DEFLATE, RAW_DEFLATE, XZ, NONE | AUTO |

TYPE = CSV

| Option | Description | Default |
|--------|-------------|---------|
| RECORD_DELIMITER | Record separator | Newline |
| FIELD_DELIMITER | Field separator | Comma (,) |
| SKIP_HEADER | Number of header lines to skip | 0 |
| QUOTE | Quoting character for fields | Double quote (") |
| ESCAPE | Escape character | NONE |
| NAN_DISPLAY | String representing NaN values | NaN |
| NULL_DISPLAY | String representing NULL values | \N |
| ERROR_ON_COLUMN_COUNT_MISMATCH | Raise an error when the column count does not match | TRUE |
| EMPTY_FIELD_AS | How empty fields are handled | null |
| BINARY_FORMAT | Encoding of binary data | HEX |

TYPE = TSV

| Option | Description | Default |
|--------|-------------|---------|
| RECORD_DELIMITER | Record separator | Newline |
| FIELD_DELIMITER | Field separator | Tab (\t) |

TYPE = NDJSON

| Option | Description | Default |
|--------|-------------|---------|
| NULL_FIELD_AS | How null fields are handled | NULL |
| MISSING_FIELD_AS | How missing fields are handled | ERROR |
| ALLOW_DUPLICATE_KEYS | Whether duplicate keys are allowed | FALSE |

TYPE = PARQUET

| Option | Description | Default |
|--------|-------------|---------|
| MISSING_FIELD_AS | How missing fields are handled | ERROR |

TYPE = ORC

| Option | Description | Default |
|--------|-------------|---------|
| MISSING_FIELD_AS | How missing fields are handled | ERROR |

TYPE = AVRO

| Option | Description | Default |
|--------|-------------|---------|
| MISSING_FIELD_AS | How missing fields are handled | ERROR |
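
As a sketch of combining a common option with a type-specific one, the following loads NDJSON files from a hypothetical stage @events_stage into an assumed table events, falling back to column defaults for fields missing from the files:

COPY INTO events
FROM @events_stage
FILE_FORMAT = (
    TYPE = NDJSON,
    COMPRESSION = AUTO,
    MISSING_FIELD_AS = FIELD_DEFAULT
);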

Copy Options

| Parameter | Description | Default |
|-----------|-------------|---------|
| SIZE_LIMIT | Maximum number of rows to load | 0 (no limit) |
| PURGE | Purge source files after a successful load | false |
| FORCE | Allow duplicate files to be loaded again | false (skip duplicate files) |
| DISABLE_VARIANT_CHECK | Replace invalid JSON with NULL | false (error on invalid JSON) |
| ON_ERROR | Error handling: continue, abort, abort_N | abort |
| MAX_FILES | Maximum number of files to load (capped at 15,000) | - |
| RETURN_FAILED_ONLY | Return only the files that failed to load | false |
| COLUMN_MATCH_MODE | Column-name matching mode for Parquet | case-insensitive |
Tip

When importing massive amounts of data (such as logs), it is recommended to set both PURGE and FORCE to true. This configuration imports data efficiently without interacting with the meta server (to update the copied-file set), but note that it can lead to duplicate data being imported.
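
A minimal sketch of this configuration (the table logs and stage @log_stage are assumptions for illustration):

COPY INTO logs
FROM @log_stage
FILE_FORMAT = (TYPE = NDJSON)
PURGE = true
FORCE = true;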

Output

COPY INTO returns a summary of the data load, with the following columns:

类型是否可为空描述
FILEVARCHARNO源文件相对路径
ROWS_LOADEDINTNO从文件加载的行数
ERRORS_SEENINTNO文件中的错误行数
FIRST_ERRORVARCHARYES首个错误信息
FIRST_ERROR_LINEINTYES首个错误所在行号

RETURN_FAILED_ONLY 设为 true,输出仅包含加载失败的文件。

Examples

Example 1: Loading from Stages

The following examples show how to load data into Databend from different types of stages. This one loads Parquet files from the user stage:

COPY INTO mytable
FROM @~
PATTERN = '.*[.]parquet'
FILE_FORMAT = (TYPE = PARQUET);
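
Loading from an internal or external stage follows the same shape; the stage names below are assumptions for illustration:

-- Internal stage
COPY INTO mytable
FROM @my_internal_stage
FILE_FORMAT = (TYPE = PARQUET);

-- External stage
COPY INTO mytable
FROM @my_external_stage
PATTERN = '.*[.]parquet'
FILE_FORMAT = (TYPE = PARQUET);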

Example 2: Loading from External Locations

The following examples show how to load data into Databend from various external sources.

This example connects to Amazon S3 using an AWS access key and secret access key, and loads 10 rows from a CSV file:

-- Authenticate with an AWS access key and secret access key
COPY INTO mytable
FROM 's3://mybucket/data.csv'
CONNECTION = (
    ACCESS_KEY_ID = '<your-access-key-ID>',
    SECRET_ACCESS_KEY = '<your-secret-access-key>'
)
FILE_FORMAT = (
    TYPE = CSV,
    FIELD_DELIMITER = ',',
    RECORD_DELIMITER = '\n',
    SKIP_HEADER = 1
)
SIZE_LIMIT = 10;

This example connects to Amazon S3 using AWS IAM role authentication with an external ID, and loads the CSV files in 'mybucket' that match the specified pattern:

-- Authenticate with an AWS IAM role and external ID
COPY INTO mytable
FROM 's3://mybucket/'
CONNECTION = (
    ENDPOINT_URL = 'https://<endpoint-URL>',
    ROLE_ARN = 'arn:aws:iam::123456789012:role/my_iam_role',
    EXTERNAL_ID = '123456'
)
PATTERN = '.*[.]csv'
FILE_FORMAT = (
    TYPE = CSV,
    FIELD_DELIMITER = ',',
    RECORD_DELIMITER = '\n',
    SKIP_HEADER = 1
);

Example 3: Loading Compressed Data

This example loads a GZIP-compressed CSV file on Amazon S3 into Databend:

COPY INTO mytable
FROM 's3://mybucket/data.csv.gz'
CONNECTION = (
    ENDPOINT_URL = 'https://<endpoint-URL>',
    ACCESS_KEY_ID = '<your-access-key-ID>',
    SECRET_ACCESS_KEY = '<your-secret-access-key>'
)
FILE_FORMAT = (
    TYPE = CSV,
    FIELD_DELIMITER = ',',
    RECORD_DELIMITER = '\n',
    SKIP_HEADER = 1,
    COMPRESSION = AUTO
);

Example 4: Filtering Files with a Pattern

This example demonstrates how to use the PATTERN parameter to load CSV files from Amazon S3 by pattern matching, selecting files whose names contain 'sales' and have the extension '.csv':

COPY INTO mytable
FROM 's3://mybucket/'
PATTERN = '.*sales.*[.]csv'
FILE_FORMAT = (
    TYPE = CSV,
    FIELD_DELIMITER = ',',
    RECORD_DELIMITER = '\n',
    SKIP_HEADER = 1
);

Where .* matches zero or more occurrences of any character, and the square brackets escape the dot . before the file extension.

To load all CSV files:

COPY INTO mytable
FROM 's3://mybucket/'
PATTERN = '.*[.]csv'
FILE_FORMAT = (
    TYPE = CSV,
    FIELD_DELIMITER = ',',
    RECORD_DELIMITER = '\n',
    SKIP_HEADER = 1
);

When specifying a file path with multiple levels of directories, consider the matching strategy (a sketch follows each item):

  • To match a specific subpath after a prefix, include the prefix (e.g., 'multi_page/') in the pattern, followed by the subpath pattern (e.g., '_page_1'):

-- File path: parquet/multi_page/multi_page_1.parquet
COPY INTO ... FROM @data/parquet/ PATTERN = 'multi_page/.*_page_1.*' ...

  • To match the pattern anywhere in the path, wrap it with .* on both sides (e.g., .*multi_page_1.*):

-- File path: parquet/multi_page/multi_page_1.parquet
COPY INTO ... FROM @data/parquet/ PATTERN = '.*multi_page_1.*' ...

Example 5: Loading to a Table with Extra Columns

This example uses the sample file books.csv to demonstrate how to load data into a table that has extra columns:

books.csv
Transaction Processing,Jim Gray,1992
Readings in Database Systems,Michael Stonebraker,2004


By default, COPY INTO loads data by matching the fields of the file to the table's columns in order. You need to make sure the file data aligns with the table structure, for example:

CREATE TABLE books
(
    title VARCHAR,
    author VARCHAR,
    date VARCHAR
);

COPY INTO books
FROM 'https://datafuse-1253727613.cos.ap-hongkong.myqcloud.com/data/books.csv'
FILE_FORMAT = (TYPE = CSV);

If the table has more columns than the file, you can specify the columns to load into:

CREATE TABLE books_with_language
(
    title VARCHAR,
    language VARCHAR,
    author VARCHAR,
    date VARCHAR
);

COPY INTO books_with_language (title, author, date)
FROM 'https://datafuse-1253727613.cos.ap-hongkong.myqcloud.com/data/books.csv'
FILE_FORMAT = (TYPE = CSV);

If the extra columns come at the end of the table, you can load the data with the FILE_FORMAT option ERROR_ON_COLUMN_COUNT_MISMATCH (currently supported only for the CSV format):

CREATE TABLE books_with_extra_columns
(
    title VARCHAR,
    author VARCHAR,
    date VARCHAR,
    language VARCHAR,
    region VARCHAR
);

COPY INTO books_with_extra_columns
FROM 'https://datafuse-1253727613.cos.ap-hongkong.myqcloud.com/data/books.csv'
FILE_FORMAT = (TYPE = CSV, ERROR_ON_COLUMN_COUNT_MISMATCH = false);
Note

Extra columns in a table can be given default values via CREATE TABLE or ALTER TABLE COLUMN. If a default is not set explicitly, the default for the data type applies (for example, 0 for an integer column).
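
For instance, a hedged variant of the table above that sets explicit defaults at creation time (the default values themselves are illustrative assumptions):

CREATE TABLE books_with_extra_columns
(
    title VARCHAR,
    author VARCHAR,
    date VARCHAR,
    language VARCHAR DEFAULT 'English',
    region VARCHAR DEFAULT 'Unknown'
);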

Example 6: Loading JSON with a Custom Format

This example loads data from a CSV file "data.csv" with the following content:

1,"U00010","{\"carPriceList\":[{\"carTypeId":10,\"distance":5860},{\"carTypeId":11,\"distance\":5861}]}"
2,"U00011","{\"carPriceList\":[{\"carTypeId":12,\"distance":5862},{\"carTypeId":13,\"distance\":5863}]}"

Each row contains three columns; the third is a string containing JSON. Because the JSON itself contains double quotes, the backslash \ must be set as the escape character for the data to load correctly:

Step 1: Create a custom file format

-- Create a custom CSV format with backslash \ as the escape character
CREATE FILE FORMAT my_csv_format
TYPE = CSV
ESCAPE = '\\';

Step 2: Create the target table

CREATE TABLE t
(
    id INT,
    seq VARCHAR,
    p_detail VARCHAR
);

Step 3: Load with the custom format

COPY INTO t FROM @t_stage FILES=('data.csv')
FILE_FORMAT=(FORMAT_NAME='my_csv_format');

Example 7: Loading Invalid JSON

When loading data into a Variant column, Databend automatically validates the JSON and raises an error on invalid values. For example, suppose the file invalid_json_string.parquet in the user stage contains invalid JSON data:

SELECT *
FROM @~/invalid_json_string.parquet;

┌──────┬───────────┐
│  a   │     b     │
├──────┼───────────┤
│ 5    │ {"k":"v"} │
│ 6    │ [1,       │
└──────┴───────────┘

DESC t2;

┌───────┬─────────┬──────┬─────────┬───────┐
│ Field │ Type    │ Null │ Default │ Extra │
├───────┼─────────┼──────┼─────────┼───────┤
│ a     │ VARCHAR │ YES  │ NULL    │       │
│ b     │ VARIANT │ YES  │ NULL    │       │
└───────┴─────────┴──────┴─────────┴───────┘

Loading the data raises an error:

root@localhost:8000/default>  COPY INTO t2 FROM @~/invalid_json_string.parquet FILE_FORMAT = (TYPE = PARQUET) ON_ERROR = CONTINUE;
error: APIError: ResponseError with 1006: EOF while parsing a value, pos 3 while evaluating function `parse_json('[1,')`

Set DISABLE_VARIANT_CHECK = true to skip the JSON validity check:

COPY INTO t2 FROM @~/invalid_json_string.parquet
FILE_FORMAT = (TYPE = PARQUET)
DISABLE_VARIANT_CHECK = true
ON_ERROR = CONTINUE;

┌─────────────────────────────┬─────────────┬─────────────┬─────────────┬──────────────────┐
│ File                        │ Rows_loaded │ Errors_seen │ First_error │ First_error_line │
├─────────────────────────────┼─────────────┼─────────────┼─────────────┼──────────────────┤
│ invalid_json_string.parquet │ 2           │ 0           │ NULL        │ NULL             │
└─────────────────────────────┴─────────────┴─────────────┴─────────────┴──────────────────┘

SELECT * FROM t2;
-- Invalid JSON is stored as NULL in the Variant column
┌──────┬───────────┐
│  a   │     b     │
├──────┼───────────┤
│ 5    │ {"k":"v"} │
│ 6    │ null      │
└──────┴───────────┘