[Groonga-commit] groonga/grnci at e13504c [master] Update v2.

Susumu Yata null+****@clear*****
Wed Jun 28 16:46:30 JST 2017


Susumu Yata	2017-06-28 16:46:30 +0900 (Wed, 28 Jun 2017)

  New Revision: e13504ce76635272a3aedfbf28db78af2459fad6
  https://github.com/groonga/grnci/commit/e13504ce76635272a3aedfbf28db78af2459fad6

  Message:
    Update v2.

  Added files:
    v2/json.go
    v2/json_test.go
  Modified files:
    v2/command.go
    v2/db.go
    v2/db_test.go
    v2/error.go
    v2/error_test.go
    v2/response.go
    v2/type.go

  Modified: v2/command.go (+120 -81)
===================================================================
--- v2/command.go    2017-06-20 23:55:39 +0900 (ad68a19)
+++ v2/command.go    2017-06-28 16:46:30 +0900 (1b21bad)
@@ -4,8 +4,8 @@ import (
 	"io"
 	"reflect"
 	"sort"
-	"strconv"
 	"strings"
+	"time"
 )
 
 // formatParamValue is a function to format a parameter value.
@@ -13,61 +13,46 @@ type formatParamValue func(value interface{}) (string, error)
 
 // formatParamValueDefault is the default formatParamValue.
 var formatParamValueDefault = func(value interface{}) (string, error) {
-	switch v := value.(type) {
-	case bool:
-		return strconv.FormatBool(v), nil
-	case int:
-		return strconv.FormatInt(int64(v), 10), nil
-	case int8:
-		return strconv.FormatInt(int64(v), 10), nil
-	case int16:
-		return strconv.FormatInt(int64(v), 10), nil
-	case int32:
-		return strconv.FormatInt(int64(v), 10), nil
-	case int64:
-		return strconv.FormatInt(v, 10), nil
-	case uint:
-		return strconv.FormatUint(uint64(v), 10), nil
-	case uint8:
-		return strconv.FormatUint(uint64(v), 10), nil
-	case uint16:
-		return strconv.FormatUint(uint64(v), 10), nil
-	case uint32:
-		return strconv.FormatUint(uint64(v), 10), nil
-	case uint64:
-		return strconv.FormatUint(v, 10), nil
-	case float32:
-		return strconv.FormatFloat(float64(v), 'f', -1, 32), nil
-	case float64:
-		return strconv.FormatFloat(v, 'f', -1, 64), nil
-	case string:
-		return v, nil
-	// TODO: case time.Time:
-	// TODO: case Geo:
-	default:
-		return "", NewError(InvalidCommand, map[string]interface{}{
-			"value": value,
-			"type":  reflect.TypeOf(value).Name(),
-			"error": "The type is not supported.",
-		})
-	}
-}
-
-// yesNo returns yes or no.
-func yesNo(value bool) string {
-	if value {
-		return "yes"
+	v := reflect.ValueOf(value)
+	switch v.Kind() {
+	case reflect.Bool:
+		return formatBool(v.Bool()), nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return formatInt(v.Int()), nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return formatUint(v.Uint()), nil
+	case reflect.Float32:
+		return formatFloat(v.Float(), 32), nil
+	case reflect.Float64:
+		return formatFloat(v.Float(), 64), nil
+	case reflect.String:
+		return formatString(v.String()), nil
+	case reflect.Struct:
+		switch v := value.(type) {
+		case time.Time:
+			return formatTime(v), nil
+		case Geo:
+			return formatGeo(v), nil
+		}
 	}
-	return "no"
+	return "", NewError(InvalidCommand, map[string]interface{}{
+		"value": value,
+		"type":  reflect.TypeOf(value).Name(),
+		"error": "The type is not supported.",
+	})
 }
 
-// formatParamValueYesNo formats an yes/no value.
+// formatParamValueYesNo formats a yes/no value.
 func formatParamValueYesNo(value interface{}) (string, error) {
-	switch v := value.(type) {
-	case bool:
-		return yesNo(v), nil
-	case string:
-		switch v {
+	v := reflect.ValueOf(value)
+	switch v.Kind() {
+	case reflect.Bool:
+		if v.Bool() {
+			return "yes", nil
+		}
+		return "no", nil
+	case reflect.String:
+		switch v := v.String(); v {
 		case "yes", "no":
 			return v, nil
 		default:
@@ -87,34 +72,83 @@ func formatParamValueYesNo(value interface{}) (string, error) {
 
 // formatParamValueCSV formats comma-separated values.
 func formatParamValueCSV(value interface{}) (string, error) {
-	switch v := value.(type) {
-	case string:
-		return v, nil
-	case []string:
-		return strings.Join(v, ","), nil
-	default:
-		return "", NewError(InvalidCommand, map[string]interface{}{
-			"value": value,
-			"type":  reflect.TypeOf(value).Name(),
-			"error": "The type is not supported.",
-		})
+	v := reflect.ValueOf(value)
+	switch v.Kind() {
+	case reflect.String:
+		return formatString(v.String()), nil
+	case reflect.Array, reflect.Slice:
+		if v.Type().Elem().Kind() != reflect.String {
+			break
+		}
+		var buf []byte
+		n := v.Len()
+		for i := 0; i < n; i++ {
+			if i != 0 {
+				buf = append(buf, ',')
+			}
+			buf = append(buf, formatString(v.Index(i).String())...)
+		}
+		return string(buf), nil
 	}
+	return "", NewError(InvalidCommand, map[string]interface{}{
+		"value": value,
+		"type":  reflect.TypeOf(value).Name(),
+		"error": "The type is not supported.",
+	})
 }
 
 // formatParamValueFlags formats pipe-separated values.
 func formatParamValueFlags(value interface{}) (string, error) {
-	switch v := value.(type) {
-	case string:
-		return v, nil
-	case []string:
-		return strings.Join(v, "|"), nil
-	default:
-		return "", NewError(InvalidCommand, map[string]interface{}{
-			"value": value,
-			"type":  reflect.TypeOf(value).Name(),
-			"error": "The type is not supported.",
-		})
+	v := reflect.ValueOf(value)
+	switch v.Kind() {
+	case reflect.String:
+		return formatString(v.String()), nil
+	case reflect.Array, reflect.Slice:
+		if v.Type().Elem().Kind() != reflect.String {
+			break
+		}
+		var buf []byte
+		n := v.Len()
+		for i := 0; i < n; i++ {
+			if i != 0 {
+				buf = append(buf, '|')
+			}
+			buf = append(buf, formatString(v.Index(i).String())...)
+		}
+		return string(buf), nil
 	}
+	return "", NewError(InvalidCommand, map[string]interface{}{
+		"value": value,
+		"type":  reflect.TypeOf(value).Name(),
+		"error": "The type is not supported.",
+	})
+}
+
+// formatParamValueMatchColumns formats values separated by "||".
+func formatParamValueMatchColumns(value interface{}) (string, error) {
+	v := reflect.ValueOf(value)
+	switch v.Kind() {
+	case reflect.String:
+		return formatString(v.String()), nil
+	case reflect.Array, reflect.Slice:
+		if v.Type().Elem().Kind() != reflect.String {
+			break
+		}
+		var buf []byte
+		n := v.Len()
+		for i := 0; i < n; i++ {
+			if i != 0 {
+				buf = append(buf, "||"...)
+			}
+			buf = append(buf, formatString(v.Index(i).String())...)
+		}
+		return string(buf), nil
+	}
+	return "", NewError(InvalidCommand, map[string]interface{}{
+		"value": value,
+		"type":  reflect.TypeOf(value).Name(),
+		"error": "The type is not supported.",
+	})
 }
 
 // formatParamValueBorder formats an include/exclude value.
@@ -144,6 +178,11 @@ func formatParamValueBorder(value interface{}) (string, error) {
 	}
 }
 
+// formatParamValueJSON returns the JSON-encoded value.
+func formatParamValueJSON(value interface{}) (string, error) {
+	return string(jsonAppendValue(nil, reflect.ValueOf(value))), nil
+}
+
 type paramFormat struct {
 	key      string           // Parameter key
 	format   formatParamValue // Custom function to format a parameter value.
@@ -328,7 +367,7 @@ var commandFormats = map[string]*commandFormat{
 		nil,
 		newParamFormat("name", nil, true),
 		newParamFormat("table", nil, true),
-		newParamFormat("match_columns", formatParamValueCSV, false),
+		newParamFormat("match_columns", formatParamValueMatchColumns, false),
 		newParamFormat("query", nil, false),
 		newParamFormat("filter", nil, false),
 		newParamFormat("scorer", nil, false),
@@ -350,7 +389,7 @@ var commandFormats = map[string]*commandFormat{
 	"delete": newCommandFormat(
 		nil,
 		newParamFormat("table", nil, true),
-		newParamFormat("key", nil, false),
+		newParamFormat("key", formatParamValueJSON, false),
 		newParamFormat("id", nil, false),
 		newParamFormat("filter", nil, false),
 	),
@@ -435,7 +474,7 @@ var commandFormats = map[string]*commandFormat{
 		newParamFormat("max", nil, false),
 		newParamFormat("max_border", formatParamValueBorder, false),
 		newParamFormat("filter", nil, false),
-		newParamFormat("sortby", nil, false),
+		newParamFormat("sortby", formatParamValueCSV, false),
 		newParamFormat("output_columns", formatParamValueCSV, false),
 		newParamFormat("offset", nil, false),
 		newParamFormat("limit", nil, false),
@@ -446,9 +485,9 @@ var commandFormats = map[string]*commandFormat{
 		newParamFormat("drilldown_limit", nil, false),
 		newParamFormat("drilldown_calc_types", formatParamValueCSV, false),
 		newParamFormat("drilldown_calc_target", nil, false),
-		newParamFormat("sort_keys", nil, false),
+		newParamFormat("sort_keys", formatParamValueCSV, false),
 		newParamFormat("drilldown_sort_keys", formatParamValueCSV, false),
-		newParamFormat("match_columns", formatParamValueCSV, false),
+		newParamFormat("match_columns", formatParamValueMatchColumns, false),
 		newParamFormat("query", nil, false),
 		newParamFormat("drilldown_filter", nil, false),
 	),
@@ -523,11 +562,11 @@ var commandFormats = map[string]*commandFormat{
 	"select": newCommandFormat(
 		formatParamSelect,
 		newParamFormat("table", nil, true),
-		newParamFormat("match_columns", formatParamValueCSV, false),
+		newParamFormat("match_columns", formatParamValueMatchColumns, false),
 		newParamFormat("query", nil, false),
 		newParamFormat("filter", nil, false),
 		newParamFormat("scorer", nil, false),
-		newParamFormat("sortby", nil, false),
+		newParamFormat("sortby", formatParamValueCSV, false),
 		newParamFormat("output_columns", formatParamValueCSV, false),
 		newParamFormat("offset", nil, false),
 		newParamFormat("limit", nil, false),

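For reference, a minimal sketch of how the reworked parameter formatters behave (these are the package-internal helpers shown above; the exact escaping of each element depends on formatString, which is not part of this diff): CSV parameters such as sortby and output_columns are joined with ",", flags with "|", and match_columns now uses the dedicated "||" separator.

	// Hypothetical internal usage of the helpers above.
	csv, _ := formatParamValueCSV([]string{"_id", "_key"})           // elements joined with ","
	flags, _ := formatParamValueFlags([]string{"COLUMN_SCALAR"})     // elements joined with "|"
	mc, _ := formatParamValueMatchColumns([]string{"title", "body"}) // elements joined with "||"
	_, _, _ = csv, flags, mc
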
  Modified: v2/db.go (+753 -67)
===================================================================
--- v2/db.go    2017-06-20 23:55:39 +0900 (ba8ae4a)
+++ v2/db.go    2017-06-28 16:46:30 +0900 (bbd69e4)
@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"reflect"
 	"strings"
 	"time"
@@ -400,8 +399,8 @@ func NewDBDumpOptions() *DBDumpOptions {
 }
 
 // Dump executes dump.
-// On success, it is the caller's responsibility to close the response.
-func (db *DB) Dump(options *DBDumpOptions) (Response, error) {
+// On success, it is the caller's responsibility to close the result.
+func (db *DB) Dump(options *DBDumpOptions) (io.ReadCloser, Response, error) {
 	if options == nil {
 		options = NewDBDumpOptions()
 	}
@@ -414,7 +413,44 @@ func (db *DB) Dump(options *DBDumpOptions) (Response, error) {
 	if options.Tables != "" {
 		params["tables"] = options.Tables
 	}
-	return db.Invoke("dump", params, nil)
+	resp, err := db.Invoke("dump", params, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	return resp, resp, err
+}
+
+// DBIOFlushOptions stores options for DB.IOFlush.
+type DBIOFlushOptions struct {
+	TargetName string // --target_name
+	Recursive  bool   // --recursive
+	OnlyOpened bool   // --only_opened
+}
+
+// NewDBIOFlushOptions returns the default DBIOFlushOptions.
+func NewDBIOFlushOptions() *DBIOFlushOptions {
+	return &DBIOFlushOptions{
+		Recursive: true,
+	}
+}
+
+// IOFlush executes io_flush.
+func (db *DB) IOFlush(options *DBIOFlushOptions) (bool, Response, error) {
+	if options == nil {
+		options = NewDBIOFlushOptions()
+	}
+	params := map[string]interface{}{
+		"recursive":   options.Recursive,
+		"only_opened": options.OnlyOpened,
+	}
+	if options.TargetName != "" {
+		params["target_name"] = options.TargetName
+	}
+	resp, err := db.Invoke("io_flush", params, nil)
+	if err != nil {
+		return false, nil, err
+	}
+	return db.recvBool(resp)
 }
 
 // DBLoadOptions stores options for DB.Load.
@@ -450,30 +486,29 @@ func (db *DB) Load(tbl string, values io.Reader, options *DBLoadOptions) (int, R
 	return db.recvInt(resp)
 }
 
-// encodeRow encodes a row.
-func (db *DB) encodeRow(body []byte, row reflect.Value, fis []*StructFieldInfo) []byte {
+// appendRow appends the JSON-encoded row to body and returns the extended buffer.
+func (db *DB) appendRow(body []byte, row reflect.Value, cfs []*columnField) []byte {
 	body = append(body, '[')
-	for i, fi := range fis {
+	for i, fi := range cfs {
 		if i != 0 {
 			body = append(body, ',')
 		}
-		body = encodeValue(body, row.Field(fi.Index))
+		body = jsonAppendValue(body, row.Field(fi.Index))
 	}
 	body = append(body, ']')
 	return body
 }
 
-// encodeRows encodes rows.
-func (db *DB) encodeRows(body []byte, rows reflect.Value, fis []*StructFieldInfo) []byte {
+// appendRows appends the JSON-encoded rows to body and returns the extended buffer.
+func (db *DB) appendRows(body []byte, rows reflect.Value, cfs []*columnField) []byte {
 	n := rows.Len()
 	for i := 0; i < n; i++ {
 		if i != 0 {
 			body = append(body, ',')
 		}
 		row := rows.Index(i)
-		body = db.encodeRow(body, row, fis)
+		body = db.appendRow(body, row, cfs)
 	}
-	log.Printf("body = %s", body)
 	return body
 }
 
@@ -482,26 +517,28 @@ func (db *DB) LoadRows(tbl string, rows interface{}, options *DBLoadOptions) (in
 	if options == nil {
 		options = NewDBLoadOptions()
 	}
-	si, err := GetStructInfo(rows)
+	rs, err := getRowStruct(rows)
 	if err != nil {
 		return 0, nil, err
 	}
-	var fis []*StructFieldInfo
+	var cfs []*columnField
 	if options.Columns == nil {
-		fis = si.Fields
-		for _, fi := range fis {
-			options.Columns = append(options.Columns, fi.ColumnName)
+		for _, cf := range rs.Columns {
+			if cf.Loadable {
+				options.Columns = append(options.Columns, cf.Name)
+				cfs = append(cfs, cf)
+			}
 		}
 	} else {
 		for _, col := range options.Columns {
-			fi, ok := si.FieldsByColumnName[col]
+			cf, ok := rs.ColumnsByName[col]
 			if !ok {
 				return 0, nil, NewError(InvalidCommand, map[string]interface{}{
 					"column": col,
-					"error":  "The column has no assciated field.",
+					"error":  "The column has no associated field.",
 				})
 			}
-			fis = append(fis, fi)
+			cfs = append(cfs, cf)
 		}
 	}
 
@@ -522,11 +559,11 @@ func (db *DB) LoadRows(tbl string, rows interface{}, options *DBLoadOptions) (in
 				"error": "The type is not supported.",
 			})
 		}
-		body = db.encodeRow(body, v, fis)
+		body = db.appendRow(body, v, cfs)
 	case reflect.Array, reflect.Slice:
-		body = db.encodeRows(body, v, fis)
+		body = db.appendRows(body, v, cfs)
 	case reflect.Struct:
-		body = db.encodeRow(body, v, fis)
+		body = db.appendRow(body, v, cfs)
 	default:
 		return 0, nil, NewError(InvalidCommand, map[string]interface{}{
 			"type":  reflect.TypeOf(rows).Name(),
@@ -614,6 +651,308 @@ func (db *DB) LogReopen() (bool, Response, error) {
 	return db.recvBool(resp)
 }
 
+// DBLogicalCountOptions stores options for DB.LogicalCount.
+type DBLogicalCountOptions struct {
+	Min       time.Time //--min
+	MinBorder bool      // --min_border
+	Max       time.Time // --max
+	MaxBorder bool      // --max_border
+	Filter    string    // --filter
+}
+
+// NewDBLogicalCountOptions returns the default DBLogicalCountOptions.
+func NewDBLogicalCountOptions() *DBLogicalCountOptions {
+	return &DBLogicalCountOptions{
+		MinBorder: true,
+		MaxBorder: true,
+	}
+}
+
+// LogicalCount executes logical_count.
+func (db *DB) LogicalCount(logicalTable, shardKey string, options *DBLogicalCountOptions) (int, Response, error) {
+	params := map[string]interface{}{
+		"logical_table": logicalTable,
+		"shard_key":     shardKey,
+	}
+	if options == nil {
+		options = NewDBLogicalCountOptions()
+	}
+	if !options.Min.IsZero() {
+		params["min"] = options.Min
+	}
+	params["min_border"] = options.MinBorder
+	if !options.Max.IsZero() {
+		params["max"] = options.Max
+	}
+	params["max_border"] = options.MaxBorder
+	if options.Filter != "" {
+		params["filter"] = options.Filter
+	}
+	resp, err := db.Invoke("logical_count", params, nil)
+	if err != nil {
+		return 0, nil, err
+	}
+	return db.recvInt(resp)
+}
+
+// DBLogicalParameters is a result of logical_parameters.
+type DBLogicalParameters struct {
+	RangeIndex string `json:"range_index"`
+}
+
+// LogicalParameters executes logical_parameters.
+func (db *DB) LogicalParameters(rangeIndex string) (*DBLogicalParameters, Response, error) {
+	var params map[string]interface{}
+	if rangeIndex != "" {
+		params = map[string]interface{}{
+			"range_index": rangeIndex,
+		}
+	}
+	resp, err := db.Invoke("logical_parameters", params, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Close()
+	jsonData, err := ioutil.ReadAll(resp)
+	if err != nil {
+		return nil, resp, err
+	}
+	var result DBLogicalParameters
+	if err := json.Unmarshal(jsonData, &result); err != nil {
+		return nil, resp, NewError(InvalidResponse, map[string]interface{}{
+			"method": "json.Unmarshal",
+			"error":  err.Error(),
+		})
+	}
+	return &result, resp, nil
+}
+
+// LogicalRangeFilter executes logical_range_filter.
+func (db *DB) LogicalRangeFilter() (bool, Response, error) {
+	// TODO
+	return false, nil, nil
+}
+
+// DBLogicalSelectOptions stores options for DB.LogicalSelect.
+// http://groonga.org/docs/reference/commands/logical_select.html
+type DBLogicalSelectOptions struct {
+	Min                    time.Time //--min
+	MinBorder              bool      // --min_border
+	Max                    time.Time // --max
+	MaxBorder              bool      // --max_border
+	Filter                 string    // --filter
+	SortKeys               []string  // --sort_keys
+	OutputColumns          []string  // --output_columns
+	Offset                 int       // --offset
+	Limit                  int       // --limit
+	Drilldown              []string  // --drilldown
+	DrilldownSortKeys      []string  // --drilldown_sort_keys
+	DrilldownOutputColumns []string  // --drilldown_output_columns
+	DrilldownOffset        int       // --drilldown_offset
+	DrilldownLimit         int       // --drilldown_limit
+	DrilldownCalcTypes     []string  // --drilldown_calc_types
+	DrilldownCalcTarget    string    // --drilldown_calc_target
+	MatchColumns           []string  // --match_columns
+	Query                  string    // --query
+	DrilldownFilter        string    // --drilldown_filter
+	Columns                map[string]*DBSelectOptionsColumn
+	Drilldowns             map[string]*DBSelectOptionsDrilldown
+}
+
+// NewDBLogicalSelectOptions returns the default DBLogicalSelectOptions.
+func NewDBLogicalSelectOptions() *DBLogicalSelectOptions {
+	return &DBLogicalSelectOptions{
+		Limit:          10,
+		DrilldownLimit: 10,
+	}
+}
+
+// LogicalSelect executes logical_select.
+func (db *DB) LogicalSelect(logicalTable, shardKey string, options *DBLogicalSelectOptions) (io.ReadCloser, Response, error) {
+	if options == nil {
+		options = NewDBLogicalSelectOptions()
+	}
+	params := map[string]interface{}{
+		"command_version": 2,
+		"logical_table":   logicalTable,
+		"shard_key":       shardKey,
+	}
+	if options.MatchColumns != nil {
+		params["match_columns"] = options.MatchColumns
+	}
+	if options.Query != "" {
+		params["query"] = options.Query
+	}
+	if options.Filter != "" {
+		params["filter"] = options.Filter
+	}
+	if options.SortKeys != nil {
+		params["sort_keys"] = options.SortKeys
+	}
+	if options.OutputColumns != nil {
+		params["output_columns"] = options.OutputColumns
+	}
+	if options.Offset != 0 {
+		params["offset"] = options.Offset
+	}
+	if options.Limit != 10 {
+		params["limit"] = options.Limit
+	}
+	if options.Drilldown != nil {
+		params["drilldown"] = options.Drilldown
+	}
+	if options.DrilldownSortKeys != nil {
+		params["drilldown_sort_keys"] = options.DrilldownSortKeys
+	}
+	if options.DrilldownOutputColumns != nil {
+		params["drilldown_output_columns"] = options.DrilldownOutputColumns
+	}
+	if options.DrilldownOffset != 0 {
+		params["drilldown_offset"] = options.DrilldownOffset
+	}
+	if options.DrilldownLimit != 10 {
+		params["drilldown_limit"] = options.DrilldownLimit
+	}
+	if options.DrilldownCalcTypes != nil {
+		params["drilldown_calc_types"] = options.DrilldownCalcTypes
+	}
+	if options.DrilldownCalcTarget != "" {
+		params["drilldown_calc_target"] = options.DrilldownCalcTarget
+	}
+	if options.DrilldownFilter != "" {
+		params["drilldown_filter"] = options.DrilldownFilter
+	}
+	for name, col := range options.Columns {
+		col.setParams("--columns["+name+"]", params)
+	}
+	for label, drilldown := range options.Drilldowns {
+		drilldown.setParams("--drilldowns["+label+"]", params)
+	}
+	resp, err := db.Invoke("logical_select", params, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	return resp, resp, err
+}
+
+// LogicalSelectRows executes logical_select.
+func (db *DB) LogicalSelectRows(logicalTable, shardKey string, rows interface{}, options *DBLogicalSelectOptions) (int, Response, error) {
+	if options == nil {
+		options = NewDBLogicalSelectOptions()
+	}
+	rs, err := getRowStruct(rows)
+	if err != nil {
+		return 0, nil, err
+	}
+	var cfs []*columnField
+	if options.OutputColumns == nil {
+		cfs = rs.Columns
+		for _, cf := range cfs {
+			options.OutputColumns = append(options.OutputColumns, cf.Name)
+		}
+	} else {
+		for _, col := range options.OutputColumns {
+			cf, ok := rs.ColumnsByName[col]
+			if !ok {
+				return 0, nil, NewError(InvalidCommand, map[string]interface{}{
+					"column": col,
+					"error":  "The column has no associated field.",
+				})
+			}
+			cfs = append(cfs, cf)
+		}
+	}
+	result, resp, err := db.LogicalSelect(logicalTable, shardKey, options)
+	if err != nil {
+		return 0, nil, err
+	}
+	defer result.Close()
+	data, err := ioutil.ReadAll(result)
+	if err != nil {
+		return 0, resp, err
+	}
+	if resp.Err() != nil {
+		return 0, resp, err
+	}
+	n, err := db.parseRows(rows, data, cfs)
+	return n, resp, err
+}
+
+// DBLogicalShard is a result of logical_shard_list.
+type DBLogicalShard struct {
+	Name string `json:"name"`
+}
+
+// LogicalShardList executes logical_shard_list.
+func (db *DB) LogicalShardList(logicalTable string) ([]DBLogicalShard, Response, error) {
+	resp, err := db.Invoke("logical_shard_list", map[string]interface{}{
+		"logical_table": logicalTable,
+	}, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Close()
+	jsonData, err := ioutil.ReadAll(resp)
+	if err != nil {
+		return nil, resp, err
+	}
+	var result []DBLogicalShard
+	if err := json.Unmarshal(jsonData, &result); err != nil {
+		return nil, resp, NewError(InvalidResponse, map[string]interface{}{
+			"method": "json.Unmarshal",
+			"error":  err.Error(),
+		})
+	}
+	return result, resp, nil
+}
+
+// DBLogicalTableRemoveOptions stores options for DB.LogicalTableRemove.
+type DBLogicalTableRemoveOptions struct {
+	Min       time.Time //--min
+	MinBorder bool      // --min_border
+	Max       time.Time // --max
+	MaxBorder bool      // --max_border
+	Dependent bool      // --dependent
+	Force     bool      // --force
+}
+
+// NewDBLogicalTableRemoveOptions returns the default DBLogicalTableRemoveOptions.
+func NewDBLogicalTableRemoveOptions() *DBLogicalTableRemoveOptions {
+	return &DBLogicalTableRemoveOptions{
+		MinBorder: true,
+		MaxBorder: true,
+	}
+}
+
+// LogicalTableRemove executes logical_table_remove.
+func (db *DB) LogicalTableRemove(logicalTable, shardKey string, options *DBLogicalTableRemoveOptions) (bool, Response, error) {
+	params := map[string]interface{}{
+		"logical_table": logicalTable,
+		"shard_key":     shardKey,
+	}
+	if options == nil {
+		options = NewDBLogicalTableRemoveOptions()
+	}
+	if !options.Min.IsZero() {
+		params["min"] = options.Min
+	}
+	params["min_border"] = options.MinBorder
+	if !options.Max.IsZero() {
+		params["max"] = options.Max
+	}
+	params["max_border"] = options.MaxBorder
+	params["dependent"] = options.Dependent
+	params["force"] = options.Force
+	resp, err := db.Invoke("logical_table_remove", params, nil)
+	if err != nil {
+		return false, nil, err
+	}
+	return db.recvBool(resp)
+}
+
 // DBNormalizedText is a result of normalize.
 type DBNormalizedText struct {
 	Normalized string   `json:"normalized"`
@@ -686,6 +1025,218 @@ func (db *DB) ObjectExist(name string) (bool, Response, error) {
 	return db.recvBool(resp)
 }
 
+// DBObjectIDName is a part of DBObject*.
+type DBObjectIDName struct {
+	ID   int    `json:"id"`
+	Name string `json:"name"`
+}
+
+// DBObjectType is a part of DBObject*.
+type DBObjectType struct {
+	ID   int            `json:"id"`
+	Name string         `json:"name"`
+	Type DBObjectIDName `json:"type"`
+	Size int            `json:"size"`
+}
+
+// DBObjectColumnType is a part of DBObjectColumn.
+type DBObjectColumnType struct {
+	Name string         `json:"name"`
+	Raw  DBObjectIDName `json:"raw"`
+}
+
+// DBObjectColumnStatistics is a part of DBObjectColumn.
+type DBObjectColumnStatistics struct {
+	MaxSectionID              int   `json:"max_section_id"`
+	NGarbageSegments          int   `json:"n_garbage_segments"`
+	MaxArraySegmentID         int   `json:"max_array_segment_id"`
+	NArraySegments            int   `json:"n_array_segments"`
+	MaxBufferSegmentID        int   `json:"max_buffer_segment_id"`
+	NBufferSegments           int   `json:"n_buffer_segments"`
+	MaxInUsePhysicalSegmentID int   `json:"max_in_use_physical_segment_id"`
+	NUnmanagedSegments        int   `json:"n_unmanaged_segments"`
+	TotalChunkSize            int   `json:"total_chunk_size"`
+	MaxInUseChunkID           int   `json:"max_in_use_chunk_id"`
+	NGarbageChunks            []int `json:"n_garbage_chunks"`
+}
+
+// DBObjectColumnValue is a part of DBObjectColumn.
+type DBObjectColumnValue struct {
+	Type       DBObjectType             `json:"type"`
+	Section    bool                     `json:"section"`
+	Weight     bool                     `json:"weight"`
+	Position   bool                     `json:"position"`
+	Size       int                      `json:"size"`
+	Statistics DBObjectColumnStatistics `json:"statistics"`
+}
+
+// DBObjectColumnSource is a part of DBObjectColumn.
+type DBObjectColumnSource struct {
+	ID       int           `json:"id"`
+	Name     string        `json:"name"`
+	Table    DBObjectTable `json:"table"`
+	FullName string        `json:"full_name"`
+}
+
+// DBObjectColumn is a result of object_inspect.
+type DBObjectColumn struct {
+	ID       int                    `json:"id"`
+	Name     string                 `json:"name"`
+	Table    DBObjectTable          `json:"table"`
+	FullName string                 `json:"full_name"`
+	Type     DBObjectColumnType     `json:"type"`
+	Value    DBObjectColumnValue    `json:"value"`
+	Sources  []DBObjectColumnSource `json:"sources"`
+}
+
+// DBObjectKey is a part of DBObjectTable.
+type DBObjectKey struct {
+	Type         string `json:"type"`
+	TotalSize    int    `json:"total_size"`
+	MaxTotalSize int    `json:"max_total_size"`
+}
+
+// DBObjectValue is a part of DBObjectTable.
+type DBObjectValue struct {
+	Type DBObjectType `json:"type"`
+}
+
+// DBObjectTable stores a result of object_inspect.
+type DBObjectTable struct {
+	ID       int            `json:"id"`
+	Name     string         `json:"name"`
+	Type     DBObjectIDName `json:"type"`
+	Key      DBObjectKey    `json:"key"`
+	Value    DBObjectValue  `json:"value"`
+	NRecords int            `json:"n_records"`
+}
+
+// DBObjectDatabase stores a result of object_inspect.
+type DBObjectDatabase struct {
+	Type      DBObjectIDName `json:"type"`
+	NameTable DBObjectTable  `json:"name_table"`
+}
+
+// ObjectInspect executes object_inspect.
+func (db *DB) ObjectInspect(name string) (interface{}, Response, error) {
+	var params map[string]interface{}
+	if name != "" {
+		params = map[string]interface{}{
+			"name": name,
+		}
+	}
+	resp, err := db.Invoke("object_inspect", params, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Close()
+	jsonData, err := ioutil.ReadAll(resp)
+	if err != nil {
+		return nil, resp, err
+	}
+	switch {
+	case name == "": // Database
+		var result DBObjectDatabase
+		if err := json.Unmarshal(jsonData, &result); err != nil {
+			return nil, resp, NewError(InvalidResponse, map[string]interface{}{
+				"method": "json.Unmarshal",
+				"error":  err.Error(),
+			})
+		}
+		return &result, resp, nil
+	case strings.Contains(name, "."): // Column
+		var result DBObjectColumn
+		if err := json.Unmarshal(jsonData, &result); err != nil {
+			return nil, resp, NewError(InvalidResponse, map[string]interface{}{
+				"method": "json.Unmarshal",
+				"error":  err.Error(),
+			})
+		}
+		return &result, resp, nil
+	default: // Table or type
+		type SizeNRecords struct {
+			Size     *int `json:"size"`
+			NRecords *int `json:"n_records"`
+		}
+		var sizeNRecords SizeNRecords
+		if err := json.Unmarshal(jsonData, &sizeNRecords); err != nil {
+			return nil, resp, NewError(InvalidResponse, map[string]interface{}{
+				"method": "json.Unmarshal",
+				"error":  err.Error(),
+			})
+		}
+		switch {
+		case sizeNRecords.Size != nil:
+			var result DBObjectType
+			if err := json.Unmarshal(jsonData, &result); err != nil {
+				return nil, resp, NewError(InvalidResponse, map[string]interface{}{
+					"method": "json.Unmarshal",
+					"error":  err.Error(),
+				})
+			}
+			return &result, resp, nil
+		case sizeNRecords.NRecords != nil:
+			var result DBObjectTable
+			if err := json.Unmarshal(jsonData, &result); err != nil {
+				return nil, resp, NewError(InvalidResponse, map[string]interface{}{
+					"method": "json.Unmarshal",
+					"error":  err.Error(),
+				})
+			}
+			return &result, resp, nil
+		default:
+			return nil, resp, NewError(InvalidResponse, map[string]interface{}{
+				"command": "object_inspect",
+				"error":   "The response format is invalid.",
+			})
+		}
+	}
+}
+
+// DBObjectFlags is a part of DBObject.
+type DBObjectFlags struct {
+	Names string `json:"names"`
+	Value int    `json:"value"`
+}
+
+// DBObject is a result of object_list.
+type DBObject struct {
+	ID           int              `json:"id"`
+	Name         string           `json:"name"`
+	Opened       bool             `json:"opened"`
+	ValueSize    int              `json:"value_size"`
+	NElements    int              `json:"n_elements"`
+	Type         DBObjectIDName   `json:"type"`
+	Flags        DBObjectFlags    `json:"flags"`
+	Path         string           `json:"path"`
+	Size         int              `json:"size"`
+	PluginID     int              `json:"plugin_id"`
+	Range        *DBObjectIDName  `json:"range"`
+	TokenFilters []DBObjectIDName `json:"token_filters"`
+	Sources      []DBObjectIDName `json:"sources"`
+}
+
+// ObjectList executes object_list.
+func (db *DB) ObjectList() (map[string]*DBObject, Response, error) {
+	resp, err := db.Invoke("object_list", nil, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Close()
+	jsonData, err := ioutil.ReadAll(resp)
+	if err != nil {
+		return nil, resp, err
+	}
+	var result map[string]*DBObject
+	if err := json.Unmarshal(jsonData, &result); err != nil {
+		return nil, resp, NewError(InvalidResponse, map[string]interface{}{
+			"method": "json.Unmarshal",
+			"error":  err.Error(),
+		})
+	}
+	return result, resp, nil
+}
+
 // ObjectRemove executes object_remove.
 func (db *DB) ObjectRemove(name string, force bool) (bool, Response, error) {
 	resp, err := db.Invoke("object_remove", map[string]interface{}{
@@ -720,6 +1271,15 @@ func (db *DB) PluginUnregister(name string) (bool, Response, error) {
 	return db.recvBool(resp)
 }
 
+// Quit executes quit.
+func (db *DB) Quit() (bool, Response, error) {
+	resp, err := db.Invoke("quit", nil, nil)
+	if err != nil {
+		return false, nil, err
+	}
+	return db.recvBool(resp)
+}
+
 // Reindex executes reindex.
 func (db *DB) Reindex(target string) (bool, Response, error) {
 	var params map[string]interface{}
@@ -735,6 +1295,85 @@ func (db *DB) Reindex(target string) (bool, Response, error) {
 	return db.recvBool(resp)
 }
 
+// RequestCancel executes request_cancel.
+func (db *DB) RequestCancel(id int) (bool, Response, error) {
+	resp, err := db.Invoke("request_cancel", map[string]interface{}{
+		"id": id,
+	}, nil)
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Close()
+	jsonData, err := ioutil.ReadAll(resp)
+	if err != nil {
+		return false, resp, err
+	}
+	type Result struct {
+		ID       int  `json:"id"`
+		Canceled bool `json:"canceled"`
+	}
+	var result Result
+	if err := json.Unmarshal(jsonData, &result); err != nil {
+		return false, resp, NewError(InvalidResponse, map[string]interface{}{
+			"method": "json.Unmarshal",
+			"error":  err.Error(),
+		})
+	}
+	return result.Canceled, resp, nil
+}
+
+// RubyEval executes ruby_eval.
+func (db *DB) RubyEval(script string) (interface{}, Response, error) {
+	resp, err := db.Invoke("ruby_eval", map[string]interface{}{
+		"script": script,
+	}, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Close()
+	jsonData, err := ioutil.ReadAll(resp)
+	if err != nil {
+		return nil, resp, err
+	}
+	type Result struct {
+		Value interface{} `json:"value"`
+	}
+	var result Result
+	if err := json.Unmarshal(jsonData, &result); err != nil {
+		return false, resp, NewError(InvalidResponse, map[string]interface{}{
+			"method": "json.Unmarshal",
+			"error":  err.Error(),
+		})
+	}
+	return result.Value, resp, nil
+}
+
+// RubyLoad executes ruby_load.
+func (db *DB) RubyLoad(path string) (interface{}, Response, error) {
+	resp, err := db.Invoke("ruby_load", map[string]interface{}{
+		"path": path,
+	}, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Close()
+	jsonData, err := ioutil.ReadAll(resp)
+	if err != nil {
+		return nil, resp, err
+	}
+	type Result struct {
+		Value interface{} `json:"value"`
+	}
+	var result Result
+	if err := json.Unmarshal(jsonData, &result); err != nil {
+		return false, resp, NewError(InvalidResponse, map[string]interface{}{
+			"method": "json.Unmarshal",
+			"error":  err.Error(),
+		})
+	}
+	return result.Value, resp, nil
+}
+
 // DBSchemaPlugin is a part of DBSchema.
 type DBSchemaPlugin struct {
 	Name string `json:"name"`
@@ -860,9 +1499,12 @@ func (db *DB) Schema() (*DBSchema, Response, error) {
 
 // DBSelectOptionsColumn stores --columns[NAME].
 type DBSelectOptionsColumn struct {
-	Stage string // --columns[NAME].stage
-	Type  string // --columns[NAME].type
-	Value string // --columns[NAME].value
+	Stage           string   // --columns[NAME].stage
+	Flags           []string // --columns[NAME].flags
+	Type            string   // --columns[NAME].type
+	Value           string   // --columns[NAME].value
+	WindowSortKeys  []string // --columns[NAME].window.sort_keys
+	WindowGroupKeys []string // --columns[NAME].window.group_keys
 }
 
 // NewDBSelectOptionsColumn returns the default DBSelectOptionsColumn.
@@ -870,19 +1512,21 @@ func NewDBSelectOptionsColumn() *DBSelectOptionsColumn {
 	return &DBSelectOptionsColumn{}
 }
 
-// DBSelectOptionsDrilldownColumn stores --drilldowns[LABEL].columns[NAME].
-type DBSelectOptionsDrilldownColumn struct {
-	Stage           string   // --drilldowns[LABEL].columns[NAME].stage
-	Flags           string   // --drilldowns[LABEL].columns[NAME].flags
-	Type            string   // --drilldowns[LABEL].columns[NAME].type
-	Value           string   // --drilldowns[LABEL].columns[NAME].value
-	WindowSortKeys  []string // --drilldowns[LABEL].columns[NAME].window.sort_keys
-	WindowGroupKeys []string // --drilldowns[LABEL].columns[NAME].window.group_keys
-}
-
-// NewDBSelectOptionsDrilldownColumn returns the default DBSelectOptionsDrilldownColumn.
-func NewDBSelectOptionsDrilldownColumn() *DBSelectOptionsDrilldownColumn {
-	return &DBSelectOptionsDrilldownColumn{}
+// setParams adds the options to params.
+func (options *DBSelectOptionsColumn) setParams(prefix string, params map[string]interface{}) {
+	// FIXME: slice options are not supported.
+	params[prefix+".stage"] = options.Stage
+	if options.Flags != nil {
+		params[prefix+".flags"] = options.Flags
+	}
+	params[prefix+".type"] = options.Type
+	params[prefix+".value"] = options.Value
+	if options.WindowSortKeys != nil {
+		params[prefix+".window.sort_keys"] = options.WindowSortKeys
+	}
+	if options.WindowGroupKeys != nil {
+		params[prefix+".window.group_keys"] = options.WindowGroupKeys
+	}
 }
 
 // DBSelectOptionsDrilldown stores --drilldowns[LABEL].
@@ -895,7 +1539,7 @@ type DBSelectOptionsDrilldown struct {
 	CalcTypes     []string // --drilldowns[LABEL].calc_types
 	CalcTarget    string   // --drilldowns[LABEL].calc_target
 	Filter        string   // --drilldowns[LABEL].filter
-	Columns       map[string]*DBSelectOptionsDrilldownColumn
+	Columns       map[string]*DBSelectOptionsColumn
 }
 
 // NewDBSelectOptionsDrilldown returns the default DBSelectOptionsDrilldown.
@@ -905,6 +1549,28 @@ func NewDBSelectOptionsDrilldown() *DBSelectOptionsDrilldown {
 	}
 }
 
+// setParams adds the options to params.
+func (options *DBSelectOptionsDrilldown) setParams(prefix string, params map[string]interface{}) {
+	// FIXME: slice options are not supported.
+	params[prefix+".keys"] = options.Keys
+	if options.SortKeys != nil {
+		params[prefix+".sort_keys"] = options.SortKeys
+	}
+	if options.OutputColumns != nil {
+		params[prefix+".output_columns"] = options.OutputColumns
+	}
+	params[prefix+".offset"] = options.Offset
+	params[prefix+".limit"] = options.Limit
+	if options.CalcTypes != nil {
+		params[prefix+".calc_types"] = options.CalcTypes
+	}
+	params[prefix+".calc_target"] = options.CalcTarget
+	params[prefix+".filter"] = options.Filter
+	for name, col := range options.Columns {
+		col.setParams(prefix+".columns["+name+"]", params)
+	}
+}
+
 // DBSelectOptions stores options for DB.Select.
 // http://groonga.org/docs/reference/commands/select.html
 type DBSelectOptions struct {
@@ -943,13 +1609,14 @@ func NewDBSelectOptions() *DBSelectOptions {
 }
 
 // Select executes select.
-// On success, it is the caller's responsibility to close the response.
-func (db *DB) Select(tbl string, options *DBSelectOptions) (Response, error) {
+// On success, it is the caller's responsibility to close the result.
+func (db *DB) Select(tbl string, options *DBSelectOptions) (io.ReadCloser, Response, error) {
 	if options == nil {
 		options = NewDBSelectOptions()
 	}
 	params := map[string]interface{}{
-		"table": tbl,
+		"command_version": 2,
+		"table":           tbl,
 	}
 	if options.MatchColumns != nil {
 		params["match_columns"] = options.MatchColumns
@@ -1020,11 +1687,21 @@ func (db *DB) Select(tbl string, options *DBSelectOptions) (Response, error) {
 	if options.DrilldownFilter != "" {
 		params["drilldown_filter"] = options.DrilldownFilter
 	}
-	return db.Invoke("select", params, nil)
+	for name, col := range options.Columns {
+		col.setParams("--columns["+name+"]", params)
+	}
+	for label, drilldown := range options.Drilldowns {
+		drilldown.setParams("--drilldowns["+label+"]", params)
+	}
+	resp, err := db.Invoke("select", params, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	return resp, resp, err
 }
 
 // parseRows parses rows.
-func (db *DB) parseRows(rows interface{}, data []byte, fis []*StructFieldInfo) (int, error) {
+func (db *DB) parseRows(rows interface{}, data []byte, cfs []*columnField) (int, error) {
 	var raw [][][]json.RawMessage
 	if err := json.Unmarshal(data, &raw); err != nil {
 		return 0, NewError(InvalidResponse, map[string]interface{}{
@@ -1040,10 +1717,10 @@ func (db *DB) parseRows(rows interface{}, data []byte, fis []*StructFieldInfo) (
 
 	rawCols := raw[0][1]
 	nCols := len(rawCols)
-	if nCols != len(fis) {
+	if nCols != len(cfs) {
 		// Remove _score from fields if _score does not exist in the response.
-		for i, field := range fis {
-			if field.ColumnName == "_score" {
+		for i, cf := range cfs {
+			if cf.Name == "_score" {
 				hasScore := false
 				for _, rawCol := range rawCols {
 					var nameType []string
@@ -1059,17 +1736,17 @@ func (db *DB) parseRows(rows interface{}, data []byte, fis []*StructFieldInfo) (
 					}
 				}
 				if !hasScore {
-					for j := i + 1; j < len(fis); j++ {
-						fis[j-1] = fis[j]
+					for j := i + 1; j < len(cfs); j++ {
+						cfs[j-1] = cfs[j]
 					}
-					fis = fis[:len(fis)-1]
+					cfs = cfs[:len(cfs)-1]
 				}
 				break
 			}
 		}
-		if nCols != len(fis) {
+		if nCols != len(cfs) {
 			return 0, NewError(InvalidResponse, map[string]interface{}{
-				"nFields": len(fis),
+				"nFields": len(cfs),
 				"nCols":   nCols,
 				"error":   "nFields and nColumns must be same.",
 			})
@@ -1094,8 +1771,8 @@ func (db *DB) parseRows(rows interface{}, data []byte, fis []*StructFieldInfo) (
 	recs.Set(reflect.MakeSlice(recs.Type(), nRecs, nRecs))
 	for i := 0; i < nRecs; i++ {
 		rec := recs.Index(i)
-		for j, field := range fis {
-			ptr := rec.Field(field.Index).Addr()
+		for j, cf := range cfs {
+			ptr := rec.Field(cf.Index).Addr()
 			switch v := ptr.Interface().(type) {
 			case *bool:
 				if err := json.Unmarshal(rawRecs[i][j], v); err != nil {
@@ -1326,44 +2003,53 @@ func (db *DB) SelectRows(tbl string, rows interface{}, options *DBSelectOptions)
 	if options == nil {
 		options = NewDBSelectOptions()
 	}
-	si, err := GetStructInfo(rows)
+	rs, err := getRowStruct(rows)
 	if err != nil {
 		return 0, nil, err
 	}
-	var fis []*StructFieldInfo
+	var cfs []*columnField
 	if options.OutputColumns == nil {
-		fis = si.Fields
-		for _, fi := range fis {
-			options.OutputColumns = append(options.OutputColumns, fi.ColumnName)
+		cfs = rs.Columns
+		for _, cf := range cfs {
+			options.OutputColumns = append(options.OutputColumns, cf.Name)
 		}
 	} else {
 		for _, col := range options.OutputColumns {
-			fi, ok := si.FieldsByColumnName[col]
+			cf, ok := rs.ColumnsByName[col]
 			if !ok {
 				return 0, nil, NewError(InvalidCommand, map[string]interface{}{
 					"column": col,
-					"error":  "The column has no assciated field.",
+					"error":  "The column has no associated field.",
 				})
 			}
-			fis = append(fis, fi)
+			cfs = append(cfs, cf)
 		}
 	}
-	resp, err := db.Select(tbl, options)
+	result, resp, err := db.Select(tbl, options)
 	if err != nil {
 		return 0, nil, err
 	}
-	defer resp.Close()
-	data, err := ioutil.ReadAll(resp)
+	defer result.Close()
+	data, err := ioutil.ReadAll(result)
 	if err != nil {
 		return 0, resp, err
 	}
 	if resp.Err() != nil {
 		return 0, resp, err
 	}
-	n, err := db.parseRows(rows, data, fis)
+	n, err := db.parseRows(rows, data, cfs)
 	return n, resp, err
 }
 
+// Shutdown executes shutdown.
+func (db *DB) Shutdown() (bool, Response, error) {
+	resp, err := db.Invoke("shutdown", nil, nil)
+	if err != nil {
+		return false, nil, err
+	}
+	return db.recvBool(resp)
+}
+
 // DBStatus is a response of status.
 type DBStatus struct {
 	AllocCount            int           `json:"alloc_count"`

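For reference, a minimal caller-side sketch of the new Dump/Select signatures, mirroring the updated tests below (it assumes a reachable groonga HTTP server and the import path github.com/groonga/grnci/v2, which is inferred from the repository layout rather than stated in this commit). The first return value must be closed by the caller; Response.Err() carries the groonga-side error.

	package main

	import (
		"io/ioutil"
		"log"

		grnci "github.com/groonga/grnci/v2" // assumed import path
	)

	func main() {
		client, err := grnci.NewHTTPClient("", nil)
		if err != nil {
			log.Fatal(err)
		}
		db := grnci.NewDB(client)
		defer db.Close()

		result, resp, err := db.Select("Tbl", nil)
		if err != nil {
			log.Fatal(err)
		}
		body, err := ioutil.ReadAll(result)
		result.Close() // the caller is responsible for closing the result
		if err != nil {
			log.Fatal(err)
		}
		if err := resp.Err(); err != nil {
			log.Printf("groonga error: %v", err)
		}
		log.Printf("body = %s", body)
	}
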
  Modified: v2/db_test.go (+49 -7)
===================================================================
--- v2/db_test.go    2017-06-20 23:55:39 +0900 (d8baf09)
+++ v2/db_test.go    2017-06-28 16:46:30 +0900 (dc12cab)
@@ -92,15 +92,16 @@ func TestDBDump(t *testing.T) {
 	db := NewDB(client)
 	defer db.Close()
 
-	resp, err := db.Dump(nil)
+	result, resp, err := db.Dump(nil)
 	if err != nil {
 		t.Fatalf("db.Dump failed: %v", err)
 	}
-	result, err := ioutil.ReadAll(resp)
+	body, err := ioutil.ReadAll(result)
 	if err != nil {
 		t.Fatalf("ioutil.ReadAll failed: %v", err)
 	}
-	log.Printf("result = %s", result)
+	result.Close()
+	log.Printf("body = %s", body)
 	log.Printf("resp = %#v", resp)
 	if err := resp.Err(); err != nil {
 		log.Printf("error = %#v", err)
@@ -230,6 +231,44 @@ func TestDBNormalizerList(t *testing.T) {
 	}
 }
 
+func TestDBObjectList(t *testing.T) {
+	client, err := NewHTTPClient("", nil)
+	if err != nil {
+		t.Skipf("NewHTTPClient failed: %v", err)
+	}
+	db := NewDB(client)
+	defer db.Close()
+
+	result, resp, err := db.ObjectList()
+	if err != nil {
+		t.Fatalf("db.ObjectList failed: %v", err)
+	}
+	log.Printf("result = %#v", result)
+	log.Printf("resp = %#v", resp)
+	if err := resp.Err(); err != nil {
+		log.Printf("error = %#v", err)
+	}
+}
+
+func TestDBQuit(t *testing.T) {
+	client, err := NewHTTPClient("", nil)
+	if err != nil {
+		t.Skipf("NewHTTPClient failed: %v", err)
+	}
+	db := NewDB(client)
+	defer db.Close()
+
+	result, resp, err := db.Quit()
+	if err != nil {
+		t.Fatalf("db.Quit failed: %v", err)
+	}
+	log.Printf("result = %#v", result)
+	log.Printf("resp = %#v", resp)
+	if err := resp.Err(); err != nil {
+		log.Printf("error = %#v", err)
+	}
+}
+
 func TestDBSchema(t *testing.T) {
 	client, err := NewHTTPClient("", nil)
 	if err != nil {
@@ -257,15 +296,16 @@ func TestDBSelect(t *testing.T) {
 	db := NewDB(client)
 	defer db.Close()
 
-	resp, err := db.Select("Tbl", nil)
+	result, resp, err := db.Select("Tbl", nil)
 	if err != nil {
 		t.Fatalf("db.Select failed: %v", err)
 	}
-	result, err := ioutil.ReadAll(resp)
+	body, err := ioutil.ReadAll(result)
 	if err != nil {
 		t.Fatalf("ioutil.ReadAll failed: %v", err)
 	}
-	log.Printf("result = %s", result)
+	result.Close()
+	log.Printf("body = %s", body)
 	log.Printf("resp = %#v", resp)
 	if err := resp.Err(); err != nil {
 		log.Printf("error = %#v", err)
@@ -303,7 +343,9 @@ func TestDBSelectRows(t *testing.T) {
 	}
 	log.Printf("n = %d", n)
 	log.Printf("rows = %#v", rows)
-	log.Printf("time = %s", rows[0].Time)
+	if len(rows) != 0 {
+		log.Printf("time = %s", rows[0].Time)
+	}
 	log.Printf("resp = %#v", resp)
 	if err := resp.Err(); err != nil {
 		log.Printf("error = %#v", err)

  Modified: v2/error.go (+18 -22)
===================================================================
--- v2/error.go    2017-06-20 23:55:39 +0900 (fae8b6a)
+++ v2/error.go    2017-06-28 16:46:30 +0900 (a11e486)
@@ -17,7 +17,7 @@ const (
 )
 
 // getCodeText returns a string that briefly describes the specified code.
-// getCodeText supports Groonga return codes (C.grn_rc) [,0],
+// getCodeText supports Groonga result codes (C.grn_rc) [,0],
 // Grnci error codes [1000,] and HTTP status codes [100,999].
 func getCodeText(code int) string {
 	switch code {
@@ -215,38 +215,34 @@ type Error struct {
 }
 
 // NewError returns a new Error.
-func NewError(code int, data map[string]interface{}) *Error {
-	return &Error{
+func NewError(code int, data map[string]interface{}) error {
+	err := &Error{
 		Code: code,
 		Text: getCodeText(code),
-		Data: data,
+		Data: make(map[string]interface{}),
 	}
+	for k, v := range data {
+		err.Data[k] = v
+	}
+	return err
 }
 
-// EnhanceError adds data to err and returns it.
-// Note that the arguments err and data may be modified.
-func EnhanceError(err error, data map[string]interface{}) *Error {
-	if err, ok := err.(*Error); ok {
-		if err.Data == nil {
-			err.Data = data
-		} else {
-			for k, v := range data {
-				err.Data[k] = v
-			}
+// EnhanceError adds data to err and returns the resulting error.
+func EnhanceError(err error, data map[string]interface{}) error {
+	if e, ok := err.(*Error); ok {
+		for k, v := range data {
+			e.Data[k] = v
 		}
-		return err
+		return e
 	}
-	if data == nil {
-		data = map[string]interface{}{
-			"error": err.Error(),
-		}
-	} else if _, ok := data["error"]; !ok {
+	e := NewError(UnknownError, data).(*Error)
+	if _, ok := e.Data["error"]; !ok {
 		data["error"] = err.Error()
 	}
-	return NewError(UnknownError, data)
+	return e
 }
 
-// Error returns a string which describes the Error.
+// Error returns the JSON-encoded error object.
 func (e *Error) Error() string {
 	b, _ := json.Marshal(e)
 	return string(b)

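The error constructors now return the error interface and copy the supplied data map; callers that need the structured fields assert to *Error, as the updated tests below do. A minimal sketch:

	// The returned value is an error; the concrete type is *Error.
	err := NewError(InvalidCommand, map[string]interface{}{
		"value": 42,
	})
	err = EnhanceError(err, map[string]interface{}{"extra": "detail"})
	if e, ok := err.(*Error); ok {
		_ = e.Code // numeric code, e.g. InvalidCommand
		_ = e.Data // copied key/value details
	}
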
  Modified: v2/error_test.go (+3 -3)
===================================================================
--- v2/error_test.go    2017-06-20 23:55:39 +0900 (8cc4292)
+++ v2/error_test.go    2017-06-28 16:46:30 +0900 (0069ca8)
@@ -7,7 +7,7 @@ func TestNewError(t *testing.T) {
 		"string": "value",
 		"int":    100,
 	}
-	err := NewError(InvalidAddress, data)
+	err := NewError(InvalidAddress, data).(*Error)
 	if err.Code != InvalidAddress {
 		t.Fatalf("NewError failed: Code: actual = %d, want = %d",
 			err.Code, InvalidAddress)
@@ -33,8 +33,8 @@ func TestEnhanceError(t *testing.T) {
 		"int":    1000,
 		"float":  1.0,
 	}
-	err := NewError(InvalidAddress, data)
-	err = EnhanceError(err, newData)
+	err := NewError(InvalidAddress, data).(*Error)
+	err = EnhanceError(err, newData).(*Error)
 	if err.Code != InvalidAddress {
 		t.Fatalf("NewError failed: Code: actual = %d, want = %d",
 			err.Code, InvalidAddress)

  Added: v2/json.go (+195 -0) 100644
===================================================================
--- /dev/null
+++ v2/json.go    2017-06-28 16:46:30 +0900 (3aebdd6)
@@ -0,0 +1,195 @@
+package grnci
+
+import (
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// jsonAppendBool appends the JSON-encoded v to buf and returns the extended buffer.
+func jsonAppendBool(buf []byte, v bool) []byte {
+	return strconv.AppendBool(buf, v)
+}
+
+// jsonAppendInt appends the JSON-encoded v to buf and returns the extended buffer.
+func jsonAppendInt(buf []byte, v int64) []byte {
+	return strconv.AppendInt(buf, v, 10)
+}
+
+// jsonAppendUint appends the JSON-encoded v to buf and returns the extended buffer.
+func jsonAppendUint(buf []byte, v uint64) []byte {
+	return strconv.AppendUint(buf, v, 10)
+}
+
+// jsonAppendFloat appends the JSON-encoded v to buf and returns the extended buffer.
+func jsonAppendFloat(buf []byte, v float64, bitSize int) []byte {
+	return strconv.AppendFloat(buf, v, 'g', -1, bitSize)
+}
+
+// jsonAppendString appends the JSON-encoded v to buf and returns the extended buffer.
+func jsonAppendString(buf []byte, v string) []byte {
+	buf = append(buf, '"')
+	for i := 0; i < len(v); i++ {
+		switch v[i] {
+		case '\b':
+			buf = append(buf, `\b`...)
+		case '\t':
+			buf = append(buf, `\t`...)
+		case '\n':
+			buf = append(buf, `\n`...)
+		case '\f':
+			buf = append(buf, `\f`...)
+		case '\r':
+			buf = append(buf, `\r`...)
+		case '"':
+			buf = append(buf, `\"`...)
+		case '\\':
+			buf = append(buf, `\\`...)
+		default:
+			buf = append(buf, v[i])
+		}
+	}
+	return append(buf, '"')
+}
+
+// jsonAppendTime appends the JSON-encoded v to buf and returns the extended buffer.
+func jsonAppendTime(buf []byte, v time.Time) []byte {
+	buf = strconv.AppendInt(buf, v.Unix(), 10)
+	usec := v.Nanosecond() / 1000
+	if usec != 0 {
+		buf = append(buf, '.')
+		n := len(buf)
+		if cap(buf) < n+6 {
+			newBuf := make([]byte, n+6, cap(buf)*2)
+			copy(newBuf, buf)
+			buf = newBuf
+		} else {
+			buf = buf[:n+6]
+		}
+		for i := 0; i < 6; i++ {
+			buf[n+5-i] = byte('0' + usec%10)
+			usec /= 10
+		}
+	}
+	return buf
+}
+
+// jsonAppendGeo appends the JSON-encoded v to buf and returns the extended buffer.
+func jsonAppendGeo(buf []byte, v Geo) []byte {
+	buf = append(buf, '"')
+	buf = strconv.AppendInt(buf, int64(v.Lat), 10)
+	buf = append(buf, ',')
+	buf = strconv.AppendInt(buf, int64(v.Long), 10)
+	return append(buf, '"')
+}
+
+// jsonAppendValue appends the JSON-encoded v to buf and returns the extended buffer.
+// If the type of v is unsupported, it appends "null".
+func jsonAppendValue(buf []byte, v reflect.Value) []byte {
+	switch v.Kind() {
+	case reflect.Bool:
+		return jsonAppendBool(buf, v.Bool())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return jsonAppendInt(buf, v.Int())
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return jsonAppendUint(buf, v.Uint())
+	case reflect.Float32:
+		return jsonAppendFloat(buf, v.Float(), 32)
+	case reflect.Float64:
+		return jsonAppendFloat(buf, v.Float(), 64)
+	case reflect.String:
+		return jsonAppendString(buf, v.String())
+	case reflect.Struct:
+		switch v := v.Interface().(type) {
+		case time.Time:
+			return jsonAppendTime(buf, v)
+		case Geo:
+			return jsonAppendGeo(buf, v)
+		default:
+			return append(buf, "null"...)
+		}
+	case reflect.Ptr, reflect.Interface:
+		if v.IsNil() {
+			return append(buf, "null"...)
+		}
+		return jsonAppendValue(buf, v.Elem())
+	case reflect.Array:
+		buf = append(buf, '[')
+		n := v.Len()
+		for i := 0; i < n; i++ {
+			if i != 0 {
+				buf = append(buf, ',')
+			}
+			buf = jsonAppendValue(buf, v.Index(i))
+		}
+		return append(buf, ']')
+	case reflect.Slice:
+		if v.IsNil() {
+			return append(buf, "null"...)
+		}
+		buf = append(buf, '[')
+		n := v.Len()
+		for i := 0; i < n; i++ {
+			if i != 0 {
+				buf = append(buf, ',')
+			}
+			buf = jsonAppendValue(buf, v.Index(i))
+		}
+		return append(buf, ']')
+	default:
+		return append(buf, "null"...)
+	}
+}
+
+// jsonAppend appends the JSON-encoded v to buf and returns the extended buffer.
+// If the type of v is unsupported, it appends "null".
+func jsonAppend(buf []byte, v interface{}) []byte {
+	return jsonAppendValue(buf, reflect.ValueOf(v))
+}
+
+// jsonFormatBool returns the JSON-encoded v.
+func jsonFormatBool(v bool) string {
+	return strconv.FormatBool(v)
+}
+
+// jsonFormatInt returns the JSON-encoded v.
+func jsonFormatInt(v int64) string {
+	return strconv.FormatInt(v, 10)
+}
+
+// jsonFormatUint returns the JSON-encoded v.
+func jsonFormatUint(v uint64) string {
+	return strconv.FormatUint(v, 10)
+}
+
+// jsonFormatFloat returns the JSON-encoded v.
+func jsonFormatFloat(v float64, bitSize int) string {
+	return strconv.FormatFloat(v, 'g', -1, bitSize)
+}
+
+// jsonFormatString returns the JSON-encoded v.
+func jsonFormatString(v string) string {
+	return string(jsonAppendString(nil, v))
+}
+
+// jsonFormatTime returns the JSON-encoded v.
+func jsonFormatTime(v time.Time) string {
+	return string(jsonAppendTime(nil, v))
+}
+
+// jsonFormatGeo returns the JSON-encoded v.
+func jsonFormatGeo(v Geo) string {
+	return string(jsonAppendGeo(nil, v))
+}
+
+// jsonFormatValue returns the JSON-encoded v.
+// If the type of v is unsupported, it returns "null".
+func jsonFormatValue(v reflect.Value) string {
+	return string(jsonAppendValue(nil, v))
+}
+
+// jsonFormat returns the JSON-encoded v.
+// If the type of v is unsupported, it returns "null".
+func jsonFormat(v interface{}) string {
+	return jsonFormatValue(reflect.ValueOf(v))
+}

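A short sketch of the new package-internal JSON helpers (the expected outputs are taken from the tests below):

	var buf []byte
	buf = jsonAppend(buf, []string{"Hello", "World"})  // ["Hello","World"]
	t := jsonFormatTime(time.Unix(1234567890, 0))      // 1234567890
	g := jsonFormatGeo(Geo{Lat: 123456, Long: 234567}) // "123456,234567"
	_, _, _ = buf, t, g
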
  Added: v2/json_test.go (+390 -0) 100644
===================================================================
--- /dev/null
+++ v2/json_test.go    2017-06-28 16:46:30 +0900 (8cd09c1)
@@ -0,0 +1,390 @@
+package grnci
+
+import (
+	"math"
+	"testing"
+	"time"
+)
+
+func TestJSONAppendBool(t *testing.T) {
+	var buf []byte
+	var want string
+	buf = jsonAppendBool(buf, true)
+	if want += "true"; string(buf) != want {
+		t.Fatalf("jsonAppendBool failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppendBool(buf, false)
+	if want += "false"; string(buf) != want {
+		t.Fatalf("jsonAppendBool failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendInt(t *testing.T) {
+	var buf []byte
+	var want string
+	buf = jsonAppendInt(buf, 0)
+	if want += "0"; string(buf) != want {
+		t.Fatalf("jsonAppendInt failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppendInt(buf, 9223372036854775807)
+	if want += "9223372036854775807"; string(buf) != want {
+		t.Fatalf("jsonAppendInt failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppendInt(buf, -9223372036854775808)
+	if want += "-9223372036854775808"; string(buf) != want {
+		t.Fatalf("jsonAppendInt failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendUint(t *testing.T) {
+	var buf []byte
+	var want string
+	buf = jsonAppendUint(buf, 0)
+	if want += "0"; string(buf) != want {
+		t.Fatalf("jsonAppendUint failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppendUint(buf, 18446744073709551615)
+	if want += "18446744073709551615"; string(buf) != want {
+		t.Fatalf("jsonAppendUint failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendFloat(t *testing.T) {
+	var buf []byte
+	var want string
+	buf = jsonAppendFloat(buf, 0.0, 64)
+	if want += "0"; string(buf) != want {
+		t.Fatalf("jsonAppendFloat failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppendFloat(buf, 1.25, 64)
+	if want += "1.25"; string(buf) != want {
+		t.Fatalf("jsonAppendFloat failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppendFloat(buf, -1.25, 64)
+	if want += "-1.25"; string(buf) != want {
+		t.Fatalf("jsonAppendFloat failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppendFloat(buf, math.Pow(2, -16), 64)
+	if want += "1.52587890625e-05"; string(buf) != want {
+		t.Fatalf("jsonAppendFloat failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendFloat32(t *testing.T) {
+	var buf []byte
+	var want string
+	buf = jsonAppendFloat(buf, 1.234567890123456789, 32)
+	if want += "1.2345679"; string(buf) != want {
+		t.Fatalf("jsonAppendFloat failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendFloat64(t *testing.T) {
+	var buf []byte
+	var want string
+	buf = jsonAppendFloat(buf, 1.234567890123456789, 64)
+	if want += "1.2345678901234567"; string(buf) != want {
+		t.Fatalf("jsonAppendFloat failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendString(t *testing.T) {
+	var buf []byte
+	var want string
+	buf = jsonAppendString(buf, "Hello")
+	if want += "\"Hello\""; string(buf) != want {
+		t.Fatalf("jsonAppendString failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppendString(buf, "World")
+	if want += "\"World\""; string(buf) != want {
+		t.Fatalf("jsonAppendString failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppendString(buf, " \t\n\"")
+	if want += "\" \\t\\n\\\"\""; string(buf) != want {
+		t.Fatalf("jsonAppendString failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendTime(t *testing.T) {
+	var buf []byte
+	var want string
+	buf = jsonAppendTime(buf, time.Unix(1234567890, 0))
+	if want += "1234567890"; string(buf) != want {
+		t.Fatalf("jsonAppendTime failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppendTime(buf, time.Unix(1123456789, 987123654))
+	if want += "1123456789.987123"; string(buf) != want {
+		t.Fatalf("jsonAppendTime failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendGeo(t *testing.T) {
+	var buf []byte
+	var want string
+	buf = jsonAppendGeo(buf, Geo{Lat: 123456, Long: 234567})
+	if want += "\"123456,234567\""; string(buf) != want {
+		t.Fatalf("jsonAppendGeo failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppendGeo(buf, Geo{Lat: -123456, Long: -234567})
+	if want += "\"-123456,-234567\""; string(buf) != want {
+		t.Fatalf("jsonAppendTime failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendScalar(t *testing.T) {
+	var buf []byte
+	var want string
+	buf = jsonAppend(buf, true)
+	if want += "true"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, int8(-128))
+	if want += "-128"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, int16(-32768))
+	if want += "-32768"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, int32(-2147483648))
+	if want += "-2147483648"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, int64(-9223372036854775808))
+	if want += "-9223372036854775808"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, int(-9223372036854775808))
+	if want += "-9223372036854775808"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, uint8(255))
+	if want += "255"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, uint16(65535))
+	if want += "65535"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, uint32(4294967295))
+	if want += "4294967295"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, uint64(18446744073709551615))
+	if want += "18446744073709551615"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, uint(18446744073709551615))
+	if want += "18446744073709551615"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, float32(1.234567890123456789))
+	if want += "1.2345679"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, float64(1.234567890123456789))
+	if want += "1.2345678901234567"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, "String")
+	if want += "\"String\""; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, time.Unix(1234567890, 123456789))
+	if want += "1234567890.123456"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+	buf = jsonAppend(buf, Geo{Lat: 123456, Long: 234567})
+	if want += "\"123456,234567\""; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendPtr(t *testing.T) {
+	var buf []byte
+	var want string
+	v := 123456
+	buf = jsonAppend(buf, &v)
+	if want += "123456"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendArray(t *testing.T) {
+	var buf []byte
+	var want string
+	v := [3]int{123, 456, 789}
+	buf = jsonAppend(buf, v)
+	if want += "[123,456,789]"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONAppendSlice(t *testing.T) {
+	var buf []byte
+	var want string
+	v := []int{987, 654, 321}
+	buf = jsonAppend(buf, v)
+	if want += "[987,654,321]"; string(buf) != want {
+		t.Fatalf("jsonAppend failed: actual = %s, want = %s", buf, want)
+	}
+}
+
+func TestJSONFormatBool(t *testing.T) {
+	if want, actual := "true", jsonFormatBool(true); actual != want {
+		t.Fatalf("jsonFormatBool failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "false", jsonFormatBool(false); actual != want {
+		t.Fatalf("jsonFormatBool failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatInt(t *testing.T) {
+	if want, actual := "0", jsonFormatInt(0); actual != want {
+		t.Fatalf("jsonFormatInt failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "9223372036854775807", jsonFormatInt(9223372036854775807); actual != want {
+		t.Fatalf("jsonFormatInt failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "-9223372036854775808", jsonFormatInt(-9223372036854775808); actual != want {
+		t.Fatalf("jsonFormatInt failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatUint(t *testing.T) {
+	if want, actual := "0", jsonFormatUint(0); actual != want {
+		t.Fatalf("jsonFormatUint failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "18446744073709551615", jsonFormatUint(18446744073709551615); actual != want {
+		t.Fatalf("jsonFormatUint failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatFloat(t *testing.T) {
+	if want, actual := "0", jsonFormatFloat(0.0, 64); actual != want {
+		t.Fatalf("jsonFormatFloat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "1.25", jsonFormatFloat(1.25, 64); actual != want {
+		t.Fatalf("jsonFormatFloat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "-1.25", jsonFormatFloat(-1.25, 64); actual != want {
+		t.Fatalf("jsonFormatFloat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "1.52587890625e-05", jsonFormatFloat(math.Pow(2, -16), 64); actual != want {
+		t.Fatalf("jsonFormatFloat failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatFloat32(t *testing.T) {
+	if want, actual := "1.2345679", jsonFormatFloat(1.234567890123456789, 32); actual != want {
+		t.Fatalf("jsonFormatFloat failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatFloat64(t *testing.T) {
+	if want, actual := "1.2345678901234567", jsonFormatFloat(1.234567890123456789, 64); actual != want {
+		t.Fatalf("jsonFormatFloat failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatString(t *testing.T) {
+	if want, actual := "\"Hello\"", jsonFormatString("Hello"); actual != want {
+		t.Fatalf("jsonFormatString failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "\"World\"", jsonFormatString("World"); actual != want {
+		t.Fatalf("jsonFormatString failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "\" \\t\\n\\\"\"", jsonFormatString(" \t\n\""); actual != want {
+		t.Fatalf("jsonFormatString failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatTime(t *testing.T) {
+	if want, actual := "1234567890", jsonFormatTime(time.Unix(1234567890, 0)); actual != want {
+		t.Fatalf("jsonFormatTime failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "1123456789.987123", jsonFormatTime(time.Unix(1123456789, 987123654)); actual != want {
+		t.Fatalf("jsonFormatTime failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatGeo(t *testing.T) {
+	if want, actual := "\"123456,234567\"", jsonFormatGeo(Geo{Lat: 123456, Long: 234567}); actual != want {
+		t.Fatalf("jsonFormatGeo failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "\"-123456,-234567\"", jsonFormatGeo(Geo{Lat: -123456, Long: -234567}); actual != want {
+		t.Fatalf("jsonFormatGeo failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatScalar(t *testing.T) {
+	if want, actual := "true", jsonFormat(true); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "-128", jsonFormat(int8(-128)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "-32768", jsonFormat(int16(-32768)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "-2147483648", jsonFormat(int32(-2147483648)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "-9223372036854775808", jsonFormat(int64(-9223372036854775808)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "-9223372036854775808", jsonFormat(int(-9223372036854775808)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "255", jsonFormat(uint8(255)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "65535", jsonFormat(uint16(65535)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "4294967295", jsonFormat(uint32(4294967295)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "18446744073709551615", jsonFormat(uint64(18446744073709551615)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "18446744073709551615", jsonFormat(uint(18446744073709551615)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "1.2345679", jsonFormat(float32(1.234567890123456789)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "1.2345678901234567", jsonFormat(1.234567890123456789); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "\"String\"", jsonFormat("String"); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "1234567890.123456", jsonFormatTime(time.Unix(1234567890, 123456789)); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+	if want, actual := "\"123456,234567\"", jsonFormat(Geo{Lat: 123456, Long: 234567}); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatPtr(t *testing.T) {
+	v := 123456
+	if want, actual := "123456", jsonFormat(&v); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatArray(t *testing.T) {
+	v := [3]int{123, 456, 789}
+	if want, actual := "[123,456,789]", jsonFormat(v); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+}
+
+func TestJSONFormatSlice(t *testing.T) {
+	v := []int{987, 654, 321}
+	if want, actual := "[987,654,321]", jsonFormat(v); actual != want {
+		t.Fatalf("jsonFormat failed: actual = %s, want = %s", actual, want)
+	}
+}

  Modified: v2/response.go (+15 -4)
===================================================================
--- v2/response.go    2017-06-20 23:55:39 +0900 (f6cbb2b)
+++ v2/response.go    2017-06-28 16:46:30 +0900 (8ec87db)
@@ -4,12 +4,22 @@ import (
 	"time"
 )
 
-// Response is an interface for responses.
+// Response is the interface of responses.
 type Response interface {
-	// Start returns the start time.
+	// Start returns the start time of the command.
+	// The definition of the start time varies according to the protocol.
+	//
+	// HTTPClient returns a response with the server-side start time,
+	// because a Groonga HTTP server includes the start time in its result.
+	//
+	// GQTPConn and GQTPClient return a response with the client-side start time,
+	// because a GQTP server does not return the start time.
+	// libgrn.Conn and libgrn.Client also return the client-side start time.
 	Start() time.Time
 
-	// Elapsed returns the elapsed time.
+	// Elapsed returns the elapsed time of the command.
+	// The definition of the elapsed time likewise varies according to the protocol.
+	// See Start above for the details.
 	Elapsed() time.Duration
 
 	// Read reads the response body at most len(p) bytes into p.
@@ -19,6 +29,7 @@ type Response interface {
 	// Close closes the response body.
 	Close() error
 
-	// Err returns an error.
+	// Err returns the details of an error response.
+	// If the command was successfully completed, Err returns nil.
 	Err() error
 }
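
Note (editorial, not part of the commit): a usage sketch of the interface documented above, assuming Read has the standard io.Reader signature and returns io.EOF at the end of the body; the readBody name is hypothetical.

package grnci

import (
	"io"
	"log"
)

// readBody (hypothetical) drains a Response using only the methods declared
// in the interface above, then reports the timing and the error state.
func readBody(resp Response) ([]byte, error) {
	defer resp.Close() // release the underlying connection
	var body []byte
	buf := make([]byte, 4096)
	for {
		n, err := resp.Read(buf)
		body = append(body, buf[:n]...)
		if err == io.EOF {
			break
		}
		if err != nil {
			return body, err
		}
	}
	log.Printf("start = %v, elapsed = %v", resp.Start(), resp.Elapsed())
	return body, resp.Err() // nil if the command completed successfully
}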

  Modified: v2/type.go (+542 -146)
===================================================================
--- v2/type.go    2017-06-20 23:55:39 +0900 (3c6d48f)
+++ v2/type.go    2017-06-28 16:46:30 +0900 (c85064c)
@@ -3,6 +3,8 @@ package grnci
 import (
 	"reflect"
 	"strconv"
+	"strings"
+	"sync"
 	"time"
 )
 
@@ -12,177 +14,571 @@ type Geo struct {
 	Long int32 // Longitude in milliseconds.
 }
 
-// encodeBool encodes the JSON-encoded v to buf and returns the extended buffer.
-func encodeBool(buf []byte, v bool) []byte {
-	return strconv.AppendBool(buf, v)
+// formatBool returns v formatted as a command parameter value ("yes" or "no").
+func formatBool(v bool) string {
+	if v {
+		return "yes"
+	}
+	return "no"
+}
+
+// formatInt returns v formatted as a command parameter value.
+func formatInt(v int64) string {
+	return strconv.FormatInt(v, 10)
+}
+
+// formatUint returns v formatted as a command parameter value.
+func formatUint(v uint64) string {
+	return strconv.FormatUint(v, 10)
+}
+
+// formatFloat returns v formatted as a command parameter value.
+func formatFloat(v float64, bitSize int) string {
+	return strconv.FormatFloat(v, 'g', -1, bitSize)
 }
 
-// encodeInt encodes the JSON-encoded v to buf and returns the extended buffer.
-func encodeInt(buf []byte, v int64) []byte {
-	return strconv.AppendInt(buf, v, 10)
+// formatString returns v formatted as a command parameter value.
+func formatString(v string) string {
+	return v
 }
 
-// encodeUint encodes the JSON-encoded v to buf and returns the extended buffer.
-func encodeUint(buf []byte, v uint64) []byte {
-	return strconv.AppendUint(buf, v, 10)
+// formatTime returns v formatted as a command parameter value.
+func formatTime(v time.Time) string {
+	return string(jsonAppendTime(nil, v))
 }
 
-// encodeFloat encodes the JSON-encoded v to buf and returns the extended buffer.
-func encodeFloat(buf []byte, v float64) []byte {
-	return strconv.AppendFloat(buf, v, 'g', -1, 64)
+// formatGeo returns v formatted as a command parameter value.
+func formatGeo(v Geo) string {
+	return string(jsonAppendGeo(nil, v))
 }
 
-// encodeString encodes the JSON-encoded v to buf and returns the extended buffer.
-func encodeString(buf []byte, v string) []byte {
-	buf = append(buf, '"')
-	for i := 0; i < len(v); i++ {
-		switch v[i] {
-		case '\b', '\t', '\n', '\f', '\r', '"', '\\':
-			buf = append(buf, '\\')
+const (
+	// columnFieldTagKey is the tag key for a struct field associated with a column.
+	columnFieldTagKey = "grnci"
+	// columnFieldTagDelim is the delimiter in a struct field tag value.
+	columnFieldTagDelim = ";"
+)
+
+// columnField stores the details of a struct field associated with a column.
+// The tag format is as follows:
+//
+//  grnci:"_key;key_type;flags;default_tokenizer;normalizer;token_filters"
+//  grnci:"_value;value_type"
+//  grnci:"name;type;flags"
+//
+// TODO: support dynamic columns (--columns[NAME]).
+type columnField struct {
+	Index            int                  // Index of the struct field
+	Field            *reflect.StructField // Struct field
+	Name             string               // Column name
+	Type             string               // --key_type for _key, --value_type for _value or --type for columns
+	Flags            []string             // --flags for _key and columns
+	DefaultTokenizer string               // --default_tokenizer for _key
+	Normalizer       string               // --normalizer for _key
+	TokenFilters     []string             // --token_filters for _key
+	Loadable         bool                 // Whether or not the column is loadable
+}
+
+// checkTableName checks if s is valid as a table name.
+func checkTableName(s string) error {
+	switch s {
+	case "":
+		return NewError(InvalidType, map[string]interface{}{
+			"name":  s,
+			"error": "A table name must not be empty.",
+		})
+	case "Bool", "Int8", "Int16", "Int32", "Int64", "UInt8", "UInt16", "UInt32", "UInt64",
+		"Float", "ShortText", "Text", "LongText", "Time", "WGS84GeoPoint", "TokyoGeoPoint":
+		return NewError(InvalidType, map[string]interface{}{
+			"name":  s,
+			"error": "The name specifies a built-in type and not available as a table name.",
+		})
+	}
+	if s[0] == '_' {
+		return NewError(InvalidType, map[string]interface{}{
+			"name":  s,
+			"error": "A table name must not start with '_'.",
+		})
+	}
+	for _, c := range s {
+		switch {
+		case c >= '0' && c <= '9':
+		case c >= 'A' && c <= 'Z':
+		case c >= 'a' && c <= 'z':
+		case c == '_':
+		default:
+			return NewError(InvalidType, map[string]interface{}{
+				"name":  s,
+				"error": "A table name must consist of [0-9A-Za-z_].",
+			})
 		}
-		buf = append(buf, v[i])
 	}
-	return append(buf, '"')
+	return nil
 }
 
-// encodeTime encodes the JSON-encoded v to buf and returns the extended buffer.
-func encodeTime(buf []byte, v time.Time) []byte {
-	buf = strconv.AppendInt(buf, v.Unix(), 10)
-	usec := v.Nanosecond() / 1000
-	if usec != 0 {
-		buf = append(buf, '.')
-		n := len(buf)
-		if cap(buf) < n+6 {
-			newBuf := make([]byte, n+6, cap(buf)*2)
-			copy(newBuf, buf)
-			buf = newBuf
-		} else {
-			buf = buf[:n+6]
+// parseIDOptions parses options of _id.
+func (cf *columnField) parseIDOptions(options []string) error {
+	if len(options) > 1 {
+		return NewError(InvalidType, map[string]interface{}{
+			"name":    cf.Name,
+			"options": options,
+			"error":   "The tag must not contain more than one option.",
+		})
+	}
+	if len(options) > 0 {
+		cf.Type = options[0]
+	}
+	switch cf.Type {
+	case "":
+		cf.Type = "UInt32"
+	case "UInt32":
+	default:
+		return NewError(InvalidType, map[string]interface{}{
+			"type":  cf.Type,
+			"error": "The type is not supported as _id.",
+		})
+	}
+	return nil
+}
+
+// checkKeyType checks if cf.Type is valid as _key.
+func (cf *columnField) checkKeyType() error {
+	switch cf.Type {
+	case "":
+		// _key must not be a pointer.
+		typ := cf.Field.Type
+		switch typ.Kind() {
+		case reflect.Bool:
+			cf.Type = "Bool"
+		case reflect.Int8:
+			cf.Type = "Int8"
+		case reflect.Int16:
+			cf.Type = "Int16"
+		case reflect.Int32:
+			cf.Type = "Int32"
+		case reflect.Int64, reflect.Int:
+			cf.Type = "Int64"
+		case reflect.Uint8:
+			cf.Type = "UInt8"
+		case reflect.Uint16:
+			cf.Type = "UInt16"
+		case reflect.Uint32:
+			cf.Type = "UInt32"
+		case reflect.Uint64, reflect.Uint:
+			cf.Type = "UInt64"
+		case reflect.Float32, reflect.Float64:
+			cf.Type = "Float"
+		case reflect.String:
+			cf.Type = "ShortText"
+		case reflect.Struct:
+			switch reflect.Zero(typ).Interface().(type) {
+			case time.Time:
+				cf.Type = "Time"
+			case Geo:
+				cf.Type = "WGS84GeoPoint"
+			}
+		}
+		if cf.Type == "" {
+			return NewError(InvalidType, map[string]interface{}{
+				"type":  reflect.TypeOf(cf.Field.Type).Name(),
+				"error": "The type is not supported as _key.",
+			})
+		}
+	case "Bool", "Int8", "Int16", "Int32", "Int64", "UInt8", "UInt16", "UInt32", "UInt64",
+		"Float", "ShortText", "Time", "WGS84GeoPoint", "TokyoGeoPoint":
+	default:
+		if err := checkTableName(cf.Type); err != nil {
+			return NewError(InvalidType, map[string]interface{}{
+				"type":  cf.Type,
+				"error": "The type is not supported as _key.",
+			})
+		}
+	}
+	return nil
+}
+
+// checkKey checks if cf is valid as _key.
+func (cf *columnField) checkKey() error {
+	if err := cf.checkKeyType(); err != nil {
+		return err
+	}
+	// TODO: check Flags, DefaultTokenizer, Normalizer and TokenFilters.
+	return nil
+}
+
+// parseKeyOptions parses options of _key.
+func (cf *columnField) parseKeyOptions(options []string) error {
+	if len(options) > 5 {
+		return NewError(InvalidType, map[string]interface{}{
+			"name":    cf.Name,
+			"options": options,
+			"error":   "The tag must not contain more than 5 options.",
+		})
+	}
+	if len(options) > 0 {
+		cf.Type = options[0]
+	}
+	if len(options) > 1 {
+		cf.Flags = strings.Split(options[1], "|")
+	}
+	if len(options) > 2 {
+		cf.DefaultTokenizer = options[2]
+	}
+	if len(options) > 3 {
+		cf.Normalizer = options[3]
+	}
+	if len(options) > 4 {
+		cf.TokenFilters = strings.Split(options[4], ",")
+	}
+	if err := cf.checkKey(); err != nil {
+		return err
+	}
+	cf.Loadable = true
+	return nil
+}
+
+// checkValue checks if cf is valid as _value.
+func (cf *columnField) checkValue() error {
+	switch cf.Type {
+	case "":
+		typ := cf.Field.Type
+		for typ.Kind() == reflect.Ptr {
+			typ = typ.Elem()
 		}
-		for i := 0; i < 6; i++ {
-			buf[n+5-i] = byte('0' + usec%10)
-			usec /= 10
+		switch typ.Kind() {
+		case reflect.Bool:
+			cf.Type = "Bool"
+		case reflect.Int8:
+			cf.Type = "Int8"
+		case reflect.Int16:
+			cf.Type = "Int16"
+		case reflect.Int32:
+			cf.Type = "Int32"
+		case reflect.Int64, reflect.Int:
+			cf.Type = "Int64"
+		case reflect.Uint8:
+			cf.Type = "UInt8"
+		case reflect.Uint16:
+			cf.Type = "UInt16"
+		case reflect.Uint32:
+			cf.Type = "UInt32"
+		case reflect.Uint64, reflect.Uint:
+			cf.Type = "UInt64"
+		case reflect.Float32, reflect.Float64:
+			cf.Type = "Float"
+		case reflect.Struct:
+			switch reflect.Zero(typ).Interface().(type) {
+			case time.Time:
+				cf.Type = "Time"
+			case Geo:
+				cf.Type = "WGS84GeoPoint"
+			}
+		}
+		if cf.Type == "" {
+			return NewError(InvalidType, map[string]interface{}{
+				"type":  reflect.TypeOf(cf.Field.Type).Name(),
+				"error": "The type is not supported as _value.",
+			})
 		}
+	case "Bool", "Int8", "Int16", "Int32", "Int64", "UInt8", "UInt16", "UInt32", "UInt64",
+		"Float", "Time", "WGS84GeoPoint", "TokyoGeoPoint":
+	default:
+		return NewError(InvalidType, map[string]interface{}{
+			"type":  cf.Type,
+			"error": "The type is not supported as _value.",
+		})
 	}
-	return buf
+	return nil
 }
 
-// encodeGeo encodes the JSON-encoded v to buf and returns the extended buffer.
-func encodeGeo(buf []byte, v Geo) []byte {
-	buf = append(buf, '"')
-	buf = strconv.AppendInt(buf, int64(v.Lat), 10)
-	buf = append(buf, ',')
-	buf = strconv.AppendInt(buf, int64(v.Long), 10)
-	return append(buf, '"')
+// parseValueOptions parses options of _value.
+func (cf *columnField) parseValueOptions(options []string) error {
+	if len(options) > 1 {
+		return NewError(InvalidType, map[string]interface{}{
+			"name":    cf.Name,
+			"options": options,
+			"error":   "The tag must not contain more than one option.",
+		})
+	}
+	if len(options) > 0 {
+		cf.Type = options[0]
+	}
+	if err := cf.checkValue(); err != nil {
+		return err
+	}
+	cf.Loadable = true
+	return nil
 }
 
-// encodeValue encodes the JSON-encoded v to buf and returns the extended buffer.
-func encodeValue(buf []byte, v reflect.Value) []byte {
-	switch v.Kind() {
+// parseScoreOptions parses options of _score.
+func (cf *columnField) parseScoreOptions(options []string) error {
+	if len(options) > 1 {
+		return NewError(InvalidType, map[string]interface{}{
+			"name":    cf.Name,
+			"options": options,
+			"error":   "The tag must not contain more than one option.",
+		})
+	}
+	if len(options) > 0 {
+		cf.Type = options[0]
+	}
+	// If the command version is 1, the type of _score is Int32.
+	// Otherwise, the type of _score is Float.
+	switch cf.Type {
+	case "":
+		cf.Type = "Float"
+	case "Int32", "Float":
+	default:
+		return NewError(InvalidType, map[string]interface{}{
+			"type":  cf.Type,
+			"error": "The type is not supported as _score.",
+		})
+	}
+	return nil
+}
+
+// detectColumnType detects cf.Type from cf.Field.Type.
+func (cf *columnField) detectColumnType() error {
+	typ := cf.Field.Type
+	dim := 0
+Loop:
+	for {
+		switch typ.Kind() {
+		case reflect.Ptr:
+			typ = typ.Elem()
+		case reflect.Array, reflect.Slice:
+			dim++
+			typ = typ.Elem()
+		default:
+			break Loop
+		}
+	}
+	switch typ.Kind() {
 	case reflect.Bool:
-		return encodeBool(buf, v.Bool())
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return encodeInt(buf, v.Int())
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		return encodeUint(buf, v.Uint())
-	case reflect.Float64:
-		return encodeFloat(buf, v.Float())
+		cf.Type = "Bool"
+	case reflect.Int8:
+		cf.Type = "Int8"
+	case reflect.Int16:
+		cf.Type = "Int16"
+	case reflect.Int32:
+		cf.Type = "Int32"
+	case reflect.Int64, reflect.Int:
+		cf.Type = "Int64"
+	case reflect.Uint8:
+		cf.Type = "UInt8"
+	case reflect.Uint16:
+		cf.Type = "UInt16"
+	case reflect.Uint32:
+		cf.Type = "UInt32"
+	case reflect.Uint64, reflect.Uint:
+		cf.Type = "UInt64"
+	case reflect.Float32, reflect.Float64:
+		cf.Type = "Float"
 	case reflect.String:
-		return encodeString(buf, v.String())
+		cf.Type = "ShortText"
 	case reflect.Struct:
-		switch v := v.Interface().(type) {
+		switch reflect.Zero(typ).Interface().(type) {
 		case time.Time:
-			return encodeTime(buf, v)
+			cf.Type = "Time"
 		case Geo:
-			return encodeGeo(buf, v)
+			cf.Type = "WGS84GeoPoint"
+		}
+	}
+	if cf.Type == "" {
+		return NewError(InvalidType, map[string]interface{}{
+			"type":  reflect.TypeOf(cf.Field.Type).Name(),
+			"error": "The type is not supported as a column.",
+		})
+	}
+	cf.Type = strings.Repeat("[]", dim) + cf.Type
+	return nil
+}
+
+// checkColumnType checks if cf.Type is valid as a column.
+func (cf *columnField) checkColumnType() error {
+	if cf.Type == "" {
+		return cf.detectColumnType()
+	}
+	typ := cf.Type
+	for strings.HasPrefix(typ, "[]") {
+		typ = typ[2:]
+	}
+	switch typ {
+	case "Bool", "Int8", "Int16", "Int32", "Int64", "UInt8", "UInt16", "UInt32", "UInt64",
+		"Float", "ShortText", "Text", "LongText", "Time", "WGS84GeoPoint", "TokyoGeoPoint":
+	default:
+		if err := checkTableName(typ); err != nil {
+			return NewError(InvalidType, map[string]interface{}{
+				"type":  cf.Type,
+				"error": "The type is not supported as a column.",
+			})
+		}
+	}
+	return nil
+}
+
+// checkColumnName checks if cf.Name is valid as a column name.
+// If cf.Name specifies a pseudo column, it returns an error.
+func (cf *columnField) checkColumnName() error {
+	s := cf.Name
+	if s == "" {
+		return NewError(InvalidType, map[string]interface{}{
+			"name":  s,
+			"error": "A column name must not be empty.",
+		})
+	}
+	if s[0] == '_' {
+		return NewError(InvalidType, map[string]interface{}{
+			"name":  s,
+			"error": "A column name must not start with '_'.",
+		})
+	}
+	loadable := true
+	for _, c := range s {
+		switch {
+		case c >= '0' && c <= '9':
+		case c >= 'A' && c <= 'Z':
+		case c >= 'a' && c <= 'z':
 		default:
-			return append(buf, "null"...)
-		}
-	case reflect.Ptr:
-		if v.IsNil() {
-			return append(buf, "null"...)
-		}
-		return encodeValue(buf, v.Elem())
-	case reflect.Array:
-		buf = append(buf, '[')
-		n := v.Len()
-		for i := 0; i < n; i++ {
-			if i != 0 {
-				buf = append(buf, ',')
+			switch c {
+			case '_':
+			default:
+				// A column name may contain various symbol characters,
+				// because output expressions such as snippet_html(...) are also accepted.
+				loadable = false
 			}
-			buf = encodeValue(buf, v.Index(i))
-		}
-		return append(buf, ']')
-	case reflect.Slice:
-		if v.IsNil() {
-			return append(buf, "null"...)
-		}
-		buf = append(buf, '[')
-		n := v.Len()
-		for i := 0; i < n; i++ {
-			if i != 0 {
-				buf = append(buf, ',')
-			}
-			buf = encodeValue(buf, v.Index(i))
+
 		}
-		return append(buf, ']')
+	}
+	cf.Loadable = loadable
+	return nil
+}
+
+// checkColumn checks if cf is valid as a column.
+func (cf *columnField) checkColumn() error {
+	if err := cf.checkColumnName(); err != nil {
+		return err
+	}
+	if err := cf.checkColumnType(); err != nil {
+		return err
+	}
+	// TODO: check Flags.
+	return nil
+}
+
+// parseColumnOptions parses options of a column.
+func (cf *columnField) parseColumnOptions(options []string) error {
+	if len(options) > 2 {
+		return NewError(InvalidType, map[string]interface{}{
+			"name":    cf.Name,
+			"options": options,
+			"error":   "The tag must not contain more than 2 options.",
+		})
+	}
+	if len(options) > 0 {
+		cf.Type = options[0]
+	}
+	if len(options) > 1 {
+		cf.Flags = strings.Split(options[1], "|")
+	}
+	return cf.checkColumn()
+}
+
+// parseOptions parses options of a column.
+func (cf *columnField) parseOptions(options []string) error {
+	switch cf.Name {
+	case "_id":
+		return cf.parseIDOptions(options)
+	case "_key":
+		return cf.parseKeyOptions(options)
+	case "_value":
+		return cf.parseValueOptions(options)
+	case "_score":
+		return cf.parseScoreOptions(options)
 	default:
-		return append(buf, "null"...)
-	}
-}
-
-// // encodeBoolPtr encodes the JSON-encoded v to buf and returns the extended buffer.
-// func encodeBoolPtr(buf []byte, v *bool) []byte {
-// 	if v == nil {
-// 		return append(buf, "null"...)
-// 	}
-// 	return encodeBool(buf, *v)
-// }
-
-// // encodeIntPtr encodes the JSON-encoded v to buf and returns the extended buffer.
-// func encodeIntPtr(buf []byte, v *int64) []byte {
-// 	if v == nil {
-// 		return append(buf, "null"...)
-// 	}
-// 	return encodeInt(buf, *v)
-// }
-
-// // encodeUintPtr encodes the JSON-encoded v to buf and returns the extended buffer.
-// func encodeUintPtr(buf []byte, v *uint64) []byte {
-// 	if v == nil {
-// 		return append(buf, "null"...)
-// 	}
-// 	return encodeUint(buf, *v)
-// }
-
-// // encodeFloatPtr encodes the JSON-encoded v to buf and returns the extended buffer.
-// func encodeFloatPtr(buf []byte, v *float64) []byte {
-// 	if v == nil {
-// 		return append(buf, "null"...)
-// 	}
-// 	return encodeFloat(buf, *v)
-// }
-
-// // encodeStringPtr encodes the JSON-encoded v to buf and returns the extended buffer.
-// func encodeStringPtr(buf []byte, v *string) []byte {
-// 	if v == nil {
-// 		return append(buf, "null"...)
-// 	}
-// 	return encodeString(buf, *v)
-// }
-
-// // encodeTimePtr encodes the JSON-encoded v to buf and returns the extended buffer.
-// func encodeTimePtr(buf []byte, v *time.Time) []byte {
-// 	if v == nil {
-// 		return append(buf, "null"...)
-// 	}
-// 	return encodeTime(buf, *v)
-// }
-
-// // encodeGeoPtr encodes the JSON-encoded v to buf and returns the extended buffer.
-// func encodeGeoPtr(buf []byte, v *Geo) []byte {
-// 	if v == nil {
-// 		return append(buf, "null"...)
-// 	}
-// 	return encodeGeo(buf, *v)
-// }
+		return cf.parseColumnOptions(options)
+	}
+}
+
+// newColumnField returns a new columnField.
+func newColumnField(index int, field *reflect.StructField) (*columnField, error) {
+	tag := field.Tag.Get(columnFieldTagKey)
+	values := strings.Split(tag, columnFieldTagDelim)
+	cf := &columnField{
+		Index: index,
+		Field: field,
+		Name:  values[0],
+	}
+	if err := cf.parseOptions(values[1:]); err != nil {
+		return nil, err
+	}
+	return cf, nil
+}
+
+var (
+	rowStructs      = make(map[reflect.Type]*rowStruct)
+	rowStructsMutex sync.Mutex
+)
+
+// rowStruct stores the details of a struct associated with a row.
+type rowStruct struct {
+	Columns       []*columnField
+	ColumnsByName map[string]*columnField
+}
+
+// getRowStruct returns a rowStruct for the terminal type of v.
+func getRowStruct(v interface{}) (*rowStruct, error) {
+	typ := reflect.TypeOf(v)
+Loop:
+	for {
+		switch typ.Kind() {
+		case reflect.Ptr, reflect.Interface, reflect.Array, reflect.Slice:
+			typ = typ.Elem()
+		case reflect.Struct:
+			break Loop
+		default:
+			return nil, NewError(InvalidType, map[string]interface{}{
+				"type":  reflect.TypeOf(v).Name(),
+				"error": "The type is not supported as rows.",
+			})
+		}
+	}
+	rowStructsMutex.Lock()
+	defer rowStructsMutex.Unlock()
+	if rs, ok := rowStructs[typ]; ok {
+		return rs, nil
+	}
+	var cfs []*columnField
+	cfsByName := make(map[string]*columnField)
+	for i := 0; i < typ.NumField(); i++ {
+		field := typ.Field(i)
+		if len(field.PkgPath) != 0 { // Skip unexported fields.
+			continue
+		}
+		if field.Tag.Get(columnFieldTagKey) == "" { // Skip untagged fields.
+			continue
+		}
+		cf, err := newColumnField(i, &field)
+		if err != nil {
+			return nil, err
+		}
+		if cf.Name == "_key" {
+			cfs = append([]*columnField{cf}, cfs...)
+		} else {
+			cfs = append(cfs, cf)
+		}
+		if _, ok := cfsByName[cf.Name]; ok {
+			return nil, NewError(InvalidType, map[string]interface{}{
+				"name":  cf.Name,
+				"error": "The name appears more than once.",
+			})
+		}
+		cfsByName[cf.Name] = cf
+	}
+	rs := &rowStruct{
+		Columns:       cfs,
+		ColumnsByName: cfsByName,
+	}
+	rowStructs[typ] = rs
+	return rs, nil
+}
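
Note (editorial, not part of the commit): a sketch of the struct-tag format parsed by newColumnField and consumed by getRowStruct. The Article type, its fields, and exampleRowStruct are hypothetical; only the tag layout (name;type;flags, plus the _key variant) follows the comment on columnField above.

package grnci

import "time"

// Article (hypothetical) shows the grnci tag format documented on columnField.
type Article struct {
	Key       string    `grnci:"_key;ShortText"`   // _key with an explicit key type
	Title     string    `grnci:"title;ShortText"`  // plain column: name;type
	Tags      []string  `grnci:"tags;[]ShortText"` // vector column
	UpdatedAt time.Time `grnci:"updated_at;Time"`
	Location  Geo       `grnci:"location;WGS84GeoPoint"`
	internal  int       // unexported and untagged: skipped by getRowStruct
}

// exampleRowStruct (hypothetical) inspects the cached column metadata.
func exampleRowStruct() error {
	rs, err := getRowStruct(&Article{}) // cached per struct type in rowStructs
	if err != nil {
		return err
	}
	_ = rs.Columns[0]             // _key is moved to the front of Columns
	_ = rs.ColumnsByName["title"] // lookup by column name
	return nil
}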



