MySQL Practice

Operations

MySQL slow query log

set global slow_query_log='ON';
set global long_query_time=1;

show variables like 'slow_query%';
show variables like 'long_query_time';
  • Now check the slow log entries in the mysql.slow_log table (they land there only when log_output includes 'TABLE'; see the next section), for example:
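A minimal query to inspect the most recent entries; the column names below come from the standard mysql.slow_log table:

select start_time, query_time, rows_examined, sql_text
from mysql.slow_log
order by start_time desc
limit 10;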

MySQL operation log (general log)

SET GLOBAL general_log = 'ON';
SET GLOBAL log_output = 'TABLE';

show variables like '%general%';
  • Now you can find the MySQL operation log in the mysql.general_log table

MySQL: log queries that do not use an index

set global log_queries_not_using_indexes=ON;
  • those queries will appear in mysql.slow_log

How to check whether MySQL reuses connections

  • in mysql.general_log, if the connection is reused, you should see the Connect event only once, at the very beginning; the query below makes this easy to check
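A quick way to group general-log events by connection (command_type values such as Connect, Query and Quit are the standard ones recorded in mysql.general_log):

select thread_id, command_type, count(*) as events
from mysql.general_log
where command_type in ('Connect', 'Query', 'Quit')
group by thread_id, command_type
order by thread_id;

With a reused connection you should see a single Connect per thread_id followed by many Query rows.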

restore a database from a dump file

mysql -uroot -ppwd  < dumpfile

Create a new user and grant all privileges on database db_test

CREATE USER 'new_user'@'%' IDENTIFIED BY 'new_user_pwd';
GRANT ALL PRIVILEGES  ON db_test.* TO 'new_user'@'%' WITH GRANT OPTION;
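To double-check the result, SHOW GRANTS lists what the new account can do:

SHOW GRANTS FOR 'new_user'@'%';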

Query Tips

select count(*) is very slow on a large table

  • Here's a cheap way to get an estimated row count:
> select TABLE_ROWS FROM INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='your_db_name' and TABLE_NAME='your_table_name';
+------------+
| TABLE_ROWS |
+------------+
|      57889 |
+------------+
1 row in set (0.00 sec)

select ... limit offset, n is slow when the offset is large

  • limit offset, n -> where id > offset limit n (see the sketch below)
  • Note:
    • id is the auto-increment primary key and should be continuous (that is, you should NOT delete rows)
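A minimal sketch with a hypothetical table t and a page size of 20:

-- slow: the server still reads and throws away the first 100000 rows
select * from t order by id limit 100000, 20;

-- faster: seek into the primary key first, then read only 20 rows
select * from t where id > 100000 order by id limit 20;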

return a default value if the select gets no result

  1. COALESCE returns the 1st non-NULL value
    select COALESCE(AVG(distance), 0) as baseline from records where uuid=?
  2. use UNION
    select value from conf where entry = ? UNION (SELECT '' )
    • replace the '' in the right-hand select with your expected default value (a one-row variant is sketched below)
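The plain UNION above returns two rows when the entry exists. A sketch of a variant that always returns exactly one row, using the same conf table and a made-up entry name and default:

select value from conf where entry = 'retry_limit'
union all
select '3' from dual
where not exists (select 1 from conf where entry = 'retry_limit');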

CASE ... WHEN ... ELSE... END

CASE is one of the Control Flow Functions.

| name | Description | example |
| --- | --- | --- |
| CASE | Case operator | |
| IF() | if/else construct | if(m.title LIKE '%the%', 'THE MOVIE', NULL) as msg |
| IFNULL() | if expr is null, return the 2nd parameter | IFNULL(expression, alt_value), kind of like expression OR alt_value |
| NULLIF() | return NULL if expr1 = expr2 | NULLIF(m.genre, 2) -- if it equals 2, return NULL, otherwise return m.genre |
  • dynamically replace column values

    SELECT m.movie_id, m.movie_title,
    case m.genre_id
        WHEN 1 THEN 'Cool Stuff'
        WHEN 2 THEN 'Futuristic'
        WHEN 123 THEN 'Something else'
        ELSE CONCAT('GENRE ID ', m.genre_id )
    END as genre
    FROM movies as m
  • another example

    WHERE m.p1 = ? OR m.p2 = ?
    ORDER BY CASE
        WHEN (m.p1 = ?) AND ((withdraw & 1) <> 0) THEN match_id - 10000000
        WHEN (m.p2 = ?) AND ((withdraw & 2) <> 0) THEN match_id - 10000000
        ELSE match_id
    END DESC
    LIMIT 20

Convert a timestamp to seconds (GMT)

Databases running in different time zones store dates that are affected by the time zone, so the UNIX_TIMESTAMP approach is not what we want here...

SELECT TIMESTAMPDIFF( SECOND, "1970-01-01 00:00:00" , <TIMESTAMP created by MYSQL> );
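A sketch of the difference, assuming a DATETIME column created_at in a hypothetical records table: UNIX_TIMESTAMP interprets the value in the session time zone, while TIMESTAMPDIFF from the epoch literal is a plain calendar difference that stays the same everywhere.

SELECT
    UNIX_TIMESTAMP(created_at) AS tz_dependent,    -- changes with @@session.time_zone
    TIMESTAMPDIFF(SECOND, '1970-01-01 00:00:00', created_at) AS tz_independent
FROM records
LIMIT 1;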

Convert any timestamp to +08:00

select CONVERT_TZ( <timestamp column> ,  time_format(timediff( NOW(), UTC_TIMESTAMP) ,'+%H:%i') , '+08:00') from ...

Ternary expression

... status=if(status="created","paid",status)
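For example, in a full statement (the orders table and column names are made up):

UPDATE orders
SET status = IF(status = 'created', 'paid', status)
WHERE order_id = 42;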

Update RANK while doing query

  • query, and return the rank in the result set

    select @rownum:=@rownum+1 as 'rank', uuid, rank_score
    from pvp_hsw, 
    (SELECT @rownum:=0) as r  -- Every derived table must have its own alias
    order by rank_score desc;
    • here @rownum is safely reset every time the query runs
  • update rank info while doing query

    SET @rownum=0;
    update pvp_hsw as p set week_rank=@rownum:=@rownum+1 order by rank_score desc;
  • how to handle ties?

    -- 90,80,80,70 will generate rank 1,2,2,3
    select 
        CASE -- increment only when the score differs
          WHEN @rowscore = rank_score THEN @rownum
          ELSE @rownum:=@rownum+1 
        END as 'rank', 
        uuid, 
        @rowscore:=rank_score , -- to update the variable
        bonus_honor_points, bonus_coin, bonus_diamond
    from pvp_hsw, 
    (SELECT @rownum:=0, @rowscore:=0 ) as r  -- Every derived table must have its own alias
    order by rank_score desc
    -- 90,80,80,70 will generate rank 1,2,2,4
    select
        @rownum:=@rownum+1 as rowindex,
        CASE -- @lastrank is updated only when the score differs
          WHEN @rowscore = rank_score THEN @lastrank
          ELSE @lastrank:=@rownum
        END as 'rank', 
        uuid, 
        @rowscore:=rank_score , -- to update the variable
        bonus_honor_points, bonus_coin, bonus_diamond
    from pvp_hsw, 
    (SELECT @rownum:=0, @rowscore:=0, @lastrank:=1 ) as r  -- Every derived table must have its own alias
    order by rank_score desc 

Bulk Update

// Needs imports "database/sql", "fmt", "log" and "strings"; PVP_HSW_t is the row struct defined elsewhere.
func BulkInsert(tx *sql.Tx, unsavedRows []PVP_HSW_t) error {
    // nothing to update
    if len(unsavedRows) == 0 {
        return nil
    }
    valueStrings := make([]string, 0, len(unsavedRows))
    valueArgs := make([]interface{}, 0, len(unsavedRows)*6) // 6 bound values per row
    for _, post := range unsavedRows {
        valueStrings = append(valueStrings, "(?, ?, ?, ?, ?, ?)")
        valueArgs = append(valueArgs, post.Uuid)
        valueArgs = append(valueArgs, post.Rank_score)
        valueArgs = append(valueArgs, post.Bonus_honor_points)
        valueArgs = append(valueArgs, post.Bonus_coin)
        valueArgs = append(valueArgs, post.Bonus_diamond)
        valueArgs = append(valueArgs, post.Rank)
    }

    cmd_update := fmt.Sprintf( `INSERT into pvp_hsw ( uuid, rank_score, bonus_honor_points, bonus_coin, bonus_diamond,week_rank ) VALUES %s ON DUPLICATE KEY UPDATE
                            rank_score=VALUES(rank_score),
                            bonus_honor_points=VALUES(bonus_honor_points),
                            bonus_coin=VALUES(bonus_coin),
                            bonus_diamond=VALUES(bonus_diamond),
                            week_rank=VALUES(week_rank)`, strings.Join(valueStrings, ",")  )
    _ , err := tx.Exec(cmd_update, valueArgs...)
    if err != nil {
        return err
    }

    log.Printf( "pvp_hsw bulk updated %d rows", len( unsavedRows ) )

    return nil
}
    // Caller side: scan the query result into a slice, then write it back in batches.
    unsavedRows := []PVP_HSW_t{}
    for rows.Next() {
        var data PVP_HSW_t
        err := rows.Scan( &data.Rank, &data.Uuid, &data.Rank_score, 
                        &data.Bonus_honor_points, &data.Bonus_coin, &data.Bonus_diamond )
        if err != nil {
            log.Println(err)
            return
        }

        unsavedRows = append( unsavedRows , data )
    }

    batchSize := 10000
    for i := 0; i < len(unsavedRows); i += batchSize {
        end := i + batchSize
        if end > len(unsavedRows) {
            end = len(unsavedRows)
        }
        err := BulkInsert(tx, unsavedRows[i:end])
        if err != nil {
            log.Println(err)
            return
        }
    }
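
For reference, the statement that BulkInsert builds for two rows looks roughly like this (the values shown here are made up; the real code binds them through ? placeholders):

INSERT INTO pvp_hsw (uuid, rank_score, bonus_honor_points, bonus_coin, bonus_diamond, week_rank)
VALUES ('u1', 1200, 10, 100, 5, 1),
       ('u2', 1100, 8, 80, 3, 2)
ON DUPLICATE KEY UPDATE
    rank_score = VALUES(rank_score),
    bonus_honor_points = VALUES(bonus_honor_points),
    bonus_coin = VALUES(bonus_coin),
    bonus_diamond = VALUES(bonus_diamond),
    week_rank = VALUES(week_rank);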