Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Filters in MariaDB MaxScale intercept and modify database traffic. Use them to transform, block, or log queries, enabling fine-grained control over your database workload and security.
[BinlogFilter]
type=filter
module=binlogfilter
match=/customers[.]/
exclude=/[.]orders/
[BinlogServer]
type=service
router=binlogrouter
server_id=33
filters=BinlogFilter
[BinlogListener]
type=listener
service=BinlogServer
port=4000INSERT INTO departments VALUES ('d1234', 'NewDepartment'); -- maxscale ccr=ignorematch=.*INSERT.*
ignore=.*UPDATE.*
options=case,extended[CCRFilter]
type=filter
module=ccrfilter
time=5[OptimisticTrx]
type=filter
module=optimistictrx
[MyService]
...
filters=OptimisticTrxLOAD DATA INFILE 'S3://my-bucket/my-data.csv' INTO TABLE t1
FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';[LDI-Filter]
type=filter
module=ldi
host=s3.amazonaws.com
region=us-east-1SET @maxscale.ldi.s3_key='<my-access-key>', @maxscale.ldi.s3_secret='<my-secret-key>';LOAD DATA INFILE 'S3://my-bucket/my-data.csv' INTO TABLE t1;maxctrl create filter OptimisticTrx optimistictrx
maxctrl alter service-filter MyService OptimisticTrx[PsReuse]
type=filter
module=psreuse
[MyService]
...
filters=PsReuse# The --comments flag is needed for the command line client
mariadb --comments -u my-user -psecret -e "SELECT @@hostname -- maxscale route to server db1"-- maxscale <hint body>-- maxscale route to [master | slave | server <server name>]-- maxscale route to master-- maxscale route to slave-- maxscale route to server <server name>-- maxscale route to last-- maxscale <param>=<value>-- maxscale <hint name> prepare <hint content>-- maxscale <hint name> begin-- maxscale end-- maxscale <hint name> begin <hint content>-- maxscale begin <hint content>SELECT user FROM accounts WHERE id = ? -- maxscale route to masterPREPARE my_ps FROM 'SELECT user FROM accounts WHERE id = ?';
EXECUTE my_ps USING 123; -- maxscale route to master[ReadWriteService]
type=service
router=readwritesplit
servers=server1,server2
user=maxuser
password=maxpwd
filters=Hint
[Hint]
type=filter
module=hintfilterINSERT INTO table1 VALUES ("John","Doe",1);
SELECT * FROM table1; -- maxscale route to masterrouteQueryfunction createInstance(name)
end
-- Session-creation hook: receives the connecting client's user name and host.
function newSession(user, host)
end
-- Session-teardown hook, called when the client session closes.
function closeSession()
end
-- Invoked for every statement routed through the filter.
function routeQuery()
end
-- Invoked for every reply returned from the backend.
function clientReply()
end
-- Diagnostics hook; the later example shows it may return a string.
function diagnostic()
end[MyLuaFilter]
type=filter
module=luafilter
global_script=/path/to/script.luaf = io.open("/tmp/test.log", "a+")
-- luafilter example callbacks: each appends a trace line to the global
-- log handle `f` opened when the script is loaded.

-- Record the creation of a filter instance together with its name.
function createInstance(name)
    local line = "createInstance for " .. name .. "\n"
    f:write(line)
end

-- Record the user and host of every new client session.
function newSession(user, host)
    local line = "newSession for: " .. user .. "@" .. host .. "\n"
    f:write(line)
end

-- Record session teardown.
function closeSession()
    f:write("closeSession\n")
end

-- Record each routed statement along with its query-classifier
-- type mask and operation.
function routeQuery()
    local sql = mxs_get_sql()
    local mask = mxs_qc_get_type_mask()
    local op = mxs_qc_get_operation()
    f:write("routeQuery: " .. sql .. " -- type: " .. mask .. " operation: " .. op .. "\n")
end

-- Record which server produced the reply.
function clientReply()
    f:write("clientReply: " .. mxs_get_replier() .. "\n")
end
-- Diagnostics hook: logs the call and returns a status string.
function diagnostic()
f:write("diagnostics\n")
return "Hello from Lua!"
end[Throttle]
type = filter
module = throttlefilter
max_qps = 500
throttling_duration = 60000
...
[Routing-Service]
type = service
filters = Throttleignorecasecaseextended[NamedServerFilter]
type=filter
module=namedserverfilter
match01=^Select.*TableOne$
target01=server2,server3
match22=^SELECT.*TableTwo$
target22=->master
[MyService]
type=service
router=readwritesplit
servers=server1,server2,server3
user=myuser
password=mypasswd
filters=NamedServerFiltermatch01=^SELECT
options=case,extendedtarget01=MyServer2source=127.0.0.1source=192.%.%.%
source=192.168.%.%
source=192.168.10.%source=192.168.21.3,192.168.10.%user=john[NamedServerFilter]
type=filter
module=namedserverfilter
match02= *from *users
target02=server2
[MyService]
type=service
router=readwritesplit
servers=server1,server2
user=myuser
password=mypasswd
filters=NamedServerFilter%%
# options
regex_grammar: Native
case_sensitive: true
what_if: false
continue_if_matched: false
ignore_whitespace: true
%
# match template
@{1:^}select @{2} from my_table where id = @{3}
%
# replace template
select @{2} from my_table where id = @{3}%%
# use default options by leaving this blank
%
@{1:^}select count(distinct @{2}) from @{3}
%
select count(*) from (select distinct @{1} from @{2}) as t123
Input: select count(distinct author) from books where entity != "AI"
Rewritten: select count(*) from (select distinct author from books where entity != "AI") as t123%%
%
from mytable
%
from mytable force index (myindex)
Input: select name from mytable where id=42
Rewritten: select name from mytable force index (myindex) where id=42%%
%
@{1:^}select @{2} from mytable
%
select @{2} from mytable force index (myindex)%%
%
@{1:^}select zip_code from address_book where str_id = @{1:["]}@{2:[[:digit:]]+}@{3:["]}
%
select zip_code from address_book where id = @{2}
Input: select zip_code from address_book where str_id = "1234"
Rewritten: select zip_code from address_book where id = 1234%%
regex_grammar: EPosix
continue_if_matched: true
%
wrong_table_name
%
correct_table_name
%%
regex_grammar: EPosix
%
wrong_column_name
%
correct_column_name[Rewrite]
type = filter
module = rewritefilter
template_file = /path/to/template_file.rf
...
[Router]
type=service
...
filters=Rewrite%%
options
%
match template
%
replace templatecase_sensitive: true%%
case_sensitive: false
%
@{1:^}select @{2}
from mytable
where user = @{3}
%
select @{2} from mytable where user = @{3}
and @{3} in (select user from approved_users){ "templates" :
[
{
"case_sensitive" : false,
"match_template" : "@{1:^}select @{2} from mytable where user = @{3}",
"replace_template" : "select @{2} from mytable where user = @{3}
and @{3} in (select user from approved_users)"
}
]
}maxctrl alter filter Rewrite log_replacement=falseignorecasecaseextended[MyLogFilter]
type=filter
module=topfilter
[Service]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=MyLogFilterfilebase=/tmp/SqlQueryLogcount=30match=select.*from.*customer.*where
exclude=where
options=case,extendedsource=127.0.0.1user=john[ProductsUpdateTop20]
type=filter
module=topfilter
count=20
match=UPDATE.*PRODUCTS.*WHERE
exclude=UPDATE.*PRODUCTS_STOCK.*WHERE
filebase=/var/logs/top/ProductsUpdate[SlowAppServer]
type=filter
module=topfilter
count=20
source=192.168.0.32
filebase=/var/logs/top/SlowAppServer[ControlAppServer]
type=filter
module=topfilter
count=20
source=192.168.0.42
filebase=/var/logs/top/ControlAppServer[App-Service]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=SlowAppServer | ControlAppServer-bash-4.1$ cat /var/logs/top/Employees-top-10.137
Top 10 longest running queries in session.
==========================================
Time (sec) | Query
-----------+-----------------------------------------------------------------
22.985 | select sum(salary), year(from_date) from salaries s, (select distinct year(from_date) as y1 from salaries) y where (makedate(y.y1, 1) between s.from_date and s.to_date) group by y.y1
5.304 | select d.dept_name as "Department", y.y1 as "Year", count(*) as "Count" from departments d, dept_emp de, (select distinct year(from_date) as y1 from dept_emp order by 1) y where d.dept_no = de.dept_no and (makedate(y.y1, 1) between de.from_date and de.to_date) group by y.y1, d.dept_name order by 1, 2
2.896 | select year(now()) - year(birth_date) as age, gender, avg(salary) as "Average Salary" from employees e, salaries s where e.emp_no = s.emp_no and ("1988-08-01" between from_date AND to_date) group by year(now()) - year(birth_date), gender order by 1,2
2.160 | select dept_name as "Department", sum(salary) / 12 as "Salary Bill" from employees e, departments d, dept_emp de, salaries s where e.emp_no = de.emp_no and de.dept_no = d.dept_no and ("1988-08-01" between de.from_date AND de.to_date) and ("1988-08-01" between s.from_date AND s.to_date) and s.emp_no = e.emp_no group by dept_name order by 1
0.845 | select dept_name as "Department", avg(year(now()) - year(birth_date)) as "Average Age", gender from employees e, departments d, dept_emp de where e.emp_no = de.emp_no and de.dept_no = d.dept_no and ("1988-08-01" between from_date AND to_date) group by dept_name, gender
0.668 | select year(hire_date) as "Hired", d.dept_name, count(*) as "Count" from employees e, departments d, dept_emp de where de.emp_no = e.emp_no and de.dept_no = d.dept_no group by d.dept_name, year(hire_date)
0.249 | select moves.n_depts As "No. of Departments", count(moves.emp_no) as "No. of Employees" from (select de1.emp_no as emp_no, count(de1.emp_no) as n_depts from dept_emp de1 group by de1.emp_no) as moves group by moves.n_depts order by 1
0.245 | select year(now()) - year(birth_date) as age, gender, count(*) as "Count" from employees group by year(now()) - year(birth_date), gender order by 1,2
0.179 | select year(hire_date) as "Hired", count(*) as "Count" from employees group by year(hire_date)
0.160 | select year(hire_date) - year(birth_date) as "Age", count(*) as Count from employees group by year(hire_date) - year(birth_date) order by 1
-----------+-----------------------------------------------------------------
Session started Wed Jun 18 18:41:03 2014
Connection from 127.0.0.1
Username massi
Total of 24 statements executed.
Total statement execution time 35.701 seconds
Average statement execution time 1.488 seconds
Total connection time 46.500 seconds
-bash-4.1$[MyLogFilter]
type=filter
module=tpmfilter
[MyService]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=MyLogFilterfilename=/tmp/SqlQueryLogsource=127.0.0.1user=johndelimiter=:::query_delimiter=@@@named_pipe=/tmp/tpmfilter$ echo '1' > /tmp/tpmfilter$ echo '0' > /tmp/tpmfilter[PerformanceLogger]
type=filter
module=tpmfilter
delimiter=:::
query_delimiter=@@@
filename=/var/logs/tpm/perf.log
named_pipe=/tmp/tpmfilter
[Product-Service]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=PerformanceLogger1484086477::::server1::::root::::3::::0.165@@@@0.108@@@@0.102@@@@0.092@@@@0.121@@@@0.122@@@@0.110@@@@2.081::::UPDATE WAREHOUSE SET W_YTD = W_YTD + 3630.48 WHERE W_ID = 2 @@@@SELECT W_STREET_1, W_STREET_2, W_CITY, W_STATE, W_ZIP, W_NAME FROM WAREHOUSE WHERE W_ID = 2@@@@UPDATE DISTRICT SET D_YTD = D_YTD + 3630.48 WHERE D_W_ID = 2 AND D_ID = 9@@@@SELECT D_STREET_1, D_STREET_2, D_CITY, D_STATE, D_ZIP, D_NAME FROM DISTRICT WHERE D_W_ID = 2 AND D_ID = 9@@@@SELECT C_FIRST, C_MIDDLE, C_LAST, C_STREET_1, C_STREET_2, C_CITY, C_STATE, C_ZIP, C_PHONE, C_CREDIT, C_CREDIT_LIM, C_DISCOUNT, C_BALANCE, C_YTD_PAYMENT, C_PAYMENT_CNT, C_SINCE FROM CUSTOMER WHERE C_W_ID = 2 AND C_D_ID = 9 AND C_ID = 1025@@@@UPDATE CUSTOMER SET C_BALANCE = 1007749.25, C_YTD_PAYMENT = 465215.47, C_PAYMENT_CNT = 203 WHERE C_W_ID = 2 AND C_D_ID = 9 AND C_ID = 1025@@@@INSERT INTO HISTORY (H_C_D_ID, H_C_W_ID, H_C_ID, H_D_ID, H_W_ID, H_DATE, H_AMOUNT, H_DATA) VALUES (9,2,1025,9,2,'2017-01-10 17:14:37',3630.48,'locfljbe xtnfqn')
1484086477::::server1::::root::::6::::0.123@@@@0.087@@@@0.091@@@@0.098@@@@0.078@@@@0.106@@@@0.094@@@@0.074@@@@0.089@@@@0.073@@@@0.098@@@@0.073@@@@0.088@@@@0.072@@@@0.087@@@@0.071@@@@0.085@@@@0.078@@@@0.088@@@@0.098@@@@0.081@@@@0.076@@@@0.082@@@@0.073@@@@0.077@@@@0.070@@@@0.105@@@@0.093@@@@0.088@@@@0.089@@@@0.087@@@@0.087@@@@0.086@@@@1.883::::SELECT C_DISCOUNT, C_LAST, C_CREDIT, W_TAX FROM CUSTOMER, WAREHOUSE WHERE W_ID = 2 AND C_W_ID = 2 AND C_D_ID = 10 AND C_ID = 1267@@@@SELECT D_NEXT_O_ID, D_TAX FROM DISTRICT WHERE D_W_ID = 2 AND D_ID = 10 FOR UPDATE@@@@UPDATE DISTRICT SET D_NEXT_O_ID = D_NEXT_O_ID + 1 WHERE D_W_ID = 2 AND D_ID = 10@@@@INSERT INTO OORDER (O_ID, O_D_ID, O_W_ID, O_C_ID, O_ENTRY_D, O_OL_CNT, O_ALL_LOCAL) VALUES (286871, 10, 2, 1267, '2017-01-10 17:14:37', 7, 1)@@@@INSERT INTO NEW_ORDER (NO_O_ID, NO_D_ID, NO_W_ID) VALUES ( 286871, 10, 2)@@@@SELECT I_PRICE, I_NAME , I_DATA FROM ITEM WHERE I_ID = 24167@@@@SELECT S_QUANTITY, S_DATA, S_DIST_01, S_DIST_02, S_DIST_03, S_DIST_04, S_DIST_05, S_DIST_06, S_DIST_07, S_DIST_08, S_DIST_09, S_DIST_10 FROM STOCK WHERE S_I_ID = 24167 AND S_W_ID = 2 FOR UPDATE@@@@SELECT I_PRICE, I_NAME , I_DATA FROM ITEM WHERE I_ID = 96982@@@@SELECT S_QUANTITY, S_DATA, S_DIST_01, S_DIST_02, S_DIST_03, S_DIST_04, S_DIST_05, S_DIST_06, S_DIST_07, S_DIST_08, S_DIST_09, S_DIST_10 FROM STOCK WHERE S_I_ID = 96982 AND S_W_ID = 2 FOR UPDATE@@@@SELECT I_PRICE, I_NAME , I_DATA FROM ITEM WHERE I_ID = 40679@@@@SELECT S_QUANTITY, S_DATA, S_DIST_01, S_DIST_02, S_DIST_03, S_DIST_04, S_DIST_05, S_DIST_06, S_DIST_07, S_DIST_08, S_DIST_09, S_DIST_10 FROM STOCK WHERE S_I_ID = 40679 AND S_W_ID = 2 FOR UPDATE@@@@SELECT I_PRICE, I_NAME , I_DATA FROM ITEM WHERE I_ID = 31459@@@@SELECT S_QUANTITY, S_DATA, S_DIST_01, S_DIST_02, S_DIST_03, S_DIST_04, S_DIST_05, S_DIST_06, S_DIST_07, S_DIST_08, S_DIST_09, S_DIST_10 FROM STOCK WHERE S_I_ID = 31459 AND S_W_ID = 2 FOR UPDATE@@@@SELECT I_PRICE, I_NAME , I_DATA FROM ITEM WHERE I_ID = 6143@@@@SELECT S_QUANTITY, 
S_DATA, S_DIST_01, S_DIST_02, S_DIST_03, S_DIST_04, S_DIST_05, S_DIST_06, S_DIST_07, S_DIST_08, S_DIST_09, S_DIST_10 FROM STOCK WHERE S_I_ID = 6143 AND S_W_ID = 2 FOR UPDATE@@@@SELECT I_PRICE, I_NAME , I_DATA FROM ITEM WHERE I_ID = 12001@@@@SELECT S_QUANTITY, S_DATA, S_DIST_01, S_DIST_02, S_DIST_03, S_DIST_04, S_DIST_05, S_DIST_06, S_DIST_07, S_DIST_08, S_DIST_09, S_DIST_10 FROM STOCK WHERE S_I_ID = 12001 AND S_W_ID = 2 FOR UPDATE@@@@SELECT I_PRICE, I_NAME , I_DATA FROM ITEM WHERE I_ID = 40407@@@@SELECT S_QUANTITY, S_DATA, S_DIST_01, S_DIST_02, S_DIST_03, S_DIST_04, S_DIST_05, S_DIST_06, S_DIST_07, S_DIST_08, S_DIST_09, S_DIST_10 FROM STOCK WHERE S_I_ID = 40407 AND S_W_ID = 2 FOR UPDATE@@@@INSERT INTO ORDER_LINE (OL_O_ID, OL_D_ID, OL_W_ID, OL_NUMBER, OL_I_ID, OL_SUPPLY_W_ID, OL_QUANTITY, OL_AMOUNT, OL_DIST_INFO) VALUES (286871,10,2,1,24167,2,7,348.31998,'btdyjesowlpzjwnmxdcsion')@@@@INSERT INTO ORDER_LINE (OL_O_ID, OL_D_ID, OL_W_ID, OL_NUMBER, OL_I_ID, OL_SUPPLY_W_ID, OL_QUANTITY, OL_AMOUNT, OL_DIST_INFO) VALUES (286871,10,2,2,96982,2,1,4.46,'kudpnktydxbrbxibbsyvdiw')@@@@INSERT INTO ORDER_LINE (OL_O_ID, OL_D_ID, OL_W_ID, OL_NUMBER, OL_I_ID, OL_SUPPLY_W_ID, OL_QUANTITY, OL_AMOUNT, OL_DIST_INFO) VALUES (286871,10,2,3,40679,2,7,528.43,'nhcixumgmosxlwgabvsrcnu')@@@@INSERT INTO ORDER_LINE (OL_O_ID, OL_D_ID, OL_W_ID, OL_NUMBER, OL_I_ID, OL_SUPPLY_W_ID, OL_QUANTITY, OL_AMOUNT, OL_DIST_INFO) VALUES (286871,10,2,4,31459,2,9,341.82,'qbglbdleljyfzdpfbyziiea')@@@@INSERT INTO ORDER_LINE (OL_O_ID, OL_D_ID, OL_W_ID, OL_NUMBER, OL_I_ID, OL_SUPPLY_W_ID, OL_QUANTITY, OL_AMOUNT, OL_DIST_INFO) VALUES (286871,10,2,5,6143,2,3,152.67,'tmtnuupaviimdmnvmetmcrc')@@@@INSERT INTO ORDER_LINE (OL_O_ID, OL_D_ID, OL_W_ID, OL_NUMBER, OL_I_ID, OL_SUPPLY_W_ID, OL_QUANTITY, OL_AMOUNT, OL_DIST_INFO) VALUES (286871,10,2,6,12001,2,5,304.3,'ufytqwvkqxtmalhenrssfon')@@@@INSERT INTO ORDER_LINE (OL_O_ID, OL_D_ID, OL_W_ID, OL_NUMBER, OL_I_ID, OL_SUPPLY_W_ID, OL_QUANTITY, OL_AMOUNT, OL_DIST_INFO) VALUES 
(286871,10,2,7,40407,2,1,30.32,'hvclpfnblxchbyluumetcqn')@@@@UPDATE STOCK SET S_QUANTITY = 65 , S_YTD = S_YTD + 7, S_ORDER_CNT = S_ORDER_CNT + 1, S_REMOTE_CNT = S_REMOTE_CNT + 0 WHERE S_I_ID = 24167 AND S_W_ID = 2@@@@UPDATE STOCK SET S_QUANTITY = 97 , S_YTD = S_YTD + 1, S_ORDER_CNT = S_ORDER_CNT + 1, S_REMOTE_CNT = S_REMOTE_CNT + 0 WHERE S_I_ID = 96982 AND S_W_ID = 2@@@@UPDATE STOCK SET S_QUANTITY = 58 , S_YTD = S_YTD + 7, S_ORDER_CNT = S_ORDER_CNT + 1, S_REMOTE_CNT = S_REMOTE_CNT + 0 WHERE S_I_ID = 40679 AND S_W_ID = 2@@@@UPDATE STOCK SET S_QUANTITY = 28 , S_YTD = S_YTD + 9, S_ORDER_CNT = S_ORDER_CNT + 1, S_REMOTE_CNT = S_REMOTE_CNT + 0 WHERE S_I_ID = 31459 AND S_W_ID = 2@@@@UPDATE STOCK SET S_QUANTITY = 86 , S_YTD = S_YTD + 3, S_ORDER_CNT = S_ORDER_CNT + 1, S_REMOTE_CNT = S_REMOTE_CNT + 0 WHERE S_I_ID = 6143 AND S_W_ID = 2@@@@UPDATE STOCK SET S_QUANTITY = 13 , S_YTD = S_YTD + 5, S_ORDER_CNT = S_ORDER_CNT + 1, S_REMOTE_CNT = S_REMOTE_CNT + 0 WHERE S_I_ID = 12001 AND S_W_ID = 2@@@@UPDATE STOCK SET S_QUANTITY = 44 , S_YTD = S_YTD + 1, S_ORDER_CNT = S_ORDER_CNT + 1, S_REMOTE_CNT = S_REMOTE_CNT + 0 WHERE S_I_ID = 40407 AND S_W_ID = 2
...[MyRegexFilter]
type=filter
module=regexfilter
match=some string
replace=replacement string
[MyService]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=MyRegexfiltermatch=TYPE[ ]*=
options=casereplace=ENGINE =source=127.0.0.1user=johnlog_file=/tmp/regexfilter.loglog_trace=true[CreateTableFilter]
type=filter
module=regexfilter
options=ignorecase
match=TYPE\s*=
replace=ENGINE=
[MyService]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=CreateTableFilter[MaxRows]
type=filter
module=maxrows
[MaxRows-Routing-Service]
type=service
...
filters=MaxRowsmax_resultset_rows=1000max_resultset_size=128KiMariaDB [(test)]> select * from test.t4;
ERROR 1415 (0A000): Row limit/size exceeded for query: select * from test.t4debug=2[MaxRows]
type=filter
module=maxrows
max_resultset_rows=10000
max_resultset_size=256000> SELECT name, ssn FROM person;+-------+-------------+
+ name | ssn |
+-------+-------------+
| Alice | 721-07-4426 |
| Bob | 435-22-3267 |
...+-------+-------------+
+ name | ssn |
+-------+-------------+
| Alice | XXX-XX-XXXX |
| Bob | XXX-XX-XXXX |
...CREATE TABLE cheat (revealed_ssn TEXT);
INSERT INTO cheat SELECT ssn FROM users;
SELECT revealed_ssn FROM cheat;mysql> set @@sql_mode = 'ANSI_QUOTES';
mysql> select concat("ssn") from managers;[Mask-SSN]
type=filter
module=masking
rules=...
[SomeService]
type=service
...
filters=Mask-SSNrules=/path/to/rules-filewarn_type_mismatch=alwayslarge_payload=ignoreSELECT CONCAT(masked_column) FROM tbl;prevent_function_usage=falserequire_fully_parsed=falsetreat_string_arg_as_field=falseset @a = (select ssn from customer where id = 1);check_user_variables=falseSELECT a FROM t1 UNION SELECT b FROM t2;check_unions=falseSELECT * FROM (SELECT a AS b FROM t1) AS t2;check_subqueries=false{
"rules": [ ... ]
}{
"rules": [
{
"replace": { ... },
"with": { ... },
"applies_to": [ ... ],
"exempted": [ ... ]
}
]
}{
"rules": [
{
"replace": {
"database": "db1",
"table": "person",
"column": "ssn"
},
"with": { ... },
"applies_to": [ ... ],
"exempted": [ ... ]
}
]
}SELECT ssn FROM person2;SELECT ssn FROM person UNION SELECT ssn FROM person2;SELECT ssn FROM person2 UNION SELECT ssn FROM person2;SELECT ssn FROM person UNION SELECT ssn FROM person;"replace": {
"column": "ssn",
"match": "(123)"
},
"with": {
"fill": "X#"
}"obfuscate": {
"column": "name"
}SELECT name from db1.tbl1;
+------+
| name |
+------+
| $-~) |
+------+{
"rules": [
{
"replace": {
"column": "ssn"
},
"with": {
"value": "XXX-XX-XXXX"
},
"applies_to": [ ... ],
"exempted": [ ... ]
},
{
"replace": {
"column": "age"
},
"with": {
"fill": "*"
},
"applies_to": [ ... ],
"exempted": [ ... ]
},
{
"replace": {
"column": "creditcard"
},
"with": {
"value": "1234123412341234",
"fill": "0"
},
"applies_to": [ ... ],
"exempted": [ ... ]
}
]
}{
"rules": [
{
"replace": { ... },
"with": { ... },
"applies_to": [ "'alice'@'host'", "'bob'@'%'" ],
"exempted": [ ... ]
}
]
}{
"rules": [
{
"replace": { ... },
"with": { ... },
"applies_to": [ ... ],
"exempted": [ "'admin'" ]
}
]
}MaxScale> call command masking reload MyMaskingFilter[MyMasking]
type=filter
module=masking
warn_type_mismatch=always
large_payload=abort
rules=masking_rules.json
[MyService]
type=service
...
filters=MyMasking{
"rules": [
{
"replace": {
"column": "ssn"
},
"with": {
"value": "012345-ABCD",
"fill": "X"
}
}
]
}[MyComment]
type=filter
module=comment
inject="Comment to be injected"
[MyService]
type=service
router=readwritesplit
servers=server1
user=myuser
password=mypasswd
filters=MyComment[IPComment]
type=filter
module=comment
inject="IP=$IP"
[MyService]
type=service
router=readwritesplit
servers=server1
user=myuser
password=mypasswd
filters=IPCommentSELECT user FROM people;/* IP=::ffff:127.0.0.1 */SELECT user FROM people;information_schemastoragestorage_optionshard_ttlsoft_ttlmax_resultset_rowsmax_resultset_sizemax_countmax_sizerulescached_dataselectscache_in_transactionsdebugenabledinvalidateclear_cache_on_parse_errorsuserstimeout@maxscale.cache.populate@maxscale.cache.use@maxscale.cache.soft_ttl@maxscale.cache.hard_ttlstorage_inmemorystorage_memcachedstorage_rediscache_rules.jsonfilebasematchexcludeoptionsusersourceuser_matchuser_excludesource_matchsource_excludelog_typelog_dataduration_unituse_canonical_formflushappendseparatornewline_replacementignorecasecaseextendedfalsecaseignorecaseextendedsessionunifiedstdoutservicesessiondateuserreply_timetotal_reply_timequerydefault_dbnum_rowsreply_sizetransactiontransaction_timenum_warningserror_msgfalsefalsetrue[DataMartFilter]
type=filter
module=tee
target=DataMart
[Data-Service]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=DataMartFiltermatch=/insert.*into.*order*/exclude=/select.*from.*t1/options=case,extendedsource=127.0.0.1user=john[Orders]
type=service
router=readconnroute
servers=server1, server2, server3, server4
user=massi
password=6628C50E07CCE1F0392EDEEB9D1203F3
filters=ReplicateOrders
[ReplicateOrders]
type=filter
module=tee
target=DataMart
match=insert[ ]*into[ ]*orders
[DataMart]
type=service
router=readconnroute
servers=datamartserver
user=massi
password=6628C50E07CCE1F0392EDEEB9D1203F3
filters=QLA-DataMart
[QLA-DataMart]
type=filter
module=qlafilter
options=/var/log/DataMart/InsertsLog
[Orders-Listener]
type=listener
target=Orders
port=4011
[DataMart-Listener]
type=listener
target=DataMart
port=4012[MyLogFilter]
type=filter
module=qlafilter
filebase=/tmp/SqlQueryLog
[MyService]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=MyLogFilterfilebase=/tmp/SqlQueryLoguser_match=/(^alice$)|(^bob$)/user_exclude=/(^alice$)|(^bob$)/source_match=/(^127[.]0[.]0[.]1)|(^192[.]168[.]0[.]109)/source_exclude=/(^127[.]0[.]0[.]1)|(^192[.]168[.]0[.]109)/MariaDB [test]> select secret from T where x password="clear text pwd";
ERROR 1064 (42000): You have an error in your SQL syntax; check the manual
that corresponds to your MariaDB server version for the right syntax to
use near 'password="clear text pwd"' at line 1newline_replacement=" NL "[ProductsSelectLogger]
type=filter
module=qlafilter
match=SELECT.*from.*PRODUCTS .*
exclude=WHERE.*PRODUCT_ID.*
filebase=/var/logs/qla/SelectProducts
[Product-Service]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=ProductsSelectLogger07:12:56.324 7/01/2016, SELECT * FROM PRODUCTS0s0s00sharedthread_specificassume_cacheableverify_cacheableFOUND_ROWSGET_LOCKIS_FREE_LOCKIS_USED_LOCKLAST_INSERT_IDLOAD_FILELOCALTIMELOCALTIMESTAMPMASTER_POS_WAITNOWRANDRELEASE_LOCKSESSION_USERSLEEPSYSDATESYSTEM_USERUNIX_TIMESTAMPUSERUUIDUUID_SHORTneverread_only_transactionsall_transactionsall_transactions: The cache will be used and populated inside
explicitly read-only transactions. Inside transactions that are not
explicitly read-only, the cache will be used and populated until the
first non-SELECT statement.2 (0b00010) A non-matching rule is logged.truenevercurrenttruemixedisolated5sunlikefalse""""""SELECT * FROM t WHERE a=1;INSERT INTO t SET a=42;Timeline 1 Timeline 2
Clients execute INSERT ... SELECT COUNT(*) FROM tbl
MaxScale -> DB SELECT COUNT(*) FROM tbl
MaxScale -> DB INSERT ...MaxScale -> Cache Delete invalidated values
MaxScale -> Cache Store result and invalidation key[Cache]
type=filter
module=cache
hard_ttl=30
soft_ttl=20
rules=...
...
[Cached-Routing-Service]
type=service
...
filters=Cachestorage=storage_redishard_ttl=60ssoft_ttl=60smax_resultset_rows=1000max_resultset_size=128Kimax_count=1000max_size=100Mirules=/path/to/rules-filemaxctrl alter filter MyCache rules='/path/to/rules-file'cached_data=sharedselects=verify_cacheablecache_in_transactions=neverdebug=31enabled=false* `never`: No invalidation is performed. This is the default.
* `current`: When a modification is made, entries in the cache used by
the current session are invalidated. Other sessions that use the same
cache will also be affected, but sessions that use another cache will
not.* `mixed`: The data of different users is stored in the same
cache. This is the default and may allow a user to access data
they should not have access to.
* `isolated`: Each user has a unique cache and there can be
no unintended sharing.timeout=7000msSET @maxscale.cache.populate=TRUE;
SELECT a, b FROM tbl;
SET @maxscale.cache.populate=FALSE;
SELECT a, b FROM tbl;SELECT @maxscale.cache.populate;SET @maxscale.cache.use=TRUE;
SELECT a, b FROM tbl;
SET @maxscale.cache.use=FALSE;
SELECT a, b FROM tbl;SELECT @maxscale.cache.use;SET @maxscale.cache.soft_ttl=600;
SELECT a, b FROM unimportant;
SET @maxscale.cache.soft_ttl=60;
SELECT c, d FROM important;SELECT @maxscale.cache.soft_ttl;SET @maxscale.cache.soft_ttl=600, @maxscale.cache.hard_ttl=610;
SELECT a, b FROM unimportant;
SET @maxscale.cache.soft_ttl=60, @maxscale.cache.hard_ttl=65;
SELECT c, d FROM important;SELECT @maxscale.cache.hard_ttl;[TheCache]
type=filter
module=cache
enabled=falseSET @maxscale.cache.populate=TRUE;
SELECT a, b FROM tbl1;
SELECT c, d FROM tbl2;
SELECT e, f FROM tbl3;
SET @maxscale.cache.populate=FALSE;SET @maxscale.cache.use=TRUE;
SELECT a, b FROM tbl1;
SET @maxscale.cache.use=FALSE;UPDATE tbl1 SET a = ...;
SET @maxscale.cache.populate=TRUE;
SELECT a, b FROM tbl1;
SET @maxscale.cache.populate=FALSE;{
store: [ ... ],
use: [ ... ]
}[
{
store: [ ... ],
use: [ ... ]
},
{ ... }
]{
"attribute": <string>,
"op": <string>,
"value": <string>
}SELECT * FROM db1.tblUSE db1;
SELECT * FROM tblSELECT * FROM tbl WHERE a = 2 AND b = 3;SELECT * FROM tbl WHERE b = 3 AND a = 2;SELECT a FROM tbl;SELECT b FROM tbl WHERE a > 5;USE somedb;
SELECT fld FROM tbl;{
"store": [
{
"attribute": "database",
"op": "=",
"value": "db1"
}
]
}{
"store": [
{
"attribute": "table",
"op": "!=",
"value": "tbl1"
}
]
}{
"store": [
{
"attribute": "table",
"op": "!=",
"value": "db1.tbl1"
}
]
}{
"store": [
{
"attribute": "query",
"op": "like",
"value": ".*WHERE.*"
}
]
}{
"attribute": <string>,
"op": <string>,
"value": <string>
}{
"attribute": "user",
"op": "=",
"value": "'bob'@'%'"
}
{
"attribute": "user",
"op": "like",
"value": "bob@.*"
}{
"use": [
{
"attribute": "user",
"op": "!=",
"value": "admin"
}
]
}MySQL [testdb]> select * from access;
ERROR 1142 (42000): SELECT command denied to user 'bob'@'localhost' for table 'access'{
"store": [
{
"attribute": "table",
"op": "=",
"value": "access"
}
]
}MySQL [testdb]> select * from access;
+------+------+
| a | b |
+------+------+
| 47 | 11 |
+------+------+MySQL [testdb]> select current_user();
+----------------+
| current_user() |
+----------------+
| bob@127.0.0.1 |
+----------------+
1 row in set (0.00 sec)
MySQL [testdb]> select * from access;
+------+------+
| a | b |
+------+------+
| 47 | 11 |
+------+------+{
"store": [
{
"attribute": "table",
"op": "=",
"value": "access"
}
],
"use": [
{
"attribute": "user",
"op": "=",
"value": "'alice'@'%'"
}
]
}storage=storage_inmemorystorage=storage_memcached[Cache-Filter]
type=filter
module=cache
storage=storage_memcached
storage_memcached.server=192.168.1.31
storage_memcached.max_value_size=10Mstorage_options="server=192.168.1.31,max_value_size=10M"storage=storage_redis[Cache-Filter]
type=filter
module=cache
storage=storage_redis
storage_redis.server=192.168.1.31
storage_redis.username=hello
storage_redis.password=worldstorage_options="server=192.168.1.31,username=hello,password=world"$ redis-cli flushall[MyCache]
type=filter
module=cache
storage=storage_inmemory
soft_ttl=30
hard_ttl=45
cached_data=shared
max_size=50Mi
rules=cache_rules.json
[MyService]
type=service
...
filters=MyCache{
"store": [
{
"attribute": "table",
"op": "=",
"value": "sbtest"
}
]
}{
"store": [
{
"attribute": "database",
"op": "=",
"value": "db1"
}
]
}{
"store": [
{
"attribute": "query",
"op": "like",
"value": "FROM db1\\..*"
}
]
}{
"attribute": "query",
"op": "unlike",
"value": "FROM nomatch"
}{
"attribute": "database",
"op": "!=",
"value": "nomatch"
}maxctrl create filter CAPTURE_FLTR wcar
maxctrl link service RWS-Router CAPTURE_FLTRmaxctrl call command wcar start CAPTURE_FLTR <options>maxctrl call command wcar stop CAPTURE_FLTRmaxctrl unlink service RWS-Router CAPTURE_FLTR
maxctrl destroy filter CAPTURE_FLTR[CAPTURE_FLTR]
type=filter
module=wcar
capture_duration=1h # Limit capture duration to one hour
capture_size=1Gi # Limit capture size to 1GiB
start_capture=true # Start capturing immediately after starting MaxScale[server1]
type=server
address=127.0.0.1
port=3306
[MariaDB-Monitor]
type=monitor
module=mariadbmon
servers=server1
user=maxuser
password=maxpwd
[CAPTURE_FLTR]
type=filter
module=wcar
capture_duration=1h # Limit capture duration to one hour
capture_size=1Gi # Limit capture size to 1GiB
start_capture=true # Start capturing immediately after starting MaxScale
[RWS-Router]
type=service
router=readwritesplit
cluster=MariaDB-Monitor
user=maxuser
password=maxpwd
filters=CAPTURE_FLTR
[RWS-Listener]
type=listener
service=RWS-Router
protocol=MariaDBClient
port=4006maxctrl call command wcar stop CAPTURE_FLTRtar -caf captures.tar.gz -C /var/lib/maxscale wcarmaxctrl call command wcar <command> <filter> [options]maxctrl call command wcar start CAPTURE_FLTR prefix=Scenario1 size=10Gmaxctrl call command wcar stop CAPTURE_FLTRmaxplayer summary /path/to/capture.cxCREATE USER 'maxreplay'@'%' IDENTIFIED BY 'replay-pw';
GRANT ALL ON *.* TO 'maxreplay'@'%';cd /path/to/capture-dir
tar -xaf captures.tar.gzmaxplayer replay --user maxreplay --password replay-pw --host <host:port> --output baseline-result.csv /path/to/capture.cxmaxplayer replay --user maxreplay --password replay-pw --host <host:port> --output comparison-result.csv /path/to/capture.cxmaxplayer canonicals /path/to/capture.cx > canonicals.csvmaxpostprocess canonicals.csv baseline-result.csv -o baseline-summary.json
maxpostprocess canonicals.csv comparison-result.csv -o comparison-summary.jsonmaxvisualize baseline-summary.json comparison-summary.jsonmaxplayer -u user -p pwd --speed 1.5 -i 5s -o baseline.csv capture_2024-09-06_090002.cx --help
Usage: maxplayer [OPTION]... [COMMAND] FILE
Commands: (default: replay)
summary Show a summary of the capture.
replay Replay the capture.
convert Converts the input file (either .cx or .rx) to a replay file (.rx or .csv).
canonicals List the canonical forms of the captured SQL as CSV.
dump-data Dump capture data as SQL.
show Show the SQL of one or more events.
Options:
--user User name for login to the replay server.
-u This version does not support using the actual user names
that were used during capture.
--password Only clear text passwords are supported as of yet.
-p
--host The address of the replay server in <IP>:<port> format.
-h E.g. 127.0.0.1:4006
--output The name of the output file: e.g. baseline.csv.
-o
--report Periodically report statistics of ongoing operations.
-r The option takes a duration, such as 10s.
--report-file The --report option by default writes to stdout.
-R Provide the name of the file to write to. The file will
be truncated every time it is written to, allowing for a
simple status window by running 'watch cat <path-to-file>'
in a terminal.
--speed The value is a multiplier. 2.5 is 2.5x speed and 0.5 is half speed.
-s A value of zero means no limit, or replay as fast as possible.
A multiplier of 2.5 might not have any effect as the actual time spent
depends on many factors, such as the captured volume and replay server.
--idle-wait Relates to playback speed, and can be used together with --speed.
-i During capture there can be long delays where there is no traffic.
One hour of no capture traffic would mean replay waits for one hour.
idle-wait makes it possible to move simulation time forwards when such gaps
occur. A 'gap' starts when all prior queries have fully executed.
--idle-wait takes a duration value. A negative value turns the feature off,
i.e. the one hour wait would happen.
--idle-wait 0s means time moves to the event start-time immediately
when a gap is detected, i.e., all gaps are skipped over.
--idle-wait 10s means time moves to the event start-time 10 seconds
(wall time) after the gap was detected. Shorter
gaps than 10 seconds will thus be fully waited for.
--idle-wait has a default value of 1 second.
Examples: 1h, 60m, 3600s, 3600000ms, which all define the same duration.
--query-filter Options: none, write-only, read-only. Default: none.
-f Replay can optionally apply only writes or only reads. This option is useful
once the databases to be tested have been prepared (see full documentation)
and optionally either a write-only run, or a full replay has been run.
Now multiple read-only runs against the server(s) are simple as no further
data synchronization is needed.
Note that this mode has its limitations as the query results may
be very different than what they were during capture.
--analyze Enabling this option will track the server Rows_read statistic for each query.
-A This will slow down the overall replay time. The query time measurements
are still valid, but currently this option should only be used when
it is of real value to know how many rows the server read for each query.
--verbose Verbose output. The option can be repeated for more verbosity: -vvv
-v
--version Display the version number and copyrights.
-V
input file: capture_2024-09-06_090002.cx
-h --help true
-u --user user
-p --password pwd
-H --host 127.1.1.0:3306
-o --output baseline.csv
-r --report 0ns
-R --report-file
-s --speed 1.5
-i --idle-wait 5s
-f --query-filter none
-A --analyze false
-v --verbose 0
-V --version 0.2