Ruptura_Projetada/promoção/promoção_boti_ciclo07.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np \n",
"import glob\n",
"import os \n",
"from openpyxl import load_workbook\n",
"from openpyxl.styles import PatternFill, Font\n",
"from datetime import datetime"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"hoje = datetime.today().strftime('%Y-%m-%d')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# O QUE PRECISA PRA RODAR ESSE CÓDIGO:\n",
"\n",
"# Arquivo Draft\n",
"# Arquivo Estoque\n",
"# Arquivo BI preço\n",
"# Arquivo pdv\n",
"# Arquivo Calendario\n",
"# Arquivo tabela de compra\n",
"\n",
"#Atualizar o nome da marca no filtro do estoque."
]
},
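{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional pre-flight check (a minimal sketch, assuming the same paths that are read further down): confirm the main input files/folders exist before running the rest of the notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical sanity check: verify the main input paths used below (order table, drafts, stock).\n",
"# Adjust per cycle; the other inputs (BI price, PDV, calendar) follow the same pattern.\n",
"input_paths = [\n",
"    r\"C:\\Users\\joao.herculano\\GRUPO GINSENG\\Assistência Suprimentos - 2025\\SUPRIMENTOS\\DB_PROMOÇÕES\\BOTICARIO\\C10\\TABELA DE PEDIDOS\\Pedidos Semanais Especiais - BOT - 202510.xlsx\",\n",
"    r\"C:\\Users\\joao.herculano\\GRUPO GINSENG\\Assistência Suprimentos - 2025\\SUPRIMENTOS\\BD_LANÇAMENTOS\\BOT\\BOT - C11\\atualização\\DRAFT_\",\n",
"    r\"C:\\Users\\joao.herculano\\GRUPO GINSENG\\Assistência Suprimentos - 2025\\SUPRIMENTOS\\BD_LANÇAMENTOS\\BOT\\BOT - C11\\atualização\\estoque\",\n",
"]\n",
"\n",
"for caminho in input_paths:\n",
"    print('OK' if os.path.exists(caminho) else 'MISSING', caminho)"
]
},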
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"df_tabela = pd.read_excel(r\"C:\\Users\\joao.herculano\\GRUPO GINSENG\\Assistência Suprimentos - 2025\\SUPRIMENTOS\\DB_PROMOÇÕES\\BOTICARIO\\C10\\TABELA DE PEDIDOS\\Pedidos Semanais Especiais - BOT - 202510.xlsx\")\n",
"\n",
"df_tabela = df_tabela[df_tabela['Ação revendedor'].notna() | df_tabela['Ação consumidor'].notna()]\n",
"\n",
"df_tabela = df_tabela[df_tabela['Região'] == 'NNE'] \n",
"\n",
"df_tabela = df_tabela[(df_tabela['Canal'] != 'Ecomm')]\n",
"\n",
"#df_tabela['Canal'] = np.where((df_tabela['Canal'] == \"Loja\") | (df_tabela['Canal'] == \"Todos\") | (df_tabela['Canal'] == \"Loja | VD\"),\"TODOS\",\"VD\")\n",
"\n",
"df_tabela = df_tabela[(df_tabela['Categoria'] != \"EMBALAGENS\") | (df_tabela['Categoria'] != \"SUPORTE À VENDA\")]\n",
"\n",
"df_tabela = df_tabela[df_tabela['Tipo de pedido'] == 'Semanal']\n",
"\n",
"df_tabela = df_tabela[~df_tabela['Descrição'].str.contains('PRM')]\n",
"\n",
"df_tabela = df_tabela[df_tabela['Tipo de produto']!= 'EDICAO LIMITADA']\n",
"\n",
"df_tabela['Ação revendedor'] = np.where(df_tabela['Ação revendedor'].isna(),df_tabela['Ação consumidor'],df_tabela['Ação revendedor'])\n",
"\n",
"df_tabela['Percentual de desconto revendedor'] = np.where(df_tabela['Percentual de desconto revendedor'].isna(),df_tabela['Percentual de desconto consumidor'],df_tabela['Percentual de desconto revendedor'])\n",
"\n",
"df_tabela['MATCH'] = 1\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Index(['Ciclo', 'Região', 'Canal', 'Código', 'Descrição', 'IAF',\n",
" 'Tipo de pedido', 'Foco', 'Unidade de negócio', 'Marca', 'Categoria',\n",
" 'Subcategoria', 'Quantidade por caixa', 'Tipo de promoção', 'Catálogo',\n",
" 'Tipo de produto', 'Ação consumidor',\n",
" 'Percentual de desconto consumidor', 'Ação revendedor',\n",
" 'Percentual de desconto revendedor', 'Sortimento P', 'Sortimento M',\n",
" 'Sortimento G', 'MATCH'],\n",
" dtype='object')"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_tabela.columns"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(702, 24)"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_tabela.shape"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"df_pdv = pd.read_excel(r\"C:\\Users\\joao.herculano\\GRUPO GINSENG\\Assistência Suprimentos - 2025\\SUPRIMENTOS\\DB_PROMOÇÕES\\BOTICARIO\\C09\\arquivos pra gerar\\pdvs\\PDV_ATT.xlsx\")\n",
"\n",
"df_pdv_origi = pd.read_excel(r\"C:\\Users\\joao.herculano\\GRUPO GINSENG\\Assistência Suprimentos - 2025\\SUPRIMENTOS\\DB_PROMOÇÕES\\BOTICARIO\\C09\\arquivos pra gerar\\pdvs\\PDV_ATT.xlsx\")\n",
"\n",
"df_pdv = df_pdv.rename(columns={'DESCRIÇÃO':'DESCRIÇÃO PDV'})\n",
"\n",
"df_pdv = df_pdv[df_pdv['STATUS']!=\"INATIVO\"]\n",
"\n",
"df_pdv = df_pdv.drop(columns=['REGIÃO', 'ESTADO','CIDADE','GESTÃO', 'STATUS'])\n",
"\n",
"df_pdv['PDV'] = df_pdv['PDV DESC'].str.split(\"-\").str[0].str.strip()\n",
"\n",
"df_pdv = df_pdv[df_pdv['CANAL']!='MTZ']\n",
"\n",
"#df_pdv['CANAL'] = np.where((df_pdv['CANAL']=='LJ')|(df_pdv['CANAL']=='HIB')|(df_pdv['CANAL']=='CD'),'TODOS','VD')\n",
"\n",
"df_pdv['MATCH'] = 1\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"df_pdv = df_pdv.drop(columns=['pdv como texto','PDV DESC'])"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\joao.herculano\\AppData\\Local\\Temp\\ipykernel_54556\\4129764549.py:10: DtypeWarning: Columns (7) have mixed types. Specify dtype option on import or set low_memory=False.\n",
" df_draft = pd.concat([pd.read_csv(file) for file in csv_files], ignore_index=True)\n"
]
},
{
"data": {
"text/plain": [
"(114430, 46)"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Caminho onde estão as subpastas com os arquivos CSV\n",
"\n",
"# Set the path to the folder containing CSV files\n",
"folder_path = r\"C:\\Users\\joao.herculano\\GRUPO GINSENG\\Assistência Suprimentos - 2025\\SUPRIMENTOS\\BD_LANÇAMENTOS\\BOT\\BOT - C11\\atualização\\DRAFT_\" # arquivo dos drafts\n",
"\n",
"# Pattern to match all CSV files\n",
"csv_files = glob.glob(os.path.join(folder_path, '*.csv'))\n",
"\n",
"# Read and concat all CSVs\n",
"df_draft = pd.concat([pd.read_csv(file) for file in csv_files], ignore_index=True)\n",
"\n",
"df_draft.shape\n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"df_draft = df_draft.drop(columns=['Descrição','Compra inteligente semanal/Sugestão de compra',\n",
" 'Compra inteligente Próximo Ciclo',\n",
" 'Compra inteligente Próximo Ciclo + 1','Planograma', 'Quantidade por caixa', 'Preço Sell In', 'Quantidade',\n",
" 'Item analisado', 'Subcategoria',\n",
" 'Lançamento', 'Desativação',\n",
" 'Promoção Próximo Ciclo', 'Categoria'])"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"\n",
"\n",
"# Caminho onde estão as subpastas com os arquivos CSV\n",
"pasta_entrada = r\"C:\\Users\\joao.herculano\\GRUPO GINSENG\\Assistência Suprimentos - 2025\\SUPRIMENTOS\\BD_LANÇAMENTOS\\BOT\\BOT - C11\\atualização\\estoque\"\n",
"\n",
"# Lista todas as subpastas dentro de \"ESTOQUE\"\n",
"subpastas = [os.path.join(pasta_entrada, d) for d in os.listdir(pasta_entrada) if os.path.isdir(os.path.join(pasta_entrada, d))]\n",
"\n",
"df_list = []\n",
"\n",
"# Percorre todas as subpastas\n",
"for subpasta in subpastas:\n",
" arquivos = [f for f in os.listdir(subpasta) if f.endswith(\".csv\")]\n",
" nome_pasta = os.path.basename(subpasta) # Obtém o nome da pasta\n",
"\n",
" for arquivo in arquivos:\n",
" caminho_arquivo = os.path.join(subpasta, arquivo)\n",
" try:\n",
" df = pd.read_csv(caminho_arquivo, encoding=\"utf-8\", low_memory=False) # Melhor para grandes volumes de dados\n",
" df[\"Arquivo_Origem\"] = arquivo # Adiciona o nome do arquivo de origem\n",
" df[\"Pasta_Origem\"] = nome_pasta # Adiciona o nome da pasta de origem\n",
" df_list.append(df)\n",
" except Exception as e:\n",
" print(f\"Erro ao ler o arquivo {arquivo}: {e}\")\n",
"\n",
"if df_list:\n",
" df_estoque = pd.concat(df_list, ignore_index=True)\n",
"\n",
"df_estoque['PDV'] = df_estoque['PDV'].astype(str)\n",
"\n",
"df_estoque['SKU_FINAL'] = np.where(df_estoque['SKU_PARA'] == \"-\", df_estoque['SKU'], df_estoque['SKU_PARA'])\n",
"\n",
"df_estoque['SKU_FINAL']=df_estoque['SKU_FINAL'].astype(str)\n"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Index(['Ciclo', 'Região', 'Canal', 'Código', 'Descrição', 'IAF',\n",
" 'Tipo de pedido', 'Foco', 'Unidade de negócio', 'Marca', 'Categoria',\n",
" 'Subcategoria', 'Quantidade por caixa', 'Tipo de promoção', 'Catálogo',\n",
" 'Tipo de produto', 'Ação consumidor',\n",
" 'Percentual de desconto consumidor', 'Ação revendedor',\n",
" 'Percentual de desconto revendedor', 'Sortimento P', 'Sortimento M',\n",
" 'Sortimento G', 'MATCH'],\n",
" dtype='object')"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_tabela.columns"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"df_tabela = pd.merge(left=df_tabela,right=df_estoque[['SKU','SKU_FINAL']],left_on='Código',right_on='SKU',how='inner')\n",
"\n",
"df_tabela['Código'] = df_tabela['SKU_FINAL']\n",
"\n",
"df_tabela = df_tabela.drop(columns=['SKU','SKU_FINAL'])\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"df_estoque = df_estoque.drop(columns=['DESCRICAO', 'CATEGORIA', 'CLASSE', 'FASES PRODUTO',\n",
" 'LANCAMENTO', 'DESATIVACAO','COBERTURA ALVO',\n",
" 'ESTOQUE DE SEGURANCA','COBERTURA PROJETADA', \n",
" 'Pasta_Origem'])"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>SKU</th>\n",
" <th>SKU_PARA</th>\n",
" <th>PDV</th>\n",
" <th>ESTOQUE ATUAL</th>\n",
" <th>ESTOQUE EM TRANSITO</th>\n",
" <th>PEDIDO PENDENTE</th>\n",
" <th>DDV PREVISTO</th>\n",
" <th>COBERTURA ATUAL</th>\n",
" <th>COBERTURA ATUAL + TRANSITO</th>\n",
" <th>Arquivo_Origem</th>\n",
" <th>SKU_FINAL</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>90495</td>\n",
" <td>-</td>\n",
" <td>20005</td>\n",
" <td>100.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>BOT.csv</td>\n",
" <td>90495</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>90246</td>\n",
" <td>-</td>\n",
" <td>4560</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>BOT.csv</td>\n",
" <td>90246</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>90246</td>\n",
" <td>-</td>\n",
" <td>5699</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>BOT.csv</td>\n",
" <td>90246</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>90246</td>\n",
" <td>-</td>\n",
" <td>12522</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>BOT.csv</td>\n",
" <td>90246</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>90246</td>\n",
" <td>-</td>\n",
" <td>12817</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>BOT.csv</td>\n",
" <td>90246</td>\n",
" </tr>\n",
" <tr>\n",
" <th>...</th>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>441216</th>\n",
" <td>1594</td>\n",
" <td>-</td>\n",
" <td>20995</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>QDB.csv</td>\n",
" <td>1594</td>\n",
" </tr>\n",
" <tr>\n",
" <th>441217</th>\n",
" <td>1594</td>\n",
" <td>-</td>\n",
" <td>20998</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>QDB.csv</td>\n",
" <td>1594</td>\n",
" </tr>\n",
" <tr>\n",
" <th>441218</th>\n",
" <td>1594</td>\n",
" <td>-</td>\n",
" <td>21001</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>QDB.csv</td>\n",
" <td>1594</td>\n",
" </tr>\n",
" <tr>\n",
" <th>441219</th>\n",
" <td>1594</td>\n",
" <td>-</td>\n",
" <td>21278</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>QDB.csv</td>\n",
" <td>1594</td>\n",
" </tr>\n",
" <tr>\n",
" <th>441220</th>\n",
" <td>1594</td>\n",
" <td>-</td>\n",
" <td>21383</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>QDB.csv</td>\n",
" <td>1594</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"<p>441221 rows × 11 columns</p>\n",
"</div>"
],
"text/plain": [
" SKU SKU_PARA PDV ESTOQUE ATUAL ESTOQUE EM TRANSITO \\\n",
"0 90495 - 20005 100.0 0.0 \n",
"1 90246 - 4560 0.0 NaN \n",
"2 90246 - 5699 0.0 NaN \n",
"3 90246 - 12522 0.0 NaN \n",
"4 90246 - 12817 0.0 NaN \n",
"... ... ... ... ... ... \n",
"441216 1594 - 20995 0.0 0.0 \n",
"441217 1594 - 20998 0.0 0.0 \n",
"441218 1594 - 21001 0.0 0.0 \n",
"441219 1594 - 21278 0.0 0.0 \n",
"441220 1594 - 21383 0.0 0.0 \n",
"\n",
" PEDIDO PENDENTE DDV PREVISTO COBERTURA ATUAL \\\n",
"0 0.0 NaN NaN \n",
"1 0.0 NaN NaN \n",
"2 0.0 NaN NaN \n",
"3 0.0 NaN NaN \n",
"4 0.0 NaN NaN \n",
"... ... ... ... \n",
"441216 0.0 NaN NaN \n",
"441217 0.0 NaN NaN \n",
"441218 0.0 NaN NaN \n",
"441219 0.0 NaN NaN \n",
"441220 0.0 NaN NaN \n",
"\n",
" COBERTURA ATUAL + TRANSITO Arquivo_Origem SKU_FINAL \n",
"0 NaN BOT.csv 90495 \n",
"1 NaN BOT.csv 90246 \n",
"2 NaN BOT.csv 90246 \n",
"3 NaN BOT.csv 90246 \n",
"4 NaN BOT.csv 90246 \n",
"... ... ... ... \n",
"441216 NaN QDB.csv 1594 \n",
"441217 NaN QDB.csv 1594 \n",
"441218 NaN QDB.csv 1594 \n",
"441219 NaN QDB.csv 1594 \n",
"441220 NaN QDB.csv 1594 \n",
"\n",
"[441221 rows x 11 columns]"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_estoque"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"c:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\openpyxl\\styles\\stylesheet.py:237: UserWarning: Workbook contains no default style, apply openpyxl's default\n",
" warn(\"Workbook contains no default style, apply openpyxl's default\")\n"
]
}
],
"source": [
"df_bi_preco = pd.read_excel(r\"C:\\Users\\joao.herculano\\GRUPO GINSENG\\Assistência Suprimentos - 2025\\SUPRIMENTOS\\DB_PROMOÇÕES\\BOTICARIO\\C09\\arquivos pra gerar\\preços bi\\TABELA DE PREÇOS (4).xlsx\")\n",
"\n",
"df_bi_preco = df_bi_preco.drop(columns=['Descrição','Tipo Preço','CATEGORIA','LINHA','MARCA'])\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(44744, 31)"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_final = pd.merge(left=df_tabela,right=df_pdv,on='MATCH',how='inner')\n",
"\n",
"df_final = df_final.drop_duplicates()\n",
"\n",
"df_final.shape"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(44744, 62)"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_final['PDV'] = df_final['PDV'].astype('Int64')\n",
"df_final['Código'] = df_final['Código'].astype('Int64')\n",
"\n",
"\n",
"df_final = pd.merge(left=df_final,right=df_draft,left_on=['PDV','Código'],right_on=['PDV','SKU'],how='left')\n",
"df_final.shape"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"26447 2.0\n",
"Name: Histórico de Vendas do Ciclo 202505, dtype: float64"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_final[(df_final['Código'] == 52023) & (df_final['PDV'] == 23712)]['Histórico de Vendas do Ciclo 202505']"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"df_final = df_final.drop(columns=['Sortimento P', 'Sortimento M',\n",
" 'Sortimento G','MARCA','SKU'])"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
"calendario = pd.read_excel(r\"C:\\Users\\joao.herculano\\GRUPO GINSENG\\Assistência Suprimentos - 2025\\SUPRIMENTOS\\BD_LANÇAMENTOS\\BASE DE DADOS LANÇAMENTO\\BOT\\CICLO 9\\CALENDARIO_CICLO\\Ciclo_Expandido_com_Datas.xlsx\")\n"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
"#ignorando a PDV que ainda não está online\n",
"df_pdv = df_pdv[df_pdv['DESCRIÇÃO PDV'] != '23813-COMERCIO-HIB VALENTE']"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**ALTERAR NOME DA COLUNA \"ARQUIVO_ORIGEM\" PARA UMA DAS OPÇÕES ABAIXO:**\n",
"\n",
"*BOT.csv* \n",
"\n",
"*EUD.csv*\n",
"\n",
"*QDB.csv*"
]
},
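{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of that step, assuming the note above means keeping only one brand's stock file in `df_estoque` (the `Arquivo_Origem` column holds the source file name and is what `marca_promo` is derived from later):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical brand filter on the stock data (assumption: one of the file names listed above)\n",
"marca_arquivo = 'BOT.csv'  # or 'EUD.csv' / 'QDB.csv'\n",
"\n",
"df_estoque_marca = df_estoque[df_estoque['Arquivo_Origem'] == marca_arquivo]\n",
"df_estoque_marca['Arquivo_Origem'].value_counts()"
]
},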
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Ciclo</th>\n",
" <th>INICIO CICLO</th>\n",
" <th>FIM CICLO</th>\n",
" <th>DURAÇÃO</th>\n",
" <th>Date</th>\n",
" <th>NUM_CICLO</th>\n",
" <th>ANO_CICLO</th>\n",
" <th>CICLOMAIS2</th>\n",
" <th>dias_ate_inicio</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>2199</th>\n",
" <td>C202510</td>\n",
" <td>2025-06-30</td>\n",
" <td>2025-07-20</td>\n",
" <td>21</td>\n",
" <td>2025-06-30</td>\n",
" <td>10</td>\n",
" <td>C2025</td>\n",
" <td>C202512</td>\n",
" <td>26</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Ciclo INICIO CICLO FIM CICLO DURAÇÃO Date NUM_CICLO \\\n",
"2199 C202510 2025-06-30 2025-07-20 21 2025-06-30 10 \n",
"\n",
" ANO_CICLO CICLOMAIS2 dias_ate_inicio \n",
"2199 C2025 C202512 26 "
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"calendario['Date'] = pd.to_datetime(calendario['Date'])\n",
"\n",
"# Get today (normalized to midnight)\n",
"today = pd.Timestamp(\"today\").normalize()\n",
"\n",
"calendario['NUM_CICLO'] = calendario['Ciclo'].str[-2:].astype(int)\n",
"calendario['ANO_CICLO'] = calendario['Ciclo'].str[0:5]\n",
"\n",
"\n",
"calendario = calendario[calendario['MARCA'] == \"BOTICARIO\"]\n",
"\n",
"calendario = calendario.drop(columns='MARCA')\n",
"\n",
"calendario['CICLOMAIS2'] = calendario['ANO_CICLO'].astype(str) + (calendario['NUM_CICLO'].astype(int) + 2).astype(str).str.zfill(2) # >>>>>>>>> MUDAR PRA CICLO CORRETO \n",
"\n",
"ciclo_mais2 = calendario[calendario['Date'].dt.normalize() == today]['CICLOMAIS2'].iloc[0]\n",
"\n",
"\n",
"# Filter rows where date matches today\n",
"filtered_calendario = calendario[calendario['Ciclo'] == ciclo_mais2][:1]\n",
"\n",
"\n",
"filtered_calendario['dias_ate_inicio'] = filtered_calendario['INICIO CICLO'].iloc[0] - today\n",
"\n",
"filtered_calendario['dias_ate_inicio'] = filtered_calendario['dias_ate_inicio'].dt.days.astype(int)\n",
"\n",
"filtered_calendario\n"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
"filtered_calendario['MATCH'] = 1"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [],
"source": [
"df_pdv['UF'] = np.where(df_pdv['UF'] == 'VDC','BA',df_pdv['UF'])"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [],
"source": [
"#df_tabela = df_tabela[(df_tabela['Tipo de promoção'] == \"Revendedor\" ) | (df_tabela['Tipo de promoção'] == \"Promoções\") |(df_tabela['Tipo de promoção'] == \"Promoções | Revendedor\" )]"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"outputs": [],
"source": [
"df_draft['PDV'] = df_draft['PDV'].astype(str)"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(44744, 62)"
]
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_final = pd.merge(left=df_final, right=filtered_calendario[['Ciclo','INICIO CICLO','FIM CICLO','DURAÇÃO','MATCH','dias_ate_inicio']], on='MATCH',how='inner')\n",
"df_final.shape"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(54425, 72)"
]
},
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_final['Código'] = df_final['Código'].astype('Int64') \n",
"df_final['PDV'] = df_final['PDV'].astype('Int64') \n",
"\n",
"df_estoque['PDV'] = df_estoque['PDV'].astype('Int64') \n",
"df_estoque['SKU_FINAL'] = df_estoque['SKU_FINAL'].astype('Int64') \n",
"\n",
"df_final = pd.merge(left=df_final,right=df_estoque,right_on=['PDV','SKU_FINAL'],left_on=['PDV','Código'],how='left')\n",
"df_final.shape"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(54425, 76)"
]
},
"execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_final['Código'] = df_final['Código'].astype('str')\n",
"\n",
"df_bi_preco['SKU2'] = df_bi_preco['SKU2'].astype('str')\n",
"\n",
"df_final = pd.merge(left=df_final,right=df_bi_preco,right_on=['UF','SKU2'],left_on=['UF','Código'],how='left')\n",
"df_final.shape "
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(54514, 80)"
]
},
"execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_bi_preco['SKU1'] = df_bi_preco['SKU1'].astype(str).str.replace('.0','',regex=False) \n",
"\n",
"df_final = pd.merge(left=df_final,right=df_bi_preco[['SKU1', 'SKU2', 'UF', 'PC', 'PV']],right_on=['UF','SKU1'],left_on=['UF','Código'],how='left')\n",
"df_final.shape "
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
"df_final['PRECO DE COMPRA'] = np.where(~df_final['PC_x'].isna(),df_final['PC_x'],df_final['PC_y'])\n",
"\n",
"df_final['PRECO DE VENDA'] = np.where(~df_final['PV_x'].isna(),df_final['PV_x'],df_final['PV_y'])\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"CANAL\n",
"TODOS 42336\n",
"VD 12178\n",
"Name: count, dtype: int64"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df_final['CANAL'] = np.where((df_final['CANAL'] == 'LJ') | (df_final['CANAL'] == 'HIB'), \"TODOS\" , np.where((df_final['CANAL'] == 'CD') | (df_final['CANAL'] == 'VD'), \"VD\", df_final['CANAL']))\n",
"\n",
"df_final['CANAL'].value_counts()"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[31m---------------------------------------------------------------------------\u001b[39m",
"\u001b[31mKeyboardInterrupt\u001b[39m Traceback (most recent call last)",
"\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[34]\u001b[39m\u001b[32m, line 5\u001b[39m\n\u001b[32m 1\u001b[39m df_estoque = df_estoque.rename(columns={\u001b[33m'\u001b[39m\u001b[33mSKU_FINAL\u001b[39m\u001b[33m'\u001b[39m:\u001b[33m'\u001b[39m\u001b[33mSKU_PARA_VALIDACAO\u001b[39m\u001b[33m'\u001b[39m})\n\u001b[32m 3\u001b[39m df_estoque[\u001b[33m'\u001b[39m\u001b[33mSKU_PARA_VALIDACAO\u001b[39m\u001b[33m'\u001b[39m] = df_estoque[\u001b[33m'\u001b[39m\u001b[33mSKU_PARA_VALIDACAO\u001b[39m\u001b[33m'\u001b[39m].astype(\u001b[33m'\u001b[39m\u001b[33mInt64\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m----> \u001b[39m\u001b[32m5\u001b[39m df_final = \u001b[43mpd\u001b[49m\u001b[43m.\u001b[49m\u001b[43mmerge\u001b[49m\u001b[43m(\u001b[49m\u001b[43m \u001b[49m\u001b[43mleft\u001b[49m\u001b[43m=\u001b[49m\u001b[43m \u001b[49m\u001b[43mdf_final\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mright\u001b[49m\u001b[43m \u001b[49m\u001b[43m=\u001b[49m\u001b[43m \u001b[49m\u001b[43mdf_estoque\u001b[49m\u001b[43m[\u001b[49m\u001b[43m[\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mSKU_PARA_VALIDACAO\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mArquivo_Origem\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mleft_on\u001b[49m\u001b[43m=\u001b[49m\u001b[43m \u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mSKU\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mright_on\u001b[49m\u001b[43m=\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mSKU_PARA_VALIDACAO\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mhow\u001b[49m\u001b[43m=\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mleft\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[32m 7\u001b[39m df_final = df_final.drop_duplicates()\n",
"\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\pandas\\core\\reshape\\merge.py:184\u001b[39m, in \u001b[36mmerge\u001b[39m\u001b[34m(left, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator, validate)\u001b[39m\n\u001b[32m 169\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 170\u001b[39m op = _MergeOperation(\n\u001b[32m 171\u001b[39m left_df,\n\u001b[32m 172\u001b[39m right_df,\n\u001b[32m (...)\u001b[39m\u001b[32m 182\u001b[39m validate=validate,\n\u001b[32m 183\u001b[39m )\n\u001b[32m--> \u001b[39m\u001b[32m184\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mop\u001b[49m\u001b[43m.\u001b[49m\u001b[43mget_result\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcopy\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcopy\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\pandas\\core\\reshape\\merge.py:888\u001b[39m, in \u001b[36m_MergeOperation.get_result\u001b[39m\u001b[34m(self, copy)\u001b[39m\n\u001b[32m 884\u001b[39m \u001b[38;5;28mself\u001b[39m.left, \u001b[38;5;28mself\u001b[39m.right = \u001b[38;5;28mself\u001b[39m._indicator_pre_merge(\u001b[38;5;28mself\u001b[39m.left, \u001b[38;5;28mself\u001b[39m.right)\n\u001b[32m 886\u001b[39m join_index, left_indexer, right_indexer = \u001b[38;5;28mself\u001b[39m._get_join_info()\n\u001b[32m--> \u001b[39m\u001b[32m888\u001b[39m result = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_reindex_and_concat\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 889\u001b[39m \u001b[43m \u001b[49m\u001b[43mjoin_index\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mleft_indexer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mright_indexer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcopy\u001b[49m\n\u001b[32m 890\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 891\u001b[39m result = result.__finalize__(\u001b[38;5;28mself\u001b[39m, method=\u001b[38;5;28mself\u001b[39m._merge_type)\n\u001b[32m 893\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.indicator:\n",
"\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\pandas\\core\\reshape\\merge.py:879\u001b[39m, in \u001b[36m_MergeOperation._reindex_and_concat\u001b[39m\u001b[34m(self, join_index, left_indexer, right_indexer, copy)\u001b[39m\n\u001b[32m 877\u001b[39m left.columns = llabels\n\u001b[32m 878\u001b[39m right.columns = rlabels\n\u001b[32m--> \u001b[39m\u001b[32m879\u001b[39m result = \u001b[43mconcat\u001b[49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[43mleft\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mright\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maxis\u001b[49m\u001b[43m=\u001b[49m\u001b[32;43m1\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcopy\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 880\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m result\n",
"\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\pandas\\core\\reshape\\concat.py:395\u001b[39m, in \u001b[36mconcat\u001b[39m\u001b[34m(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy)\u001b[39m\n\u001b[32m 380\u001b[39m copy = \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[32m 382\u001b[39m op = _Concatenator(\n\u001b[32m 383\u001b[39m objs,\n\u001b[32m 384\u001b[39m axis=axis,\n\u001b[32m (...)\u001b[39m\u001b[32m 392\u001b[39m sort=sort,\n\u001b[32m 393\u001b[39m )\n\u001b[32m--> \u001b[39m\u001b[32m395\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mop\u001b[49m\u001b[43m.\u001b[49m\u001b[43mget_result\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\pandas\\core\\reshape\\concat.py:684\u001b[39m, in \u001b[36m_Concatenator.get_result\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 680\u001b[39m indexers[ax] = obj_labels.get_indexer(new_labels)\n\u001b[32m 682\u001b[39m mgrs_indexers.append((obj._mgr, indexers))\n\u001b[32m--> \u001b[39m\u001b[32m684\u001b[39m new_data = \u001b[43mconcatenate_managers\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 685\u001b[39m \u001b[43m \u001b[49m\u001b[43mmgrs_indexers\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mnew_axes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconcat_axis\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mbm_axis\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mcopy\u001b[49m\n\u001b[32m 686\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 687\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m.copy \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m using_copy_on_write():\n\u001b[32m 688\u001b[39m new_data._consolidate_inplace()\n",
"\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\pandas\\core\\internals\\concat.py:131\u001b[39m, in \u001b[36mconcatenate_managers\u001b[39m\u001b[34m(mgrs_indexers, axes, concat_axis, copy)\u001b[39m\n\u001b[32m 124\u001b[39m \u001b[38;5;66;03m# Assertions disabled for performance\u001b[39;00m\n\u001b[32m 125\u001b[39m \u001b[38;5;66;03m# for tup in mgrs_indexers:\u001b[39;00m\n\u001b[32m 126\u001b[39m \u001b[38;5;66;03m# # caller is responsible for ensuring this\u001b[39;00m\n\u001b[32m 127\u001b[39m \u001b[38;5;66;03m# indexers = tup[1]\u001b[39;00m\n\u001b[32m 128\u001b[39m \u001b[38;5;66;03m# assert concat_axis not in indexers\u001b[39;00m\n\u001b[32m 130\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m concat_axis == \u001b[32m0\u001b[39m:\n\u001b[32m--> \u001b[39m\u001b[32m131\u001b[39m mgrs = \u001b[43m_maybe_reindex_columns_na_proxy\u001b[49m\u001b[43m(\u001b[49m\u001b[43maxes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmgrs_indexers\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mneeds_copy\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 132\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m mgrs[\u001b[32m0\u001b[39m].concat_horizontal(mgrs, axes)\n\u001b[32m 134\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(mgrs_indexers) > \u001b[32m0\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m mgrs_indexers[\u001b[32m0\u001b[39m][\u001b[32m0\u001b[39m].nblocks > \u001b[32m0\u001b[39m:\n",
"\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\pandas\\core\\internals\\concat.py:230\u001b[39m, in \u001b[36m_maybe_reindex_columns_na_proxy\u001b[39m\u001b[34m(axes, mgrs_indexers, needs_copy)\u001b[39m\n\u001b[32m 220\u001b[39m mgr = mgr.reindex_indexer(\n\u001b[32m 221\u001b[39m axes[i],\n\u001b[32m 222\u001b[39m indexers[i],\n\u001b[32m (...)\u001b[39m\u001b[32m 227\u001b[39m use_na_proxy=\u001b[38;5;28;01mTrue\u001b[39;00m, \u001b[38;5;66;03m# only relevant for i==0\u001b[39;00m\n\u001b[32m 228\u001b[39m )\n\u001b[32m 229\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m needs_copy \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m indexers:\n\u001b[32m--> \u001b[39m\u001b[32m230\u001b[39m mgr = \u001b[43mmgr\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcopy\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 232\u001b[39m new_mgrs.append(mgr)\n\u001b[32m 233\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m new_mgrs\n",
"\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\pandas\\core\\internals\\managers.py:604\u001b[39m, in \u001b[36mBaseBlockManager.copy\u001b[39m\u001b[34m(self, deep)\u001b[39m\n\u001b[32m 601\u001b[39m res._blklocs = \u001b[38;5;28mself\u001b[39m._blklocs.copy()\n\u001b[32m 603\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m deep:\n\u001b[32m--> \u001b[39m\u001b[32m604\u001b[39m \u001b[43mres\u001b[49m\u001b[43m.\u001b[49m\u001b[43m_consolidate_inplace\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 605\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m res\n",
"\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\pandas\\core\\internals\\managers.py:1791\u001b[39m, in \u001b[36mBlockManager._consolidate_inplace\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 1789\u001b[39m \u001b[38;5;28mself\u001b[39m._is_consolidated = \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[32m 1790\u001b[39m \u001b[38;5;28mself\u001b[39m._known_consolidated = \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1791\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_rebuild_blknos_and_blklocs\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[36mFile \u001b[39m\u001b[32minternals.pyx:755\u001b[39m, in \u001b[36mpandas._libs.internals.BlockManager._rebuild_blknos_and_blklocs\u001b[39m\u001b[34m()\u001b[39m\n",
"\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\pandas\\core\\internals\\base.py:84\u001b[39m, in \u001b[36mDataManager.shape\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 82\u001b[39m \u001b[38;5;129m@property\u001b[39m\n\u001b[32m 83\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mshape\u001b[39m(\u001b[38;5;28mself\u001b[39m) -> Shape:\n\u001b[32m---> \u001b[39m\u001b[32m84\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mtuple\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43max\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43max\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43maxes\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\joao.herculano\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\pandas\\core\\internals\\base.py:84\u001b[39m, in \u001b[36m<genexpr>\u001b[39m\u001b[34m(.0)\u001b[39m\n\u001b[32m 82\u001b[39m \u001b[38;5;129m@property\u001b[39m\n\u001b[32m 83\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mshape\u001b[39m(\u001b[38;5;28mself\u001b[39m) -> Shape:\n\u001b[32m---> \u001b[39m\u001b[32m84\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mtuple\u001b[39m(\u001b[38;5;28mlen\u001b[39m(ax) \u001b[38;5;28;01mfor\u001b[39;00m ax \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m.axes)\n",
"\u001b[31mKeyboardInterrupt\u001b[39m: "
]
}
],
"source": [
"df_estoque = df_estoque.rename(columns={'SKU_FINAL':'SKU_PARA_VALIDACAO'})\n",
"\n",
"df_estoque['SKU_PARA_VALIDACAO'] = df_estoque['SKU_PARA_VALIDACAO'].astype('Int64')\n",
"\n",
"df_final = pd.merge( left= df_final, right = df_estoque[['SKU_PARA_VALIDACAO','Arquivo_Origem']], left_on= 'SKU', right_on='SKU_PARA_VALIDACAO', how='left')\n",
"\n",
"df_final = df_final.drop_duplicates()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final = df_final.drop(columns=['SKU1_x','SKU1_y','SKU2_x','SKU2_y','PC_x', 'PV_x','PC_y', 'PV_y','Subcategoria',\n",
"'Carteira Bloqueada Para Novos Pedidos',\n",
"'Quantidade por caixa'\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final['UFPRODUTO'] = df_final['UF'].astype(str) + df_final['SKU'].astype(str)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final['Projeção Próximo Ciclo + 1'] =df_final['Projeção Próximo Ciclo + 1'] - df_final['Projeção Próximo Ciclo'] # projeção do ciclo em estudo"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final['Data Prevista Regularização'] = df_final['Data Prevista Regularização'].astype(str).replace('0','REGULAR')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final = df_final.rename(columns={'Compra inteligente Próximo Ciclo + 1':'Compra inteligente Próximo Ciclo','Arquivo_Origem': 'MARCA'})"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final = df_final[~df_final['Marca'].isna()]\n",
"df_final['Marca'].isna().sum()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final = df_final.drop_duplicates()\n",
"df_final.shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final[df_final.columns[26:43]] = df_final[df_final.columns[26:43]].fillna(0)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final.columns[26:44]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Define list of target columns\n",
"sales_2024_cols = df_final.columns[26:44]\n",
"# Create a new column with the row-wise max\n",
"df_final['PICO DE VENDAS 2024'] = df_final[sales_2024_cols].max(axis=1)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final.columns[37:44]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vendas_6_meses = df_final.columns[37:44]\n",
"\n",
"df_final['Pico Vendas Ultimos 6 ciclos'] = df_final[vendas_6_meses].max(axis=1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Define as colunas mensais\n",
"colunas_mensais = df_final.columns[26:43]\n",
"\n",
"# Agrupa por PDV e calcula crescimento médio por PDV\n",
"def calcular_crescimento(grupo):\n",
" soma_mensal = grupo[colunas_mensais].sum() # soma por mês\n",
" variacao_mensal = soma_mensal.pct_change().dropna() # variação percentual mês a mês\n",
" variacao_mensal = variacao_mensal[np.isfinite(variacao_mensal)]\n",
"\n",
" if len(variacao_mensal) == 0:\n",
" return pd.Series({'CRESCIMENTO': np.nan})\n",
"\n",
" media = variacao_mensal.mean()\n",
" desvio = variacao_mensal.std()\n",
"\n",
" limite_sup = media + 2 * desvio\n",
" limite_inf = media - 2 * desvio\n",
"\n",
" variacoes_filtradas = variacao_mensal[variacao_mensal.between(limite_inf, limite_sup)]\n",
" crescimento = round(variacoes_filtradas.mean(), 4)\n",
" return pd.Series({'CRESCIMENTO': crescimento})\n",
"\n",
"# Aplica a função por PDV\n",
"crescimento_por_pdv = df_final.groupby('PDV').apply(calcular_crescimento)\n",
"\n",
"# Merge do resultado de volta no dataframe original\n",
"df_final = df_final.merge(crescimento_por_pdv, on='PDV', how='left')\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Suponha que os meses estão nas colunas 10 a 26 (17 colunas = 17 meses)\n",
"colunas_mensais = df_final.columns[26:43]\n",
"\n",
"# Passo 1: Soma todas as linhas (itens) por mês → resultado: total por mês\n",
"soma_mensal = df_final[colunas_mensais].sum()\n",
"\n",
"# Passo 2: Calcula a variação percentual de um mês para o outro\n",
"variacao_mensal = soma_mensal.pct_change()\n",
"variacao_mensal = variacao_mensal.dropna()\n",
"\n",
"variacao_mensal = variacao_mensal[np.isfinite(variacao_mensal)]\n",
"\n",
"# Passo 3: Calcula a média da variação (ignorando o primeiro NaN)\n",
"media_variacao = variacao_mensal[1:].mean()\n",
"\n",
"# Calcula média e desvio padrão\n",
"media = variacao_mensal.mean()\n",
"desvio = variacao_mensal.std()\n",
"\n",
"# Define limite (ex: 2 desvios padrão)\n",
"limite_superior = media + 2 * desvio\n",
"limite_inferior = media - 2 * desvio\n",
"\n",
"# Filtra dados dentro do limite\n",
"filtro = variacao_mensal.between(limite_inferior, limite_superior)\n",
"df_filtrado = variacao_mensal[filtro]\n",
"CRESCIMENTO = round(df_filtrado.mean(),4)\n",
"\n",
"df_final['CRESCIMENTO_GERAL'] = CRESCIMENTO\n",
"\n",
"CRESCIMENTO\n"
]
},
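{
"cell_type": "markdown",
"metadata": {},
"source": [
"The growth figure in the two cells above is the mean of the month-over-month percentage changes after discarding changes more than 2 standard deviations away from the mean. A small self-contained check of that logic with made-up numbers (not real sales data):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Toy example of the outlier-filtered growth metric (made-up monthly totals)\n",
"serie_exemplo = pd.Series([100, 110, 121, 133, 146, 161, 177, 195, 214, 1070, 235])\n",
"var_exemplo = serie_exemplo.pct_change().dropna()\n",
"var_exemplo = var_exemplo[np.isfinite(var_exemplo)]\n",
"\n",
"limite_inf = var_exemplo.mean() - 2 * var_exemplo.std()\n",
"limite_sup = var_exemplo.mean() + 2 * var_exemplo.std()\n",
"\n",
"round(var_exemplo[var_exemplo.between(limite_inf, limite_sup)].mean(), 4)  # mean change after dropping values outside the bounds"
]
},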
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vendas_todos_historicos = df_final.columns[26:44]\n",
"\n",
"df_final['MEDIANA DO HISTÓRICO'] = df_final[vendas_todos_historicos].median(axis=1)\n",
"\n",
"df_final['MEDIA DO HISTÓRICO'] = df_final[vendas_todos_historicos].mean(axis=1)\n",
"\n",
"medi = df_final.groupby(['CANAL'])['MEDIANA DO HISTÓRICO'].max().reset_index()\n",
"medi = medi.rename(columns={'MEDIANA DO HISTÓRICO':'med_por_canal'})\n",
"\n",
"df_final = pd.merge(left=df_final, right=medi,on='CANAL',how='inner')\n",
"\n",
"medi"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final.columns[28:29].str.split(\" \")[0][-1]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"mesmo_ciclo_ano_passado = df_final.columns[28:29]\n",
"ciclo_ano_passado = df_final.columns[28:29].str.split(\" \")[0][-1]\n",
"df_final[ciclo_ano_passado] = df_final[mesmo_ciclo_ano_passado]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final['CRESCIMENTO_FINAL'] = df_final['CRESCIMENTO_GERAL'] + df_final['CRESCIMENTO'] #crescimento do pdv\n",
"\n",
"df_final['CRESCIMENTO_FINAL'] = np.where(df_final['CRESCIMENTO_GERAL'] + df_final['CRESCIMENTO']>0.8,0.8,df_final['CRESCIMENTO_GERAL'] + df_final['CRESCIMENTO'])\n",
"\n",
"df_final['CRESCIMENTO_FINAL'] = np.where(df_final['CRESCIMENTO_GERAL'] + df_final['CRESCIMENTO']<0,0,df_final['CRESCIMENTO_GERAL'] + df_final['CRESCIMENTO'])\n",
"\n",
"df_final['MEDIANA DO HISTÓRICO'] = np.where(df_final['MEDIANA DO HISTÓRICO']==0,df_final['MEDIA DO HISTÓRICO'] ,df_final['MEDIANA DO HISTÓRICO'])\n",
"\n",
"# Primeiro cálculo intermediário\n",
"df_final['PV GINSENG'] = np.where(df_final['CRESCIMENTO_FINAL'] * df_final[ciclo_ano_passado] + df_final[ciclo_ano_passado] <1,\n",
" round(df_final['CRESCIMENTO_FINAL'] * df_final['MEDIANA DO HISTÓRICO']+ df_final['MEDIANA DO HISTÓRICO'],0), \n",
" round(df_final['CRESCIMENTO_FINAL']*df_final[ciclo_ano_passado]+df_final[ciclo_ano_passado],0))\n",
"\n",
"\n",
"df_final.shape"
]
},
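{
"cell_type": "markdown",
"metadata": {},
"source": [
"A worked toy example of the `PV GINSENG` rule above (made-up numbers): grow last year's same-cycle sales by `CRESCIMENTO_FINAL`; when that grown value would fall below 1 unit, use the history median as the base instead."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Worked toy example of the projection rule (hypothetical values, not real data)\n",
"crescimento_exemplo = 0.25            # CRESCIMENTO_FINAL for this PDV\n",
"vendas_ciclo_ano_passado = 8          # same cycle last year\n",
"mediana_historico = 4                 # fallback base\n",
"\n",
"base = vendas_ciclo_ano_passado if crescimento_exemplo * vendas_ciclo_ano_passado + vendas_ciclo_ano_passado >= 1 else mediana_historico\n",
"round(base * (1 + crescimento_exemplo))  # projected units for this PDV/SKU -> 10"
]
},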
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final.columns[26:39]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final = df_final.rename(columns={df_final.columns[39]: \"C-4\", df_final.columns[40]: \"C-3\",df_final.columns[41]: \"C-2\",df_final.columns[42]: \"C-1\"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final.drop(columns=df_final.columns[26:39], inplace=True)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# List all columns except the two\n",
"cols_to_group_by = df_final.columns.difference(['DDV PREVISTO', 'COBERTURA ATUAL'])\n",
"\n",
"# Group and aggregate\n",
"df_final_dedup = (\n",
" df_final\n",
" .groupby(list(cols_to_group_by), dropna=False)[['DDV PREVISTO', 'COBERTURA ATUAL']]\n",
" .max()\n",
" .reset_index()\n",
")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['DDV PREVISTO'] = df_final_dedup['DDV PREVISTO'].fillna(0.01)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup[(df_final['PDV'] == 23712)]['C-3']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['DDV PREVISTO'] = np.where(\n",
" df_final_dedup['DDV PREVISTO'] == 0,\n",
" 0.01,\n",
" df_final_dedup['DDV PREVISTO']\n",
")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup.columns"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['EST PROJE FINAL CICLO ATUAL'] = (df_final_dedup['Estoque Atual'] + df_final_dedup['Estoque em Transito']) - round(df_final_dedup['dias_ate_inicio'] * df_final_dedup['DDV PREVISTO'],0)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['EST PROJE FINAL CICLO ATUAL'] = np.where(df_final_dedup['EST PROJE FINAL CICLO ATUAL']<0,0,df_final_dedup['EST PROJE FINAL CICLO ATUAL'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['VENDAS R$ PV GINSENG'] = df_final_dedup['PRECO DE VENDA'] * df_final_dedup['PV GINSENG']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_estoque.columns"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Columns to bring up front\n",
"priority_cols = [\n",
" 'SKU',\t'Marca',\t'INICIO CICLO',\n",
" 'FIM CICLO',\t'DURAÇÃO',\t'PRECO DE COMPRA',\t'PRECO DE VENDA',\n",
" 'UFPRODUTO',\t'Item Desativado',\t'Data Prevista Regularização',\n",
" 'ANALISTA',\t'UF',\t'CANAL',\t'PDV',\t'DESCRIÇÃO PDV',\t'Classe',\n",
" 'Descrição',\t'Categoria',\n",
"\t'ESTOQUE ATUAL', 'ESTOQUE EM TRANSITO',\t'COBERTURA ATUAL',\n",
" 'Pedido Pendente',\t'PICO DE VENDAS 2024','Pico Vendas Ultimos 6 ciclos',\n",
" 'C-4',\t'C-3',\t'C-2',\t'C-1',\t'Histórico de Vendas do Ciclo Atual',\n",
" 'Dias sem venda'\n",
"]\n",
"\n",
"# All remaining columns\n",
"other_cols = [col for col in df_final_dedup.columns if col not in priority_cols]\n",
"\n",
"# Reorder\n",
"'Ação consumidor', 'Percentual de desconto consumidor', 'Ação revendedor', 'Percentual de desconto revendedor', '202408'\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#df_final_dedup = df_final_dedup[priority_cols + other_cols]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['RBV 202406'] = df_final_dedup['PRECO DE VENDA'] * df_final_dedup[ciclo_ano_passado] "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['COB PROJETADA'] = np.where(\n",
" df_final_dedup['DDV PREVISTO'] != 0,\n",
" (df_final_dedup['EST PROJE FINAL CICLO ATUAL'] + df_final_dedup['PV GINSENG']) / df_final_dedup['DDV PREVISTO'],\n",
" 999)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup.drop(df_final_dedup.columns[39:40], axis=1, inplace=True)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup.drop(columns=['dias_ate_inicio','SKU_FINAL',\n",
" 'CRESCIMENTO'],inplace=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['MARCA'] = df_final_dedup['MARCA'].str.replace('.csv','',regex=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['COB PROJETADA'] = df_final_dedup['COB PROJETADA'].fillna(999)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"marca_promo = df_estoque['Arquivo_Origem'].iloc[0].replace('.csv','')\n",
"marca_promo"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['INICIO CICLO'] = pd.to_datetime(df_final_dedup['INICIO CICLO'], dayfirst=True).dt.strftime('%d/%m/%Y')\n",
"\n",
"df_final_dedup['FIM CICLO'] = pd.to_datetime(df_final_dedup['FIM CICLO'], dayfirst=True).dt.strftime('%d/%m/%Y')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['Percentual de desconto revendedor'] = np.where((df_final_dedup['Percentual de desconto revendedor'].isna()) & (~df_final_dedup['Percentual de desconto consumidor'].isna()),df_final_dedup['Percentual de desconto consumidor'],df_final_dedup['Percentual de desconto revendedor'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_pdv_origi['PDV'] = df_pdv_origi['PDV'].astype('Int64')\n",
"df_final_dedup['PDV'] = df_final_dedup['PDV'].astype('Int64')\n",
"\n",
"\n",
"df_final_dedup = pd.merge(left=df_final_dedup,right=df_pdv_origi[['PDV','CANAL','UF']],how='inner',on='PDV')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_vdc = pd.read_csv(r\"C:\\Users\\joao.herculano\\GRUPO GINSENG\\Assistência Suprimentos - 2025\\SUPRIMENTOS\\DB_PROMOÇÕES\\BOTICARIO\\C10\\VENDA VITORIA 2024\\VENDA VITORIA.csv\")\n",
"\n",
"df_vdc['PRODUTO'] = df_vdc['PRODUTO'].astype('Int64')\n",
"\n",
"df_final_dedup['Código'] = df_final_dedup['Código'].astype('Int64')\n",
"\n",
"df_final_dedup =pd.merge(left=df_final_dedup,right=df_vdc[['PDV GINSENG','PRODUTO',ciclo_ano_passado]],left_on= ['PDV','Código'],right_on= ['PDV GINSENG','PRODUTO'],how='left' )\n",
"\n",
"df_final_dedup['202410_y'] = df_final_dedup['202410_y'].fillna(0)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['202410_x'] = np.where(df_final_dedup['202410_y']>0,df_final_dedup['202410_y'],df_final_dedup['202410_x'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup = df_final_dedup.drop(columns=['Arquivo_Origem_x','Arquivo_Origem_y','CANAL_x','Canal',\n",
" 'Ciclo_x','Ciclo_y','DURAÇÃO','FIM CICLO','Foco','INICIO CICLO','MATCH',\n",
" 'PRECO DE COMPRA','SKU','SKU_PARA','SKU_PARA_VALIDACAO',\n",
" 'Tipo de pedido',\t'Tipo de produto','UFPRODUTO','Unidade de negócio','EST PROJE FINAL CICLO ATUAL',\n",
" 'UF_x','RBV 202406','Região','Catálogo','SKU','VENDAS R$ PV GINSENG','Data Prevista Regularização',\n",
" 'IAF', 'Item Desativado','Tipo de promoção','PDV GINSENG','PRODUTO','202410_y',\n",
" 'ESTOQUE ATUAL', 'ESTOQUE EM TRANSITO','COBERTURA ATUAL + TRANSITO',\n",
" 'DDV PREVISTO',\t'COB PROJETADA','COBERTURA ATUAL',\n",
" 'CRESCIMENTO_FINAL',\t'CRESCIMENTO_GERAL','med_por_canal','PEDIDO PENDENTE'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup = df_final_dedup.rename(columns={'CANAL_y':'CANAL','UF_y':'UF','Marca':'LINHA','202410_x':'202410'})\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup.columns"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup[(df_final['PDV'] == 23712)]['C-3']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"priority_cols = ['SUPERVISOR','ANALISTA','UF','CANAL','LINHA','PDV','DESCRIÇÃO PDV','Código','Descrição','Categoria', \n",
"'Classe','Percentual de desconto consumidor','Ação consumidor','Percentual de desconto revendedor','Ação revendedor','C-1',\n",
"'C-2', 'C-3', 'C-4','Histórico de Vendas do Ciclo Atual','Estoque Atual','Estoque em Transito','Pedido Pendente',\n",
"'Projeção Próximo Ciclo + 1','Projeção Próximo Ciclo']\n",
"\n",
"# All remaining columns\n",
"other_cols = [col for col in df_final_dedup.columns if col not in priority_cols]\n",
"\n",
"# Reorder\n",
"df_final_dedup = df_final_dedup[priority_cols + other_cols]\n",
"\n",
"df_final_dedup['SUGESTÃO ABASTECIMENTO'] = ''\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup = df_final_dedup.drop_duplicates()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup['PV GINSENG'].sum()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_final_dedup[df_final_dedup['PDV'] == 23712]['PICO DE VENDAS 2024'].isna().sum()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Export to Excel\n",
"output_file = f'C:\\\\Users\\\\joao.herculano\\\\Documents\\\\promoção_{marca_promo}_{ciclo_mais2}.{hoje}.xlsx'\n",
"with pd.ExcelWriter(output_file, engine='openpyxl') as writer:\n",
" df_final_dedup.to_excel(writer, index=False)\n",
"\n",
"# Apply styles\n",
"wb = load_workbook(output_file)\n",
"ws = wb['Sheet1']\n",
"\n",
"# Style header\n",
"header_fill = PatternFill(start_color='ADD8E6', end_color='ADD8E6', fill_type='solid') # Light Blue\n",
"header_font = Font(color='FFFFFF', bold=True) # White & Bold\n",
"\n",
"for cell in ws[1]:\n",
" cell.fill = header_fill\n",
" cell.font = header_font\n",
"\n",
"# Style rows: gray/white alternating\n",
"gray_fill = PatternFill(start_color='DDDDDD', end_color='DDDDDD', fill_type='solid') # Light gray\n",
"\n",
"for i, row in enumerate(ws.iter_rows(min_row=2, max_row=ws.max_row), start=2):\n",
" if i % 2 == 0:\n",
" for cell in row:\n",
" cell.fill = gray_fill\n",
"\n",
"# Save styled workbook\n",
"wb.save(output_file)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"coisas q saem\n",
"\n",
"Lançamento\n",
"Subcategoria\n",
"Projeção Próximo Ciclo \n",
"Promoção Próximo Ciclo\n",
"Compra inteligente semanal/Sugestão de compra\n",
"Compra inteligente Próximo Ciclo\n",
"Planograma\n",
"Carteira Bloqueada Para Novos Pedidos\n",
"Quantidade por caixa\n",
"Preço Sell In\n",
"Quantidade\n",
"Item analisado\n",
"Tipo Preço\n",
"\n",
">>>>>>>>>>>>>NAO ESTÁ PEGANDO O MERGE COM O DF_ESTOQUE\n",
"\n",
"\n",
"CRIAR PROJEÇÃO DE VENDA DO CICLO ATUAL\n",
"Compra inteligente Próximo Ciclo + 1 >>>>>>> RENAME PRA NOROMAL"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}